repo_name
stringlengths 7
90
| path
stringlengths 5
191
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 976
581k
| license
stringclasses 15
values |
|---|---|---|---|---|---|
jiangzhonglian/MachineLearning
|
src/py3.x/ml/3.DecisionTree/DTSklearn.py
|
1
|
3999
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# 原始链接: http://blog.csdn.net/lsldd/article/details/41223147
# GitHub: https://github.com/apachecn/AiLearning
import numpy as np
from sklearn import tree
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
def createDataSet():
    """Load the height/weight data set from disk.

    Returns:
        (x, y): x is the float feature matrix (height, weight) and y is a
        float vector holding 1 for 'fat' samples and 0 otherwise.
    """
    with open("data/3.DecisionTree/data.txt") as ifile:
        # each line: space-separated features followed by a fat/thin label
        rows = [line.strip().split(' ') for line in ifile]
    data = [[float(tk) for tk in row[:-1]] for row in rows]
    labels = [row[-1] for row in rows]
    # feature matrix
    x = np.array(data)
    labels = np.array(labels)
    # encode the text labels as 0/1: 'fat' -> 1, everything else -> 0
    y = np.zeros(labels.shape)
    y[labels == 'fat'] = 1
    print(data, '-------', x, '-------', labels, '-------', y)
    return x, y
def predict_train(x_train, y_train):
    """Fit a decision tree (entropy criterion) and report training accuracy.

    Returns:
        (y_pre, clf): predictions on the training set and the fitted
        classifier.
    """
    clf = tree.DecisionTreeClassifier(criterion='entropy')
    clf.fit(x_train, y_train)
    # feature_importances_: larger values mean the feature mattered more
    # in the learned splits
    print('feature_importances_: %s' % clf.feature_importances_)
    # evaluate on the training data itself
    y_pre = clf.predict(x_train)
    print(y_pre)
    print(y_train)
    print(np.mean(y_pre == y_train))
    return y_pre, clf
def show_precision_recall(x, y, clf, y_train, y_pre):
    """Print a precision/recall classification report for the full data set.

    Args:
        x: full feature matrix.
        y: full 0/1 label vector.
        clf: fitted classifier exposing predict_proba.
        y_train, y_pre: training labels and training predictions, used for
            the precision-recall curve.
    """
    precision, recall, thresholds = precision_recall_curve(y_train, y_pre)
    # positive-class ('fat') probability for every sample
    proba = clf.predict_proba(x)[:, 1]
    # BUGFIX: classification_report requires discrete predicted labels, not
    # probabilities -- passing the raw probabilities raises
    # "continuous is not supported" on current scikit-learn.  Threshold at 0.5.
    answer = (proba > 0.5).astype(int)
    # report columns: precision, recall, f1-score (their harmonic mean),
    # support (number of samples per class)
    # target_names follow the 0/1 encoding of y
    target_names = ['thin', 'fat']
    print(classification_report(y, answer, target_names=target_names))
    print(answer)
    print(y)
def show_pdf(clf):
    '''
    Visualize the fitted decision tree by writing it to a PDF file.

    Writing the tree structure: http://sklearn.lzjqsdd.com/modules/tree.html
    Mac error: pydotplus.graphviz.InvocationException: GraphViz's executables not found
    Fix: sudo brew install graphviz
    Reference: http://www.jianshu.com/p/59b510bafb4d

    NOTE(review): sklearn.externals.six was removed in scikit-learn >= 0.23,
    so this import only works on older scikit-learn -- verify the pinned
    version before relying on this function.
    '''
    # with open("testResult/tree.dot", 'w') as f:
    #     from sklearn.externals.six import StringIO
    #     tree.export_graphviz(clf, out_file=f)
    import pydotplus
    from sklearn.externals.six import StringIO
    # render the tree to DOT text in memory, then convert to PDF
    dot_data = StringIO()
    tree.export_graphviz(clf, out_file=dot_data)
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf("../../../output/3.DecisionTree/tree.pdf")
    # from IPython.display import Image
    # Image(graph.create_png())
if __name__ == '__main__':
    # load features and 0/1 labels
    x, y = createDataSet()
    ''' 拆分训练数据与测试数据, 80%做训练 20%做测试 '''
    # split: 80% of the samples train the tree, 20% are held out for testing
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
    print('拆分数据:', x_train, x_test, y_train, y_test)
    # fit the tree and get predictions on the training set
    y_pre, clf = predict_train(x_train, y_train)
    # show precision and recall over the full data set
    show_precision_recall(x, y, clf, y_train, y_pre)
    # write the tree structure out as a PDF
    show_pdf(clf)
|
gpl-3.0
|
stuliveshere/SeismicProcessing2015
|
prac1_staff/toolbox/toolbox.py
|
1
|
12170
|
import numpy as np
import matplotlib.pyplot as pylab
from matplotlib.widgets import Slider
pylab.rcParams['image.interpolation'] = 'sinc'
#==================================================
# decorators
#==================================================
def io(func):
    '''
    An I/O decorator that lets the decorated function accept and produce
    either a filename (i.e. a string) or an SU data array.

    Positional argument 0 is the input (filename or array); an optional
    positional argument 1 is an output filename.  Keyword arguments are
    passed through to the wrapped function.
    '''
    def wrapped(*args, **kwargs):
        # resolve the input: a string is a filename to read, anything else
        # is assumed to already be an SU data array
        if isinstance(args[0], str):
            workspace = read(args[0])
        else:
            workspace = args[0]
        result = func(workspace, **kwargs)
        if result is not None:
            # BUGFIX: guard len(args) so a single-argument call returning a
            # result no longer raises IndexError on args[1]
            if len(args) > 1 and isinstance(args[1], str):
                return write(result, args[1])
            return result
    return wrapped
#==================================================
# display tools
#==================================================
class KeyHandler(object):
    """Interactive seismic viewer state (Python 2).

    Steps through gathers of *dataset* with the arrow keys and redraws into
    the supplied matplotlib figure/axes.  Registered as a matplotlib
    key_press_event callback via __call__.
    """
    def __init__(self, fig, ax, dataset, kwargs):
        self.fig = fig
        self.ax = ax
        self.kwargs = kwargs
        self.dataset = dataset
        # index of the gather currently displayed
        self.start = 0
        if kwargs['primary'] == None:
            # no primary sort key: display the entire dataset at once
            self.slice = self.dataset
        else:
            # unique primary-key values define the gathers; 'step' thins them
            keys = np.unique(dataset[kwargs['primary']])
            self.keys = keys[::kwargs['step']]
            self.nkeys = self.keys.size
            self.ensemble()
        if 'clip' in kwargs and kwargs['clip'] != 0:
            self.clip = kwargs['clip']
        else:
            # default display clip: median absolute amplitude of the data
            self.clip = np.median(np.abs(self.dataset['trace']))
        print 'PySeis Seismic Viewer'
        print 'type "h" for help'
        self.draw()
    def __call__(self, e):
        # matplotlib key_press_event callback; e is the key event
        print e.xdata, e.ydata
        if e.key == "right":
            self.start += 1
            self.ensemble()
        elif e.key == "left":
            self.start -= 1
            self.ensemble()
        elif e.key == "up":
            # "hotter": shrink the clip so amplitudes saturate sooner
            self.clip /= 1.1
            print self.clip
        elif e.key == "down":
            # "colder": widen the clip
            self.clip *= 1.1
            print self.clip
        elif e.key == "h":
            print "right arrow: next gather"
            print "left arrow: last gather"
            print "up arrow: hotter"
            print "down arrow: colder"
            print "clip=", self.clip
        else:
            # unhandled key: do not redraw
            return
        self.draw()
    def draw(self):
        # wiggle display if requested; otherwise (or if the key is absent)
        # fall back to the image display
        try:
            if self.kwargs['wiggle'] == True: self.wiggle()
            else: self.ximage()
        except KeyError: self.ximage()
    def ximage(self):
        # grey-scale image of the current gather, clipped symmetrically
        self.ax.cla()
        self.im = self.ax.imshow(self.slice['trace'].T, aspect='auto', cmap='Greys', vmax =self.clip, vmin=-1*self.clip)
        try:
            self.ax.set_title('%s = %d' %(self.kwargs['primary'], self.keys[self.start]))
        except AttributeError:
            # no primary key -> no per-gather title
            pass
        self.fig.tight_layout()
        self.fig.canvas.draw()
    def wiggle(self, scale=0.05):
        # classic wiggle-trace display with variable-area fill
        self.ax.cla()
        frame = self.slice
        ns = frame['ns'][0]
        nt = frame.size
        scalar = scale*frame.size/(frame.size*self.clip) #scales the trace amplitudes relative to the number of traces
        frame['trace'][:,-1] = np.nan #set the very last value to nan. this is a lazy way to prevent wrapping
        vals = frame['trace'].ravel() #flat view of the 2d array.
        vect = np.arange(vals.size).astype(np.float) #flat index array, for correctly locating zero crossings in the flat view
        crossing = np.where(np.diff(np.signbit(vals)))[0] #index before zero crossing
        #use linear interpolation to find the zero crossing, i.e. y = mx + c.
        x1= vals[crossing]
        x2 = vals[crossing+1]
        y1 = vect[crossing]
        y2 = vect[crossing+1]
        m = (y2 - y1)/(x2-x1)
        c = y1 - m*x1
        #tack these values onto the end of the existing data
        x = np.hstack([vals, np.zeros_like(c)])
        y = np.hstack([vect, c])
        #resort the data
        order = np.argsort(y)
        #shift from amplitudes to plotting coordinates
        x_shift, y = y[order].__divmod__(ns)
        self.ax.plot(x[order] *scalar + x_shift + 1, y, 'k')
        # negative lobes are blanked so only positive lobes are filled
        x[x<0] = np.nan
        x = x[order] *scalar + x_shift + 1
        self.ax.fill(x,y, 'k', aa=True)
        self.ax.set_xlim([0,nt])
        self.ax.set_ylim([ns,0])
        try:
            self.ax.set_title('%s = %d' %(self.kwargs['primary'], self.keys[self.start]))
        except AttributeError:
            pass
        self.fig.tight_layout()
        self.fig.canvas.draw()
    def ensemble(self):
        # select the traces belonging to the current primary-key value;
        # wrap back to the first gather when stepping past the end
        try:
            self.slice = self.dataset[self.dataset[self.kwargs['primary']] == self.keys[self.start]]
        except IndexError:
            self.start = 0
@io
def display(dataset, **kwargs):
    '''
    Interactively browse *dataset* gather by gather.

    The left/right arrow keys step through gathers.  Expected kwargs:
    primary key, secondary key, step size (see KeyHandler).
    '''
    figure = pylab.figure()
    axes = figure.add_subplot(111)
    handler = KeyHandler(figure, axes, dataset, kwargs)
    figure.canvas.mpl_connect('key_press_event', handler)
def scan(dataset):
    """Print the min/max of every header field that is not identically zero.

    NOTE(review): np.result_type(dataset).descr walks the structured dtype's
    field descriptors; dataset.dtype.descr would be the more direct spelling
    -- confirm they agree for this data before changing.
    """
    print " %0-35s: %0-15s %s" %('key', 'min', 'max')
    print "========================================="
    for key in np.result_type(dataset).descr:
        # key is a (name, format) tuple; key[0] indexes the field
        a = np.amin(dataset[key[0]])
        b = np.amax(dataset[key[0]])
        # skip fields whose extremes are both zero (unused headers)
        if (a != 0) and (b != 0):
            print "%0-35s %0-15.3f %.3f" %(key, a, b)
    print "========================================="
#~ def build_vels(times, velocities, ns=1000, dt=0.001):
#~ '''builds a full velocity trace from a list of vels and times'''
#~ tx = np.linspace(dt, dt*ns, ns)
#~ vels = np.interp(tx, times, velocities)
#~ vels = np.pad(vels, (100,100), 'reflect')
#~ vels = np.convolve(np.ones(100.0)/100.0, vels, mode='same')
#~ vels = vels[100:-100]
#~ return vels
@io
def cp(workspace, **params):
    """Copy: return the workspace unchanged.

    Combined with the @io decorator this performs a file-to-file copy when
    called with input and output filenames.
    """
    return workspace
@io
def agc(workspace, window=100, **params):
    '''
    Automatic gain control.

    Divides each trace by a boxcar-smoothed envelope of its absolute
    amplitude (length *window* samples), then renormalises the whole
    dataset to unit peak amplitude.
    '''
    kernel = np.ones(window, 'f')
    # per-trace running mean of |amplitude| over the window
    envelope = np.apply_along_axis(
        lambda trace: np.convolve(np.abs(trace), kernel, mode='same'),
        axis=-1, arr=workspace['trace'])
    workspace['trace'] /= envelope
    # zero out NaN/inf produced by division where the envelope vanished
    nonfinite = ~np.isfinite(workspace['trace'])
    workspace['trace'][nonfinite] = 0
    workspace['trace'] /= np.amax(np.abs(workspace['trace']))
    return workspace
def ricker(f, length=0.512, dt=0.001):
    """Return a Ricker (Mexican hat) wavelet of peak frequency *f* Hz.

    Args:
        f: peak frequency in Hz.
        length: total wavelet duration in seconds.
        dt: sample interval in seconds.

    Returns:
        1-D numpy array of wavelet samples, trimmed of leading/trailing zeros.
    """
    # BUGFIX: np.linspace requires an integer sample count on modern numpy;
    # round (rather than truncate) so e.g. 0.512/0.001 is not floored to 511
    # by floating-point error.
    num = int(round(length / dt))
    t = np.linspace(-length/2, (length-dt)/2, num)
    # analytic Ricker wavelet: (1 - 2*pi^2*f^2*t^2) * exp(-pi^2*f^2*t^2)
    y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
    # round so tiny tail values become exact zeros, then trim them off
    y = np.around(y, 10)
    inds = np.nonzero(y)[0]
    return y[np.amin(inds):np.amax(inds)]
def conv(workspace, wavelet):
    """Convolve every trace in *workspace* with *wavelet* (same-length output).

    The 'trace' field is overwritten in place and the workspace returned.
    """
    convolved = np.apply_along_axis(
        lambda trace: np.convolve(trace, wavelet, mode='same'),
        axis=-1, arr=workspace['trace'])
    workspace['trace'] = convolved
    return workspace
@io
def fx(workspace, **params):
    """Display an f-x (frequency vs. CDP) amplitude spectrum of the data.

    Requires params['ns'] (samples per trace) and params['dt'] (sample
    interval in seconds).
    """
    f = np.abs(np.fft.rfft(workspace['trace'], axis=-1))
    # normalise each trace's spectrum by its mean amplitude
    correction = np.mean(np.abs(f), axis=-1).reshape(-1,1)
    f /= correction
    # convert to dB; [:,::-1] reverses the frequency axis for display
    f = 20.0*np.log10(f)[:,::-1]
    freq = np.fft.rfftfreq(params['ns'], params['dt'])
    print params['ns'], params['dt']
    # image extent: CDP range horizontally, frequency range vertically
    hmin = np.amin(workspace['cdp'])
    hmax = np.amax(workspace['cdp'])
    vmin = np.amin(freq)
    vmax = np.amax(freq)
    extent=[hmin,hmax,vmin,vmax]
    pylab.imshow(f.T, aspect='auto', extent=extent)
def db(data):
    """Convert linear amplitude values to decibels (20 * log10)."""
    return np.log10(data) * 20.0
@io
def slice(workspace, **params):
    """Truncate every trace to params['ns'] samples by re-typing the array.

    Note: this function shadows the built-in `slice`; the name is kept for
    backward compatibility with existing callers.
    """
    ns = params['ns']
    target_dtype = typeSU(ns)
    sliced = workspace.astype(target_dtype)
    # record the new trace length in the headers
    sliced['ns'] = ns
    return sliced
import numpy as np
# numpy dtype describing the 240-byte Seismic Unix (SU) trace header.
# Field names and widths follow the standard SU/SEG-Y trace-header layout;
# trailing comments give byte offsets where the original author noted them.
su_header_dtype = np.dtype([
    ('tracl', np.int32),
    ('tracr', np.int32),
    ('fldr', np.int32),
    ('tracf', np.int32),
    ('ep', np.int32),
    ('cdp', np.int32),
    ('cdpt', np.int32),
    ('trid', np.int16),
    ('nvs', np.int16),
    ('nhs', np.int16),
    ('duse', np.int16),
    ('offset', np.int32),
    ('gelev', np.int32),
    ('selev', np.int32),
    ('sdepth', np.int32),
    ('gdel', np.int32),
    ('sdel', np.int32),
    ('swdep', np.int32),
    ('gwdep', np.int32),
    ('scalel', np.int16),
    ('scalco', np.int16),
    ('sx', np.int32),
    ('sy', np.int32),
    ('gx', np.int32),
    ('gy', np.int32),
    ('counit', np.int16),
    ('wevel', np.int16),
    ('swevel', np.int16),
    ('sut', np.int16),
    ('gut', np.int16),
    ('sstat', np.int16),
    ('gstat', np.int16),
    ('tstat', np.int16),
    ('laga', np.int16),
    ('lagb', np.int16),
    ('delrt', np.int16),
    ('muts', np.int16),
    ('mute', np.int16),
    ('ns', np.uint16),
    ('dt', np.uint16),
    ('gain', np.int16),
    ('igc', np.int16),
    ('igi', np.int16),
    ('corr', np.int16),
    ('sfs', np.int16),
    ('sfe', np.int16),
    ('slen', np.int16),
    ('styp', np.int16),
    ('stas', np.int16),
    ('stae', np.int16),
    ('tatyp', np.int16),
    ('afilf', np.int16),
    ('afils', np.int16),
    ('nofilf', np.int16),
    ('nofils', np.int16),
    ('lcf', np.int16),
    ('hcf', np.int16),
    ('lcs', np.int16),
    ('hcs', np.int16),
    ('year', np.int16),
    ('day', np.int16),
    ('hour', np.int16),
    ('minute', np.int16),
    ('sec', np.int16),
    ('timebas', np.int16),
    ('trwf', np.int16),
    ('grnors', np.int16),
    ('grnofr', np.int16),
    ('grnlof', np.int16),
    ('gaps', np.int16),
    ('otrav', np.int16), #179,180
    ('d1', np.float32), #181,184
    ('f1', np.float32), #185,188
    ('d2', np.float32), #189,192
    ('f2', np.float32), #193, 196
    ('ShotPoint', np.int32), #197,200
    ('unscale', np.int16), #201, 204
    ('TraceValueMeasurementUnit', np.int16),
    ('TransductionConstantMantissa', np.int32),
    ('TransductionConstantPower', np.int16),
    ('TransductionUnit', np.int16),
    ('TraceIdentifier', np.int16),
    ('ScalarTraceHeader', np.int16),
    ('SourceType', np.int16),
    ('SourceEnergyDirectionMantissa', np.int32),
    ('SourceEnergyDirectionExponent', np.int16),
    ('SourceMeasurementMantissa', np.int32),
    ('SourceMeasurementExponent', np.int16),
    ('SourceMeasurementUnit', np.int16),
    ('UnassignedInt1', np.int32),
    ('ns1', np.int32),
    ])
def typeSU(ns):
    """Return the full SU trace dtype: all header fields plus an
    *ns*-sample little-endian float32 'trace' field."""
    fields = su_header_dtype.descr + [('trace', ('<f4', ns))]
    return np.dtype(fields)
def readSUheader(filename):
    """Read and parse the first SU trace header from *filename*.

    Returns a one-element structured array with dtype su_header_dtype.
    """
    # BUGFIX: use a context manager so the file handle is closed (the
    # original leaked it), and only read the header-sized prefix.
    with open(filename, 'rb') as f:
        raw = f.read(su_header_dtype.itemsize)
    # np.fromstring is deprecated; frombuffer is the modern equivalent.
    # .copy() keeps the result writable, matching fromstring's behaviour.
    return np.frombuffer(raw, dtype=su_header_dtype, count=1).copy()
def read(filename=None):
    """Read SU data from *filename*, or from stdin when no filename is given.

    Returns the structured trace array produced by readData().
    """
    if filename is None:  # identity test is the idiomatic None check
        # BUGFIX: sys was never imported at module level, so the stdin
        # fallback raised NameError; import it locally where it is needed.
        import sys
        raw = sys.stdin.read()
    else:
        # context manager closes the file (the original leaked the handle)
        with open(filename, 'rb') as f:
            raw = f.read()
    return readData(raw)
def readData(raw):
    """Parse a complete SU byte stream into a structured array of traces.

    The 'ns' field of the first trace header fixes the trace length for the
    whole file.
    """
    # np.fromstring is deprecated for binary data; frombuffer is the
    # supported replacement
    su_header = np.frombuffer(raw, dtype=su_header_dtype, count=1)
    ns = su_header['ns'][0]
    file_dtype = typeSU(ns)
    # .copy() keeps the array writable (frombuffer alone returns a read-only
    # view of the input bytes, unlike the old fromstring)
    data = np.frombuffer(raw, dtype=file_dtype).copy()
    return data
def write(data, filename=None):
    """Write SU data to *filename*, or to stdout when no filename is given."""
    if filename is None:  # identity test is the idiomatic None check
        # BUGFIX: sys was never imported at module level, so the stdout
        # fallback raised NameError; import it locally where it is needed.
        import sys
        data.tofile(sys.stdout)
    else:
        data.tofile(filename)
|
mit
|
mugizico/scikit-learn
|
examples/decomposition/plot_ica_vs_pca.py
|
306
|
3329
|
"""
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
# heavy-tailed Student-t (df=1.5) sources: highly non-Gaussian by design
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]])  # Mixing matrix
X = np.dot(S, A.T)  # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X)  # Estimate the sources
# rescale the estimated sources to unit variance for plotting
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter-plot the 2-D samples in *S* on the current axes.

    If *axis_list* is given, each entry is a 2x2 array of direction vectors
    drawn as arrows.  Note: each axis array is normalised by its std
    *in place*, mutating the caller's array.
    """
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
                color='steelblue', alpha=0.5)
    if axis_list is not None:
        colors = ['orange', 'red']
        for color, axis in zip(colors, axis_list):
            # in-place normalisation (mutates the caller's array)
            axis /= axis.std()
            x_axis, y_axis = axis
            # Trick to get legend to work
            plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
            plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
                       color=color)
    # axes cross and fixed viewport
    plt.hlines(0, -3, 3)
    plt.vlines(0, -3, 3)
    plt.xlim(-3, 3)
    plt.ylim(-3, 3)
    plt.xlabel('x')
    plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
# direction vectors: PCA principal axes and the ICA mixing-matrix columns
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
# left, bottom, right, top, wspace, hspace
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
|
bsd-3-clause
|
bpinsard/PySurfer
|
setup.py
|
4
|
3134
|
#! /usr/bin/env python
#
# Copyright (C) 2011-2014 Alexandre Gramfort
# Michael Waskom
# Scott Burns
# Martin Luessi
# Eric Larson
descr = """PySurfer: cortical surface visualization using Python."""
import os
# deal with MPL sandbox violations during easy_install
os.environ['MPLCONFIGDIR'] = '.'
# get the version, don't import surfer here so setup works on headless systems
version = None
with open(os.path.join('surfer', '__init__.py'), 'r') as fid:
    for line in (line.strip() for line in fid):
        if line.startswith('__version__'):
            # e.g. __version__ = "0.5"  ->  0.5
            version = line.split('=')[1].strip().strip('"')
            break
if version is None:
    raise RuntimeError('Could not determine version')
# distribution metadata consumed by setup() below
DISTNAME = 'pysurfer'
DESCRIPTION = descr
LONG_DESCRIPTION = descr
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = 'mwaskom@stanford.edu'
URL = 'http://pysurfer.github.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/nipy/PySurfer'
VERSION = version
def check_dependencies():
    """Verify the runtime dependencies are importable.

    Raises:
        ImportError: listing every required package that failed to import.
    """
    needed_deps = ["IPython",
                   "numpy", "scipy", "matplotlib",
                   "mayavi",
                   ]
    missing_deps = []
    for dep in needed_deps:
        try:
            __import__(dep)
        except ImportError:
            missing_deps.append(dep)
    if missing_deps:
        raise ImportError("Missing dependencies: %s" % ", ".join(missing_deps))
from setuptools import setup
if __name__ == "__main__":
    # remove a stale MANIFEST so setuptools regenerates it
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    import sys
    # skip the (import-heavy) dependency check for purely informational
    # commands such as --help, --version, egg_info and clean
    if not (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
            sys.argv[1] in ('--help-commands',
                            '--version',
                            'egg_info',
                            'clean'))):
        check_dependencies()
    setup(name=DISTNAME,
          maintainer=MAINTAINER,
          include_package_data=True,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          long_description=LONG_DESCRIPTION,
          zip_safe=False,  # the package can run out of an .egg file
          classifiers=['Intended Audience :: Science/Research',
                       'Intended Audience :: Developers',
                       'Programming Language :: Python :: 2.7',
                       'License :: OSI Approved',
                       'Programming Language :: Python',
                       'Topic :: Software Development',
                       'Topic :: Scientific/Engineering',
                       'Operating System :: Microsoft :: Windows',
                       'Operating System :: POSIX',
                       'Operating System :: Unix',
                       'Operating System :: MacOS'
                       ],
          platforms='any',
          packages=['surfer', 'surfer.tests'],
          scripts=['bin/pysurfer'],
          install_requires=['nibabel >= 1.2'],
          )
|
bsd-3-clause
|
GoogleCloudPlatform/professional-services-data-validator
|
samples/functions/main.py
|
1
|
1319
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas
import json
from yaml import dump, load, Dumper, Loader
from data_validation.data_validation import DataValidation
def _clean_dataframe(df):
rows = df.to_dict(orient="record")
for row in rows:
for key in row:
if type(row[key]) in [pandas.Timestamp]:
row[key] = str(row[key])
return json.dumps(rows)
def main(request):
    """ Handle incoming Data Validation requests.
    request (flask.Request): HTTP request object.

    Returns the validation results as a JSON string, or an error string.
    """
    # NOTE(review): every failure (missing/invalid JSON, validation error)
    # is returned as a plain string with HTTP 200 -- deliberate best-effort
    # behaviour for this sample Cloud Function; confirm before hardening.
    try:
        config = request.json["config"]
        validator = DataValidation(config)
        df = validator.execute()
        return _clean_dataframe(df)
    except Exception as e:
        return "Unknown Error: {}".format(e)
|
apache-2.0
|
rdipietro/tensorflow
|
tensorflow/examples/learn/text_classification_character_rnn.py
|
8
|
3322
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using recurrent neural networks over characters
for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(features, target):
  """Character level recurrent neural network model to predict classes.

  Args:
    features: integer byte ids, shape [batch, MAX_DOCUMENT_LENGTH].
    target: integer class labels (15 DBpedia classes).
  Returns:
    (predictions dict, loss, train_op) per the tf.contrib.learn contract.
  """
  # one-hot encode the 15 target classes and the 256 possible byte values
  target = tf.one_hot(target, 15, 1, 0)
  # BUGFIX: tf.ont_hot was a typo (AttributeError at runtime) -> tf.one_hot
  byte_list = tf.one_hot(features, 256, 1, 0)
  # split the sequence axis into a per-timestep list for the static RNN
  byte_list = tf.unstack(byte_list, axis=1)
  cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
  _, encoding = tf.nn.rnn(cell, byte_list, dtype=tf.float32)
  # classify from the final GRU state
  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      optimizer='Adam', learning_rate=0.01)
  return (
      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
      loss, train_op)
def main(unused_argv):
  """Train the character RNN on DBpedia and print test accuracy."""
  # Prepare training and testing data
  dbpedia = learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  # column 1 of the dataset holds the entity description text
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)
  # Process vocabulary: map each document to MAX_DOCUMENT_LENGTH byte ids
  char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(char_processor.fit_transform(x_train)))
  x_test = np.array(list(char_processor.transform(x_test)))
  # Build model
  classifier = learn.Estimator(model_fn=char_rnn_model)
  # Train and predict
  classifier.fit(x_train, y_train, steps=100)
  y_predicted = [
      p['class'] for p in classifier.predict(x_test, as_iterable=True)]
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true'
  )
  # flags not consumed here are forwarded to tf.app.run unchanged
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
hetland/xray
|
xray/test/test_formatting.py
|
6
|
3865
|
import numpy as np
import pandas as pd
from xray.core import formatting
from xray.core.pycompat import PY3
from . import TestCase
class TestFormatting(TestCase):
    """Unit tests for xray.core.formatting helpers (Python 2 era:
    uses assertItemsEqual / assertRaisesRegexp)."""
    def test_get_indexer_at_least_n_items(self):
        # (array shape, expected indexer selecting >= 10 leading items)
        cases = [
            ((20,), (slice(10),)),
            ((3, 20,), (0, slice(10))),
            ((2, 10,), (0, slice(10))),
            ((2, 5,), (slice(2), slice(None))),
            ((1, 2, 5,), (0, slice(2), slice(None))),
            ((2, 3, 5,), (0, slice(2), slice(None))),
            ((1, 10, 1,), (0, slice(10), slice(None))),
            ((2, 5, 1,), (slice(2), slice(None), slice(None))),
            ((2, 5, 3,), (0, slice(4), slice(None))),
            ((2, 3, 3,), (slice(2), slice(None), slice(None))),
        ]
        for shape, expected in cases:
            actual = formatting._get_indexer_at_least_n_items(shape, 10)
            self.assertEqual(expected, actual)
    def test_first_n_items(self):
        array = np.arange(100).reshape(10, 5, 2)
        # n both below and above the array size
        for n in [3, 10, 13, 100, 200]:
            actual = formatting.first_n_items(array, n)
            expected = array.flat[:n]
            self.assertItemsEqual(expected, actual)
        with self.assertRaisesRegexp(ValueError, 'at least one item'):
            formatting.first_n_items(array, 0)
    def test_format_item(self):
        # (value, expected string); string reprs differ between PY2 and PY3
        cases = [
            (pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),
            (pd.Timestamp('2000-01-01'), '2000-01-01'),
            (pd.Timestamp('NaT'), 'NaT'),
            (pd.Timedelta('10 days 1 hour'), '10 days 01:00:00'),
            (pd.Timedelta('-3 days'), '-3 days +00:00:00'),
            (pd.Timedelta('3 hours'), '0 days 03:00:00'),
            (pd.Timedelta('NaT'), 'NaT'),
            ('foo', "'foo'"),
            (u'foo', "'foo'" if PY3 else "u'foo'"),
            (b'foo', "b'foo'" if PY3 else "'foo'"),
            (1, '1'),
            (1.0, '1.0'),
        ]
        for item, expected in cases:
            actual = formatting.format_item(item)
            self.assertEqual(expected, actual)
    def test_format_items(self):
        cases = [
            (np.arange(4) * np.timedelta64(1, 'D'),
             '0 days 1 days 2 days 3 days'),
            (np.arange(4) * np.timedelta64(3, 'h'),
             '00:00:00 03:00:00 06:00:00 09:00:00'),
            (np.arange(4) * np.timedelta64(500, 'ms'),
             '00:00:00 00:00:00.500000 00:00:01 00:00:01.500000'),
            (pd.to_timedelta(['NaT', '0s', '1s', 'NaT']),
             'NaT 00:00:00 00:00:01 NaT'),
            (pd.to_timedelta(['1 day 1 hour', '1 day', '0 hours']),
             '1 days 01:00:00 1 days 00:00:00 0 days 00:00:00'),
            ([1, 2, 3], '1 2 3'),
        ]
        for item, expected in cases:
            actual = ' '.join(formatting.format_items(item))
            self.assertEqual(expected, actual)
    def test_format_array_flat(self):
        # second argument is the maximum display width in characters
        actual = formatting.format_array_flat(np.arange(100), 13)
        expected = '0 1 2 3 4 ...'
        self.assertEqual(expected, actual)
        actual = formatting.format_array_flat(np.arange(100.0), 11)
        expected = '0.0 1.0 ...'
        self.assertEqual(expected, actual)
        actual = formatting.format_array_flat(np.arange(100.0), 1)
        expected = '0.0 ...'
        self.assertEqual(expected, actual)
        actual = formatting.format_array_flat(np.arange(3), 5)
        expected = '0 1 2'
        self.assertEqual(expected, actual)
        actual = formatting.format_array_flat(np.arange(4.0), 11)
        expected = '0.0 1.0 ...'
        self.assertEqual(expected, actual)
        # a zero width still shows at least one item
        actual = formatting.format_array_flat(np.arange(4), 0)
        expected = '0 ...'
        self.assertEqual(expected, actual)
    def test_pretty_print(self):
        self.assertEqual(formatting.pretty_print('abcdefghij', 8), 'abcde...')
|
apache-2.0
|
ranaroussi/fix-yahoo-finance
|
yfinance/__init__.py
|
1
|
1330
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = "0.1.54"
__author__ = "Ran Aroussi"
from .ticker import Ticker
from .tickers import Tickers
from .multi import download
def pdr_override():
    """Monkey-patch pandas_datareader so its Yahoo entry points call
    yfinance's download instead.

    pandas_datareader stays an optional dependency: any failure (including
    it not being installed) is silently ignored.
    """
    try:
        import pandas_datareader
        for attr in ("get_data_yahoo", "get_data_yahoo_actions", "DataReader"):
            setattr(pandas_datareader.data, attr, download)
    except Exception:
        pass
__all__ = ['download', 'Ticker', 'Tickers', 'pdr_override']
|
apache-2.0
|
mblondel/scikit-learn
|
examples/decomposition/plot_faces_decomposition.py
|
204
|
4452
|
"""
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# gallery layout: 2 rows x 3 columns of 64x64 face images
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering: subtract the mean face
faces_centered = faces - faces.mean(axis=0)
# local centering: subtract each sample's own mean
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
    """Show *images* as an n_row x n_col gallery of grey-scale faces,
    each scaled symmetrically around zero."""
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for idx, component in enumerate(images):
        plt.subplot(n_row, n_col, idx + 1)
        # symmetric colour limits so positive/negative weights compare
        limit = max(component.max(), -component.min())
        plt.imshow(component.reshape(image_shape), cmap=plt.cm.gray,
                   interpolation='nearest',
                   vmin=-limit, vmax=limit)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
# Each entry: (display name, estimator instance, center the data first?)
estimators = [
    ('Eigenfaces - RandomizedPCA',
     decomposition.RandomizedPCA(n_components=n_components, whiten=True),
     True),
    ('Non-negative components - NMF',
     decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
                       tol=5e-3, sparseness='components'),
     False),
    ('Independent components - FastICA',
     decomposition.FastICA(n_components=n_components, whiten=True),
     True),
    ('Sparse comp. - MiniBatchSparsePCA',
     decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
                                      n_iter=100, batch_size=3,
                                      random_state=rng),
     True),
    ('MiniBatchDictionaryLearning',
     decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
                                               n_iter=50, batch_size=3,
                                               random_state=rng),
     True),
    ('Cluster centers - MiniBatchKMeans',
     MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
                     max_iter=50, random_state=rng),
     True),
    ('Factor Analysis components - FA',
     decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
     True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
    print("Extracting the top %d %s..." % (n_components, name))
    t0 = time()
    data = faces
    if center:
        data = faces_centered
    estimator.fit(data)
    train_time = (time() - t0)
    print("done in %0.3fs" % train_time)
    # clustering estimators expose cluster_centers_; decompositions components_
    if hasattr(estimator, 'cluster_centers_'):
        components_ = estimator.cluster_centers_
    else:
        components_ = estimator.components_
    if hasattr(estimator, 'noise_variance_'):
        # Factor Analysis additionally yields a per-pixel noise-variance map
        plot_gallery("Pixelwise variance",
                     estimator.noise_variance_.reshape(1, -1), n_col=1,
                     n_row=1)
    plot_gallery('%s - Train time %.1fs' % (name, train_time),
                 components_[:n_components])
plt.show()
|
bsd-3-clause
|
decvalts/landlab
|
landlab/plot/video_out.py
|
1
|
8077
|
#! /usr/bin/env python
"""
This component allows creation of mp4 animations of output from Landlab.
It does so by stitching together output from the conventional Landlab
static plotting routines from plot/imshow.py.
It is compatible with all Landlab grids, though cannot handle an evolving grid
as yet.
Initialize the video object vid at the start of your code, then simply call
vid.add_frame(grid, data) each time you want to add a frame. At the end of
the model run, call vid.produce_video().
CAUTION: This component may prove *very* memory-intensive. It is recommended
that the total number of frames included in the output multiplied by the
number of pixels (nodes) in the image not exceed XXXXXXXXX.
Due to some issues with codecs in matplotlib, at the moment only .gif output
movies are recommended. If this irritates you, you can modify your own
PYTHONPATH to allow .mp4 compilation (try a google search for the warning raised
by this method for some hints). These (known) issues are apparently likely to
resolve themselves in a future release of matplotlib.
"""
import six
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from landlab.plot import imshow
class VideoPlotter(object):
    """Stitch Landlab ``imshow`` frames into an animated movie (.gif).

    Call :meth:`add_frame` each time a frame may be wanted; a copy of the
    plotted field is stored internally.  Call :meth:`produce_video` at the
    end of the run to render all stored frames with matplotlib's
    FuncAnimation.  Storing a copy per frame can be *very* memory-intensive.
    """

    def __init__(self, grid, data_centering='node', start=None, stop=None, step=None):
        self.initialize(grid, data_centering, start, stop, step)

    def initialize(self, grid, data_centering, start, stop, step):
        """
        A copy of the grid is required.

        *data_centering* controls the type of data the video will be plotting.
        It can be set to:
            'node' (default)
            'active_node'
            'core_node'
            'cell'
            'active_cell'

        Start, stop, and step control when a frame is added. They are absolute
        times in the model run. All are optional.
        """
        options_for_data_centering = ['node',
                                      'active_node',
                                      'core_node',
                                      'cell',
                                      'active_cell']
        assert data_centering in options_for_data_centering, 'data_centering is not a valid type!'
        self.grid = grid
        #self.image_list = []
        # One copy of the plotted field is appended here per accepted frame.
        self.data_list = []
        self.last_remainder = float('inf') #this controls the intervals at which to plot
        # Most recent elapsed_t seen; used to detect time going "backwards".
        self.last_t = float('-inf')
        # Absent start/stop default to accepting frames at any model time.
        if start is None:
            start = float('-inf')
        if stop is None:
            stop = float('inf')
        self.step_control_tuple = (start,stop,step)
        #initialize the plots for the vid
        # 'n' = node-centered data, 'c' = cell-centered data; select the
        # matching imshow routine that will render each frame later.
        if data_centering=='node':
            self.centering = 'n'
            self.plotfunc = imshow.imshow_node_grid
        elif data_centering=='active_node':
            self.centering = 'n'
            self.plotfunc = imshow.imshow_active_node_grid
        elif data_centering=='core_node':
            self.centering = 'n'
            self.plotfunc = imshow.imshow_core_node_grid
        elif data_centering=='cell':
            self.centering = 'c'
            self.plotfunc = imshow.imshow_cell_grid
        else:
            self.centering = 'c'
            self.plotfunc = imshow.imshow_active_cell_grid
        # Randomized figure name avoids clashing with the user's own figures.
        self.randomized_name = "my_animation_"+str(int(np.random.random()*10000))
        self.fig = plt.figure(self.randomized_name) #randomized name

    def add_frame(self, grid, data, elapsed_t, **kwds):
        """
        data can be either the data to plot (nnodes, or appropriately lengthed
        numpy array), or a string for grid field access.

        kwds can be any of the usual plotting keywords, e.g., cmap.
        """
        # Resolve a field-name string into the actual data array.
        if type(data)==str:
            if self.centering=='n':
                data_in = grid.at_node[data]
            elif self.centering=='c':
                data_in = grid.at_cell[data]
        else:
            data_in = data
        # Last-supplied plotting keywords win; reused by produce_video.
        self.kwds = kwds
        if self.last_t<elapsed_t:
            try:
                normalized_elapsed_t = elapsed_t - self.start_t
            except AttributeError:
                # First call: record the model time origin.
                self.start_t = elapsed_t
                normalized_elapsed_t = 0.
        else: #time has apparently gone "backwards"; reset the module
            #...note a *forward* jump in time wouldn't register
            self.clear_module()
            self.start_t = elapsed_t
            normalized_elapsed_t = 0.
        if self.step_control_tuple[0]<=elapsed_t<self.step_control_tuple[1]: #we're between start & stop
            if not self.step_control_tuple[2]: #no step provided
                six.print_('Adding frame to video at elapsed time %f' % elapsed_t)
                self.data_list.append(data_in.copy())
            else:
                # A frame is accepted each time elapsed time crosses a
                # multiple of step, detected via the wrap of the remainder.
                excess_fraction = normalized_elapsed_t%self.step_control_tuple[2]
                # Problems with rounding errors make this double check
                # necessary
                if excess_fraction < self.last_remainder or np.allclose(excess_fraction, self.step_control_tuple[2]):
                    six.print_('Adding frame to video at elapsed time %f' % elapsed_t)
                    self.data_list.append(data_in.copy())
                self.last_remainder = excess_fraction
        self.last_t = elapsed_t

    def produce_video(self, interval=200, repeat_delay=2000, filename='video_output.gif', override_min_max=None):
        """
        Finalize and save the video of the data.

        interval and repeat_delay are the interval between frames and the repeat
        delay before restart, both in milliseconds.

        filename is the name of the file to save in the present working
        directory. At present, only .gifs will implement reliably without
        tweaking Python's PATHs.

        override_min_max allows the user to set their own maximum and minimum
        for the scale on the plot. Use a len-2 tuple, (min, max).
        """
        six.print_("Assembling video output, may take a while...")
        # Reactivate this plotter's dedicated figure.
        plt.figure(self.randomized_name)
        #find the limits for the plot:
        if not override_min_max:
            # Fixed color scale spanning the min/max over *all* frames.
            self.min_limit = np.amin(self.data_list[0])
            self.max_limit = np.amax(self.data_list[0])
            assert len(self.data_list) > 1, 'You must include at least two frames to make an animation!'
            for i in self.data_list[1:]: #assumes there is more than one frame in the loop
                self.min_limit = min((self.min_limit, np.amin(i)))
                self.max_limit = max((self.max_limit, np.amax(i)))
        else:
            self.min_limit=override_min_max[0]
            self.max_limit=override_min_max[1]
        # Draw the colorbar once up front; frames suppress their own.
        self.fig.colorbar(self.plotfunc(self.grid, self.data_list[0],limits=(self.min_limit,self.max_limit),allow_colorbar=False, **self.kwds))
        # _yield_image generates per-frame tuples that _make_image renders.
        ani = animation.FuncAnimation(self.fig, _make_image, frames=self._yield_image, interval=interval, blit=True, repeat_delay=repeat_delay)
        ani.save(filename, fps=1000./interval)
        plt.close()

    def _yield_image(self):
        """
        Helper function designed to generate image_list items for plotting,
        rather than storing them all.
        """
        for i in self.data_list:
            #yield self.grid.node_vector_to_raster(i)
            yield (i, self.plotfunc, (self.min_limit, self.max_limit), self.grid, self.kwds)

    def clear_module(self):
        """
        Wipe all internally held data that would cause trouble if module
        were to be rerun without being reinstantiated.
        """
        self.data_list = []
def _make_image(yielded_tuple):
yielded_raster_data = yielded_tuple[0]
plotfunc = yielded_tuple[1]
limits_in = yielded_tuple[2]
grid = yielded_tuple[3]
kwds = yielded_tuple[4]
im = plotfunc(grid, yielded_raster_data, limits=limits_in, allow_colorbar=False, **kwds)
return im
|
mit
|
glenngillen/dotfiles
|
.vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/pydev_ipython/qt_for_kernel.py
|
2
|
3698
|
""" Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
    """Return the list of Qt APIs demanded by an already-imported matplotlib,
    or None when matplotlib expresses no preference.

    Raises ImportError for a backend.qt4/qt5 rcParam value we cannot map.
    """
    if mpl is None:
        return
    # #PyDev-779: In pysrc/pydev_ipython/qt_for_kernel.py, matplotlib_options should be replaced with latest from ipython
    # (i.e.: properly check backend to decide upon qt4/qt5).
    backend = mpl.rcParams.get('backend', None)
    if backend == 'Qt4Agg':
        mpqt = mpl.rcParams.get('backend.qt4', None)
        if mpqt is None:
            return None
        api = {'pyside': [QT_API_PYSIDE],
               'pyqt4': [QT_API_PYQT_DEFAULT],
               'pyqt4v2': [QT_API_PYQT]}.get(mpqt.lower())
        if api is not None:
            return api
        raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
                          mpqt)
    elif backend == 'Qt5Agg':
        mpqt = mpl.rcParams.get('backend.qt5', None)
        if mpqt is None:
            return None
        if mpqt.lower() == 'pyqt5':
            return [QT_API_PYQT5]
        raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
                          mpqt)
    # Fallback without checking backend (previous code)
    mpqt = mpl.rcParams.get('backend.qt4', None)
    if mpqt is None:
        mpqt = mpl.rcParams.get('backend.qt5', None)
    if mpqt is None:
        return None
    api = {'pyside': [QT_API_PYSIDE],
           'pyqt4': [QT_API_PYQT_DEFAULT],
           'pyqt5': [QT_API_PYQT5]}.get(mpqt.lower())
    if api is not None:
        return api
    raise ImportError("unhandled value for qt backend from matplotlib: %r" %
                      mpqt)
def get_options():
    """Return a list of acceptable QT APIs, in decreasing order of
    preference, or None when the QT_API environment variable is in charge.
    """
    # A Qt binding already imported in this process always wins.
    already_loaded = loaded_api()
    if already_loaded is not None:
        return [already_loaded]
    mpl = sys.modules.get('matplotlib', None)
    if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
        # matplotlib <= 1.0.1 only supports PyQt4 v1.
        return [QT_API_PYQT_DEFAULT]
    if os.environ.get('QT_API', None) is not None:
        # ETS variable present.  Will fallback to external.qt.
        return None
    # No ETS variable: ask matplotlib; otherwise accept any of the defaults.
    return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
# Decide which Qt binding(s) are acceptable and import the winner.  A None
# result means an explicit QT_API environment variable is in control, in
# which case the pydev_ipython.qt shim performs the import instead.
api_opts = get_options()
if api_opts is not None:
    QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
    from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
|
mit
|
astocko/statsmodels
|
statsmodels/nonparametric/_kernel_base.py
|
29
|
18238
|
"""
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
from statsmodels.compat.python import range, string_types
import copy
import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles
try:
import joblib
has_joblib = True
except ImportError:
has_joblib = False
from . import kernels
# Dispatch table mapping kernel-name strings (as used by the bandwidth and
# density routines in this package) to the kernel implementations in
# ``.kernels``.  The *_convolution, *_cdf and d_* entries are the
# convolution, CDF and derivative forms of the corresponding kernels.
kernel_func = dict(wangryzin=kernels.wang_ryzin,
                   aitchisonaitken=kernels.aitchison_aitken,
                   gaussian=kernels.gaussian,
                   aitchison_aitken_reg = kernels.aitchison_aitken_reg,
                   wangryzin_reg = kernels.wang_ryzin_reg,
                   gauss_convolution=kernels.gaussian_convolution,
                   wangryzin_convolution=kernels.wang_ryzin_convolution,
                   aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
                   gaussian_cdf=kernels.gaussian_cdf,
                   aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
                   wangryzin_cdf=kernels.wang_ryzin_cdf,
                   d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
                    ix_unord, n_sub, class_vars, randomize, bound):
    """Compute bw on subset of data.

    Fits the model named by *class_type* on a sub-sample of *data* (random
    when *randomize*, else the slice given by *bound*) and returns the
    sub-sample's per-variable scaling factor and bandwidth.

    Called from ``GenericKDE._compute_efficient_*``.

    Notes
    -----
    Needs to be outside the class in order for joblib to be able to pickle it.
    """
    if randomize:
        # In-place shuffle: the caller passes a deep copy, so this is safe.
        np.random.shuffle(data)
        sub_data = data[:n_sub, :]
    else:
        sub_data = data[bound[0]:bound[1], :]

    # Imports are local — presumably to avoid circular imports between this
    # base module and the estimator modules; TODO confirm.
    if class_type == 'KDEMultivariate':
        from .kernel_density import KDEMultivariate
        var_type = class_vars[0]
        sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
                                    defaults=EstimatorSettings(efficient=False))
    elif class_type == 'KDEMultivariateConditional':
        from .kernel_density import KDEMultivariateConditional
        k_dep, dep_type, indep_type = class_vars
        # First k_dep columns are the dependent variables, rest independent.
        endog = sub_data[:, :k_dep]
        exog = sub_data[:, k_dep:]
        sub_model = KDEMultivariateConditional(endog, exog, dep_type,
            indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
    elif class_type == 'KernelReg':
        from .kernel_regression import KernelReg
        var_type, k_vars, reg_type = class_vars
        # Column 0 is the response; remaining columns are the regressors.
        endog = _adjust_shape(sub_data[:, 0], 1)
        exog = _adjust_shape(sub_data[:, 1:], k_vars)
        sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
                              var_type=var_type, bw=bw,
                              defaults=EstimatorSettings(efficient=False))
    else:
        raise ValueError("class_type not recognized, should be one of " \
                 "{KDEMultivariate, KDEMultivariateConditional, KernelReg}")

    # Compute dispersion in next 4 lines
    if class_type == 'KernelReg':
        # Dispersion is taken over the regressors only, not the response.
        sub_data = sub_data[:, 1:]

    dispersion = _compute_min_std_IQR(sub_data)
    # fct is the n_sub-dependent factor relating bandwidth to scaling factor;
    # continuous variables use dispersion * n^(-1/(n_cvars+co)), discrete
    # ones n^(-2/(n_cvars+do)).
    fct = dispersion * n_sub**(-1. / (n_cvars + co))
    fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
    fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
    sample_scale_sub = sub_model.bw / fct  #TODO: check if correct
    bw_sub = sub_model.bw
    return sample_scale_sub, bw_sub
class GenericKDE (object):
    """
    Base class for density estimation and regression KDE classes.

    Provides bandwidth selection (normal reference rule, cross-validation
    maximum likelihood and least squares) and the sub-sampling "efficient"
    bandwidth estimation shared by the KDE and kernel regression classes.
    """
    def _compute_bw(self, bw):
        """
        Computes the bandwidth of the data.

        Parameters
        ----------
        bw: array_like or str
            If array_like: user-specified bandwidth.
            If a string, should be one of:

                - cv_ml: cross validation maximum likelihood
                - normal_reference: normal reference rule of thumb
                - cv_ls: cross validation least squares

        Notes
        -----
        The default values for bw is 'normal_reference'.
        """
        self.bw_func = dict(normal_reference=self._normal_reference,
                            cv_ml=self._cv_ml, cv_ls=self._cv_ls)
        if bw is None:
            # Nothing specified: use the rule-of-thumb default.
            bwfunc = self.bw_func['normal_reference']
            return bwfunc()

        if not isinstance(bw, string_types):
            self._bw_method = "user-specified"
            res = np.asarray(bw)
        else:
            # The user specified a bandwidth selection method
            self._bw_method = bw
            bwfunc = self.bw_func[bw]
            res = bwfunc()
        return res

    def _compute_dispersion(self, data):
        """
        Computes the measure of dispersion.

        The minimum of the standard deviation and interquartile range / 1.349

        Notes
        -----
        Reimplemented in `KernelReg`, because the first column of `data` has to
        be removed.

        References
        ----------
        See the user guide for the np package in R.
        In the notes on bwscaling option in npreg, npudens, npcdens there is
        a discussion on the measure of dispersion
        """
        return _compute_min_std_IQR(data)

    def _get_class_vars_type(self):
        """Helper method to be able to pass needed vars to _compute_subset.

        Needs to be implemented by subclasses."""
        pass

    def _compute_efficient(self, bw):
        """
        Computes the bandwidth by estimating the scaling factor (c)
        in n_res resamples of size ``n_sub`` (in `randomize` case), or by
        dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
        `randomize` is False).

        References
        ----------
        See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
        """
        if bw is None:
            self._bw_method = 'normal_reference'
        if isinstance(bw, string_types):
            self._bw_method = bw
        else:
            # A concrete (user-specified) bandwidth needs no estimation.
            self._bw_method = "user-specified"
            return bw

        nobs = self.nobs
        n_sub = self.n_sub
        # Deep copy because _compute_subset may shuffle the data in place.
        data = copy.deepcopy(self.data)
        n_cvars = self.data_type.count('c')
        co = 4  # 2*order of continuous kernel
        do = 4  # 2*order of discrete kernel
        _, ix_ord, ix_unord = _get_type_pos(self.data_type)

        # Define bounds for slicing the data
        if self.randomize:
            # randomize chooses blocks of size n_sub, independent of nobs
            bounds = [None] * self.n_res
        else:
            bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
            if nobs % n_sub > 0:
                # Last, shorter block catches the remaining observations.
                bounds.append((nobs - nobs % n_sub, nobs))

        n_blocks = self.n_res if self.randomize else len(bounds)
        sample_scale = np.empty((n_blocks, self.k_vars))
        only_bw = np.empty((n_blocks, self.k_vars))

        class_type, class_vars = self._get_class_vars_type()
        if has_joblib:
            # `res` is a list of tuples (sample_scale_sub, bw_sub)
            res = joblib.Parallel(n_jobs=self.n_jobs) \
                (joblib.delayed(_compute_subset) \
                (class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
                n_sub, class_vars, self.randomize, bounds[i]) \
                for i in range(n_blocks))
        else:
            # Serial fallback when joblib is not installed.
            res = []
            for i in range(n_blocks):
                res.append(_compute_subset(class_type, data, bw, co, do,
                                           n_cvars, ix_ord, ix_unord, n_sub,
                                           class_vars, self.randomize,
                                           bounds[i]))

        for i in range(n_blocks):
            sample_scale[i, :] = res[i][0]
            only_bw[i, :] = res[i][1]

        s = self._compute_dispersion(data)
        # Combine per-block scaling factors by median (robust) or mean.
        order_func = np.median if self.return_median else np.mean
        m_scale = order_func(sample_scale, axis=0)
        # TODO: Check if 1/5 is correct in line below!
        bw = m_scale * s * nobs**(-1. / (n_cvars + co))
        bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
        bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))

        if self.return_only_bw:
            bw = np.median(only_bw, axis=0)

        return bw

    def _set_defaults(self, defaults):
        """Sets the default values for the efficient estimation"""
        # Copy the knobs from an EstimatorSettings instance onto self.
        self.n_res = defaults.n_res
        self.n_sub = defaults.n_sub
        self.randomize = defaults.randomize
        self.return_median = defaults.return_median
        self.efficient = defaults.efficient
        self.return_only_bw = defaults.return_only_bw
        self.n_jobs = defaults.n_jobs

    def _normal_reference(self):
        """
        Returns Scott's normal reference rule of thumb bandwidth parameter.

        Notes
        -----
        See p.13 in [2] for an example and discussion.  The formula for the
        bandwidth is

        .. math:: h = 1.06n^{-1/(4+q)}

        where ``n`` is the number of observations and ``q`` is the number of
        variables.
        """
        X = np.std(self.data, axis=0)
        return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))

    def _set_bw_bounds(self, bw):
        """
        Sets bandwidth lower bound to effectively zero (1e-10), and for
        discrete values upper bound to 1.
        """
        bw[bw < 0] = 1e-10
        _, ix_ord, ix_unord = _get_type_pos(self.data_type)
        # Discrete (ordered and unordered) bandwidths are capped at 1.
        bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
        bw[ix_unord] = np.minimum(bw[ix_unord], 1.)
        return bw

    def _cv_ml(self):
        r"""
        Returns the cross validation maximum likelihood bandwidth parameter.

        Notes
        -----
        For more details see p.16, 18, 27 in Ref. [1] (see module docstring).

        Returns the bandwidth estimate that maximizes the leave-out-out
        likelihood.  The leave-one-out log likelihood function is:

        .. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})

        The leave-one-out kernel estimator of :math:`f_{-i}` is:

        .. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
                        \sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})

        where :math:`K_{h}` represents the Generalized product kernel
        estimator:

        .. math:: K_{h}(X_{i},X_{j})=\prod_{s=1}^
                        {q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
        """
        # the initial value for the optimization is the normal_reference
        h0 = self._normal_reference()
        bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
                           maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
        bw = self._set_bw_bounds(bw)  # bound bw if necessary
        return bw

    def _cv_ls(self):
        r"""
        Returns the cross-validation least squares bandwidth parameter(s).

        Notes
        -----
        For more details see pp. 16, 27 in Ref. [1] (see module docstring).

        Returns the value of the bandwidth that maximizes the integrated mean
        square error between the estimated and actual distribution.  The
        integrated mean square error (IMSE) is given by:

        .. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx

        This is the general formula for the IMSE.  The IMSE differs for
        conditional (``KDEMultivariateConditional``) and unconditional
        (``KDEMultivariate``) kernel density estimation.
        """
        h0 = self._normal_reference()
        bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
                           xtol=1e-3)
        bw = self._set_bw_bounds(bw)  # bound bw if necessary
        return bw

    def loo_likelihood(self):
        """Leave-one-out likelihood objective; implemented by subclasses."""
        raise NotImplementedError
class EstimatorSettings(object):
    """Settings container for density estimation and regression.

    Controls how bandwidth estimation behaves in the `KDEMultivariate`,
    `KDEMultivariateConditional`, `KernelReg` and `CensoredKernelReg`
    classes.

    Parameters
    ----------
    efficient: bool, optional
        If True, estimate the bandwidth efficiently by taking smaller
        sub-samples and estimating the scaling factor of each.  Useful for
        large samples (nobs >> 300) and/or multiple variables (k_vars > 3).
        If False (default), all data is used at the same time.
    randomize: bool, optional
        If True, perform the estimation on `n_res` random resamples (with
        replacement) of size `n_sub` from the full sample.  If False
        (default), slice the full sample into sub-samples of size `n_sub`
        so that all samples are used once.
    n_sub: int, optional
        Size of the sub-samples.  Default is 50.
    n_res: int, optional
        Number of random re-samples used to estimate the bandwidth; only
        has an effect if ``randomize == True``.  Default value is 25.
    return_median: bool, optional
        If True (default), use the median of all sub-sample scaling factors
        to estimate the full-sample bandwidth; if False, use the mean.
    return_only_bw: bool, optional
        If True, use the bandwidth and not the scaling factor.  This is
        *not* theoretically justified; should be used only for
        experimenting.
    n_jobs : int, optional
        Number of jobs for parallel estimation with ``joblib.Parallel``.
        Default is -1, meaning ``n_cores - 1``.  See the `joblib
        documentation <https://pythonhosted.org/joblib/parallel.html>`_ for
        more details.

    Examples
    --------
    >>> settings = EstimatorSettings(randomize=True, n_jobs=3)
    >>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
    """
    def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
                 return_median=True, return_only_bw=False, n_jobs=-1):
        # Plain attribute storage; GenericKDE._set_defaults copies these.
        self.n_jobs = n_jobs
        self.return_only_bw = return_only_bw  # TODO: remove this?
        self.return_median = return_median
        self.n_sub = n_sub
        self.n_res = n_res
        self.randomize = randomize
        self.efficient = efficient
class LeaveOneOut(object):
    """
    Generator to give leave-one-out views on X.

    Parameters
    ----------
    X : array-like
        2-D array.

    Examples
    --------
    >>> X = np.random.normal(0, 1, [10,2])
    >>> loo = LeaveOneOut(X)
    >>> for x in loo:
    ...    print(x)

    Notes
    -----
    A little lighter weight than sklearn LOO. We don't need test index.
    Also passes views on X, not the index.
    """
    def __init__(self, X):
        self.X = np.asarray(X)

    def __iter__(self):
        X = self.X
        nobs, k_vars = np.shape(X)

        for i in range(nobs):
            # Boolean mask selecting every row except row i.  Use the builtin
            # `bool` — the deprecated alias np.bool was removed in NumPy 1.24.
            index = np.ones(nobs, dtype=bool)
            index[i] = False
            yield X[index, :]
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
elif dat.ndim == 1 and k_vars == 1: # one obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
         okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
    r"""
    Returns the non-normalized Generalized Product Kernel Estimator

    Parameters
    ----------
    bw: 1-D ndarray
        The user-specified bandwidth parameters.
    data: 1D or 2-D ndarray
        The training data.
    data_predict: 1-D ndarray
        The evaluation points at which the kernel estimation is performed.
    var_type: str, optional
        The variable type (continuous, ordered, unordered).
    ckertype: str, optional
        The kernel used for the continuous variables.
    okertype: str, optional
        The kernel used for the ordered discrete variables.
    ukertype: str, optional
        The kernel used for the unordered discrete variables.
    tosum : bool, optional
        Whether or not to sum the calculated array of densities.  Default is
        True.

    Returns
    -------
    dens: array-like
        The generalized product kernel density estimator.

    Notes
    -----
    The formula for the multivariate kernel estimator for the pdf is:

    .. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
                    {n}K\left(\frac{X_{i}-x}{h}\right)

    where

    .. math:: K\left(\frac{X_{i}-x}{h}\right) =
                k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
                k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
                k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
    """
    # Pick a kernel per variable kind: continuous/ordered/unordered.
    kert_for = dict(c=ckertype, o=okertype, u=ukertype)
    # One column of kernel weights per variable, evaluated at data_predict.
    Kval = np.empty(data.shape)
    for col, vtype in enumerate(var_type):
        kern = kernel_func[kert_for[vtype]]
        Kval[:, col] = kern(bw[col], data[:, col], data_predict[col])

    # Product kernel, normalized by the continuous bandwidths only.
    is_continuous = np.array([ch == 'c' for ch in var_type])
    dens = Kval.prod(axis=1) / np.prod(bw[is_continuous])
    return dens.sum(axis=0) if tosum else dens
|
bsd-3-clause
|
jobovy/apogee-maps
|
py/plot_ah_location.py
|
1
|
8658
|
###############################################################################
# plot_ah_location: plot the range of extinctions effor a given location
###############################################################################
import os, os.path
import sys
import pickle
import numpy
import matplotlib
matplotlib.use('Agg')
from galpy.util import save_pickles, bovy_plot
from matplotlib import rc, pyplot
import mwdust
import apogee.select.apogeeSelect
from define_rcsample import get_rcsample
_PLOTDIST= True
_LW= 1.5
def plot_ah_location(location,plotname):
    """Plot the mean H-band extinction A_H versus distance toward one APOGEE
    location for four 3D dust maps — Green et al. (2015), Marshall et al.
    (2006), Drimmel et al. (2003), Sale et al. (2014) — with their 1-sigma
    spreads hatched, and save the figure.

    Parameters
    ----------
    location : int
        APOGEE location ID (e.g., 4240 is (l,b) = (30,0)).
    plotname : str
        Output filename handed to bovy_plot.bovy_end_print.

    Notes
    -----
    The APOGEE selection function is cached in ../savs/selfunc-nospdata.sav
    and the per-location extinction curves in ../savs/ah-<location>.sav, so
    repeated calls are cheap.  Maps that fail for this line of sight are
    flagged with constant curves of -1.
    """
    # Setup selection function
    selectFile= '../savs/selfunc-nospdata.sav'
    if os.path.exists(selectFile):
        with open(selectFile,'rb') as savefile:
            apo= pickle.load(savefile)
    else:
        # Setup selection function
        apo= apogee.select.apogeeSelect()
        # Delete these because they're big and we don't need them
        del apo._specdata
        del apo._photdata
        save_pickles(selectFile,apo)
    glon, glat= apo.glonGlat(location)
    glon= glon[0]
    glat= glat[0]
    ahFile= '../savs/ah-%i.sav' % location
    if not os.path.exists(ahFile):
        # Distances at which to calculate the extinction
        distmods= numpy.linspace(7.,15.5,301)
        ds= 10.**(distmods/5-2.)
        # Setup Green et al. (2015) dust map
        gd= mwdust.Green15(filter='2MASS H')
        pa, ah= gd.dust_vals_disk(glon,glat,ds,apo.radius(location))
        # pa-weighted mean and std. dev. of A_H over the pencil beam
        meanah_default= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,axis=0)/numpy.sum(pa)
        stdah_default= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
                                                *ah**2.,axis=0)\
                                      /numpy.sum(pa)-meanah_default**2.)
        # Marshall et al. (2006)
        marshall= mwdust.Marshall06(filter='2MASS H')
        try:
            pa, ah= marshall.dust_vals_disk(glon,glat,ds,apo.radius(location))
        except IndexError:
            # Marshall map does not cover this line of sight: flag with -1
            meanah_marshall= -numpy.ones_like(ds)
            stdah_marshall= -numpy.ones_like(ds)
        else:
            meanah_marshall= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,
                                       axis=0)/numpy.sum(pa)
            stdah_marshall= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
                                                     *ah**2.,axis=0)\
                                           /numpy.sum(pa)-meanah_marshall**2.)
        if True:
            # Drimmel et al. (2003)
            drimmel= mwdust.Drimmel03(filter='2MASS H')
            pa, ah= drimmel.dust_vals_disk(glon,glat,ds,apo.radius(location))
            meanah_drimmel= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,axis=0)/numpy.sum(pa)
            stdah_drimmel= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
                                                    *ah**2.,axis=0)\
                                          /numpy.sum(pa)-meanah_drimmel**2.)
        else:
            meanah_drimmel= -numpy.ones_like(ds)
            stdah_drimmel= -numpy.ones_like(ds)
        if True:
            # Sale et al. (2014)
            sale= mwdust.Sale14(filter='2MASS H')
            try:
                pa, ah= sale.dust_vals_disk(glon,glat,ds,apo.radius(location))
                meanah_sale= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,
                                       axis=0)/numpy.sum(pa)
            except (TypeError,ValueError):
                meanah_sale= -numpy.ones_like(ds)
                stdah_sale= -numpy.ones_like(ds)
            else:
                stdah_sale= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
                                                     *ah**2.,axis=0)\
                                           /numpy.sum(pa)-meanah_sale**2.)
        else:
            meanah_sale= -numpy.ones_like(ds)
            stdah_sale= -numpy.ones_like(ds)
        save_pickles(ahFile,distmods,meanah_default,stdah_default,
                     meanah_marshall,stdah_marshall,
                     meanah_drimmel,stdah_drimmel,
                     meanah_sale,stdah_sale)
    else:
        # Load the cached curves, in the same order they were pickled above
        with open(ahFile,'rb') as savefile:
            distmods= pickle.load(savefile)
            meanah_default= pickle.load(savefile)
            stdah_default= pickle.load(savefile)
            meanah_marshall= pickle.load(savefile)
            stdah_marshall= pickle.load(savefile)
            meanah_drimmel= pickle.load(savefile)
            stdah_drimmel= pickle.load(savefile)
            meanah_sale= pickle.load(savefile)
            stdah_sale= pickle.load(savefile)
    # Now plot
    bovy_plot.bovy_print(fig_height=3.)
    if _PLOTDIST:
        distmods= 10.**(distmods/5-2.)
        xrange= [0.,12.]
        xlabel=r'$D\,(\mathrm{kpc})$'
    else:
        # BUG FIX: a stray trailing comma here previously made xrange a
        # one-element tuple ([7.,15.8],) instead of a two-element list
        xrange= [7.,15.8]
        xlabel=r'$\mathrm{distance\ modulus}\ \mu$'
    ylabel=r'$A_H$'
    yrange= [0.,1.2*numpy.amax(numpy.vstack((meanah_default+stdah_default,
                                             meanah_marshall+stdah_marshall,
                                             meanah_drimmel+stdah_drimmel,
                                             meanah_sale+stdah_sale)))]
    line_default= bovy_plot.bovy_plot(distmods,meanah_default,
                                      'b-',lw=_LW,zorder=12,
                                      xrange=xrange,
                                      xlabel=xlabel,
                                      yrange=yrange,
                                      ylabel=ylabel)
    pyplot.fill_between(distmods,
                        meanah_default-stdah_default,
                        meanah_default+stdah_default,
                        hatch='/',facecolor=(0,0,0,0),
                        color='b',lw=0.25,zorder=4)
    line_marshall= bovy_plot.bovy_plot(distmods,meanah_marshall,'r-',lw=_LW,
                                       overplot=True,
                                       zorder=8)
    pyplot.fill_between(distmods,
                        meanah_marshall-stdah_marshall,
                        meanah_marshall+stdah_marshall,
                        hatch='\\',facecolor=(0,0,0,0),
                        color='r',lw=0.25,zorder=2)
    line_drimmel= bovy_plot.bovy_plot(distmods,meanah_drimmel,'-',lw=_LW,
                                      color='gold',
                                      overplot=True,
                                      zorder=7)
    pyplot.fill_between(distmods,
                        meanah_drimmel-stdah_drimmel,
                        meanah_drimmel+stdah_drimmel,
                        hatch='///',facecolor=(0,0,0,0),
                        color='gold',lw=0.25,zorder=1)
    line_sale= bovy_plot.bovy_plot(distmods,meanah_sale,'-',lw=_LW,
                                   color='c',
                                   overplot=True,
                                   zorder=9)
    pyplot.fill_between(distmods,
                        meanah_sale-stdah_sale,
                        meanah_sale+stdah_sale,
                        hatch='//',facecolor=(0,0,0,0),
                        color='c',lw=0.25,zorder=3)
    if True:
        # Overlay the observed red-clump stars toward this location;
        # 1.55 converts the catalog's AK_TARG to A_H
        data= get_rcsample()
        data= data[data['LOCATION_ID'] == location]
        bovy_plot.bovy_plot(data['RC_DIST'],data['AK_TARG']*1.55,
                            'ko',zorder=20,overplot=True,ms=2.)
    if location == 4318:
        pyplot.legend((line_default[0],line_sale[0]),
                      (r'$\mathrm{Green\ et\ al.\ (2015)}$',
                       r'$\mathrm{Sale\ et\ al.\ (2014)}$'),
                      loc='lower right',#bbox_to_anchor=(.91,.375),
                      numpoints=8,
                      prop={'size':14},
                      frameon=False)
    elif location == 4242:
        pyplot.legend((line_marshall[0],line_drimmel[0]),
                      (r'$\mathrm{Marshall\ et\ al.\ (2006)}$',
                       r'$\mathrm{Drimmel\ et\ al.\ (2003)}$'),
                      loc='lower right',#bbox_to_anchor=(.91,.375),
                      numpoints=8,
                      prop={'size':14},
                      frameon=False)
    # Label
    lcen, bcen= apo.glonGlat(location)
    if numpy.fabs(bcen) < 0.1: bcen= 0.
    bovy_plot.bovy_text(r'$(l,b) = (%.1f,%.1f)$' % (lcen,bcen),
                        top_right=True,size=16.)
    bovy_plot.bovy_end_print(plotname,dpi=300,
                             bbox_extra_artists=pyplot.gca().get_children(),
                             bbox_inches='tight')
    return None
# Command-line entry point: plot_ah_location(location_id, output_plot_name)
if __name__ == '__main__':
    #4240 is 30,0
    plot_ah_location(int(sys.argv[1]),sys.argv[2])
|
bsd-3-clause
|
santis19/fatiando
|
gallery/gravmag/eqlayer_transform.py
|
6
|
3046
|
"""
Equivalent layer for griding and upward-continuing gravity data
-------------------------------------------------------------------------
The equivalent layer is one of the best methods for griding and upward
continuing gravity data and much more. The trade-off is that performing this
requires an inversion and later forward modeling, which are more time consuming
and more difficult to tune than the standard griding and FFT-based approaches.
This example uses the equivalent layer in :mod:`fatiando.gravmag.eqlayer` to
grid and upward continue some gravity data. There are more advanced methods in
the module than the one we are showing here. They can be more efficient but
usually require more configuration.
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
from fatiando.gravmag import prism, sphere
from fatiando.gravmag.eqlayer import EQLGravity
from fatiando.inversion import Damping
from fatiando import gridder, utils, mesher
# First thing to do is make some synthetic data to test the method. We'll use a
# single prism to keep it simple
props = {'density': 500}
model = [mesher.Prism(-5000, 5000, -200, 200, 100, 4000, props)]
# The synthetic data will be generated on a random scatter of points
area = [-8000, 8000, -5000, 5000]
x, y, z = gridder.scatter(area, 300, z=0, seed=42)
# Generate some noisy data from our model
gz = utils.contaminate(prism.gz(x, y, z, model), 0.2, seed=0)
# Now for the equivalent layer. We must setup a layer of point masses where
# we'll estimate a density distribution that fits our synthetic data
layer = mesher.PointGrid(area, 500, (20, 20))
# Estimate the density using enough damping so that won't try to fit the error
eql = EQLGravity(x, y, z, gz, layer) + 1e-22*Damping(layer.size)
eql.fit()
# Now we add the estimated densities to our layer
layer.addprop('density', eql.estimate_)
# and print some statistics of how well the estimated layer fits the data
residuals = eql[0].residuals()
print("Residuals:")
print(" mean:", residuals.mean(), 'mGal')
print(" stddev:", residuals.std(), 'mGal')
# Now I can forward model gravity data anywhere we want. For interpolation, we
# calculate it on a grid. For upward continuation, at a greater height. We can
# even combine both into a single operation.
x2, y2, z2 = gridder.regular(area, (50, 50), z=-1000)
gz_up = sphere.gz(x2, y2, z2, layer)
fig, axes = plt.subplots(1, 2, figsize=(8, 6))
ax = axes[0]
ax.set_title('Original data')
ax.set_aspect('equal')
tmp = ax.tricontourf(y/1000, x/1000, gz, 30, cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0.1, aspect=30,
orientation='horizontal').set_label('mGal')
ax.plot(y/1000, x/1000, 'xk')
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
ax = axes[1]
ax.set_title('Gridded and upward continued')
ax.set_aspect('equal')
tmp = ax.tricontourf(y2/1000, x2/1000, gz_up, 30, cmap='viridis')
fig.colorbar(tmp, ax=ax, pad=0.1, aspect=30,
orientation='horizontal').set_label('mGal')
ax.set_xlabel('y (km)')
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
lhilt/scipy
|
scipy/interpolate/fitpack2.py
|
4
|
63081
|
"""
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, ravel, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
# ############### Univariate spline ####################
_curfit_messages = {1: """
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2: """
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3: """
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10: """
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
class UnivariateSpline(object):
    """
    One-dimensional smoothing spline fit to a given set of data points.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
    specifies the number of knots by specifying a smoothing condition.

    Parameters
    ----------
    x : (N,) array_like
        1-D array of independent input data. Must be increasing;
        must be strictly increasing if `s` is 0.
    y : (N,) array_like
        1-D array of dependent input data, of the same length as `x`.
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be <= 5.
        Default is k=3, a cubic spline.
    s : float or None, optional
        Positive smoothing factor used to choose the number of knots. Number
        of knots will be increased until the smoothing condition is satisfied::

            sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s

        If None (default), ``s = len(w)`` which should be a good value if
        ``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
        If 0, spline will interpolate through all data points.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    See Also
    --------
    InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
    LSQUnivariateSpline : Subclass in which knots are user-selected instead of
        being set by smoothing condition
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    **NaN handling**: If the input arrays contain ``nan`` values, the result
    is not useful, since the underlying spline fitting routines cannot deal
    with ``nan`` . A workaround is to use zero weights for not-a-number
    data points:

    >>> from scipy.interpolate import UnivariateSpline
    >>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
    >>> w = np.isnan(y)
    >>> y[w] = 0.
    >>> spl = UnivariateSpline(x, y, w=~w)

    Notice the need to replace a ``nan`` by a numerical value (precise value
    does not matter as long as the corresponding weight is zero.)

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import UnivariateSpline
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
    >>> plt.plot(x, y, 'ro', ms=5)

    Use the default value for the smoothing parameter:

    >>> spl = UnivariateSpline(x, y)
    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(xs, spl(xs), 'g', lw=3)

    Manually change the amount of smoothing:

    >>> spl.set_smoothing_factor(0.5)
    >>> plt.plot(xs, spl(xs), 'b', lw=3)
    >>> plt.show()

    """
    def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
                 ext=0, check_finite=False):
        if check_finite:
            w_finite = np.isfinite(w).all() if w is not None else True
            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
                    not w_finite):
                raise ValueError("x and y array must not contain "
                                 "NaNs or infs.")
        # Monotonicity requirement depends on whether we smooth (s > 0,
        # ties allowed) or interpolate (s == 0, strictly increasing).
        if s is None or s > 0:
            if not np.all(diff(x) >= 0.0):
                raise ValueError("x must be increasing if s > 0")
        else:
            if not np.all(diff(x) > 0.0):
                raise ValueError("x must be strictly increasing if s = 0")
        # Validate `ext` before doing the (expensive) fit.
        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        try:
            self.ext = _extrap_modes[ext]
        except KeyError:
            raise ValueError("Unknown extrapolation mode %s." % ext)
        data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
                                xe=bbox[1], s=s)
        if data[-1] == 1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        self._reset_class()

    @classmethod
    def _from_tck(cls, tck, ext=0):
        """Construct a spline object from given tck"""
        self = cls.__new__(cls)
        t, c, k = tck
        self._eval_args = tck
        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        # Only the slots needed for evaluation are populated.
        self._data = (None, None, None, None, None, k, None, len(t), t,
                      c, None, None, None, None)
        self.ext = ext
        return self

    def _reset_class(self):
        """Cache evaluation args and reclassify self from the fit's ier code."""
        data = self._data
        n, t, c, k, ier = data[7], data[8], data[9], data[5], data[-1]
        self._eval_args = t[:n], c[:n], k
        if ier == 0:
            # the spline returned has a residual sum of squares fp
            # such that abs(fp-s)/s <= tol with tol a relative
            # tolerance set to 0.001 by the program
            pass
        elif ier == -1:
            # the spline returned is an interpolating spline
            self._set_class(InterpolatedUnivariateSpline)
        elif ier == -2:
            # the spline returned is the weighted least-squares
            # polynomial of degree k. In this extreme case fp gives
            # the upper bound fp0 for the smoothing factor s.
            self._set_class(LSQUnivariateSpline)
        else:
            # error
            if ier == 1:
                self._set_class(LSQUnivariateSpline)
            message = _curfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)

    def _set_class(self, cls):
        """Switch self's class to `cls` unless self is an unknown subclass."""
        self._spline_class = cls
        if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
                              LSQUnivariateSpline):
            self.__class__ = cls
        else:
            # It's an unknown subclass -- don't change class. cf. #731
            pass

    def _reset_nest(self, data, nest=None):
        """Re-run the fit with a larger knot storage bound `nest`."""
        # NOTE(review): data[10] is fp in the _data layout; the value is
        # forwarded in the fp slot of fpcurf1 below, and the `nest` check
        # compares against it. Preserved as-is — confirm against dfitpack.
        n = data[10]
        if nest is None:
            k, m = data[5], len(data[0])
            nest = m+k+1  # this is the maximum bound for nest
        else:
            if not n <= nest:
                raise ValueError("`nest` can only be increased")
        t, c, fpint, nrdata = [np.resize(data[j], nest) for j in
                               [8, 9, 11, 12]]
        args = data[:8] + (t, c, n, fpint, nrdata, data[13])
        data = dfitpack.fpcurf1(*args)
        return data

    def set_smoothing_factor(self, s):
        """ Continue spline computation with the given smoothing
        factor s and with the knots found at the last call.

        This routine modifies the spline in place.

        """
        data = self._data
        if data[6] == -1:
            # BUG FIX: the two literals were previously concatenated without
            # a separating space ("...forLSQ spline...").
            warnings.warn('smoothing factor unchanged for '
                          'LSQ spline with fixed knots')
            return
        args = data[:6] + (s,) + data[7:]
        data = dfitpack.fpcurf1(*args)
        if data[-1] == 1:
            # nest too small, setting to maximum bound
            data = self._reset_nest(data)
        self._data = data
        self._reset_class()

    def __call__(self, x, nu=0, ext=None):
        """
        Evaluate spline (or its nu-th derivative) at positions x.

        Parameters
        ----------
        x : array_like
            A 1-D array of points at which to return the value of the smoothed
            spline or its derivatives. Note: x can be unordered but the
            evaluation is more efficient if x is (partially) ordered.
        nu : int
            The order of derivative of the spline to compute.
        ext : int
            Controls the value returned for elements of ``x`` not in the
            interval defined by the knot sequence.

            * if ext=0 or 'extrapolate', return the extrapolated value.
            * if ext=1 or 'zeros', return 0
            * if ext=2 or 'raise', raise a ValueError
            * if ext=3 or 'const', return the boundary value.

            The default value is 0, passed from the initialization of
            UnivariateSpline.

        """
        x = np.asarray(x)
        # empty input yields empty output
        if x.size == 0:
            return array([])
        # if nu is None:
        #     return dfitpack.splev(*(self._eval_args+(x,)))
        # return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
        if ext is None:
            ext = self.ext
        else:
            try:
                ext = _extrap_modes[ext]
            except KeyError:
                raise ValueError("Unknown extrapolation mode %s." % ext)
        return fitpack.splev(x, self._eval_args, der=nu, ext=ext)

    def get_knots(self):
        """ Return positions of interior knots of the spline.

        Internally, the knot vector contains ``2*k`` additional boundary knots.
        """
        data = self._data
        k, n = data[5], data[7]
        return data[8][k:n-k]

    def get_coeffs(self):
        """Return spline coefficients."""
        data = self._data
        k, n = data[5], data[7]
        return data[9][:n-k-1]

    def get_residual(self):
        """Return weighted sum of squared residuals of the spline approximation.

        This is equivalent to::

             sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)

        """
        return self._data[10]

    def integral(self, a, b):
        """ Return definite integral of the spline between two given points.

        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.

        Returns
        -------
        integral : float
            The value of the definite integral of the spline between limits.

        Examples
        --------
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 3, 11)
        >>> y = x**2
        >>> spl = UnivariateSpline(x, y)
        >>> spl.integral(0, 3)
        9.0

        which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
        of 0 and 3.

        A caveat is that this routine assumes the spline to be zero outside of
        the data limits:

        >>> spl.integral(-1, 4)
        9.0
        >>> spl.integral(-1, 0)
        0.0

        """
        return dfitpack.splint(*(self._eval_args+(a, b)))

    def derivatives(self, x):
        """ Return all derivatives of the spline at the point x.

        Parameters
        ----------
        x : float
            The point to evaluate the derivatives at.

        Returns
        -------
        der : ndarray, shape(k+1,)
            Derivatives of the orders 0 to k.

        Examples
        --------
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 3, 11)
        >>> y = x**2
        >>> spl = UnivariateSpline(x, y)
        >>> spl.derivatives(1.5)
        array([2.25, 3.0, 2.0, 0])

        """
        d, ier = dfitpack.spalde(*(self._eval_args+(x,)))
        if not ier == 0:
            raise ValueError("Error code returned by spalde: %s" % ier)
        return d

    def roots(self):
        """ Return the zeros of the spline.

        Restriction: only cubic splines are supported by fitpack.
        """
        k = self._data[5]
        if k == 3:
            z, m, ier = dfitpack.sproot(*self._eval_args[:2])
            if not ier == 0:
                # BUG FIX: the message previously said "spalde", but the
                # routine invoked here is sproot.
                raise ValueError("Error code returned by sproot: %s" % ier)
            return z[:m]
        raise NotImplementedError('finding roots unsupported for '
                                  'non-cubic splines')

    def derivative(self, n=1):
        """
        Construct a new spline representing the derivative of this spline.

        Parameters
        ----------
        n : int, optional
            Order of derivative to evaluate. Default: 1

        Returns
        -------
        spline : UnivariateSpline
            Spline of order k2=k-n representing the derivative of this
            spline.

        See Also
        --------
        splder, antiderivative

        Notes
        -----
        .. versionadded:: 0.13.0

        Examples
        --------
        This can be used for finding maxima of a curve:

        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, 10, 70)
        >>> y = np.sin(x)
        >>> spl = UnivariateSpline(x, y, k=4, s=0)

        Now, differentiate the spline and find the zeros of the
        derivative. (NB: `sproot` only works for order 3 splines, so we
        fit an order 4 spline):

        >>> spl.derivative().roots() / np.pi
        array([ 0.50000001,  1.5       ,  2.49999998])

        This agrees well with roots :math:`\\pi/2 + n\\pi` of
        :math:`\\cos(x) = \\sin'(x)`.

        """
        tck = fitpack.splder(self._eval_args, n)
        # if self.ext is 'const', derivative.ext will be 'zeros'
        ext = 1 if self.ext == 3 else self.ext
        return UnivariateSpline._from_tck(tck, ext=ext)

    def antiderivative(self, n=1):
        """
        Construct a new spline representing the antiderivative of this spline.

        Parameters
        ----------
        n : int, optional
            Order of antiderivative to evaluate. Default: 1

        Returns
        -------
        spline : UnivariateSpline
            Spline of order k2=k+n representing the antiderivative of this
            spline.

        Notes
        -----
        .. versionadded:: 0.13.0

        See Also
        --------
        splantider, derivative

        Examples
        --------
        >>> from scipy.interpolate import UnivariateSpline
        >>> x = np.linspace(0, np.pi/2, 70)
        >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
        >>> spl = UnivariateSpline(x, y, s=0)

        The derivative is the inverse operation of the antiderivative,
        although some floating point error accumulates:

        >>> spl(1.7), spl.antiderivative().derivative()(1.7)
        (array(2.1565429877197317), array(2.1565429877201865))

        Antiderivative can be used to evaluate definite integrals:

        >>> ispl = spl.antiderivative()
        >>> ispl(np.pi/2) - ispl(0)
        2.2572053588768486

        This is indeed an approximation to the complete elliptic integral
        :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:

        >>> from scipy.special import ellipk
        >>> ellipk(0.8)
        2.2572053268208538

        """
        tck = fitpack.splantider(self._eval_args, n)
        return UnivariateSpline._from_tck(tck, self.ext)
class InterpolatedUnivariateSpline(UnivariateSpline):
    """
    One-dimensional interpolating spline for a given set of data points.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data.
    Spline function passes through all provided points. Equivalent to
    `UnivariateSpline` with s=0.

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be strictly increasing
    y : (N,) array_like
        input dimension of data points
    w : (N,) array_like, optional
        Weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox=[x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    See Also
    --------
    UnivariateSpline : Superclass -- allows knots to be selected by a
        smoothing condition
    LSQUnivariateSpline : spline for which knots are user-selected
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import InterpolatedUnivariateSpline
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
    >>> spl = InterpolatedUnivariateSpline(x, y)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
    >>> plt.show()

    Notice that the ``spl(x)`` interpolates `y`:

    >>> spl.get_residual()
    0.0

    """
    def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):
        if check_finite:
            w_finite = np.isfinite(w).all() if w is not None else True
            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
                    not w_finite):
                raise ValueError("Input must not contain NaNs or infs.")
        if not np.all(diff(x) > 0.0):
            raise ValueError('x must be strictly increasing')
        # s=0 forces FITPACK to interpolate through every data point.
        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        self._data = dfitpack.fpcurf0(x, y, k, w=w, xb=bbox[0],
                                      xe=bbox[1], s=0)
        self._reset_class()
        try:
            self.ext = _extrap_modes[ext]
        except KeyError:
            raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
    """
    One-dimensional spline with explicit internal knots.

    Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
    specifies the internal knots of the spline

    Parameters
    ----------
    x : (N,) array_like
        Input dimension of data points -- must be increasing
    y : (N,) array_like
        Input dimension of data points
    t : (M,) array_like
        interior knots of the spline. Must be in ascending order and::

            bbox[0] < t[0] < ... < t[-1] < bbox[-1]

    w : (N,) array_like, optional
        weights for spline fitting. Must be positive. If None (default),
        weights are all equal.
    bbox : (2,) array_like, optional
        2-sequence specifying the boundary of the approximation interval. If
        None (default), ``bbox = [x[0], x[-1]]``.
    k : int, optional
        Degree of the smoothing spline. Must be 1 <= `k` <= 5.
        Default is k=3, a cubic spline.
    ext : int or str, optional
        Controls the extrapolation mode for elements
        not in the interval defined by the knot sequence.

        * if ext=0 or 'extrapolate', return the extrapolated value.
        * if ext=1 or 'zeros', return 0
        * if ext=2 or 'raise', raise a ValueError
        * if ext=3 or 'const', return the boundary value.

        The default value is 0.
    check_finite : bool, optional
        Whether to check that the input arrays contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination or non-sensical results) if the inputs
        do contain infinities or NaNs.
        Default is False.

    Raises
    ------
    ValueError
        If the interior knots do not satisfy the Schoenberg-Whitney conditions

    See Also
    --------
    UnivariateSpline : Superclass -- knots are specified by setting a
        smoothing condition
    InterpolatedUnivariateSpline : spline passing through all points
    splrep : An older, non object-oriented wrapping of FITPACK
    splev, sproot, splint, spalde
    BivariateSpline : A similar class for two-dimensional spline interpolation

    Notes
    -----
    The number of data points must be larger than the spline degree `k`.

    Knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.

    Examples
    --------
    >>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-3, 3, 50)
    >>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)

    Fit a smoothing spline with a pre-defined internal knots:

    >>> t = [-1, 0, 1]
    >>> spl = LSQUnivariateSpline(x, y, t)
    >>> xs = np.linspace(-3, 3, 1000)
    >>> plt.plot(x, y, 'ro', ms=5)
    >>> plt.plot(xs, spl(xs), 'g-', lw=3)
    >>> plt.show()

    Check the knot vector:

    >>> spl.get_knots()
    array([-3., -1., 0., 1., 3.])

    Constructing lsq spline using the knots from another spline:

    >>> x = np.arange(10)
    >>> s = UnivariateSpline(x, x, s=0)
    >>> s.get_knots()
    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])
    >>> knt = s.get_knots()
    >>> s1 = LSQUnivariateSpline(x, x, knt[1:-1])    # Chop 1st and last knot
    >>> s1.get_knots()
    array([ 0.,  2.,  3.,  4.,  5.,  6.,  7.,  9.])

    """
    def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):
        if check_finite:
            w_finite = np.isfinite(w).all() if w is not None else True
            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
                    not w_finite or not np.isfinite(t).all()):
                raise ValueError("Input(s) must not contain NaNs or infs.")
        if not np.all(diff(x) >= 0.0):
            raise ValueError('x must be increasing')
        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        xb = bbox[0]
        xe = bbox[1]
        if xb is None:
            xb = x[0]
        if xe is None:
            xe = x[-1]
        # Pad the interior knots with (k+1) copies of each boundary to form
        # the full FITPACK knot vector.
        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
        n = len(t)
        # Interior knots must be strictly increasing.
        if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
            raise ValueError('Interior knots t must satisfy '
                             'Schoenberg-Whitney conditions')
        if not dfitpack.fpchec(x, t, k) == 0:
            raise ValueError(_fpchec_error_string)
        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
        # fpint/nrdata are unused for fixed-knot LSQ fits -> stored as None.
        self._data = data[:-3] + (None, None, data[-1])
        self._reset_class()
        try:
            self.ext = _extrap_modes[ext]
        except KeyError:
            raise ValueError("Unknown extrapolation mode %s." % ext)
# ############### Bivariate spline ####################
class _BivariateSplineBase(object):
    """ Base class for Bivariate spline s(x,y) interpolation on the rectangle
    [xb,xe] x [yb, ye] calculated from a given set of data points
    (x,y,z).

    Subclasses are expected to set ``self.tck`` (knots tx, ty and
    coefficients c), ``self.degrees`` (kx, ky) and ``self.fp`` (weighted
    residual sum) -- see the constructors of the concrete classes below.

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    BivariateSpline :
        implementation of bivariate spline interpolation on a plane grid
    SphereBivariateSpline :
        implementation of bivariate spline interpolation on a spherical grid
    """
    def get_residual(self):
        """ Return weighted sum of squared residuals of the spline
        approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
        """
        return self.fp

    def get_knots(self):
        """ Return a tuple (tx,ty) where tx,ty contain knots positions
        of the spline with respect to x-, y-variable, respectively.
        The position of interior and additional knots are given as
        t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
        """
        return self.tck[:2]

    def get_coeffs(self):
        """ Return spline coefficients."""
        return self.tck[2]

    def __call__(self, x, y, dx=0, dy=0, grid=True):
        """
        Evaluate the spline or its derivatives at given positions.

        Parameters
        ----------
        x, y : array_like
            Input coordinates.
            If `grid` is False, evaluate the spline at points ``(x[i],
            y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
            is obeyed.
            If `grid` is True: evaluate spline at the grid points
            defined by the coordinate arrays x, y. The arrays must be
            sorted to increasing order.
            Note that the axis ordering is inverted relative to
            the output of meshgrid.
        dx : int
            Order of x-derivative

            .. versionadded:: 0.14.0
        dy : int
            Order of y-derivative

            .. versionadded:: 0.14.0
        grid : bool
            Whether to evaluate the results on a grid spanned by the
            input arrays, or at points specified by the input arrays.

            .. versionadded:: 0.14.0

        """
        x = np.asarray(x)
        y = np.asarray(y)
        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        if grid:
            # Tensor-product evaluation over the grid x (X) y.
            if x.size == 0 or y.size == 0:
                return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
            if dx or dy:
                z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by parder: %s" % ier)
            else:
                z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by bispev: %s" % ier)
        else:
            # Pointwise evaluation at (x[i], y[i]); flatten, evaluate,
            # restore the broadcast shape at the end.
            # standard Numpy broadcasting
            if x.shape != y.shape:
                x, y = np.broadcast_arrays(x, y)
            shape = x.shape
            x = x.ravel()
            y = y.ravel()
            if x.size == 0 or y.size == 0:
                return np.zeros(shape, dtype=self.tck[2].dtype)
            if dx or dy:
                z, ier = dfitpack.pardeu(tx, ty, c, kx, ky, dx, dy, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by pardeu: %s" % ier)
            else:
                z, ier = dfitpack.bispeu(tx, ty, c, kx, ky, x, y)
                if not ier == 0:
                    raise ValueError("Error code returned by bispeu: %s" % ier)
            z = z.reshape(shape)
        return z
_surfit_messages = {1: """
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2: """
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3: """
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4: """
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5: """
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10: """
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3: """
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
    """
    Base class for bivariate splines.

    This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
    the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
    of data points ``(x, y, z)``.

    This class is meant to be subclassed, not instantiated directly.
    To construct these splines, call either `SmoothBivariateSpline` or
    `LSQBivariateSpline`.

    See Also
    --------
    UnivariateSpline :
        a similar class for univariate spline interpolation
    SmoothBivariateSpline :
        to create a BivariateSpline through the given points
    LSQBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
    RectSphereBivariateSpline
    SmoothSphereBivariateSpline :
    LSQSphereBivariateSpline
    bisplrep : older wrapping of FITPACK
    bisplev : older wrapping of FITPACK
    """
    @classmethod
    def _from_tck(cls, tck):
        """Construct a spline object from given tck and degree"""
        self = cls.__new__(cls)
        if len(tck) != 5:
            raise ValueError("tck should be a 5 element tuple of tx,"
                             " ty, c, kx, ky")
        # Split the 5-tuple: knots+coefficients vs. degrees.
        self.tck = tck[:3]
        self.degrees = tck[3:]
        return self

    def ev(self, xi, yi, dx=0, dy=0):
        """
        Evaluate the spline at points

        Returns the interpolated value at ``(xi[i], yi[i]),
        i=0,...,len(xi)-1``.

        Parameters
        ----------
        xi, yi : array_like
            Input coordinates. Standard Numpy broadcasting is obeyed.
        dx : int, optional
            Order of x-derivative

            .. versionadded:: 0.14.0
        dy : int, optional
            Order of y-derivative

            .. versionadded:: 0.14.0
        """
        # Thin wrapper around __call__ with pointwise (non-grid) evaluation.
        return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)

    def integral(self, xa, xb, ya, yb):
        """
        Evaluate the integral of the spline over area [xa,xb] x [ya,yb].

        Parameters
        ----------
        xa, xb : float
            The end-points of the x integration interval.
        ya, yb : float
            The end-points of the y integration interval.

        Returns
        -------
        integ : float
            The value of the resulting integral.

        """
        tx, ty, c = self.tck[:3]
        kx, ky = self.degrees
        return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
class SmoothBivariateSpline(BivariateSpline):
    """
    Smooth bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    w : array_like, optional
        Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain. By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
        estimate of the standard deviation of ``z[i]``.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    See Also
    --------
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    LSQUnivariateSpline : to create a BivariateSpline using weighted

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.

    """
    def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
                 eps=None):
        xb, xe, yb, ye = bbox
        # First attempt with a minimal second workspace (lwrk2=1); on
        # failure ier reports the required size and we retry with it.
        nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
                                                                xb, xe, yb,
                                                                ye, kx, ky,
                                                                s=s, eps=eps,
                                                                lwrk2=1)
        if ier > 10:  # lwrk2 was too small, re-run with lwrk2=ier
            nx, tx, ny, ty, c, fp, wrk1, ier = dfitpack.surfit_smth(x, y, z, w,
                                                                    xb, xe, yb,
                                                                    ye, kx, ky,
                                                                    s=s,
                                                                    eps=eps,
                                                                    lwrk2=ier)
        if ier in [0, -1, -2]:  # normal return
            pass
        else:
            message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        # Trim the FITPACK work arrays to their used lengths.
        self.tck = tx[:nx], ty[:ny], c[:(nx-kx-1)*(ny-ky-1)]
        self.degrees = kx, ky
class LSQBivariateSpline(BivariateSpline):
    """
    Weighted least-squares bivariate spline approximation.

    Parameters
    ----------
    x, y, z : array_like
        1-D sequences of data points (order is not important).
    tx, ty : array_like
        Strictly ordered 1-D sequences of knots coordinates.
    w : array_like, optional
        Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
    bbox : (4,) array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain. By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    See Also
    --------
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothBivariateSpline : create a smoothing BivariateSpline

    Notes
    -----
    The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.

    """
    def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
                 eps=None):
        # Build the full knot vectors: interior knots from the caller,
        # with (k+1) slots at each end reserved for boundary knots
        # (left as zeros; presumably filled in by surfit_lsq -- TODO confirm).
        nx = 2*kx+2+len(tx)
        ny = 2*ky+2+len(ty)
        tx1 = zeros((nx,), float)
        ty1 = zeros((ny,), float)
        tx1[kx+1:nx-kx-1] = tx
        ty1[ky+1:ny-ky-1] = ty
        xb, xe, yb, ye = bbox
        # First attempt with minimal second workspace; on failure ier
        # reports the required lwrk2 and the call is retried.
        tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
                                                   xb, xe, yb, ye,
                                                   kx, ky, eps, lwrk2=1)
        if ier > 10:
            tx1, ty1, c, fp, ier = dfitpack.surfit_lsq(x, y, z, tx1, ty1, w,
                                                       xb, xe, yb, ye,
                                                       kx, ky, eps, lwrk2=ier)
        if ier in [0, -1, -2]:  # normal return
            pass
        else:
            if ier < -2:
                # Rank-deficient system: report the deficiency via the
                # templated -3 message.
                deficiency = (nx-kx-1)*(ny-ky-1)+ier
                message = _surfit_messages.get(-3) % (deficiency)
            else:
                message = _surfit_messages.get(ier, 'ier=%s' % (ier))
            warnings.warn(message)
        self.fp = fp
        self.tck = tx1, ty1, c
        self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh.

    Can be used for both smoothing and interpolating data.

    Parameters
    ----------
    x,y : array_like
        1-D arrays of coordinates in strictly ascending order.
    z : array_like
        2-D array of data with shape (x.size,y.size).
    bbox : array_like, optional
        Sequence of length 4 specifying the boundary of the rectangular
        approximation domain. By default,
        ``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
    kx, ky : ints, optional
        Degrees of the bivariate spline. Default is 3.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
        Default is ``s=0``, which is for interpolation.

    See Also
    --------
    SmoothBivariateSpline : a smoothing bivariate spline for scattered data
    bisplrep : an older wrapping of FITPACK
    bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation

    """
    def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
        x, y = ravel(x), ravel(y)
        if not np.all(diff(x) > 0.0):
            raise ValueError('x must be strictly increasing')
        if not np.all(diff(y) > 0.0):
            raise ValueError('y must be strictly increasing')
        # NOTE(review): the min/max checks below are implied by the strict
        # monotonicity checks above; kept for defensive validation.
        if not ((x.min() == x[0]) and (x.max() == x[-1])):
            raise ValueError('x must be strictly ascending')
        if not ((y.min() == y[0]) and (y.max() == y[-1])):
            raise ValueError('y must be strictly ascending')
        if not x.size == z.shape[0]:
            raise ValueError('x dimension of z must have same number of '
                             'elements as x')
        if not y.size == z.shape[1]:
            raise ValueError('y dimension of z must have same number of '
                             'elements as y')
        z = ravel(z)
        xb, xe, yb, ye = bbox
        nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
                                                          ye, kx, ky, s)
        # Unlike the scattered-data fits, gridded fitting raises instead of
        # warning on abnormal ier codes.
        if ier not in [0, -1, -2]:
            msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)
        self.fp = fp
        self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
        self.degrees = kx, ky
# Diagnostic messages for the spherical fitting routines (FITPACK sphere.f):
# start from the generic surfit messages and override the codes whose
# wording is specific to spherical coordinates.
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
# Code -3 carries the rank deficiency and achieved rank via %-formatting.
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
    """
    Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
    given set of data points (theta,phi,r).

    .. versionadded:: 0.11.0

    See Also
    --------
    bisplrep, bisplev : an older wrapping of FITPACK
    UnivariateSpline : a similar class for univariate spline interpolation
    SmoothUnivariateSpline :
        to create a BivariateSpline through the given points
    LSQUnivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
    """

    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
        """
        Evaluate the spline or its derivatives at given positions.

        Parameters
        ----------
        theta, phi : array_like
            Input coordinates.  With `grid` False the spline is evaluated
            at the points ``(theta[i], phi[i]), i=0, ..., len(x)-1`` under
            standard NumPy broadcasting; with `grid` True it is evaluated
            on the grid spanned by the two (sorted, increasing) arrays.
        dtheta : int, optional
            Order of theta-derivative.

            .. versionadded:: 0.14.0
        dphi : int
            Order of phi-derivative.

            .. versionadded:: 0.14.0
        grid : bool
            Whether to evaluate the results on a grid spanned by the
            input arrays, or at points specified by the input arrays.

            .. versionadded:: 0.14.0
        """
        theta = np.asarray(theta)
        phi = np.asarray(phi)

        def _check_domain(values, name, upper):
            # An empty coordinate array is trivially within bounds.
            if values.size > 0 and (values.min() < 0. or values.max() > upper):
                raise ValueError("requested %s out of bounds." % name)

        # theta parametrizes latitude on [0, pi], phi longitude on [0, 2*pi].
        _check_domain(theta, "theta", np.pi)
        _check_domain(phi, "phi", 2. * np.pi)

        return _BivariateSplineBase.__call__(self, theta, phi,
                                             dx=dtheta, dy=dphi, grid=grid)

    def ev(self, theta, phi, dtheta=0, dphi=0):
        """
        Evaluate the spline at points.

        Returns the interpolated value at ``(theta[i], phi[i]),
        i=0,...,len(theta)-1``.

        Parameters
        ----------
        theta, phi : array_like
            Input coordinates. Standard Numpy broadcasting is obeyed.
        dtheta : int, optional
            Order of theta-derivative.

            .. versionadded:: 0.14.0
        dphi : int, optional
            Order of phi-derivative.

            .. versionadded:: 0.14.0
        """
        # Point-wise evaluation is grid evaluation with grid=False.
        return self(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
    """
    Smooth bivariate spline approximation in spherical coordinates.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval (0, pi),
        and phi must lie within the interval (0, 2pi).
    w : array_like, optional
        Positive 1-D sequence of weights.  A scalar float is broadcast to a
        constant weight for every data point.
    s : float, optional
        Positive smoothing factor defined for estimation condition:
        ``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
        Default ``s=len(w)`` which should be a good value if 1/w[i] is an
        estimate of the standard deviation of r[i].
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> from scipy.interpolate import SmoothSphereBivariateSpline
    >>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
    ...                                   data.T.ravel(), s=3.5)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2 * np.pi, 90)
    >>> data_smth = lut(fine_lats, fine_lons)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_smth, interpolation='nearest')
    >>> plt.show()
    """

    def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
        # BUG FIX: this previously used ``np.issubclass_(w, float)``, which
        # tests whether ``w`` is a *class* and is therefore always False for
        # a scalar instance (it also no longer exists in NumPy >= 2.0), so a
        # scalar weight was never broadcast.  isinstance does what was meant.
        if isinstance(w, float):
            w = ones(len(theta)) * w
        nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
                                                                r, w=w, s=s,
                                                                eps=eps)
        # ier codes 0/-1/-2 are success variants; anything else is fatal.
        if ier not in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)

        self.fp = fp
        # Trim the over-allocated knot arrays to their used lengths; the
        # spline is bicubic, hence the (n - 4) coefficient counts.
        self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
        self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
    """
    Weighted least-squares bivariate spline approximation in spherical
    coordinates.

    Determines a smooth bicubic spline according to a given
    set of knots in the `theta` and `phi` directions.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    theta, phi, r : array_like
        1-D sequences of data points (order is not important). Coordinates
        must be given in radians. Theta must lie within the interval (0, pi),
        and phi must lie within the interval (0, 2pi).
    tt, tp : array_like
        Strictly ordered 1-D sequences of knots coordinates.
        Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
    w : array_like, optional
        Positive 1-D sequence of weights, of the same length as `theta`, `phi`
        and `r`.  A scalar float is broadcast to a constant weight.
    eps : float, optional
        A threshold for determining the effective rank of an over-determined
        linear system of equations. `eps` should have a value between 0 and 1,
        the default is 1e-16.

    Notes
    -----
    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/sphere.f

    Examples
    --------
    Suppose we have global data on a coarse grid (the input data does not
    have to be on a grid):

    >>> theta = np.linspace(0., np.pi, 7)
    >>> phi = np.linspace(0., 2*np.pi, 9)
    >>> data = np.empty((theta.shape[0], phi.shape[0]))
    >>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
    >>> data[1:-1,1], data[1:-1,-1] = 1., 1.
    >>> data[1,1:-1], data[-2,1:-1] = 1., 1.
    >>> data[2:-2,2], data[2:-2,-2] = 2., 2.
    >>> data[2,2:-2], data[-3,2:-2] = 2., 2.
    >>> data[3,3:-2] = 3.
    >>> data = np.roll(data, 4, 1)

    We need to set up the interpolator object. Here, we must also specify the
    coordinates of the knots to use.

    >>> lats, lons = np.meshgrid(theta, phi)
    >>> knotst, knotsp = theta.copy(), phi.copy()
    >>> knotst[0] += .0001
    >>> knotst[-1] -= .0001
    >>> knotsp[0] += .0001
    >>> knotsp[-1] -= .0001
    >>> from scipy.interpolate import LSQSphereBivariateSpline
    >>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
    ...                                data.T.ravel(), knotst, knotsp)

    As a first test, we'll see what the algorithm returns when run on the
    input coordinates

    >>> data_orig = lut(theta, phi)

    Finally we interpolate the data to a finer grid

    >>> fine_lats = np.linspace(0., np.pi, 70)
    >>> fine_lons = np.linspace(0., 2*np.pi, 90)
    >>> data_lsq = lut(fine_lats, fine_lons)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(131)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(132)
    >>> ax2.imshow(data_orig, interpolation='nearest')
    >>> ax3 = fig.add_subplot(133)
    >>> ax3.imshow(data_lsq, interpolation='nearest')
    >>> plt.show()
    """

    def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
        # BUG FIX: ``np.issubclass_(w, float)`` is a class check and was
        # always False for a scalar instance (and the helper is gone in
        # NumPy >= 2.0); isinstance correctly broadcasts a scalar weight.
        if isinstance(w, float):
            w = ones(len(theta)) * w
        # Pad the caller's interior knots with four boundary knots on each
        # side (bicubic spline): theta spans [0, pi], phi spans [0, 2*pi].
        nt_, np_ = 8 + len(tt), 8 + len(tp)
        tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
        tt_[4:-4], tp_[4:-4] = tt, tp
        tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
        tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
                                                     w=w, eps=eps)
        if ier < -2:
            # Rank deficiency is recoverable: warn with the deficiency and
            # the achieved rank instead of raising.
            deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
            message = _spherefit_messages.get(-3) % (deficiency, -ier)
            warnings.warn(message, stacklevel=2)
        elif ier not in [0, -1, -2]:
            message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(message)

        self.fp = fp
        self.tck = tt_, tp_, c
        self.degrees = (3, 3)
# Diagnostic messages for the spherical grid fitting routine (FITPACK
# spgrid.f): reuse the generic surfit messages and override the
# invalid-input error (code 10) with spgrid-specific wording.
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
    """
    Bivariate spline approximation over a rectangular mesh on a sphere.

    Can be used for smoothing data.

    .. versionadded:: 0.11.0

    Parameters
    ----------
    u : array_like
        1-D array of latitude coordinates in strictly ascending order.
        Coordinates must be given in radians and lie within the interval
        (0, pi).
    v : array_like
        1-D array of longitude coordinates in strictly ascending order.
        Coordinates must be given in radians. First element (v[0]) must lie
        within the interval [-pi, pi). Last element (v[-1]) must satisfy
        v[-1] <= v[0] + 2*pi.
    r : array_like
        2-D array of data with shape ``(u.size, v.size)``.
    s : float, optional
        Positive smoothing factor defined for estimation condition
        (``s=0`` is for interpolation).
    pole_continuity : bool or (bool, bool), optional
        Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
        ``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
        will be 1 or 0 when this is True or False, respectively.
        Defaults to False.
    pole_values : float or (float, float), optional
        Data values at the poles ``u=0`` and ``u=pi``. Either the whole
        parameter or each individual element can be None. Defaults to None.
    pole_exact : bool or (bool, bool), optional
        Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
        value is considered to be the right function value, and it will be
        fitted exactly. If False, the value will be considered to be a data
        value just like the other data values. Defaults to False.
    pole_flat : bool or (bool, bool), optional
        For the poles at ``u=0`` and ``u=pi``, specify whether or not the
        approximation has vanishing derivatives. Defaults to False.

    See Also
    --------
    RectBivariateSpline : bivariate spline approximation over a rectangular
        mesh

    Notes
    -----
    Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
    ``iopt[0] = 1`` in the FITPACK routine) is supported.  The exact
    least-squares spline approximation is not implemented yet.

    When actually performing the interpolation, the requested `v` values must
    lie within the same length 2pi interval that the original `v` values were
    chosen from.

    For more information, see the FITPACK_ site about this function.

    .. _FITPACK: http://www.netlib.org/dierckx/spgrid.f

    Examples
    --------
    Suppose we have global data on a coarse grid

    >>> lats = np.linspace(10, 170, 9) * np.pi / 180.
    >>> lons = np.linspace(0, 350, 18) * np.pi / 180.
    >>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
    ...               np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T

    We want to interpolate it to a global one-degree grid

    >>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
    >>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
    >>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)

    We need to set up the interpolator object

    >>> from scipy.interpolate import RectSphereBivariateSpline
    >>> lut = RectSphereBivariateSpline(lats, lons, data)

    Finally we interpolate the data.  The `RectSphereBivariateSpline` object
    only takes 1-D arrays as input, therefore we need to do some reshaping.

    >>> data_interp = lut.ev(new_lats.ravel(),
    ...                      new_lons.ravel()).reshape((360, 180)).T

    Looking at the original and the interpolated data, one can see that the
    interpolant reproduces the original data very well:

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax1 = fig.add_subplot(211)
    >>> ax1.imshow(data, interpolation='nearest')
    >>> ax2 = fig.add_subplot(212)
    >>> ax2.imshow(data_interp, interpolation='nearest')
    >>> plt.show()

    Choosing the optimal value of ``s`` can be a delicate task. Recommended
    values for ``s`` depend on the accuracy of the data values.  If the user
    has an idea of the statistical errors on the data, she can also find a
    proper estimate for ``s``. By assuming that, if she specifies the
    right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
    reproduces the function underlying the data, she can evaluate
    ``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
    For example, if she knows that the statistical errors on her
    ``r(i,j)``-values are not greater than 0.1, she may expect that a good
    ``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.

    If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
    be determined by trial and error.  The best is then to start with a very
    large value of ``s`` (to determine the least-squares polynomial and the
    corresponding upper bound ``fp0`` for ``s``) and then to progressively
    decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
    ``s = fp0 / 10, fp0 / 100, ...``  and more carefully as the approximation
    shows more detail) to obtain closer fits.

    The interpolation results for different values of ``s`` give some insight
    into this process:

    >>> fig2 = plt.figure()
    >>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in range(len(s)):
    ...     lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
    ...     data_interp = lut.ev(new_lats.ravel(),
    ...                          new_lons.ravel()).reshape((360, 180)).T
    ...     ax = fig2.add_subplot(2, 2, ii+1)
    ...     ax.imshow(data_interp, interpolation='nearest')
    ...     ax.set_title("s = %g" % s[ii])
    >>> plt.show()
    """

    def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
                 pole_exact=False, pole_flat=False):
        # FITPACK spgrid option arrays.  iopt[0]=0 selects the smoothing
        # spline; iopt[1:] encode the requested continuity order at the two
        # poles.  ider[0]/ider[2] describe how the pole values r0/r1 are used
        # (-1: no value given, 0: ordinary data value, 1: exact value);
        # ider[1]/ider[3] flag vanishing derivatives at the poles.
        iopt = np.array([0, 0, 0], dtype=int)
        ider = np.array([-1, 0, -1, 0], dtype=int)
        # Normalize the four pole_* arguments to per-pole (u=0, u=pi) pairs.
        if pole_values is None:
            pole_values = (None, None)
        elif isinstance(pole_values, (float, np.float32, np.float64)):
            pole_values = (pole_values, pole_values)
        if isinstance(pole_continuity, bool):
            pole_continuity = (pole_continuity, pole_continuity)
        if isinstance(pole_exact, bool):
            pole_exact = (pole_exact, pole_exact)
        if isinstance(pole_flat, bool):
            pole_flat = (pole_flat, pole_flat)

        r0, r1 = pole_values
        iopt[1:] = pole_continuity
        if r0 is None:
            ider[0] = -1
        else:
            # bool -> 0/1: ordinary data value vs. exact function value.
            ider[0] = pole_exact[0]

        if r1 is None:
            ider[2] = -1
        else:
            ider[2] = pole_exact[1]

        ider[1], ider[3] = pole_flat

        u, v = np.ravel(u), np.ravel(v)
        # Coordinates must be strictly increasing and match the data shape.
        if not np.all(np.diff(u) > 0.0):
            raise ValueError('u must be strictly increasing')
        if not np.all(np.diff(v) > 0.0):
            raise ValueError('v must be strictly increasing')

        if not u.size == r.shape[0]:
            raise ValueError('u dimension of r must have same number of '
                             'elements as u')
        if not v.size == r.shape[1]:
            raise ValueError('v dimension of r must have same number of '
                             'elements as v')

        # Vanishing derivatives at a pole only make sense together with
        # continuity there.
        if pole_continuity[1] is False and pole_flat[1] is True:
            raise ValueError('if pole_continuity is False, so must be '
                             'pole_flat')
        if pole_continuity[0] is False and pole_flat[0] is True:
            raise ValueError('if pole_continuity is False, so must be '
                             'pole_flat')

        r = np.ravel(r)
        nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
                                                                u.copy(), v.copy(), r.copy(), r0, r1, s)

        if ier not in [0, -1, -2]:
            msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
            raise ValueError(msg)

        self.fp = fp
        # Trim over-allocated knot/coefficient arrays (bicubic: n - 4).
        self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
        self.degrees = (3, 3)
|
bsd-3-clause
|
Roboticmechart22/sms-tools
|
lectures/06-Harmonic-model/plots-code/spectral-peaks.py
|
22
|
1161
|
# Plot the magnitude and phase spectrum of an oboe note together with the
# parabolically-interpolated spectral peaks detected above a -60 dB threshold.
import numpy as np
import matplotlib.pyplot as plt
import math
import sys, os, functools, time
# NOTE: the old ``from scipy.signal import hamming, triang, blackmanharris``
# was unused (np.hamming is used below) and those names were removed from
# the scipy.signal namespace, so the import has been dropped.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF

(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')

# Analysis parameters: FFT size, odd window length, peak threshold in dB.
N = 512*2
M = 511
t = -60
w = np.hamming(M)
# BUG FIX: slice indices must be integers in Python 3; ``.8*fs`` is a float.
start = int(.8*fs)
x1 = x[start:start+M]

# Magnitude/phase spectra, peak detection and parabolic peak interpolation.
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
# Bin index -> frequency in Hz.
freqaxis = fs*np.arange(mX.size)/float(N)

plt.figure(1, figsize=(9, 6))

plt.subplot(2, 1, 1)
plt.plot(freqaxis, mX, 'r', lw=1.5)
plt.axis([0, 7000, -80, max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')

plt.subplot(2, 1, 2)
plt.plot(freqaxis, pX, 'c', lw=1.5)
plt.axis([0, 7000, min(pX), 10])
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')

plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
|
agpl-3.0
|
OpenDrift/opendrift
|
examples/example_model_landmask.py
|
1
|
2046
|
#!/usr/bin/env python
"""
Model landmask
===============================

Comparing two simulation runs, with landmask from ocean model and GSHHG
"""

from datetime import timedelta

from opendrift.readers import reader_ROMS_native
from opendrift.models.oceandrift import OceanDrift

# Common seeding location for both runs (Lofoten, Norway).
lon = 14.75; lat = 68.1

o = OceanDrift(loglevel=20)

reader_nordic = reader_ROMS_native.Reader(o.test_data_folder() +
    '2Feb2016_Nordic_sigma_3d/Nordic-4km_SLEVELS_avg_00_subset2Feb2016.nc')

#%%
# First run, with default GSHHG vector landmask
o.add_reader([reader_nordic])
time = reader_nordic.start_time
o.seed_elements(lon, lat, radius=3000, number=1000, time=time)
o.set_config('general:use_auto_landmask', True)
o.run(end_time=reader_nordic.end_time, time_step=1800, time_step_output=3*3600)

#%%
# Second run, with landmask from ocean model
o2 = OceanDrift(loglevel=20)
o2.add_reader([reader_nordic])
lon = 14.75; lat = 68.1
o2.seed_elements(lon, lat, radius=3000, number=1000, time=time)
o2.set_config('general:use_auto_landmask', False)
o2.run(end_time=reader_nordic.end_time, time_step=1800, time_step_output=3*3600)

#%% Prepare custom colormap/colors for land and ocean
from matplotlib.colors import ListedColormap
import cartopy.feature as cfeature
#cmap = ListedColormap(('blue', 'red'))
cmap = ListedColormap((cfeature.COLORS['water'],
                       cfeature.COLORS['land']))

#%%
# .. _model_landmask_only_model:
#
# To only show the landmask from the model, hide the coastline landmask by doing:
o2.plot(background='land_binary_mask', hide_landmask=True, cmap=cmap)

#%%
# Animation illustrating that red particles strand at ocean model land cells, and black particles strand at GSHHG land polygons
o.animation(compare=o2, background='land_binary_mask', cmap=cmap,
            legend=['Default GSHHG landmask', 'Ocean model landmask'])

#%%
# .. image:: /gallery/animations/example_model_landmask_0.gif

o.plot(compare=o2, background='land_binary_mask', cmap=cmap,
       legend=['Default GSHHG landmask', 'Ocean model landmask'])
|
gpl-2.0
|
xionzz/earthquake
|
venv/lib/python2.7/site-packages/numpy/lib/polynomial.py
|
35
|
37641
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
    """
    Warning issued by `polyfit` when the least-squares Vandermonde matrix
    is rank deficient.

    See `polyfit` for more information, for an example of this warning
    being issued, and for a way to suppress it.
    """
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the coefficients of the monic polynomial whose zeros are
    `seq_of_zeros` (multiple roots must be included as many times as their
    multiplicity).  A square array is treated as a matrix and the
    coefficients of its characteristic polynomial are returned.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array or matrix object.

    Returns
    -------
    c : ndarray
        1D array of polynomial coefficients from highest to lowest degree:
        ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
        where c[0] always equals 1.

    Raises
    ------
    ValueError
        If input is the wrong shape (the input must be a 1-D or square
        2-D array).

    See Also
    --------
    polyval : Evaluate a polynomial at a point.
    roots : Return the roots of a polynomial.
    polyfit : Least squares polynomial fit.
    poly1d : A one-dimensional polynomial class.

    Notes
    -----
    The leading coefficient is always taken as one.  The characteristic
    polynomial of an n-by-n matrix A is ``det(t*I - A)``.

    Examples
    --------
    >>> np.poly((0, 0, 0))  # Multiple root example
    array([1, 0, 0, 0])
    >>> np.poly((-1./2, 0, 1./2))
    array([ 1.  ,  0.  , -0.25,  0.  ])
    >>> P = np.array([[0, 1./3], [-1./2, 0]])
    >>> np.poly(P)
    array([ 1.        ,  0.        ,  0.16666667])
    """
    seq_of_zeros = atleast_1d(seq_of_zeros)
    sh = seq_of_zeros.shape
    # A non-empty square 2-D input is a matrix: its eigenvalues are the
    # roots of the characteristic polynomial.
    if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
        seq_of_zeros = eigvals(seq_of_zeros)
    elif len(sh) != 1:
        raise ValueError("input must be 1d or non-empty square 2d array.")

    if len(seq_of_zeros) == 0:
        # The polynomial with no roots is the constant 1.
        return 1.0

    # Multiply out prod_k (x - r_k) one root at a time via convolution.
    a = [1]
    for zero in seq_of_zeros:
        a = NX.convolve(a, [1, -zero], mode='full')

    if issubclass(a.dtype.type, NX.complexfloating):
        # If the complex roots all occur in conjugate pairs, the polynomial
        # has real coefficients: drop the (numerically zero) imaginary part.
        roots = NX.asarray(seq_of_zeros, complex)
        pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
        neg_roots = NX.conjugate(sort_complex(
                                 NX.compress(roots.imag < 0, roots)))
        # BUG FIX: NX.alltrue was deprecated and removed in NumPy 2.0;
        # NX.all is the supported equivalent.
        if (len(pos_roots) == len(neg_roots) and
                NX.all(neg_roots == pos_roots)):
            a = a.real.copy()

    return a
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    The rank-1 coefficient array `p` of length n+1 describes the
    polynomial ``p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]``.

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.

    Returns
    -------
    out : ndarray
        An array containing the complex roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    See also
    --------
    poly, polyval, polyfit, poly1d

    Notes
    -----
    The roots are computed as the eigenvalues of the companion matrix of
    the polynomial (Horn & Johnson, *Matrix Analysis*).

    Examples
    --------
    >>> coeff = [3.2, 2, 1]
    >>> np.roots(coeff)
    array([-0.3125+0.46351241j, -0.3125-0.46351241j])
    """
    p = atleast_1d(p)
    if p.ndim != 1:
        raise ValueError("Input must be a rank-1 array.")

    # Indices of the non-zero coefficients.
    nz = NX.nonzero(NX.ravel(p))[0]
    if len(nz) == 0:
        # The all-zero polynomial has no roots.
        return NX.array([])

    # Each trailing zero coefficient contributes one root at x = 0.
    trailing_zeros = len(p) - nz[-1] - 1

    # Leading/trailing zeros do not influence the remaining roots.
    p = p[int(nz[0]):int(nz[-1]) + 1]

    # Work in (complex) floating point even for integer input.
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)

    n = len(p)
    if n > 1:
        # The eigenvalues of the companion matrix are the roots.
        companion = diag(NX.ones((n - 2,), p.dtype), -1)
        companion[0, :] = -p[1:] / p[0]
        result = eigvals(companion)
    else:
        result = NX.array([])

    # Re-attach the roots at zero stripped off above.
    return hstack((result, NX.zeros(trailing_zeros, result.dtype)))
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.

    The order-`m` antiderivative ``P`` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is fixed by the `m - 1`
    integration constants `k`, ordered from the highest-order term down,
    so that :math:`P^{(j)}(0) = k_{m-j-1}`.

    Parameters
    ----------
    p : {array_like, poly1d}
        Polynomial to integrate.  A sequence is interpreted as polynomial
        coefficients, see `poly1d`.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : {None, list of `m` scalars, scalar}, optional
        Integration constants, highest-order first.  ``None`` (default)
        means all zero; for ``m = 1`` a single scalar is accepted.

    Returns
    -------
    ndarray or poly1d
        Coefficients of the antiderivative; a `poly1d` is returned when
        the input was one.

    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")

    # Normalize the integration constants to a rank-1 array of length >= m.
    if k is None:
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    if len(k) == 1 and m > 1:
        # A single scalar constant applies to every integration step.
        k = k[0] * NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
            "k must be a scalar or a rank-1 array of length 1 or >m.")

    truepoly = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        return poly1d(p) if truepoly else p

    # One integration step: divide each coefficient by its new exponent and
    # append the constant term; recurse for the remaining m - 1 steps.
    # (True division keeps this working for integer and object arrays.)
    stepped = NX.concatenate((p / NX.arange(len(p), 0, -1), [k[0]]))
    result = polyint(stepped, m - 1, k=k[1:])
    return poly1d(result) if truepoly else result
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate.  A sequence is interpreted as
        polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of differentiation (default: 1).

    Returns
    -------
    der : poly1d
        A new polynomial representing the derivative.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.

    Examples
    --------
    >>> p = np.poly1d([1, 1, 1, 1])
    >>> np.polyder(p)
    poly1d([3, 2, 1])
    >>> np.polyder(p, 3)
    poly1d([6])
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")

    truepoly = isinstance(p, poly1d)
    val = NX.asarray(p)
    # Taking len() here also rejects 0-d input with a TypeError, matching
    # the historical behavior even for m == 0.
    n = len(val) - 1
    # Apply the power rule once per requested order: the coefficient of
    # x**k becomes k times the old coefficient of x**k (constant drops).
    for _ in range(m):
        val = val[:-1] * NX.arange(n, 0, -1)
        n = len(val) - 1

    if truepoly:
        val = poly1d(val)
    return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree
    `deg` to points ``(x, y)``, returning the coefficient vector that
    minimises the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points.  Several datasets sharing
        the same x can be fitted at once, one dataset per column.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative condition number of the fit; singular values smaller
        than ``rcond`` times the largest singular value are ignored.
        Defaults to ``len(x) * eps`` of the data's float type.
    full : bool, optional
        When True, diagnostic information from the SVD is also returned.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates of the sample points.
    cov : bool, optional
        Also return the covariance matrix of the estimate.  Ignored
        when `full` is True.

    Returns
    -------
    p : ndarray, shape (deg+1,) or (deg+1, K)
        Polynomial coefficients, highest power first.
    residuals, rank, singular_values, rcond
        Present only if `full` is True; see `linalg.lstsq`.
    V : ndarray, shape (deg+1, deg+1) or (deg+1, deg+1, K)
        Present only if `full` is False and `cov` is True; covariance
        matrix of the coefficient estimates.

    Warns
    -----
    RankWarning
        The coefficient matrix in the least-squares fit is rank
        deficient.  Only raised when `full` is False.

    See Also
    --------
    polyval, linalg.lstsq
    """
    order = int(deg) + 1
    # '+ 0.0' forces float arithmetic even for integer input.
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0
    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")
    # set rcond
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps
    # set up least squares equation for powers of x
    # (Vandermonde matrix: column j holds x**(order-1-j)).
    lhs = vander(x, order)
    rhs = y
    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w
    # scale lhs to improve condition number and solve
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients
    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)
    if full:
        return c, resids, rank, s, rcond
    elif cov:
        # Covariance of the coefficients: (A^T A)^-1, undone for the
        # column scaling applied above, times the residual variance.
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator, but
        # it is included here because the covariance of Multivariate Student-T
        # (which is implied by a Bayesian uncertainty analysis) includes it.
        # Plus, it gives a slightly more conservative estimate of uncertainty.
        # NOTE(review): this divides by zero (fac -> inf/nan) when
        # len(x) <= order + 2; callers need more than deg + 3 points
        # for a finite covariance -- consider validating upstream.
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    For coefficients ``p`` of length N (highest power first) this
    returns ``p[0]*x**(N-1) + ... + p[N-2]*x + p[N-1]``, evaluated
    element-wise when `x` is a sequence.  Passing a poly1d as `x`
    yields the composition ``p(x(t))`` as a poly1d.

    Parameters
    ----------
    p : array_like or poly1d
        1D coefficients from highest degree to the constant term, or a
        poly1d instance.
    x : array_like or poly1d
        Point(s), or polynomial, at which to evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        Evaluation result; a poly1d when `x` is a poly1d.

    Notes
    -----
    Uses Horner's scheme; high-degree polynomials may lose accuracy to
    rounding.

    Examples
    --------
    >>> np.polyval([3, 0, 1], 5)  # 3 * 5**2 + 0 * 5 + 1
    76
    """
    p = NX.asarray(p)
    if isinstance(x, poly1d):
        # Composition: the accumulator grows into a poly1d as we go.
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme: fold one coefficient in per step.
    for coef in p:
        acc = x * acc + coef
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Each input is a poly1d object or a 1D coefficient sequence, highest
    degree first.  The result is a poly1d if either input was one,
    otherwise a 1D ndarray of coefficients.

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    pad = len(a2) - len(a1)
    # Left-pad the shorter coefficient array with zeros so terms of the
    # same degree line up before adding.
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    total = a1 + a2
    return poly1d(total) if wrap else total
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials: ``a1 - a2``.

    Each input is a poly1d object or a 1D coefficient sequence, highest
    degree first.  The result is a poly1d if either input was one.

    Examples
    --------
    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    pad = len(a2) - len(a1)
    # Left-pad the shorter coefficient array with zeros so terms of the
    # same degree line up before subtracting.
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    diff = a1 - a2
    return poly1d(diff) if wrap else diff
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Each input is a poly1d object or a 1D coefficient sequence, highest
    degree first.  The product is computed by convolving the two
    coefficient arrays; the result is a poly1d if either input was one.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])
    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # Coercing through poly1d normalizes input (trims leading zeros).
    c1 = poly1d(a1)
    c2 = poly1d(a2)
    product = NX.convolve(c1, c2)
    return poly1d(product) if wrap else product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients, highest degree first.
    v : array_like or poly1d
        Divisor polynomial's coefficients, highest degree first.

    Returns
    -------
    q : ndarray or poly1d
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray or poly1d
        Coefficients, including those equal to zero, of the remainder.

    Notes
    -----
    Results are poly1d objects when either input is a poly1d.  Both `u`
    and `v` must be 0-d or 1-d.

    Examples
    --------
    >>> np.polydiv([3.0, 5.0, 2.0], [2.0, 1.0])
    (array([ 1.5 ,  1.75]), array([ 0.25]))
    """
    # BUG FIX: the original tested isinstance(u, poly1d) twice, so a
    # poly1d divisor paired with an array dividend returned plain
    # arrays instead of poly1d results.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    # Synthetic long division: eliminate the leading term of the
    # running remainder at each step.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Trim numerically-zero leading coefficients off the remainder.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches '**<digits>' so exponents can be lifted onto a raised line.
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Typeset a 'c x**N + ...' string on two text rows: exponents go on
    # the upper row (line1), the base expression on the lower row
    # (line2).  When either row would exceed `wrap` columns, the pair
    # is flushed to `output` and a new pair is started.
    n = 0
    line1 = ''
    line2 = ''
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        partstr = astr[n:span[0]]
        n = span[1]
        # Pad both rows so the exponent sits directly above the end of
        # its base text.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append whatever trails the last '**<digits>' occurrence.
    return output + astr[n:]
class poly1d(object):
    """
    A one-dimensional polynomial class.

    A convenience class, used to encapsulate "natural" operations on
    polynomials (arithmetic, evaluation, calculus, printing) so that
    said operations may take on their customary form in code.

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if the
        value of the second parameter is True, the polynomial's roots.
        For example, ``poly1d([1, 2, 3])`` represents
        :math:`x^2 + 2x + 3`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing the polynomial from
        ``x`` to `variable`.

    Examples
    --------
    >>> p = np.poly1d([1, 2, 3])
    >>> p(0.5)
    4.25
    >>> p.c
    array([1, 2, 3])
    >>> p.order
    2
    """
    # Instance state is written straight into __dict__ by __init__
    # (because __setattr__ is disabled below); these class attributes
    # are placeholders/documentation for that state.
    coeffs = None
    order = None
    variable = None
    # Mutable and defines __eq__, so instances are explicitly unhashable.
    __hash__ = None
    def __init__(self, c_or_r, r=0, variable=None):
        # Copy-construct from an existing poly1d, optionally renaming
        # the print variable.
        if isinstance(c_or_r, poly1d):
            for key in c_or_r.__dict__.keys():
                self.__dict__[key] = c_or_r.__dict__[key]
            if variable is not None:
                self.__dict__['variable'] = variable
            return
        if r:
            # Input is a list of roots: convert to coefficients.
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if len(c_or_r.shape) > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Strip leading (highest-power) zero coefficients; an all-zero
        # input collapses to the constant polynomial 0.
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0.])
        self.__dict__['coeffs'] = c_or_r
        self.__dict__['order'] = len(c_or_r) - 1
        if variable is None:
            variable = 'x'
        self.__dict__['variable'] = variable
    def __array__(self, t=None):
        # asarray(p) yields the coefficient array (optionally cast to t).
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)
    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the 'array(' prefix and closing ')' from the ndarray repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals
    def __len__(self):
        # len(p) is the polynomial's order (degree), not the number of
        # coefficients.
        return self.order
    def __str__(self):
        # Render e.g. '1 x**2 + 2 x + 3' with exponents raised onto a
        # separate line by _raise_power.
        thestr = "0"
        var = self.variable
        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1
        def fmt_float(q):
            # Four significant digits, with a redundant '.0000' dropped.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s
        for k in range(len(coeffs)):
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))
            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): 'b' looks like a vestigial sentinel
                    # for a unit coefficient; fmt_float never produces
                    # it, so this branch appears dead -- confirm.
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        return _raise_power(thestr)
    def __call__(self, val):
        # p(val) evaluates the polynomial (or composes, if val is a poly1d).
        return polyval(self.coeffs, val)
    def __neg__(self):
        return poly1d(-self.coeffs)
    def __pos__(self):
        return self
    def __mul__(self, other):
        # Scalar multiply scales coefficients; otherwise polynomial product.
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))
    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))
    def __pow__(self, val):
        # Only non-negative integer powers are defined.
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        res = [1]
        # Repeated multiplication; val == 0 yields the constant 1.
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)
    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))
    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))
    def __div__(self, other):
        # Scalar divide scales coefficients; otherwise polynomial
        # division returning (quotient, remainder).
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            return polydiv(self, other)
    __truediv__ = __div__
    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)
    __rtruediv__ = __rdiv__
    def __eq__(self, other):
        # NOTE(review): assumes `other` is a poly1d (has .coeffs);
        # comparing against an arbitrary object raises AttributeError.
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __setattr__(self, key, val):
        # Attributes are immutable from the outside; internal code
        # writes through self.__dict__ directly.
        raise ValueError("Attributes cannot be changed this way.")
    def __getattr__(self, key):
        # Convenience aliases: p.r/p.roots, p.c/p.coef/p.coefficients, p.o.
        if key in ['r', 'roots']:
            return roots(self.coeffs)
        elif key in ['c', 'coef', 'coefficients']:
            return self.coeffs
        elif key in ['o']:
            return self.order
        else:
            try:
                return self.__dict__[key]
            except KeyError:
                raise AttributeError(
                    "'%s' has no attribute '%s'" % (self.__class__, key))
    def __getitem__(self, val):
        # p[k] is the coefficient of x**k (0 when k is out of range).
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]
    def __setitem__(self, key, val):
        # p[k] = c sets the coefficient of x**k, growing the
        # coefficient array (zero-padded) if k exceeds the order.
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
            self.__dict__['order'] = key
            ind = 0
        self.__dict__['coeffs'][ind] = val
        return
    def __iter__(self):
        # Iterates coefficients from highest power to the constant term.
        return iter(self.coeffs)
    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))
    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Always surface RankWarning (emitted by polyfit for ill-conditioned
# fits), even if the user has filtered warnings globally.
warnings.simplefilter('always', RankWarning)
|
mit
|
yehudagale/fuzzyJoiner
|
old/TripletLossFacenetLSTM-8.20.18.py
|
2
|
21235
|
import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
#must fix
# Global configuration / hyper-parameters for the triplet-loss trainer.
MAX_NB_WORDS = 140000  # cap on vocabulary indices admitted into the embedding matrix
EMBEDDING_DIM = 100  # dimensionality of each Kazuma character embedding vector
MAX_SEQUENCE_LENGTH = 10  # all token sequences are padded/truncated to this length
MARGIN=10  # margin used by triplet_loss
ALPHA=45  # angle fed to tan() in angular_loss
DEBUG = False  # when True, operate on a small slice of the data
DEBUG_DATA_LENGTH = 100  # size of that debug slice
DEBUG_ANN = False  # presumably toggles extra ANN diagnostics; unused in this chunk -- TODO confirm
USE_ANGULAR_LOSS=False  # select angular loss instead of a triplet-loss variant in build_model
LOSS_FUNCTION=None  # selected loss callable; assigned at runtime -- TODO confirm where
TRAIN_NEIGHBOR_LEN=20  # neighbours fetched per anchor when mining training triplets
TEST_NEIGHBOR_LEN=20  # neighbours fetched per anchor at evaluation time
EMBEDDING_TYPE = 'Kazuma'  # NOTE(review): informational only in this chunk -- TODO confirm use
NUM_LAYERS = 3  # number of stacked GRU layers in build_model
USE_L2_NORM = False  # when True, L2-normalize the embedding output
filepath="weights.best.hdf5"  # checkpoint path for best weights
output_file_name_for_hpo = "val_dict_list.json"  # HPO summary written by generate_triplets_from_ANN
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    Pair ``i`` counts as a true positive when the anchor-positive
    distance ``positive[i]`` is no larger than the anchor-negative
    distance ``negative[i]``; otherwise the ranking is wrong in both
    directions, so it counts as one false negative AND one false
    positive.

    Fixes over the original: removed the dead, misspelled ``fsocre``
    local and corrected the ``false_negitive`` spelling.

    Raises
    ------
    ZeroDivisionError
        If both input sequences are empty (no counts at all).
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma char embeddings.

    Looks up a vector for every word in the tokenizer's vocabulary
    (indices below MAX_NB_WORDS) and packs them into a non-trainable
    Embedding layer of shape (vocab_size, EMBEDDING_DIM); words whose
    lookup sums to zero are reported as missing.
    """
    word_index = tokenizer.word_index
    vocab_size = len(word_index) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, idx in word_index.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            # Rows never assigned here stay all-zero.
            weights[idx] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Tokenize and pad the texts for each triplet role.

    For each of 'anchor', 'negative' and 'positive', converts the raw
    strings to integer sequences and pads them to MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        seqs = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Read the raw entity file and return its lines (newlines kept)."""
    with open(filepath) as fl:
        return fl.readlines()
def read_file(file_path):
    """Read pipe-delimited triplets, one ``anchor|positive|negative`` per line.

    Returns a dict with 'anchor', 'positive' and 'negative' string
    lists (the negative keeps its trailing newline, as in the original).
    In DEBUG mode reading stops after DEBUG_DATA_LENGTH + 1 lines.

    Fix over the original: the file handle was opened without ever
    being closed; a context manager now guarantees its release.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # Matches the original post-increment check: break once
            # (lines read) > DEBUG_DATA_LENGTH while debugging.
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, test) lists.

    In DEBUG mode takes the first DEBUG_DATA_LENGTH entries unshuffled;
    otherwise shuffles `entities` in place first.  The last
    ``test_split`` fraction becomes the test set.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    holdout = int(test_split * len(ents))
    return ents[:-holdout], ents[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss: mean(max(0, d_ap^2 - 4 tan^2(ALPHA) d_nc^2)).

    y_pred[:, 0, 0] is the anchor-positive distance, y_pred[:, 1, 0]
    the distance from the negative to the anchor/positive midpoint.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:, 0, 0]
    n_c = y_pred[:, 1, 0]
    tan_sq = K.square(tf.tan(alpha))
    hinge = K.square(a_p) - K.constant(4) * tan_sq * K.square(n_c)
    return K.mean(K.maximum(K.constant(0), hinge))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss (arXiv:1503.03832) with a fixed 0.2 margin."""
    margin = K.constant(0.2)
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    return K.mean(K.maximum(K.constant(0), pos_sq - neg_sq + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: positives pulled to 0, negatives pushed past MARGIN."""
    margin = K.constant(MARGIN)
    pos_term = K.square(y_pred[:, 0, 0])
    neg_term = K.square(K.maximum(margin - y_pred[:, 1, 0], K.constant(0)))
    return K.mean(pos_term + neg_term)
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed triplet loss over the (positive, negative) distances."""
    pos = K.tanh(y_pred[:, 0, 0])
    neg = K.tanh(y_pred[:, 1, 0])
    return K.mean(pos + (K.constant(1) - neg))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh triplet loss averaging the anchor-negative and positive-negative pushes."""
    a_p = K.tanh(y_pred[:, 0, 0])
    a_n = K.tanh(y_pred[:, 1, 0])
    p_n = K.tanh(y_pred[:, 2, 0])
    push = ((K.constant(1) - a_n) + (K.constant(1) - p_n)) / K.constant(2)
    return K.mean(a_p + push)
# Triplet-loss variant from "Deep Metric Learning with Improved Triplet
# Loss for Face Clustering in Videos".
def improved_loss(y_true, y_pred):
    """Hinge on the relative anchor-positive margin plus an absolute pull-in term.

    phi penalizes the anchor-positive distance exceeding the mean of
    the two negative-facing distances (plus a margin); psi additionally
    pulls the anchor-positive distance below a fixed threshold.
    """
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    d_ap = y_pred[:, 0, 0]
    d_an = y_pred[:, 1, 0]
    d_pn = y_pred[:, 2, 0]
    phi = d_ap - ((d_an + d_pn) / K.constant(2)) + margin
    psi = d_ap - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets where the anchor-positive distance
    # (y_pred[:,0,0]) is strictly smaller than the anchor-negative
    # distance (y_pred[:,1,0]).
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Normalize along the last axis to unit L2 length (used on the
    # embedding output when USE_L2_NORM is set).
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of batched vectors.

    Clamped at K.epsilon() before the square root for numerical safety.
    """
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint."""
    x_a, x_p, x_n = vects
    center = (x_a + x_p) / K.constant(2)
    squared = K.sum(K.square(x_n - center), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-to-positive Euclidean distance (the negative input is unused)."""
    x_a, x_p, _x_n = vects
    squared = K.sum(K.square(x_a - x_p), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten {anchor: [aliases]} into a text list plus a name->index map.

    Each anchor is appended first, followed by its aliases, so indices
    reflect insertion order; duplicate names keep their last index.
    """
    unique_text = []
    entity2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets via approximate-nearest-neighbour search over embeddings.

    Embeds `sequences` with `model`, indexes the vectors in Annoy
    (euclidean metric), then for each anchor in `entity2same` gathers
    its neighbourhood and pairs true matches (positives) against near
    misses (negatives).  Prints recall/distance statistics, writes an
    accuracy summary to `output_file_name_for_hpo`, and returns either
    the match rate (when `test` is True) or ``(triplets, match rate)``
    where `triplets` holds parallel 'anchor'/'positive'/'negative'
    text lists.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean')  # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # Neighbourhood size differs between evaluation and triplet mining.
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor.  Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Check, pair by pair, that positives really sit closer than
        # negatives under the current embedding.
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Precision@1-style stat: how many positives beat the closest negative.
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        closest_pos_count = 0
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count += 1
        if closest_pos_count > 0:
            precise += 1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        # Emit every (anchor, expected-positive, mined-negative) combination.
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    # Summary consumed by the hyper-parameter-optimization harness.
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity lines into a {canonical_name: [aliases]} dict.

    Uses NameDataCleanser for people (4 names required) or
    CompanyDataCleanser otherwise (2 required); entries whose cleansed
    form yields fewer names than required are dropped.
    """
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        generator = CompanyDataCleanser(limit_pairs)
        num_names = 2
    entity2same = {}
    for entity in entities:
        cleansed = generator.cleanse_data(entity)
        if cleansed and len(cleansed) >= num_names:
            entity2same[cleansed[0]] = cleansed[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the (frozen) embedding layer in a model that flattens its output."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
    """Assemble the triplet ("siamese") network around *embedder*.

    Builds a shared GRU tower over the embedded sequence, applies it to
    anchor/positive/negative inputs with shared weights, and stacks the
    pairwise distances into one tensor for a triplet loss.

    Depends on module globals MAX_SEQUENCE_LENGTH, NUM_LAYERS, USE_L2_NORM,
    USE_ANGULAR_LOSS and LOSS_FUNCTION.

    Returns a 4-tuple ``(model, test_positive_model, test_negative_model,
    inter_model)``: the trainable triplet model, two models exposing the
    anchor-positive and anchor-negative distances, and a model exposing
    the raw anchor embedding.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # All but the final GRU return full sequences so the layers can stack.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    # NOTE(review): reuses the loop variable ``i`` after the loop, so this
    # requires NUM_LAYERS >= 1 (the CLI help says minimum 2 layers) — confirm.
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()

    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')

    # The same base tower embeds all three triplet members (shared weights).
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)

    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])

    if USE_ANGULAR_LOSS:
        # Angular loss consumes the two angular distances stacked together.
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Positive-negative distance supplies an extra term for the other losses.
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])

    # Auxiliary models for inspecting distances and embeddings at test time.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- Command-line driver: parse options, build the model, train, report ----
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()

filepath = args.model

# Resolve the requested loss-function name to the loss callable.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)

if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))

# NOTE(review): MARGIN is printed but not assigned from args.margin in this
# section — presumably defined earlier in the module; confirm.
print('Margin:' + str(MARGIN))

# The flag arrives as a string, so parse the common truthy spellings.
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))

NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))

people = 'people' in args.entity_type

# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)

entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)

# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")

# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))

# Fit the tokenizer on the combined vocabulary, then vectorize both splits
# to fixed-length padded sequences.
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)

# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)

# Debug mode: only run the ANN triplet generation on the test split, then exit.
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()

test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))

counter = 0
current_model = embedder_model
prev_match_stats = 0

# Mine training triplets with the current embedding model via the ANN index.
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))

number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))

# NOTE(review): the labels appear to be random placeholders — the stacked-
# distance triplet losses seem not to use y_true; confirm before changing.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T

# Save the best model by validation accuracy and stop early when it stalls.
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]

train_seq = get_sequences(train_data, tokenizer)

# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model

# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))

# Final evaluation pass over the test split with the trained embedding.
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
|
epl-1.0
|
pybel/pybel
|
tests/test_io/test_spia.py
|
1
|
10537
|
# -*- coding: utf-8 -*-
"""This module contains tests for the SPIA exporter."""
import unittest
from pandas import DataFrame
from pybel.dsl import activity, composite_abundance, pmod, protein, rna
from pybel.examples.sialic_acid_example import (
cd33, citation, evidence_1, shp1, shp2, sialic_acid_cd33_complex, sialic_acid_graph, trem2,
)
from pybel.io.spia import build_spia_matrices, get_matrix_index, to_spia_dfs, update_spia_matrices
class TestSpia(unittest.TestCase):
    """Tests for the SPIA exporter."""

    def setUp(self):
        """Work on a private copy of the sialic-acid example graph."""
        self.sialic_acid_graph = sialic_acid_graph.copy()

    def _check_single_update(self, matrix_key, relation, obj):
        """Apply one A->B edge and assert only the (A, B) cell becomes 1.

        Builds a fresh zero matrix under *matrix_key*, runs
        update_spia_matrices for a protein subject 'A' against *obj*
        (named 'B') with *relation*, and verifies every other cell is 0.
        """
        sub = protein(namespace='HGNC', name='A', identifier='1')
        labels = {'A', 'B'}
        matrices = {matrix_key: DataFrame(0, index=labels, columns=labels)}
        update_spia_matrices(matrices, sub, obj, {'relation': relation})
        updated = matrices[matrix_key]
        self.assertEqual(1, updated['A']['B'])
        self.assertEqual(0, updated['A']['A'])
        self.assertEqual(0, updated['B']['A'])
        self.assertEqual(0, updated['B']['B'])

    def test_build_matrix(self):
        """Test building the empty matrices for the example graph."""
        node_names = get_matrix_index(self.sialic_acid_graph)
        matrix_dict = build_spia_matrices(node_names)
        expected = {'PTPN11', 'TREM2', 'PTPN6', 'TYROBP', 'CD33', 'SYK'}
        self.assertEqual(expected, set(matrix_dict["activation"].columns))
        self.assertEqual(expected, set(matrix_dict["repression"].index))

    def test_update_matrix_inhibition_ubiquination(self):
        """Test updating the matrix with an inhibition ubiquitination."""
        labels = {'A', 'B'}
        # Sanity check: a freshly constructed matrix starts out all zero.
        self.assertEqual(DataFrame(0, index=labels, columns=labels).values.all(), 0)
        target = protein(namespace='HGNC', name='B', identifier='2', variants=[pmod('Ub')])
        self._check_single_update("inhibition_ubiquination", 'decreases', target)

    def test_update_matrix_activation_ubiquination(self):
        """Test updating the matrix with an activation ubiquitination."""
        target = protein(namespace='HGNC', name='B', identifier='2', variants=[pmod('Ub')])
        self._check_single_update("activation_ubiquination", 'increases', target)

    def test_update_matrix_inhibition_phosphorylation(self):
        """Test updating the matrix with an inhibition phosphorylation."""
        target = protein(namespace='HGNC', name='B', identifier='2', variants=[pmod('Ph')])
        self._check_single_update("inhibition_phosphorylation", 'decreases', target)

    def test_update_matrix_activation_phosphorylation(self):
        """Test updating the matrix with an activation phosphorylation."""
        target = protein(namespace='HGNC', name='B', identifier='2', variants=[pmod('Ph')])
        self._check_single_update("activation_phosphorylation", 'increases', target)

    def test_update_matrix_expression(self):
        """Test updating the matrix with RNA expression."""
        target = rna(namespace='HGNC', name='B', identifier='2')
        self._check_single_update("expression", 'increases', target)

    def test_update_matrix_repression(self):
        """Test updating the matrix with RNA repression."""
        target = rna(namespace='HGNC', name='B', identifier='2')
        self._check_single_update("repression", 'decreases', target)

    def test_update_matrix_activation(self):
        """Test updating the matrix with activation."""
        target = protein(namespace='HGNC', name='B', identifier='2')
        self._check_single_update("activation", 'increases', target)

    def test_update_matrix_inhibition(self):
        """Test updating the matrix with inhibition."""
        target = protein(namespace='HGNC', name='B', identifier='2')
        self._check_single_update("inhibition", 'decreases', target)

    def test_update_matrix_association(self):
        """Test updating the matrix with association."""
        target = protein(namespace='HGNC', name='B', identifier='2')
        self._check_single_update("binding_association", 'association', target)

    def test_update_matrix_pmods(self):
        """Test updating the matrix with multiple protein modifications."""
        sub = protein(namespace='HGNC', name='A', identifier='1')
        obj = protein(namespace='HGNC', name='B', identifier='2', variants=[pmod('Ub'), pmod('Ph')])
        labels = {'A', 'B'}
        shared = DataFrame(0, index=labels, columns=labels)
        # Both keys deliberately alias the same DataFrame, matching the
        # original scenario where one matrix backs both modification types.
        matrices = {
            "activation_ubiquination": shared,
            "activation_phosphorylation": shared,
        }
        update_spia_matrices(matrices, sub, obj, {'relation': 'increases'})
        for key in ("activation_ubiquination", "activation_phosphorylation"):
            self.assertEqual(1, matrices[key]['A']['B'])
            self.assertEqual(0, matrices[key]['A']['A'])
            self.assertEqual(0, matrices[key]['B']['A'])
            self.assertEqual(0, matrices[key]['B']['B'])

    def test_spia_matrix_complexes(self):
        """Test handling of complexes."""
        self.sialic_acid_graph.add_increases(
            sialic_acid_cd33_complex,
            trem2,
            citation=citation,
            annotations={'Species': '9606', 'Confidence': 'High'},
            evidence=evidence_1,
            target_modifier=activity(),
        )
        dfs = to_spia_dfs(self.sialic_acid_graph)
        self.assertEqual(1, dfs["activation"][cd33.name][trem2.name])

    def test_spia_matrix_composites(self):
        """Test handling of composites."""
        shp = composite_abundance([shp1, shp2])
        self.sialic_acid_graph.add_increases(
            shp,
            trem2,
            citation=citation,
            annotations={'Species': '9606', 'Confidence': 'High'},
            evidence=evidence_1,
            target_modifier=activity(),
        )
        dfs = to_spia_dfs(self.sialic_acid_graph)
        self.assertEqual(1, dfs["activation"][shp1.name][trem2.name])
        self.assertEqual(1, dfs["activation"][shp2.name][trem2.name])
|
mit
|
detrout/debian-statsmodels
|
statsmodels/graphics/tests/test_dotplot.py
|
1
|
14590
|
import numpy as np
from statsmodels.graphics.dotplots import dot_plot
import pandas as pd
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False

# matplotlib is optional here: record its availability so the test
# below can be skipped when the import fails.
try:
    import matplotlib.pyplot as plt
    import matplotlib
    have_matplotlib = True
except ImportError:
    have_matplotlib = False
def close_or_save(pdf, fig):
    """Append *fig* to *pdf* when PDF output is enabled, else close it."""
    if not pdf_output:
        plt.close(fig)
    else:
        pdf.savefig(fig)
@dec.skipif(not have_matplotlib)
def test_all():
    """Smoke-test dot_plot across its option combinations.

    Each section builds one figure; close_or_save() either appends it to a
    multi-page PDF (when the module flag ``pdf_output`` is set) or closes it
    immediately, so the test only verifies that plotting raises no errors.
    """
    if pdf_output:
        from matplotlib.backends.backend_pdf import PdfPages
        pdf = PdfPages("test_dotplot.pdf")
    else:
        pdf = None

    # Basic dotplot with points only
    plt.clf()
    points = range(20)
    ax = plt.axes()
    fig = dot_plot(points, ax=ax)
    ax.set_title("Basic horizontal dotplot")
    close_or_save(pdf, fig)

    # Basic vertical dotplot
    plt.clf()
    points = range(20)
    ax = plt.axes()
    fig = dot_plot(points, ax=ax, horizontal=False)
    ax.set_title("Basic vertical dotplot")
    close_or_save(pdf, fig)

    # Tall and skinny
    plt.figure(figsize=(4,12))
    ax = plt.axes()
    vals = np.arange(40)
    # BUGFIX: previously plotted the stale ``points`` (length 20) and left
    # ``vals`` unused; the 40-point array was clearly the intended data.
    fig = dot_plot(vals, ax=ax)
    ax.set_title("Tall and skinny dotplot")
    ax.set_xlabel("x axis label")
    close_or_save(pdf, fig)

    # Short and wide
    plt.figure(figsize=(12,4))
    ax = plt.axes()
    vals = np.arange(40)
    # BUGFIX: likewise plot ``vals`` rather than the leftover ``points``.
    fig = dot_plot(vals, ax=ax, horizontal=False)
    ax.set_title("Short and wide dotplot")
    ax.set_ylabel("y axis label")
    close_or_save(pdf, fig)

    # Tall and skinny striped dotplot
    plt.figure(figsize=(4,12))
    ax = plt.axes()
    points = np.arange(40)
    fig = dot_plot(points, ax=ax, striped=True)
    ax.set_title("Tall and skinny striped dotplot")
    ax.set_xlim(-10, 50)
    close_or_save(pdf, fig)

    # Short and wide striped
    plt.figure(figsize=(12,4))
    ax = plt.axes()
    points = np.arange(40)
    fig = dot_plot(points, ax=ax, striped=True, horizontal=False)
    ax.set_title("Short and wide striped dotplot")
    ax.set_ylim(-10, 50)
    close_or_save(pdf, fig)

    # Basic dotplot with few points
    plt.figure()
    ax = plt.axes()
    points = np.arange(4)
    fig = dot_plot(points, ax=ax)
    ax.set_title("Basic horizontal dotplot with few lines")
    close_or_save(pdf, fig)

    # Basic dotplot with few points
    plt.figure()
    ax = plt.axes()
    points = np.arange(4)
    fig = dot_plot(points, ax=ax, horizontal=False)
    ax.set_title("Basic vertical dotplot with few lines")
    close_or_save(pdf, fig)

    # Manually set the x axis limits
    plt.figure()
    ax = plt.axes()
    points = np.arange(20)
    fig = dot_plot(points, ax=ax)
    ax.set_xlim(-10, 30)
    ax.set_title("Dotplot with adjusted horizontal range")
    close_or_save(pdf, fig)

    # Left row labels
    plt.clf()
    ax = plt.axes()
    lines = ["ABCDEFGH"[np.random.randint(0, 8)] for k in range(20)]
    points = np.random.normal(size=20)
    fig = dot_plot(points, lines=lines, ax=ax)
    ax.set_title("Dotplot with user-supplied labels in the left margin")
    close_or_save(pdf, fig)

    # Left and right row labels
    plt.clf()
    ax = plt.axes()
    points = np.random.normal(size=20)
    lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
             for k in range(20)]
    fig = dot_plot(points, lines=lines, ax=ax, split_names="::")
    ax.set_title("Dotplot with user-supplied labels in both margins")
    close_or_save(pdf, fig)

    # Both sides row labels
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.88, 0.8])
    points = np.random.normal(size=20)
    lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
             for k in range(20)]
    fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
                   horizontal=False)
    txt = ax.set_title("Vertical dotplot with user-supplied labels in both margins")
    txt.set_position((0.5, 1.06))
    close_or_save(pdf, fig)

    # Custom colors and symbols
    plt.clf()
    ax = plt.axes([0.1, 0.07, 0.78, 0.85])
    points = np.random.normal(size=20)
    lines = np.kron(range(5), np.ones(4)).astype(np.int32)
    styles = np.kron(np.ones(5), range(4)).astype(np.int32)
    #marker_props = {k: {"color": "rgbc"[k], "marker": "osvp"[k],
    #                    "ms": 7, "alpha": 0.6} for k in range(4)}
    # python 2.6 compat, can be removed later
    marker_props = dict((k, {"color": "rgbc"[k], "marker": "osvp"[k],
                             "ms": 7, "alpha": 0.6}) for k in range(4))
    fig = dot_plot(points, lines=lines, styles=styles, ax=ax,
                   marker_props=marker_props)
    ax.set_title("Dotplot with custom colors and symbols")
    close_or_save(pdf, fig)

    # Basic dotplot with symmetric intervals
    plt.clf()
    ax = plt.axes()
    points = range(20)
    fig = dot_plot(points, intervals=np.ones(20), ax=ax)
    ax.set_title("Dotplot with symmetric intervals")
    close_or_save(pdf, fig)

    # Basic dotplot with symmetric intervals, pandas inputs.
    plt.clf()
    ax = plt.axes()
    points = pd.Series(range(20))
    intervals = pd.Series(np.ones(20))
    fig = dot_plot(points, intervals=intervals, ax=ax)
    ax.set_title("Dotplot with symmetric intervals (Pandas inputs)")
    close_or_save(pdf, fig)

    # Basic dotplot with nonsymmetric intervals
    plt.clf()
    ax = plt.axes()
    points = np.arange(20)
    intervals = [(1, 3) for i in range(20)]
    fig = dot_plot(points, intervals=intervals, ax=ax)
    ax.set_title("Dotplot with nonsymmetric intervals")
    close_or_save(pdf, fig)

    # Vertical dotplot with nonsymmetric intervals
    plt.clf()
    ax = plt.axes()
    points = np.arange(20)
    intervals = [(1, 3) for i in range(20)]
    fig = dot_plot(points, intervals=intervals, ax=ax, horizontal=False)
    ax.set_title("Vertical dotplot with nonsymmetric intervals")
    close_or_save(pdf, fig)

    # Dotplot with nonsymmetric intervals, adjust line properties
    plt.clf()
    ax = plt.axes()
    points = np.arange(20)
    intervals = [(1, 3) for x in range(20)]
    line_props = {0: {"color": "lightgrey",
                      "solid_capstyle": "round"}}
    fig = dot_plot(points, intervals=intervals, line_props=line_props, ax=ax)
    ax.set_title("Dotplot with custom line properties")
    close_or_save(pdf, fig)

    # Dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with two points per line")
    close_or_save(pdf, fig)

    # Dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    fig = dot_plot(points, intervals=intervals, lines=lines,
                   styles=styles, ax=ax, stacked=True,
                   styles_order=["Dog", "Cat"])
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with two points per line (reverse order)")
    close_or_save(pdf, fig)

    # Vertical dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, horizontal=False)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Vertical dotplot with two points per line")
    close_or_save(pdf, fig)

    # Vertical dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    styles_order = ["Dog", "Cat"]
    fig = dot_plot(points, intervals=intervals, lines=lines,
                   styles=styles, ax=ax, stacked=True,
                   horizontal=False, styles_order=styles_order)
    handles, labels = ax.get_legend_handles_labels()
    lh = dict(zip(labels, handles))
    handles = [lh[l] for l in styles_order]
    leg = plt.figlegend(handles, styles_order, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Vertical dotplot with two points per line (reverse order)")
    close_or_save(pdf, fig)

    # Vertical dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, striped=True, horizontal=False)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    plt.ylim(-20, 20)
    ax.set_title("Vertical dotplot with two points per line")
    close_or_save(pdf, fig)

    # Dotplot with color-matched points and intervals
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    marker_props = {"Cat": {"color": "orange"},
                    "Dog": {"color": "purple"}}
    line_props = {"Cat": {"color": "orange"},
                  "Dog": {"color": "purple"}}
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, marker_props=marker_props,
                   line_props=line_props)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with color-matched points and intervals")
    close_or_save(pdf, fig)

    # Dotplot with color-matched points and intervals
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    marker_props = {"Cat": {"color": "orange"},
                    "Dog": {"color": "purple"}}
    line_props = {"Cat": {"color": "orange"},
                  "Dog": {"color": "purple"}}
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, marker_props=marker_props,
                   line_props=line_props, horizontal=False)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with color-matched points and intervals")
    close_or_save(pdf, fig)

    # Dotplot with sections
    plt.clf()
    ax = plt.axes()
    points = range(30)
    lines = np.kron(range(15), (1,1)).astype(np.int32)
    styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
    sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
    sections = [["Axx", "Byy", "Czz"][k] for k in sections]
    fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax)
    ax.set_title("Dotplot with sections")
    close_or_save(pdf, fig)

    # Vertical dotplot with sections
    plt.clf()
    ax = plt.axes([0.1,0.1,0.9,0.75])
    points = range(30)
    lines = np.kron(range(15), (1,1)).astype(np.int32)
    styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
    sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
    sections = [["Axx", "Byy", "Czz"][k] for k in sections]
    fig = dot_plot(points, lines=lines, styles=styles,
                   sections=sections, ax=ax, horizontal=False)
    txt = ax.set_title("Vertical dotplot with sections")
    txt.set_position((0.5, 1.08))
    close_or_save(pdf, fig)

    # Reorder sections
    plt.clf()
    ax = plt.axes()
    points = range(30)
    lines = np.kron(range(15), (1,1)).astype(np.int32)
    styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
    sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
    sections = [["Axx", "Byy", "Czz"][k] for k in sections]
    fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax,
                   section_order=["Byy", "Axx", "Czz"])
    ax.set_title("Dotplot with sections in specified order")
    close_or_save(pdf, fig)

    # Reorder the lines.
    plt.figure()
    ax = plt.axes()
    points = np.arange(4)
    lines = ["A", "B", "C", "D"]
    line_order = ["B", "C", "A", "D"]
    fig = dot_plot(points, lines=lines, line_order=line_order, ax=ax)
    ax.set_title("Dotplot with reordered lines")
    close_or_save(pdf, fig)

    # Dotplot with different numbers of points per line
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = []
    ii = 0
    while len(lines) < 40:
        for k in range(np.random.randint(1, 4)):
            lines.append(ii)
        ii += 1
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, lines=lines, styles=styles,
                   ax=ax, stacked=True)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with different numbers of points per line")
    close_or_save(pdf, fig)

    if pdf_output:
        pdf.close()
|
bsd-3-clause
|
ilyes14/scikit-learn
|
examples/mixture/plot_gmm_classifier.py
|
250
|
3918
|
"""
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
    """Draw one tinted covariance ellipse per mixture component on *ax*.

    For each of the first three components of *gmm*, an ellipse is centred
    on the component mean and oriented/sized by the eigendecomposition of
    the leading 2x2 block of its covariance matrix.
    """
    for idx, colour in zip(range(3), 'rgb'):
        cov2d = gmm._get_covars()[idx][:2, :2]
        eigvals, eigvecs = np.linalg.eigh(cov2d)
        leading = eigvecs[0] / np.linalg.norm(eigvecs[0])
        # orientation of the leading eigenvector, in degrees
        theta = np.degrees(np.arctan2(leading[1], leading[0]))
        width, height = 9 * eigvals
        ellipse = mpl.patches.Ellipse(gmm.means_[idx, :2], width, height,
                                      180 + theta, color=colour)
        ellipse.set_clip_box(ax.bbox)
        ellipse.set_alpha(0.5)
        ax.add_artist(ellipse)
iris = datasets.load_iris()

# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets; only the first stratified fold is used.
skf = StratifiedKFold(iris.target, n_folds=4)
train_index, test_index = next(iter(skf))

X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]

n_classes = len(np.unique(y_train))

# Try GMMs using different types of covariances; 'wc' initializes only
# weights and covariances so the supervised means below are kept.
classifiers = dict((covar_type, GMM(n_components=n_classes,
                    covariance_type=covar_type, init_params='wc', n_iter=20))
                   for covar_type in ['spherical', 'diag', 'tied', 'full'])

n_classifiers = len(classifiers)

plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
                    left=.01, right=.99)

for index, (name, classifier) in enumerate(classifiers.items()):
    # Since we have class labels for the training data, we can
    # initialize the GMM parameters in a supervised manner.
    classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
                                  for i in xrange(n_classes)])
    # Train the other parameters using the EM algorithm.
    classifier.fit(X_train)

    # BUG FIX: subplot positions must be integers; plain "/" produces a
    # float on Python 3 (this file otherwise targets py3 via print()).
    h = plt.subplot(2, n_classifiers // 2, index + 1)
    make_ellipses(classifier, h)

    # Training points as dots, one color per class.
    for n, color in enumerate('rgb'):
        data = iris.data[iris.target == n]
        plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
                    label=iris.target_names[n])
    # Plot the test data with crosses.
    for n, color in enumerate('rgb'):
        data = X_test[y_test == n]
        plt.plot(data[:, 0], data[:, 1], 'x', color=color)

    y_train_pred = classifier.predict(X_train)
    train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
    plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
             transform=h.transAxes)

    y_test_pred = classifier.predict(X_test)
    test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
    plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
             transform=h.transAxes)

    plt.xticks(())
    plt.yticks(())
    plt.title(name)

plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
trenton3983/Data_Science_from_Scratch
|
code-python3/natural_language_processing.py
|
12
|
10000
|
import math, random, re
from collections import defaultdict, Counter
from bs4 import BeautifulSoup
import requests
def plot_resumes(plt):
    """Scatter resume/job-posting buzzwords as sized text labels.

    *plt* is a matplotlib.pyplot-like object. Each word is drawn at
    (popularity on job postings, popularity on resumes) with a font size
    that grows with combined popularity.
    """
    buzzwords = [("big data", 100, 15), ("Hadoop", 95, 25), ("Python", 75, 50),
                 ("R", 50, 40), ("machine learning", 80, 20), ("statistics", 20, 60),
                 ("data science", 60, 70), ("analytics", 90, 3),
                 ("team player", 85, 85), ("dynamic", 2, 90), ("synergies", 70, 0),
                 ("actionable insights", 40, 30), ("think out of the box", 45, 10),
                 ("self-starter", 30, 50), ("customer focus", 65, 15),
                 ("thought leadership", 35, 35)]

    def font_size(total):
        # linear scale: 8 when total == 0, 28 when total == 200
        return 8 + total / 200 * 20

    for word, job_pop, resume_pop in buzzwords:
        plt.text(job_pop, resume_pop, word,
                 ha='center', va='center',
                 size=font_size(job_pop + resume_pop))
    plt.xlabel("Popularity on Job Postings")
    plt.ylabel("Popularity on Resumes")
    plt.axis([0, 100, 0, 100])
    plt.show()
#
# n-gram models
#
def fix_unicode(text):
    """Replace the right single quotation mark (U+2019) with an ASCII apostrophe."""
    return text.replace("\u2019", "'")
def get_document():
    """Download the O'Reilly "What is data science?" article and tokenize it.

    Returns a flat list of tokens, where each token is a word or a period.
    """
    url = "http://radar.oreilly.com/2010/06/what-is-data-science.html"
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'html5lib')
    content = soup.find("div", "article-body")  # the article-body div
    regex = r"[\w']+|[\.]"                      # matches a word or a period
    per_paragraph = (re.findall(regex, fix_unicode(p.text))
                     for p in content("p"))
    return [token for tokens in per_paragraph for token in tokens]
def generate_using_bigrams(transitions):
    """Random-walk the bigram chain until a period ends the sentence.

    *transitions* maps each word to the list of words observed to follow it;
    '.' marks a sentence boundary and is the starting state. Returns the
    generated sentence as a single space-joined string.
    """
    sentence = []
    word = "."
    while True:
        word = random.choice(transitions[word])
        sentence.append(word)
        if word == ".":
            return " ".join(sentence)
def generate_using_trigrams(starts, trigram_transitions):
    """Generate a sentence from a trigram model.

    *starts* lists words observed to begin a sentence; *trigram_transitions*
    maps a (previous, current) pair to the candidate next words. Generation
    stops once a period is produced.
    """
    current = random.choice(starts)
    prev = "."                 # every start word is preceded by a period
    sentence = [current]
    while True:
        candidates = trigram_transitions[(prev, current)]
        prev, current = current, random.choice(candidates)
        sentence.append(current)
        if current == ".":
            return " ".join(sentence)
def is_terminal(token):
    """Grammar tokens starting with '_' are non-terminals; all others are terminal."""
    return token[0] != "_"
def expand(grammar, tokens):
    """Recursively rewrite the first non-terminal in *tokens* using *grammar*.

    A production is chosen uniformly at random for the first non-terminal
    found; recursion stops once every token is terminal.
    """
    for position, token in enumerate(tokens):
        if is_terminal(token):
            continue  # terminals are left in place
        production = random.choice(grammar[token])
        if is_terminal(production):
            tokens[position] = production
        else:
            # splice the production's tokens in place of the non-terminal
            tokens = tokens[:position] + production.split() + tokens[position + 1:]
        return expand(grammar, tokens)
    # no non-terminal remained: fully expanded
    return tokens
def generate_sentence(grammar):
    """Expand the start symbol "_S" with *grammar* into a random sentence (token list)."""
    return expand(grammar, ["_S"])
#
# Gibbs Sampling
#
def roll_a_die():
    """Return a uniformly random integer from 1 to 6, like a single die roll."""
    return random.randint(1, 6)
def direct_sample():
    """Directly sample the pair (first die, sum of both dice)."""
    first = roll_a_die()
    second = roll_a_die()
    return first, first + second
def random_y_given_x(x):
    """Conditional sample of the total y given the first die x.

    y is equally likely to be x + 1, x + 2, ..., x + 6.
    """
    return x + roll_a_die()
def random_x_given_y(y):
    """Conditional sample of the first die x given the total y."""
    if y <= 7:
        # total of 7 or less: x is uniform over 1 .. y-1
        lo, hi = 1, y
    else:
        # total of 8 or more: x is uniform over y-6 .. 6
        lo, hi = y - 6, 7
    return random.randrange(lo, hi)
def gibbs_sample(num_iters=100):
    """Gibbs-sample (first die, total) by alternating the two conditionals.

    The starting state is arbitrary; the chain forgets it after mixing.
    """
    state = (1, 2)
    for _ in range(num_iters):
        x = random_x_given_y(state[1])
        state = (x, random_y_given_x(x))
    return state
def compare_distributions(num_samples=1000):
    """Tally Gibbs samples against direct samples of (die1, total).

    Returns a dict mapping each (x, y) pair to [gibbs_count, direct_count].
    """
    counts = defaultdict(lambda: [0, 0])
    for _ in range(num_samples):
        gibbs_pt = gibbs_sample()
        direct_pt = direct_sample()
        counts[gibbs_pt][0] += 1
        counts[direct_pt][1] += 1
    return counts
#
# TOPIC MODELING
#
def sample_from(weights):
    """Draw a random index, where index i has probability weights[i] / sum(weights)."""
    remaining = sum(weights) * random.random()  # uniform draw from [0, total)
    for index, weight in enumerate(weights):
        remaining -= weight
        # first index whose cumulative weight reaches the draw
        if remaining <= 0:
            return index
# Toy corpus: each "document" is one user's list of interests.
documents = [
    ["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
    ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
    ["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
    ["R", "Python", "statistics", "regression", "probability"],
    ["machine learning", "regression", "decision trees", "libsvm"],
    ["Python", "R", "Java", "C++", "Haskell", "programming languages"],
    ["statistics", "probability", "mathematics", "theory"],
    ["machine learning", "scikit-learn", "Mahout", "neural networks"],
    ["neural networks", "deep learning", "Big Data", "artificial intelligence"],
    ["Hadoop", "Java", "MapReduce", "Big Data"],
    ["statistics", "R", "statsmodels"],
    ["C++", "deep learning", "artificial intelligence", "probability"],
    ["pandas", "R", "Python"],
    ["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
    ["libsvm", "regression", "support vector machines"]
]
# number of latent topics to infer
K = 4
# document_topic_counts[d][t]: words in document d currently assigned to topic t
document_topic_counts = [Counter()
                         for _ in documents]
# topic_word_counts[t][w]: how often word w is assigned to topic t
topic_word_counts = [Counter() for _ in range(K)]
# topic_counts[t]: total words assigned to topic t across all documents
topic_counts = [0 for _ in range(K)]
# document_lengths[d]: words in document d (decremented/restored during sampling)
document_lengths = [len(d) for d in documents]
distinct_words = set(word for document in documents for word in document)
W = len(distinct_words)   # vocabulary size
D = len(documents)        # number of documents
def p_topic_given_document(topic, d, alpha=0.1):
    """Smoothed fraction of words in document *d* assigned to *topic*.

    *alpha* adds pseudo-counts so no topic ever has probability zero.
    """
    assigned = document_topic_counts[d][topic]
    return (assigned + alpha) / (document_lengths[d] + K * alpha)
def p_word_given_topic(word, topic, beta=0.1):
    """Smoothed fraction of words assigned to *topic* that equal *word*.

    *beta* adds pseudo-counts so no word ever has probability zero.
    """
    occurrences = topic_word_counts[topic][word]
    return (occurrences + beta) / (topic_counts[topic] + W * beta)
def topic_weight(d, word, k):
    """Unnormalized sampling weight of topic *k* for *word* in document *d*."""
    likelihood = p_word_given_topic(word, k)
    prior = p_topic_given_document(k, d)
    return likelihood * prior
def choose_new_topic(d, word):
    """Resample a topic for *word* in document *d* from the current weights."""
    weights = [topic_weight(d, word, k) for k in range(K)]
    return sample_from(weights)
random.seed(0)
# randomly assign an initial topic to every word of every document
document_topics = [[random.randrange(K) for word in document]
                   for document in documents]
# seed all count tables from the random initial assignment
for d in range(D):
    for word, topic in zip(documents[d], document_topics[d]):
        document_topic_counts[d][topic] += 1
        topic_word_counts[topic][word] += 1
        topic_counts[topic] += 1
# collapsed Gibbs sampling: repeatedly resample each word's topic
for iter in range(1000):
    for d in range(D):
        for i, (word, topic) in enumerate(zip(documents[d],
                                              document_topics[d])):
            # remove this word / topic from the counts
            # so that it doesn't influence the weights
            document_topic_counts[d][topic] -= 1
            topic_word_counts[topic][word] -= 1
            topic_counts[topic] -= 1
            document_lengths[d] -= 1
            # choose a new topic based on the weights
            new_topic = choose_new_topic(d, word)
            document_topics[d][i] = new_topic
            # and now add it back to the counts
            document_topic_counts[d][new_topic] += 1
            topic_word_counts[new_topic][word] += 1
            topic_counts[new_topic] += 1
            document_lengths[d] += 1
if __name__ == "__main__":
    # Build bigram transitions from consecutive word pairs in the article.
    document = get_document()
    bigrams = list(zip(document, document[1:]))
    transitions = defaultdict(list)
    for prev, current in bigrams:
        transitions[prev].append(current)
    random.seed(0)
    print("bigram sentences")
    for i in range(10):
        print(i, generate_using_bigrams(transitions))
    print()
    # trigrams: remember sentence-start words separately
    trigrams = list(zip(document, document[1:], document[2:]))
    trigram_transitions = defaultdict(list)
    starts = []
    for prev, current, next in trigrams:
        if prev == ".":              # if the previous "word" was a period
            starts.append(current)   # then this is a start word
        trigram_transitions[(prev, current)].append(next)
    print("trigram sentences")
    for i in range(10):
        print(i, generate_using_trigrams(starts, trigram_transitions))
    print()
    # small context-free grammar for sentence generation
    grammar = {
        "_S" : ["_NP _VP"],
        "_NP" : ["_N",
                 "_A _NP _P _A _N"],
        "_VP" : ["_V",
                 "_V _NP"],
        "_N" : ["data science", "Python", "regression"],
        "_A" : ["big", "linear", "logistic"],
        "_P" : ["about", "near"],
        "_V" : ["learns", "trains", "tests", "is"]
    }
    print("grammar sentences")
    for i in range(10):
        print(i, " ".join(generate_sentence(grammar)))
    print()
    # compare Gibbs-sampled vs directly-sampled dice distributions
    print("gibbs sampling")
    comparison = compare_distributions()
    for roll, (gibbs, direct) in comparison.items():
        print(roll, gibbs, direct)
    # topic MODELING: show word counts per topic, then topics per document
    for k, word_counts in enumerate(topic_word_counts):
        for word, count in word_counts.most_common():
            if count > 0: print(k, word, count)
    topic_names = ["Big Data and programming languages",
                   "databases",
                   "machine learning",
                   "statistics"]
    for document, topic_counts in zip(documents, document_topic_counts):
        print(document)
        for topic, count in topic_counts.most_common():
            if count > 0:
                print(topic_names[topic], count)
        print()
|
unlicense
|
ibis-project/ibis
|
ibis/backends/impala/tests/test_partition.py
|
1
|
8063
|
from posixpath import join as pjoin
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.util as util
from ibis.backends.impala.compat import ImpylaError
from ibis.tests.util import assert_equal
pytestmark = pytest.mark.impala
@pytest.fixture
def df():
    """A 60-row frame spanning six (year, month) partitions repeated ten
    times, with a unique integer 'id' column."""
    base = pd.DataFrame(
        {
            'year': [2009] * 3 + [2010] * 3,
            'month': [str(m) for m in [1, 2, 3] * 2],
            'value': list(range(1, 7)),
        },
        index=list(range(6)),
    )
    repeated = pd.concat([base] * 10, ignore_index=True)
    repeated['id'] = repeated.index.values
    return repeated
@pytest.fixture
def unpart_t(con, df, tmp_db):
    # Materialize the test frame as an unpartitioned Impala table with a
    # unique name, yield it, then drop it at teardown (asserting it still
    # exists so tests can't silently drop it themselves).
    pd_name = '__ibis_test_partition_{}'.format(util.guid())
    con.create_table(pd_name, df, database=tmp_db)
    try:
        yield con.table(pd_name, database=tmp_db)
    finally:
        assert con.exists_table(pd_name, database=tmp_db), pd_name
        con.drop_table(pd_name, database=tmp_db)
def test_is_partitioned(con, temp_table):
    """A table created with partition columns reports is_partitioned."""
    part_schema = ibis.schema(
        [('foo', 'string'), ('year', 'int32'), ('month', 'string')]
    )
    con.create_table(temp_table, schema=part_schema,
                     partition=['year', 'month'])
    assert con.table(temp_table).is_partitioned
def test_create_table_with_partition_column(con, temp_table_db):
    # Partition columns may be declared inside the main schema; Impala
    # moves them to the end of the physical table schema.
    schema = ibis.schema(
        [
            ('year', 'int32'),
            ('month', 'string'),
            ('day', 'int8'),
            ('value', 'double'),
        ]
    )
    tmp_db, name = temp_table_db
    con.create_table(
        name, schema=schema, database=tmp_db, partition=['year', 'month']
    )
    # the partition column get put at the end of the table
    ex_schema = ibis.schema(
        [
            ('day', 'int8'),
            ('value', 'double'),
            ('year', 'int32'),
            ('month', 'string'),
        ]
    )
    table_schema = con.get_schema(name, database=tmp_db)
    assert_equal(table_schema, ex_schema)
    # the partition sub-schema is also exposed separately
    partition_schema = con.database(tmp_db).table(name).partition_schema()
    expected = ibis.schema([('year', 'int32'), ('month', 'string')])
    assert_equal(partition_schema, expected)
def test_create_partitioned_separate_schema(con, temp_table):
    # Partition columns may also be passed as a separate schema object.
    schema = ibis.schema([('day', 'int8'), ('value', 'double')])
    part_schema = ibis.schema([('year', 'int32'), ('month', 'string')])
    name = temp_table
    con.create_table(name, schema=schema, partition=part_schema)
    # the partition column get put at the end of the table
    ex_schema = ibis.schema(
        [
            ('day', 'int8'),
            ('value', 'double'),
            ('year', 'int32'),
            ('month', 'string'),
        ]
    )
    table_schema = con.get_schema(name)
    assert_equal(table_schema, ex_schema)
    partition_schema = con.table(name).partition_schema()
    assert_equal(partition_schema, part_schema)
def test_unpartitioned_table_get_schema(con):
    """partition_schema() on a non-partitioned table raises ImpylaError."""
    with pytest.raises(ImpylaError):
        con.table('functional_alltypes').partition_schema()
def test_insert_select_partitioned_table(con, df, temp_table, unpart_t):
    # Insert each (year, month) slice of the unpartitioned source into the
    # matching partition of a new table, alternating both insert styles.
    part_keys = ['year', 'month']
    con.create_table(temp_table, schema=unpart_t.schema(), partition=part_keys)
    part_t = con.table(temp_table)
    unique_keys = df[part_keys].drop_duplicates()
    for i, (year, month) in enumerate(unique_keys.itertuples(index=False)):
        select_stmt = unpart_t[
            (unpart_t.year == year) & (unpart_t.month == month)
        ]
        # test both styles of insert
        if i:
            part = {'year': year, 'month': month}  # dict style
        else:
            part = [year, month]                   # list style
        part_t.insert(select_stmt, partition=part)
    # the partitioned table must round-trip the full source frame
    verify_partitioned_table(part_t, df, unique_keys)
def test_create_partitioned_table_from_expr(con, alltypes):
    """Creating a partitioned table directly from an expression preserves
    the expression's rows exactly."""
    t = alltypes
    expr = t[t.id <= 10][['id', 'double_col', 'month', 'year']]
    name = 'tmppart_{}'.format(util.guid())
    # The original wrapped create_table in a no-op ``except Exception:
    # raise`` — removed; the finally clause alone guarantees cleanup.
    try:
        # partition on the year column of the source expression
        con.create_table(name, expr, partition=[t.year])
        new = con.table(name)
        expected = expr.execute().sort_values('id').reset_index(drop=True)
        result = new.execute().sort_values('id').reset_index(drop=True)
        tm.assert_frame_equal(result, expected)
    finally:
        con.drop_table(name, force=True)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_insert_overwrite_partition():
    # Placeholder ("NYT" = not yet tested); expected to fail until written.
    assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_dynamic_partitioning():
    # Placeholder ("NYT" = not yet tested); expected to fail until written.
    assert False
def test_add_drop_partition_no_location(con, temp_table):
    # Add and drop a partition without specifying an explicit HDFS location.
    schema = ibis.schema(
        [('foo', 'string'), ('year', 'int32'), ('month', 'int16')]
    )
    name = temp_table
    con.create_table(name, schema=schema, partition=['year', 'month'])
    table = con.table(name)
    part = {'year': 2007, 'month': 4}
    table.add_partition(part)
    # partitions() includes a summary "total" line, hence 2 then 1
    assert len(table.partitions()) == 2
    table.drop_partition(part)
    assert len(table.partitions()) == 1
def test_add_drop_partition_owned_by_impala(hdfs, con, temp_table):
    # Same add/drop cycle, but the partition location lives in an HDFS
    # directory chowned to the 'impala' user first.
    schema = ibis.schema(
        [('foo', 'string'), ('year', 'int32'), ('month', 'int16')]
    )
    name = temp_table
    con.create_table(name, schema=schema, partition=['year', 'month'])
    table = con.table(name)
    part = {'year': 2007, 'month': 4}
    subdir = util.guid()
    basename = util.guid()
    path = '/tmp/{}/{}'.format(subdir, basename)
    hdfs.mkdir('/tmp/{}'.format(subdir))
    hdfs.chown('/tmp/{}'.format(subdir), owner='impala', group='supergroup')
    table.add_partition(part, location=path)
    # partitions() includes a summary "total" line, hence 2 then 1
    assert len(table.partitions()) == 2
    table.drop_partition(part)
    assert len(table.partitions()) == 1
def test_add_drop_partition_hive_bug(con, temp_table):
    # Add/drop with an explicit location that is never created on HDFS
    # (named after a Hive metadata bug; exact issue not documented here).
    schema = ibis.schema(
        [('foo', 'string'), ('year', 'int32'), ('month', 'int16')]
    )
    name = temp_table
    con.create_table(name, schema=schema, partition=['year', 'month'])
    table = con.table(name)
    part = {'year': 2007, 'month': 4}
    path = '/tmp/{}'.format(util.guid())
    table.add_partition(part, location=path)
    # partitions() includes a summary "total" line, hence 2 then 1
    assert len(table.partitions()) == 2
    table.drop_partition(part)
    assert len(table.partitions()) == 1
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_set_partition_location():
    # Placeholder ("NYT" = not yet tested); expected to fail until written.
    assert False
def test_load_data_partition(con, hdfs, tmp_dir, unpart_t, df, temp_table):
    # Write each partition's rows to HDFS as CSV, then LOAD DATA each file
    # into the matching partition (after altering it to text format).
    part_keys = ['year', 'month']
    con.create_table(temp_table, schema=unpart_t.schema(), partition=part_keys)
    part_t = con.table(temp_table)
    # trim the runtime of this test
    df = df[df.month == '1'].reset_index(drop=True)
    unique_keys = df[part_keys].drop_duplicates()
    hdfs_dir = pjoin(tmp_dir, 'load-data-partition')
    # partition key columns are not stored in the data files themselves
    df2 = df.drop(['year', 'month'], axis='columns')
    csv_props = {'serialization.format': ',', 'field.delim': ','}
    for i, (year, month) in enumerate(unique_keys.itertuples(index=False)):
        chunk = df2[(df.year == year) & (df.month == month)]
        chunk_path = pjoin(hdfs_dir, '{}.csv'.format(i))
        con.write_dataframe(chunk, chunk_path)
        # test both styles of insert
        if i:
            part = {'year': year, 'month': month}  # dict style
        else:
            part = [year, month]                   # list style
        part_t.add_partition(part)
        part_t.alter_partition(part, format='text', serde_properties=csv_props)
        part_t.load_data(chunk_path, partition=part)
    hdfs.rmdir(hdfs_dir)
    verify_partitioned_table(part_t, df, unique_keys)
def verify_partitioned_table(part_t, df, unique_keys):
    # The partitioned table must round-trip the source frame exactly
    # (modulo row order, so sort by the unique 'id' column first).
    result = (
        part_t.execute()
        .sort_values(by='id')
        .reset_index(drop=True)[df.columns]
    )
    tm.assert_frame_equal(result, df)
    parts = part_t.partitions()
    # allow for the total line
    assert len(parts) == len(unique_keys) + 1
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_drop_partition():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_repartition_automated():
assert False
|
apache-2.0
|
ctn-waterloo/nengo_theano
|
nengo_theano/test/test_learning.py
|
1
|
2191
|
"""This is a test file to test basic learning
"""
import math
import time
import numpy as np
import matplotlib.pyplot as plt
import nengo_theano as nef
neurons = 30 # number of neurons in all ensembles
start_time = time.time()
net = nef.Network('Learning Test')
import random
class TrainingInput(nef.SimpleNode):
    """Node that presents a randomly chosen input from a fixed set, plus the
    matching target response, switching to a new input every period_length s."""
    def init(self):
        # ten candidate inputs spaced 0.2 apart over [-1, 1)
        self.input_vals = np.arange(-1, 1, .2)
        # seconds to hold each input before choosing a new one
        self.period_length = 1
        # simulation time at which the next input will be chosen
        self.choose_time = 0.0
    def origin_ILinput(self):
        if (self.t >= self.choose_time):
            # choose an input randomly from the set
            self.index = random.randint(0,9)
            # specify the correct response for this input
            # (first five inputs map to 0.6, the rest to 0.2)
            if (self.index < 5): self.correct_response = [.6]
            else: self.correct_response = [0.2]
            # update the time to next change the input again
            self.choose_time = self.t + self.period_length
        return [self.input_vals[self.index]]
    def origin_goal(self):
        # NOTE(review): assumes origin_ILinput has run at least once first,
        # otherwise correct_response/index are unset — confirm nengo's
        # origin evaluation order.
        return self.correct_response
    def reset(self, randomize=False):
        # restart the input schedule along with the base node state
        self.choose_time = 0.0
        nef.SimpleNode.reset(self, randomize)
net.add(TrainingInput('SNinput'))
#net.make_input('in', values=0.8)
# A: input population; B: learned population with twice the neurons
net.make('A', neurons=neurons, dimensions=1)
net.make('B', neurons=2*neurons, dimensions=1)
# error population runs in direct mode (computed, not spiking)
net.make('error1', neurons=neurons, dimensions=1, mode='direct')
# learn the A->B connection, modulated by the error1 signal
net.learn(pre='A', post='B', error='error1', rate=5e-5, pstc=.005)
net.connect('SNinput:ILinput', 'A')
# error1 receives A minus B (weight -1 on the B connection)
net.connect('A', 'error1')
net.connect('B', 'error1', weight=-1)
t_final = 10       # simulated seconds
dt_step = 0.001    # probe sampling interval
pstc = 0.03        # post-synaptic filter time constant for probes
Ip = net.make_probe('SNinput:ILinput', dt_sample=dt_step, pstc=pstc)
Ap = net.make_probe('A', dt_sample=dt_step, pstc=pstc)
Bp = net.make_probe('B', dt_sample=dt_step, pstc=pstc)
E1p = net.make_probe('error1', dt_sample=dt_step, pstc=pstc)
build_time = time.time()
print "build time: ", build_time - start_time
net.run(t_final)
print "sim time: ", time.time() - build_time
# plot the probed signals over simulated time
plt.ioff(); plt.close()
t = np.linspace(0, t_final, len(Ap.get_data()))
plt.plot(t, Ap.get_data())
plt.plot(t, Bp.get_data())
plt.plot(t, E1p.get_data())
plt.legend(['A', 'B', 'error'])
plt.title('Normal learning')
plt.tight_layout()
plt.show()
|
mit
|
openbermuda/karmapi
|
karmapi/kpi.py
|
1
|
1843
|
"""
karmapi command line interface.
Build and get paths
Ask peers to build.
Get data from peers.
Get stats
Yosser, the builder
"""
import argparse
from datetime import date, datetime, timedelta
from karmapi import base, weather
from matplotlib import pyplot
def get_parser():
    """Build the karmapi command-line argument parser.

    Positional *path* entries are optional and may repeat; --field accepts
    any number of field names; -n is a dry-run flag (print paths only).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("path", nargs='*')
    parser.add_argument("--field", nargs='*', default=[])
    for option in ("--start", "--end", "--source"):
        parser.add_argument(option)
    parser.add_argument("-n", action="store_true")
    parser.add_argument("--karmapi", default='.')
    return parser
def main():
    """Entry point: expand the date/field arguments into karmapi data paths
    and render each day's weather data to a PNG (unless -n, which only
    prints the paths).
    """
    # BUG FIX: the original read ``parser.parse_args()`` but no ``parser``
    # name existed in this scope (NameError); build it via get_parser().
    args = get_parser().parse_args()

    path_template = '{source}/time/{date:%Y/%m/%d}/{field}'

    dates = []
    if args.start:
        start = date(*[int(x) for x in args.start.split('/')])
        dates.append(start)

        # --end is only meaningful together with --start (the original
        # crashed on an undefined ``start`` otherwise): walk day by day
        # from the day after start up to (but excluding) end.
        if args.end:
            end = date(*[int(x) for x in args.end.split('/')])
            aday = timedelta(days=1)
            day = start + aday
            while day < end:
                dates.append(day)
                day += aday

    parms = base.Parms()
    parms.source = args.source or 'euro'

    # explicit paths from the command line, plus one generated path per
    # (field, day) combination
    paths = args.path
    for field in args.field:
        parms.field = field
        for day in dates:
            parms.date = date(day.year, day.month, day.day)
            paths.append(path_template.format(**parms.__dict__))

    for path in paths:
        print(path)
        if args.n:
            continue  # dry run: list paths without building images

        # need meta data to config module properly
        meta = base.get_all_meta_data(path)
        raw = weather.RawWeather()
        raw.from_dict(meta)

        data = base.get(path)
        ndata = raw.day_to_numpy(data)
        # write the image next to the cwd, encoding the path in the name
        pyplot.imsave(path.replace('/', '-') + '.png', ndata)
if __name__ == '__main__':
main()
|
gpl-3.0
|
daler/metaseq
|
doc/example.py
|
3
|
3212
|
import numpy as np
import os
import metaseq
# Example BAM alignments on chr17; names suggest IP (Atf3) vs control
# (Rxlch) — presumably ChIP and input; verify against the dataset docs.
ip_filename = metaseq.helpers.example_filename(
    'wgEncodeHaibTfbsK562Atf3V0416101AlnRep1_chr17.bam')
input_filename = metaseq.helpers.example_filename(
    'wgEncodeHaibTfbsK562RxlchV0416101AlnRep1_chr17.bam')
ip_signal = metaseq.genomic_signal(ip_filename, 'bam')
input_signal = metaseq.genomic_signal(input_filename, 'bam')
# If you already have TSSs, skip this part.
import gffutils
db = gffutils.FeatureDB(
    metaseq.example_filename('Homo_sapiens.GRCh37.66_chr17.gtf.db'))
import pybedtools
from pybedtools.featurefuncs import TSS
from gffutils.helpers import asinterval
def tss_generator():
    """Yield a 2-kb interval (1 kb up- and downstream) around each transcript's TSS."""
    for transcript in db.features_of_type('transcript'):
        yield TSS(asinterval(transcript), upstream=1000, downstream=1000)
# Cache the TSS windows on disk so repeated runs skip regeneration.
if not os.path.exists('tsses.gtf'):
    tsses = pybedtools.BedTool(tss_generator()).saveas('tsses.gtf')
tsses = pybedtools.BedTool('tsses.gtf')
from metaseq import persistence
if not os.path.exists('example.npz'):
    # Bin each signal into 100 bins per TSS window, then normalize to
    # reads per million mapped reads; cache the arrays as example.npz.
    ip_array = ip_signal.array(tsses, bins=100, processes=8)
    input_array = input_signal.array(tsses, bins=100, processes=8)
    ip_array /= ip_signal.mapped_read_count() / 1e6
    input_array /= input_signal.mapped_read_count() / 1e6
    persistence.save_features_and_arrays(
        features=tsses,
        arrays={'ip': ip_array, 'input': input_array},
        prefix='example',
        link_features=True,
        overwrite=True)
features, arrays = persistence.load_features_and_arrays(prefix='example')
# IP minus input: the enrichment signal per TSS window
normalized = arrays['ip'] - arrays['input']
ind = metaseq.plotutils.tip_zscores(normalized)
# Heatmap of all windows sorted by TIP z-score, with an average-line panel.
fig = metaseq.plotutils.imshow(
    normalized,
    vmin=5,
    vmax=99.,
    percentile=True,
    sort_by=ind,
    imshow_kwargs=dict(interpolation='bilinear'),
    line_kwargs=dict(color='k'),
    fill_kwargs=dict(color='k', alpha=0.4),
    x=np.linspace(-1000, 1000, 100),
    height_ratios=(2, 1, 1)
)
fig.array_axes.xaxis.set_visible(False)
fig.array_axes.set_ylabel('Transcripts on chr17')
fig.array_axes.axvline(0, color='k', linestyle='--')
fig.line_axes.set_xlabel('Distance from TSS')
fig.line_axes.axvline(0, color='k', linestyle='--')
from matplotlib import pyplot as plt
import matplotlib
# Join expression data (FPKM) to the TSS features by transcript id.
d = metaseq.results_table.ResultsTable(
    metaseq.example_filename('GSM847566_SL2592.table'),
    import_kwargs=dict(index_col=0))
d = d.reindex_to(features, attribute='transcript_id')
import pandas
# Split transcripts into expression quartiles, one CI band per quartile.
labels = pandas.qcut(d.fpkm, 4).labels
ulabels = sorted(list(set(labels)))
colors = matplotlib.cm.YlOrBr((np.array(ulabels) + 2) / 5.)
bottom_axes = plt.subplot(fig.gs[2, 0])
for q, color in zip(ulabels, colors):
    ind = labels == q
    # NOTE(review): Python 2 print statement; this file predates Python 3.
    print q, color
    metaseq.plotutils.ci_plot(
        np.linspace(-1000, 1000, 100),
        normalized[ind, :],
        ax=bottom_axes,
        line_kwargs=dict(color=color, label=q),
        fill_kwargs=dict(color=color, alpha=0.5),
    )
fig.line_axes.xaxis.set_visible(False)
bottom_axes.set_xlabel('Distance from TSS')
bottom_axes.legend(loc='best', fontsize=10)
fig.array_axes.set_ylabel('Transcripts')
fig.cax.set_ylabel('Enrichment')
fig.subplots_adjust(left=0.2)
bottom_axes.set_ylabel('Enrichment')
fig.line_axes.set_ylabel('Enrichment')
plt.show()
|
mit
|
projectcuracao/projectcuracao
|
graphprep/solarwindgraph.py
|
1
|
3077
|
# solar wind graph generation
# filename: solarwindgraph.py
# Version 1.3 09/12/13
#
# contains event routines for data collection
#
#
import sys
import time
import RPi.GPIO as GPIO
import gc
import datetime
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib import dates
import pylab
import MySQLdb as mdb
sys.path.append('/home/pi/ProjectCuracao/main/config')
# if conflocal.py is not found, import default conf.py
# Check for user imports
try:
import conflocal as conf
except ImportError:
import conf
def solarwindgraph(source,days,delay):
    # Render the solar/wind voltage graph PNG for the RasPiConnect dashboard.
    # source: caller tag (printed only); days: span of data to plot;
    # delay: seconds to sleep before starting (staggers scheduled jobs).
    print("solarwindgraph source:%s days:%s delay:%i" % (source,days,delay))
    print("sleeping :",delay)
    time.sleep(delay)
    print("solarwindgraph running now")
    # blink GPIO LED when it's run
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(22, GPIO.OUT)
    GPIO.output(22, False)
    time.sleep(0.5)
    GPIO.output(22, True)
    # now we have get the data, stuff it in the graph
    try:
        print("trying database")
        db = mdb.connect('localhost', 'root', conf.databasePassword, 'ProjectCuracao');
        cursor = db.cursor()
        query = "SELECT TimeStamp, RegulatedWindVoltage, UnregulatedWindVoltage, SolarWind FROM batterywatchdogdata where now() - interval %i hour < TimeStamp" % (days*24)
        cursor.execute(query)
        result = cursor.fetchall()
        t = []   # TimeStamp
        s = []   # RegulatedWindVoltage
        u = []   # UnregulatedWindVoltage
        v = []   # SolarWind selector
        for record in result:
            t.append(record[0])
            s.append(record[1])
            u.append(record[2])
            v.append(record[3])
        print ("count of t=",len(t))
        # scale array for Solar =0 Wind = 1
        for i in range(len(v)):
            v[i] = v[i] * 10
        #dts = map(datetime.datetime.fromtimestamp, t)
        #print dts
        fds = dates.date2num(t) # converted
        # matplotlib date format object
        hfmt = dates.DateFormatter('%m/%d-%H')
        fig = pyplot.figure()
        fig.set_facecolor('white')
        ax = fig.add_subplot(111,axisbg = 'white')
        # white vlines at each sample; ticks every 6 hours
        ax.vlines(fds, -200.0, 1000.0,colors='w')
        ax.xaxis.set_major_locator(dates.HourLocator(interval=6))
        ax.xaxis.set_major_formatter(hfmt)
        ax.set_ylim(bottom = -200.0)
        pyplot.xticks(rotation='vertical')
        pyplot.subplots_adjust(bottom=.3)
        pylab.plot(t, s, color='b',label="Reg Wind Volt",linestyle="",marker=".")
        pylab.plot(t, u, color='r',label="Unreg Wind Volt",linestyle="",marker=".")
        pylab.plot(t, v, color='g',label="Solar/Wind",linestyle="-",marker=".")
        pylab.xlabel("Hours")
        pylab.ylabel("Voltage")
        pylab.legend(loc='upper left')
        pylab.axis([min(t), max(t), 0, 20])
        pylab.figtext(.5, .05, ("Solar / Wind System Last %i Days" % days),fontsize=18,ha='center')
        pylab.grid(True)
        pyplot.show()
        # write the PNG served by RasPiConnect
        pyplot.savefig("/home/pi/RasPiConnectServer/static/solarwindgraph.png")
    except mdb.Error, e:
        print "Error %d: %s" % (e.args[0],e.args[1])
    finally:
        # release DB and plotting resources, then force garbage collection
        # (long-running Pi process; keeps memory bounded)
        cursor.close()
        db.close()
        del cursor
        del db
        fig.clf()
        pyplot.close()
        pylab.close()
        del t, s, u, v
        gc.collect()
    print("solarwindgraph finished now")
|
gpl-3.0
|
polakowo/plnx-grabber
|
plnxgrabber/__init__.py
|
1
|
32357
|
# Grabber of trade history from Poloniex exchange
# https://github.com/polakowo/plnx-grabber
#
# Copyright (C) 2017 https://github.com/polakowo/plnx-grabber
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import re
from datetime import datetime, timedelta
from enum import Enum
from time import sleep
from timeit import default_timer as timer
import pandas as pd
import pymongo
import pytz
from bson.codec_options import CodecOptions
from poloniex import Poloniex
# Logger
############################################################
logger = logging.getLogger(__name__)
# No logging by default
logger.addHandler(logging.NullHandler())
# Date & time
############################################################
def parse_date(date_str, fmt='%Y-%m-%d %H:%M:%S'):
    """Parse a naive Poloniex timestamp string into a UTC-aware datetime."""
    naive = datetime.strptime(date_str, fmt)
    return pytz.utc.localize(naive)
def dt_to_str(date, fmt='%a %d/%m/%Y %H:%M:%S %Z'):
    """Render *date* for console and log output using strftime format *fmt*."""
    return format(date, fmt)
def now():
    """Current time as a timezone-aware UTC datetime."""
    utc_naive = datetime.utcnow()
    return pytz.utc.localize(utc_naive)
def ago(**kwargs):
    """UTC time some timedelta in the past, e.g. ago(hours=2)."""
    offset = timedelta(**kwargs)
    return now() - offset
def begin():
    """Earliest date used by the grabber: 2000-01-01 UTC."""
    return datetime(2000, 1, 1, tzinfo=pytz.utc)
class TimePeriod(Enum):
    """Common time spans in seconds (month = 30 days, year = 12 x 30 days)."""
    SECOND = 1
    MINUTE = 60
    HOUR = 60 * 60
    DAY = 24 * 60 * 60
    WEEK = 7 * 24 * 60 * 60
    MONTH = 30 * 24 * 60 * 60
    YEAR = 12 * 30 * 24 * 60 * 60
# Dataframes
############################################################
def df_memory(df):
    """Total memory footprint of *df* in bytes, including the index and
    deep (object payload) sizes."""
    usage = df.memory_usage(index=True, deep=True)
    return usage.sum()
def df_series_info(df):
    """Summarize a trade-history frame indexed by trade id.

    Works regardless of sort direction: the chronologically first/last rows
    are found by comparing the first and last index values. Returns a dict
    with from/to ids and dates, their time delta, row count, and memory.
    """
    ascending = bool(df.index[0] < df.index[-1])
    from_pos, to_pos = (0, -1) if ascending else (-1, 0)
    from_row = df.iloc[from_pos]
    to_row = df.iloc[to_pos]
    return {
        'from_dt': from_row['dt'],
        'from_id': df.index[from_pos],
        'to_dt': to_row['dt'],
        'to_id': df.index[to_pos],
        'delta': to_row['dt'] - from_row['dt'],
        'count': len(df.index),
        'memory': df_memory(df)}
def verify_series_df(df):
    """Check that trade ids form a gapless, duplicate-free integer range.

    Compares the row count against the id span and logs a warning when
    they disagree. Returns True when the series is contiguous.
    """
    started = timer()
    info = df_series_info(df)
    expected = info['to_id'] - info['from_id'] + 1
    diff = info['count'] - expected
    elapsed = timer() - started
    if diff > 0:
        logger.warning("Dataframe - Found duplicates (%d) - %.2fs", diff, elapsed)
    elif diff < 0:
        logger.warning("Dataframe - Found gaps (%d) - %.2fs", abs(diff), elapsed)
    else:
        logger.debug("Dataframe - Verified - %.2fs", elapsed)
    return diff == 0
def df_to_docs(df):
    """Convert a frame into a list of MongoDB-ready documents (index becomes a field)."""
    flat = df.reset_index()
    return flat.to_dict(orient='records')
def docs_to_df(docs, new_index=None):
    """Convert MongoDB documents back into a DataFrame.

    *docs* is any iterable of dicts; *new_index* names the column(s) to use
    as the index (defaults to ['dt'], matching the stored timestamp field).

    The original used a mutable default argument (new_index=['dt']); a
    None sentinel avoids the shared-list pitfall while keeping behavior.
    """
    if new_index is None:
        new_index = ['dt']
    return pd.DataFrame(list(docs)).set_index(new_index, drop=True)
# Output
############################################################
def dt_to_ts(date):
    """POSIX timestamp of *date*, truncated to whole seconds."""
    seconds = date.timestamp()
    return int(seconds)
def format_td(td):
    """Human-readable rendering of a timedelta, e.g. '1 day 2 hours'.

    Negative deltas are formatted by magnitude; zero-valued units are
    omitted entirely (an empty string for a zero delta).
    """
    remaining = int(abs(td).total_seconds())
    unit_sizes = [('year', 60 * 60 * 24 * 365),
                  ('month', 60 * 60 * 24 * 30),
                  ('day', 60 * 60 * 24),
                  ('hour', 60 * 60),
                  ('minute', 60),
                  ('second', 1)]
    parts = []
    for unit, size in unit_sizes:
        if remaining >= size:
            value, remaining = divmod(remaining, size)
            suffix = '' if value == 1 else 's'
            parts.append('%s %s%s' % (value, unit, suffix))
    return ' '.join(parts)
def format_bytes(num):
    """Format a byte count with binary (1024) units, 'B' through 'PB'.

    The original returned None for values of 1024 TB or more (the loop
    fell off the end of the unit list); values that large now render as
    petabytes instead.
    """
    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return '%3.1f %s' % (num, unit)
        num /= 1024.0
    return '%3.1f %s' % (num, 'PB')
def series_info_str(series_info):
    """One-line human-readable summary of a series_info dict."""
    from_part = "%s : %d" % (dt_to_str(series_info['from_dt']),
                             series_info['from_id'])
    to_part = "%s : %d" % (dt_to_str(series_info['to_dt']),
                           series_info['to_id'])
    return "{ %s, %s, %s, %d rows, %s }" % (
        from_part,
        to_part,
        format_td(series_info['delta']),
        series_info['count'],
        format_bytes(series_info['memory']))
# MongoTS
############################################################
class MongoTS(object):
    """
    Wrapper around pymongo for dealing with trade series information
    """

    def __init__(self, db):
        # Set running MongoDB instance
        self.db = db

    def db_info(self):
        # Aggregates basic info on current state of db: collection count,
        # total documents, and total memory across all collections.
        cname_series_info = {cname: self.series_info(cname) for cname in self.list_cols()}
        logger.info("Database '{0}' - {1} collections - {2:,} documents - {3}"
                    .format(self.db.name,
                            len(cname_series_info),
                            sum(series_info['count'] for series_info in cname_series_info.values()),
                            format_bytes(sum(series_info['memory'] for series_info in cname_series_info.values()))))
        # Shows detailed descriptions of each collection
        for cname, series_info in cname_series_info.items():
            logger.info("%s - %s", cname, series_info_str(series_info))

    def clear_db(self):
        # Drop all collections
        for cname in self.list_cols():
            self.drop_col(cname)

    # Collections

    def tzaware_col(self, cname):
        """
        Return timezone-aware dates by default
        """
        options = CodecOptions(tz_aware=True, tzinfo=pytz.utc)
        return self.db.get_collection(cname, codec_options=options)

    def list_cols(self):
        # All collection names in the database
        return self.db.collection_names()

    def create_col(self, cname):
        # Create new collection and index on timestamp field
        self.db.create_collection(cname)
        self.db[cname].create_index([('dt', pymongo.ASCENDING)], unique=False, background=True)
        logger.debug("%s - Collection - Created", cname)

    def drop_col(self, cname):
        # Delete collection entirely
        self.db[cname].drop()
        logger.debug("%s - Collection - Dropped", cname)

    def col_exists(self, cname):
        # True if a collection with this name exists
        return cname in self.list_cols()

    def col_non_empty(self, cname):
        # Check whether collection exists and not empty
        # (docs_count is defined later in this class, outside this view)
        return self.col_exists(cname) and self.docs_count(cname) > 0

    def col_memory(self, cname):
        # Returns size of all documents + header + index size
        # (16 * 100 is a fixed header allowance)
        return self.db.command('collstats', cname)['size'] + 16 * 100 + self.db.command('collstats', cname)[
            'totalIndexSize']
# Series
def series_info(self, cname):
# Returns the most important series information
# (start and end points, their delta, num of rows and memory taken)
from_dict = self.from_doc(cname)
to_dict = self.to_doc(cname)
return {
'from_dt': from_dict['dt'],
'from_id': from_dict['_id'],
'to_dt': to_dict['dt'],
'to_id': to_dict['_id'],
'delta': to_dict['dt'] - from_dict['dt'],
'count': self.docs_count(cname),
'memory': self.col_memory(cname)}
def verify_series(self, cname):
# Verifies the incremental nature of trade id across series
t = timer()
series_info = self.series_info(cname)
diff = series_info['count'] - (series_info['to_id'] - series_info['from_id'] + 1)
if diff > 0:
logger.warning("%s - Collection - Found duplicates (%d) - %.2fs", cname, diff, timer() - t)
elif diff < 0:
logger.warning("%s - Collection - Found gaps (%d) - %.2fs", cname, abs(diff), timer() - t)
else:
logger.debug("%s - Collection - Verified - %.2fs", cname, timer() - t)
return diff == 0
def series_range(self, cname, from_dt, to_dt):
# Get the series in the range
return self.find_docs(cname, query={'dt': {'$gte': from_dt, '$lte': to_dt}})
# Documents
def docs_count(self, cname):
# Documents count in collection
return self.db.command('collstats', cname)['count']
def from_doc(self, cname):
# Return the document for the earliest point in series
return next(self.tzaware_col(cname).find().sort([['_id', 1]]).limit(1))
def to_doc(self, cname):
# Return the document for the most recent point in series
return next(self.tzaware_col(cname).find().sort([['_id', -1]]).limit(1))
def insert_docs(self, cname, docs):
# Convert df into list of dicts and insert into collection (fast)
t = timer()
result = self.db[cname].insert_many(docs)
logger.debug("%s - Collection - Inserted %d documents - %.2fs",
cname, len(result.inserted_ids), timer() - t)
def update_docs(self, cname, docs):
# Convert df into list of dicts and only insert records not present in the collection (slow)
t = timer()
n_modified = 0
n_upserted = 0
for record in docs:
result = self.db[cname].update_one(
{'_id': record['_id']},
{'$setOnInsert': record},
upsert=True)
if result.modified_count is not None and result.modified_count > 0:
n_modified += result.modified_count
if result.upserted_id is not None:
n_upserted += 1
logger.debug("%s - Collection - Modified %d, upserted %d documents - %.2fs",
cname, n_modified, n_upserted, timer() - t)
def delete_docs(self, cname, query={}):
# Delete documents
t = timer()
result = self.db[cname].delete_many(query)
logger.debug("%s - Collection - Deleted %d documents - %.2fs",
cname, result.deleted_count, timer() - t)
def find_docs(self, cname, *args):
# Return generator for documents which match query
return self.tzaware_col(cname).find(args)
# Grabber
############################################################
class Grabber(object):
    """
    Poloniex only returns max of 50,000 records at a time, meaning we have to coordinate download and
    save of many chunks of data. Moreover, there is no fixed amount of records per unit of time, which
    requires a synchronization of chunks by trade id.
    For example: If we would like to go one month back in time, Poloniex could have returned us only
    the most recent week. Because Polo returns only 50,000 of the latest records (not the oldest ones),
    we can synchronize chunks only by going backwards. Otherwise, if we decided to go forwards in time,
    we couldn't know which time interval to choose to fill all records in order to synchronize with
    previous chunk.
    """

    def __init__(self, mongo_ts):
        # pymongo Wrapper
        self.mongo_ts = mongo_ts
        # Poloniex
        self.polo = Poloniex()

    def progress(self):
        """
        Shows how much history was grabbed so far in relation to overall available on Poloniex
        """
        cname_series_info = {cname: self.mongo_ts.series_info(cname) for cname in self.mongo_ts.list_cols()}
        for pair, series_info in cname_series_info.items():
            # Get latest id
            df = self.get_chunk(pair, ago(minutes=15), now())
            if df.empty:
                logger.info("%s - No information available", pair)
                continue
            max_id = df_series_info(df)['to_id']
            # Progress bar
            steps = 50
            below_rate = series_info['from_id'] / max_id
            taken_rate = (series_info['to_id'] - series_info['from_id']) / max_id
            above_rate = (max_id - series_info['to_id']) / max_id
            progress = '_' * math.floor(below_rate * steps) + \
                       'x' * (steps - math.floor(below_rate * steps) - math.floor(above_rate * steps)) + \
                       '_' * math.floor(above_rate * steps)
            logger.info("%s - 1 [ %s ] %d - %.1f/100.0%% - %s/%s",
                        pair,
                        progress,
                        series_info['to_id'],
                        taken_rate * 100,
                        format_bytes(series_info['memory']),
                        format_bytes(1 / taken_rate * series_info['memory']))

    def remote_info(self, pairs):
        """
        Detailed info on pairs listed on Poloniex
        """
        for pair in pairs:
            # Consistency: reuse the shared client (was a fresh Poloniex() per pair)
            chart_data = self.polo.returnChartData(pair, period=86400, start=1, end=dt_to_ts(now()))
            # NOTE(review): assumes the 'date' fields are datetimes; if the API
            # returns epoch seconds, format_td(to_dt - from_dt) below would fail - confirm
            from_dt = chart_data[0]['date']
            to_dt = chart_data[-1]['date']
            df = self.get_chunk(pair, ago(minutes=5), now())
            if df.empty:
                # BUG FIX: the 'pair' argument was missing, so the raw '%s'
                # placeholder was logged verbatim
                logger.info("%s - No information available", pair)
                continue
            max_id = df_series_info(df)['to_id']
            logger.info("%s - %s - %s, %s, %d trades, est. %s",
                        pair,
                        dt_to_str(from_dt, fmt='%a %d/%m/%Y'),
                        dt_to_str(to_dt, fmt='%a %d/%m/%Y'),
                        format_td(to_dt - from_dt),
                        max_id,
                        format_bytes(round(df_memory(df) * max_id / len(df.index))))

    def db_info(self):
        """
        Wrapper for mongo_ts.db_info
        """
        self.mongo_ts.db_info()

    def ticker_pairs(self):
        """
        Returns all pairs from ticker
        """
        ticker = self.polo.returnTicker()
        pairs = set(map(lambda x: str(x).upper(), ticker.keys()))
        return pairs

    def get_chunk(self, pair, from_dt, to_dt):
        """
        Returns a chunk of trade history (max 50,000 of the most recent records) of a period of time
        :param pair: pair of symbols
        :param from_dt: date of start
        :param to_dt: date of end
        :return: df (empty on any error)
        """
        try:
            series = self.polo.marketTradeHist(pair, start=dt_to_ts(from_dt), end=dt_to_ts(to_dt))
            series_df = pd.DataFrame(series)
            series_df = series_df.astype({
                'date': str,
                'amount': float,
                'globalTradeID': int,
                'rate': float,
                'total': float,
                'tradeID': int,
                'type': str})
            series_df['date'] = series_df['date'].apply(lambda date_str: parse_date(date_str))
            series_df.rename(columns={'date': 'dt', 'tradeID': '_id', 'globalTradeID': 'globalid'}, inplace=True)
            series_df = series_df.set_index(['_id'], drop=True)
            return series_df
        except Exception as e:
            # Best-effort: network/shape errors degrade to an empty frame
            logger.error(e)
            return pd.DataFrame()

    def grab(self, pair, from_dt=None, from_id=None, to_dt=None, to_id=None):
        """
        Grabs trade history of a period of time for a pair of symbols.
        * Traverses history from the end date to the start date (backwards)
        * History is divided into chunks of max 50,000 records
        * Chunks are synced by id of their oldest records
        * Once received, each chunk is immediately put into MongoDB to free up RAM
        * Result includes passed dates - [from_dt, to_dt]
        * Result excludes passed ids - (from_id, to_id)
        * Ids have higher priority than dates
        The whole process looks like this:
        1) Start recording history chunk by chunk beginning from to_dt
            [ from_dt/from_id <- xxxxxxxxxxxxxxxxxxxxxxxxxx to_dt ]
        or if to_id is provided, find it first and only then start recording
            [ from_dt/from_id ___________ to_id <- <- <- <- to_dt ]
            [ from_dt/from_id <- xxxxxxxx to_id ___________ to_dt ]
        2) Each chunk is verified for consistency and inserted into MongoDB
        3) Proceed until start date or id are reached, or Poloniex returned nothing
            [ from_dt/from_id xxxxxxxxxxxxxxxxxxxxxxxxxxxxx to_dt ]
                        |
                        v
                collected history
        or if to_id is provided
            [ from_dt/from_id xxxxxxxxxxx to_id ___________ to_dt ]
                    |
                    v
              collected history
        4) Verify whole collection
        :param pair: pair of symbols
        :param from_dt: date of start point (only as approximation, program aborts if found)
        :param from_id: id of start point (has higher priority than ts, program aborts if found)
        :param to_dt: date of end point
        :param to_id: id of end point
        :return: None
        """
        # BUG FIX: fill missing date bounds up front - the 'Achieving' debug
        # line below computes to_dt - from_dt, which crashed for None defaults
        # (the defaults used to be applied only after that first use)
        if from_dt is None:
            from_dt = begin()
        if to_dt is None:
            to_dt = now()
        if self.mongo_ts.col_non_empty(pair):
            logger.debug("%s - Collection - %s", pair, series_info_str(self.mongo_ts.series_info(pair)))
        else:
            logger.debug("%s - Collection - Empty", pair)
        # Create new collection only if none exists
        if pair not in self.mongo_ts.list_cols():
            self.mongo_ts.create_col(pair)
        logger.debug("%s - Collection - Achieving { %s%s, %s%s, %s }",
                     pair,
                     dt_to_str(from_dt),
                     ' : %d' % from_id if from_id is not None else '',
                     dt_to_str(to_dt),
                     ' : %d' % to_id if to_id is not None else '',
                     format_td(to_dt - from_dt))
        t = timer()
        # Init window params
        # ..................
        # Dates are required to build rolling windows and pass them to Poloniex
        if to_dt <= from_dt:
            raise Exception("%s - Start date { %s } above end date { %s }" %
                            (pair, dt_to_str(from_dt), dt_to_str(to_dt)))
        if from_id is not None and to_id is not None:
            if to_id <= from_id:
                raise Exception("%s - Start id { %d } above end id { %d }" %
                                (pair, from_id, to_id))
        max_delta = timedelta(days=30)
        window = {
            # Do not fetch more than needed, pick the size smaller or equal to max_delta
            'from_dt': max(to_dt - max_delta, from_dt),
            'to_dt': to_dt,
            # Gets filled after first chunk is fetched
            'anchor_id': None
        }
        # Record only starting from to_id, or immediately if none is provided
        recording = to_id is None
        # After we recorded data, verify consistency in database
        anything_recorded = False
        # Three possibilities to escape the loop:
        #   1) empty result
        #   2) reached the start date/id
        #   3) exception
        while True:
            t2 = timer()
            # Receive and process chunk of data
            # .................................
            logger.debug("%s - Poloniex - Querying { %s, %s, %s }",
                         pair,
                         dt_to_str(window['from_dt']),
                         dt_to_str(window['to_dt']),
                         format_td(window['to_dt'] - window['from_dt']))
            df = self.get_chunk(pair, window['from_dt'], window['to_dt'])
            if df.empty:
                if anything_recorded or window['from_dt'] == from_dt:
                    # If we finished (either by reaching start or receiving no records) -> terminate
                    logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                    break
                else:
                    # If Poloniex temporary suspended trading for a pair -> look for older records
                    logger.debug("%s - Poloniex - Nothing returned - continuing", pair)
                    window['to_dt'] = window['from_dt']
                    window['from_dt'] = max(window['from_dt'] - max_delta, from_dt)
                    continue
            # If chunk contains end id (newest bound) -> start recording
            # .........................................................
            if not recording:
                # End id found
                if to_id in df.index:
                    logger.debug("%s - Poloniex - End id { %d } found", pair, to_id)
                    # Start recording
                    recording = True
                    df = df[df.index < to_id]
                    if df.empty:
                        logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                        break
                else:
                    series_info = df_series_info(df)
                    logger.debug("%s - Poloniex - End id { %d } not found in { %s : %d, %s : %d }",
                                 pair,
                                 to_id,
                                 dt_to_str(series_info['from_dt']),
                                 series_info['from_id'],
                                 dt_to_str(series_info['to_dt']),
                                 series_info['to_id'])
                    # If start reached -> terminate
                    if from_id is not None:
                        if any(df.index <= from_id):
                            logger.debug("%s - Poloniex - Start id { %d } reached - aborting", pair, from_id)
                            break
                    if any(df['dt'] <= from_dt):
                        logger.debug("%s - Poloniex - Start date { %s } reached - aborting", pair, dt_to_str(from_dt))
                        break
                    series_info = df_series_info(df)
                    window['from_dt'] = max(series_info['from_dt'] - max_delta, from_dt)
                    window['to_dt'] = series_info['from_dt']
                    continue
            if recording:
                # Synchronize with previous chunk by intersection of their ids
                # ............................................................
                if window['anchor_id'] is not None:
                    # To merge two dataframes, there must be an intersection of ids (anchor)
                    if any(df.index >= window['anchor_id']):
                        df = df[df.index < window['anchor_id']]
                        if df.empty:
                            logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                            break
                    else:
                        logger.debug("%s - Poloniex - Anchor id { %d } is missing - aborting", pair,
                                     window['anchor_id'])
                        break
                # If chunk contains start id or date (oldest record) -> finish recording
                # ....................................................................
                if from_id is not None:
                    if any(df.index <= from_id):
                        df = df[df.index > from_id]
                        if df.empty:
                            logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                        else:
                            logger.debug("%s - Poloniex - Returned %s - %.2fs",
                                         pair, series_info_str(df_series_info(df)), timer() - t2)
                            logger.debug("%s - Poloniex - Start id { %d } reached - aborting", pair, from_id)
                            if verify_series_df(df):
                                self.mongo_ts.insert_docs(pair, df_to_docs(df))
                                anything_recorded = True
                        break  # escape anyway
                # or at least the approx. date
                elif any(df['dt'] <= from_dt):
                    df = df[df['dt'] >= from_dt]
                    if df.empty:
                        logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                    else:
                        logger.debug("%s - Poloniex - Returned %s - %.2fs",
                                     pair, series_info_str(df_series_info(df)), timer() - t2)
                        logger.debug("%s - Poloniex - Start date { %s } reached - aborting", pair, dt_to_str(from_dt))
                        if verify_series_df(df):
                            self.mongo_ts.insert_docs(pair, df_to_docs(df))
                            anything_recorded = True
                    break
                # Record data
                # ...........
                # Drop rows with NaNs
                df.dropna(inplace=True)
                if df.empty:
                    logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                    break
                # Drop duplicates
                df.drop_duplicates(inplace=True)
                if df.empty:
                    logger.debug("%s - Poloniex - Nothing returned - aborting", pair)
                    break
                # If none of the start points reached, continue with execution using new window
                logger.debug("%s - Poloniex - Returned %s - %.2fs",
                             pair, series_info_str(df_series_info(df)), timer() - t2)
                # Break on last stored df if the newest chunk is broken
                if not verify_series_df(df):
                    break
                self.mongo_ts.insert_docs(pair, df_to_docs(df))
                anything_recorded = True
                # Continue with next chunk
                # ........................
                series_info = df_series_info(df)
                window['from_dt'] = max(series_info['from_dt'] - max_delta, from_dt)
                window['to_dt'] = series_info['from_dt']
                window['anchor_id'] = series_info['from_id']
        # Verify collection after recordings
        # ..................................
        if anything_recorded:
            # Generally, series verification always succeeds, because we check each df and sync them properly
            if self.mongo_ts.verify_series(pair):
                logger.debug("%s - Collection - %s - %.2fs", pair, series_info_str(self.mongo_ts.series_info(pair)),
                             timer() - t)
            else:
                raise Exception("%s - Consistency broken - fix required" % pair)
        else:
            logger.debug("%s - Nothing returned - %.2fs", pair, timer() - t)

    def one(self, pair, from_dt=None, to_dt=None, drop=False):
        """
        Grabs data for a pair based on passed params as well as history stored in the underlying collection
        Possible values of from_dt and to_dt:
        * 'oldest' means the from_dt of the collection
        * 'newest' means the to_dt of the collection
        :param pair: pair of symbols
        :param from_dt: date of the start point or command from ['oldest', 'newest']
        :param to_dt: date of the end point or command from ['oldest', 'newest']
        :param drop: delete underlying collection before insert
        :return: None
        """
        t = timer()
        logger.info("%s - ...", pair)
        # Fill dates of collection's bounds
        if self.mongo_ts.col_non_empty(pair):
            series_info = self.mongo_ts.series_info(pair)
            if isinstance(from_dt, str):
                if from_dt == 'oldest':
                    from_dt = series_info['from_dt']
                elif from_dt == 'newest':
                    from_dt = series_info['to_dt']
                else:
                    raise Exception("Unknown command '%s'" % from_dt)
            if isinstance(to_dt, str):
                if to_dt == 'oldest':
                    to_dt = series_info['from_dt']
                elif to_dt == 'newest':
                    to_dt = series_info['to_dt']
                else:
                    raise Exception("Unknown command '%s'" % to_dt)
        # Overwrite means drop completely
        if drop:
            self.mongo_ts.drop_col(pair)
        # If nothing is passed, fetch the widest tail and/or head possible
        if from_dt is None:
            from_dt = begin()
        if to_dt is None:
            to_dt = now()
        if self.mongo_ts.col_non_empty(pair):
            series_info = self.mongo_ts.series_info(pair)
            # Period must be non-zero
            if from_dt >= to_dt:
                raise Exception("%s - Start date { %s } above end date { %s }" %
                                (pair, dt_to_str(from_dt), dt_to_str(to_dt)))
            if from_dt < series_info['from_dt']:
                logger.debug("%s - Grabbing tail", pair)
                # Collect history up to the oldest record
                self.grab(pair,
                          from_dt=from_dt,
                          to_dt=series_info['from_dt'],
                          to_id=series_info['from_id'])
            if to_dt > series_info['to_dt']:
                logger.debug("%s - Grabbing head", pair)
                # Collect history from the newest record
                self.grab(pair,
                          from_dt=series_info['to_dt'],
                          to_dt=to_dt,
                          from_id=series_info['to_id'])
        else:
            # There is no newest or oldest bounds of empty collection
            if isinstance(from_dt, str) or isinstance(to_dt, str):
                raise Exception("%s - Collection empty - cannot auto-fill dates" % pair)
            logger.debug("%s - Grabbing full", pair)
            self.grab(pair,
                      from_dt=from_dt,
                      to_dt=to_dt)
        logger.info("%s - Finished - %.2fs", pair, timer() - t)

    def row(self, pairs, from_dt=None, to_dt=None, drop=False):
        """
        Grabs data for each pair in a row
        :param pairs: list of pairs or string command from ['db', 'ticker']
        :param from_dt: date of the start point or command from ['oldest', 'newest']
        :param to_dt: date of the end point or command from ['oldest', 'newest']
        :param drop: delete underlying collection before insert
        :return: None
        """
        if isinstance(pairs, str):
            # All pairs in db
            if pairs == 'db':
                pairs = self.mongo_ts.list_cols()
            # All pairs in ticker
            elif pairs == 'ticker':
                pairs = self.ticker_pairs()
            else:
                regex = re.compile(pairs)
                pairs = list(filter(regex.search, self.ticker_pairs()))
        if len(pairs) == 0:
            raise Exception("List of pairs must be non-empty")
        # (removed an unused `t = timer()` that was assigned per iteration)
        for pair in pairs:
            self.one(pair, from_dt=from_dt, to_dt=to_dt, drop=drop)

    def ring(self, pairs, every=None):
        """
        Grabs the most recent data for a row of pairs on repeat
        Requires all pairs to be persistent in the database
        :param pairs: list of pairs or 'db' command
        :param every: pause between iterations
        :return: None
        """
        if isinstance(pairs, str):
            # All pairs in db
            if pairs == 'db':
                pairs = self.mongo_ts.list_cols()
            else:
                regex = re.compile(pairs)
                pairs = list(filter(regex.search, self.ticker_pairs()))
        if len(pairs) == 0:
            raise Exception("List of pairs must be non-empty")
        while True:
            # Collect head every time interval
            self.row(pairs, to_dt=now())
            if every is not None:
                sleep(every)
|
gpl-3.0
|
drummonds/remittance
|
remittance/utils.py
|
1
|
1450
|
__author__ = 'Humphrey'
from decimal import Decimal, InvalidOperation
import pandas as pd
from sys import exc_info
one_pence = Decimal('0.01')
def p(value):
    """Convert `value` to Decimal pence implementing AIS rounding (up) or cents

    Quantizes a pounds/dollars amount to two decimal places.  An amount that
    sits exactly on half a penny is rounded away from zero (instead of
    Decimal's default rounding); a pandas Series is handled element-wise and
    returned as a list of Decimals.
    """
    # TODO think about Decimal(-0.00) == Decimal(0.00) which is true. Should I try and convert -0 to +0?
    # I think probably better yes
    try:
        # Scale to pence (value * 100) and quantize to 0.01, so `d` below is
        # the fractional-penny part of the amount.
        test = Decimal(Decimal(float(value)) * Decimal(100)).quantize(one_pence)
        i, d = divmod(test, 1)
        if abs(d) == Decimal(0.50):
            # Implement rounding: exactly half a penny - nudge the amount by a
            # quarter penny (0.0025 pounds) so quantize rounds away from zero.
            if value > 0:
                result = (Decimal(value) + Decimal(0.0025)).quantize(one_pence)
            else:
                result = (Decimal(value) - Decimal(0.0025)).quantize(one_pence)
            # print('p Rounding |{}| to {}'.format(value, result))
        else:
            # Not on a half-penny boundary: plain quantize to 2dp
            # (presumably relying on the context's default rounding mode)
            result = Decimal(float(value)).quantize(one_pence)
    except InvalidOperation:
        # Value Decimal cannot represent (e.g. NaN): log and re-raise with the
        # original traceback
        print('Invalid Operation Val = |{}|'.format(value))
        t, v, tb = exc_info()
        raise v.with_traceback(tb)
    except TypeError:  # Oops -- didn't work. ask forgiveness ;-)
        t, v, tb = exc_info()
        if isinstance(value, pd.Series):
            # Element-wise conversion of a pandas Series -> list of Decimals
            result = [p(x) for x in value]
        else:
            print('Type Error Val = |{}|, {}'.format(value, type(value)))
            raise v.with_traceback(tb)
    return result
|
mit
|
ishanic/scikit-learn
|
sklearn/ensemble/partial_dependence.py
|
251
|
15097
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.
    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.
    Parameters
    ----------
    X : ndarray
        The data
    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.
    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.
    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.
    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')
    # The empirical percentiles are identical on every pass of the loop below
    # (they are computed over the whole X) - compute them once instead of
    # once per column.
    emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution use unique vals
            axis = uniques
        else:
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)
    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.
    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.
    Read more in the :ref:`User Guide <partial_dependence>`.
    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.
    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.
    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.
    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.
    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52..., 4.52...]]), [array([ 0.,  1.])])
    """
    # Validate the estimator: must be a fitted gradient boosting model
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # Exactly one of `grid` / `X` must be given
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()
    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))
    if X is not None:
        # Build the evaluation grid from the percentiles of the target columns
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]
    # One output row per tree in a stage (n_classes for multi-class, else 1)
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    # Accumulate the per-tree partial dependence over all boosting stages;
    # pdp[k] is filled in-place by the Cython helper, scaled by learning_rate
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])
    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.
    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.
    Read more in the :ref:`User Guide <partial_dependence>`.
    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.
    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.
    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.
    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # Matplotlib is imported lazily so that the module can be used without it
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter
    # Validate the estimator: must be a fitted gradient boosting model
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0
    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')
    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}
    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()
    def convert_feature(fx):
        # Map a feature name (str) to its integer index; ints pass through
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx
    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')
        tmp_features.append(fxs)
    features = tmp_features
    # Resolve display names for each (tuple of) feature index
    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)
    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)
    # get global min and max values of PD grouped by plot type
    # (pdp_lim is keyed by the number of target features: 1-way vs 2-way)
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)
    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)
    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()
    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    # One subplot per requested feature (or feature pair)
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)
        if len(axes) == 1:
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)
        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)
        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')
        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)
    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
|
bsd-3-clause
|
gmsn-ita/vaspirin
|
scripts/band_offsets.py
|
2
|
8803
|
#!/usr/bin/env python3
# coding: utf-8
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import argparse
import sys
import re
class PyplotConst(object):
    """Shared matplotlib.pyplot styling constants for the offset plots."""

    # Recognized matplotlib line-style tokens; these double as the
    # ligature markers accepted in the ALIGNMENTS file.
    lineStyles = ['-', '--', '-.', ':', 'None', ' ', '']

    # Font settings applied to every text annotation.
    labelFont = {
        'fontname': 'Linux Biolinum',
        'size': 14,
    }

    # rcParams overrides (render mathtext with the regular font).
    params = {'mathtext.default': 'regular'}
class Offsets(object):
    """
    Band offsets of a single compound listed in the ALIGNMENTS file.

    Attributes:
        label: display label for the compound (may contain TeX markup).
        vbm: valence band maximum / ionization energy (eV).
        cbm: conduction band minimum / electron affinity (eV).
        vacuum: vacuum level for this material (eV); 0 until setVacuum().
        color: matplotlib color used to draw this material's bands.
    """

    def __init__(self, label, vbm, cbm):
        # Label for the compound
        self.label = label
        # Valence band maximum (ionization energy)
        self.vbm = float(vbm)
        # Conduction band minimum (electron affinity)
        self.cbm = float(cbm)
        # BUG FIX: this attribute was misspelled 'vacum', so fresh
        # instances exposed '.vacum' while setVacuum() wrote '.vacuum'.
        self.vacuum = 0
        # Color for this band's discontinuities
        self.color = 'black'

    def setVacuum(self, vac):
        """Set the vacuum potential (eV) for this material."""
        self.vacuum = float(vac)

    def setColor(self, color):
        """Set the matplotlib color used for this material's representation."""
        self.color = color
def importAlignmentsList(fAlignments):
    '''
    Parse the ALIGNMENTS file into a list of Offsets objects.

    Each non-empty line is either a band offset or a ligature:
        Label VBM CBM color        (band offset)
        LINE_STYLE color           (ligature, e.g. --- or ...)
    Lines with --- and ... are band offsets dividers, representing dashed
    and dotted lines. For example:
        MoS$_2$ -5.795 -4.016 red
        --- red
        MoS$_2$ -5.819 -4.075 black
        SnS$_2$ -6.662 -5.120 black
        --- blue
        SnS$_2$ -6.586 -5.063 blue

    Exits the process with an error message when the file is missing or
    malformed (this is a CLI helper, not a library function).
    '''
    ## Opens the ALIGNMENTS file
    try:
        with open(fAlignments, 'r') as f:
            inputFile = f.read().strip()
            lines = [x for x in inputFile.split('\n') if x]  # removes repeated \n ''
    except FileNotFoundError:
        print('Alignments file not found. Please specify a valid filename.')
        sys.exit()

    offsetsList = []
    ## Each line is a band offset or a ligature
    for eachLine in lines:
        # Split each line exactly once (the original code re-split the
        # same line for every field).
        tokens = re.split(' +', eachLine.strip())
        offsetLabel = tokens[0]

        if offsetLabel in PyplotConst.lineStyles:
            ## line format: LINE_STYLE COLOR
            try:
                color = tokens[1]
            except IndexError:
                print('Invalid file formatting. Please correct it and try again.')
                sys.exit(1)
            ## Creates an object which is only a ligature
            newOffset = Offsets(offsetLabel, 0, 0)
        else:
            ## line format: LABEL VBM CBM COLOR
            try:
                vbm = tokens[1]
                cbm = tokens[2]
                color = tokens[3]
            except IndexError:
                print('Invalid file formatting. Please correct it and try again.')
                sys.exit(1)
            ## Creates an object which is a band offset
            newOffset = Offsets(offsetLabel, vbm, cbm)

        newOffset.setColor(color)
        offsetsList.append(newOffset)

    return offsetsList
def printOffsetsList (offsetsList, args):
    '''
    A list of band offsets is received and printed using matplotlib.pyplot.
    Lines joining different band offsets should not be at the end or beginning of the lists

    offsetsList : list of Offsets, as returned by importAlignmentsList
    args        : argparse.Namespace with at least number, axis, output, show
    '''
    # Running x position (data coordinates) of the next element to draw.
    last_x = 0
    # Horizontal extent / line width for each material's band lines.
    offset_x_length = 1
    offset_linewidth = 3
    # Horizontal extent / line width for the ligatures joining offsets.
    ligature_x_length = 0.4
    ligature_linewidth = 1.5
    axes_margin = 0.25
    ## Then, the offsets are plotted
    for i in range (len(offsetsList)):
        ## Plot ligature
        if offsetsList[i].label in PyplotConst.lineStyles:
            ## Reads the offsets from the last and the next one
            # NOTE(review): assumes a ligature is never the first or last
            # entry of the list; otherwise i-1 / i+1 index out of range.
            vbm_last = offsetsList[i-1].vbm
            cbm_last = offsetsList[i-1].cbm
            vbm_next = offsetsList[i+1].vbm
            cbm_next = offsetsList[i+1].cbm
            labelInterface = offsetsList[i-1].label + '/' + offsetsList[i+1].label
            if offsetsList[i].label == '-':
                ## Plot line joining adjacent VBM
                plt.plot ([last_x, last_x], [vbm_last, vbm_next],
                          linestyle = offsetsList[i].label,
                          color = offsetsList[i].color,
                          linewidth = offset_linewidth)
                ## Plot line joining adjacent CBM
                plt.plot ([last_x, last_x], [cbm_last, cbm_next],
                          linestyle = offsetsList[i].label,
                          color = offsetsList[i].color,
                          linewidth = offset_linewidth)
                ## Label interface
                ## label becomes aligned in the center if there is only one interface (two materials)
                if args.number == 2:
                    plt.annotate (labelInterface, (last_x, (max(vbm_last, vbm_next)+ min(cbm_last, cbm_next))/2),
                                  horizontalalignment='center', verticalalignment='center', **PyplotConst.labelFont)
                # Solid ligature is drawn vertically: x does not advance.
                last_x = last_x
            else:
                ## Plot line joining VBM with stylish lines
                plt.plot ([last_x, last_x + ligature_x_length], [vbm_last, vbm_next],
                          linestyle = offsetsList[i].label,
                          color = offsetsList[i].color,
                          linewidth = ligature_linewidth)
                ## Plot line joining CBM with stylish lines
                plt.plot ([last_x, last_x + ligature_x_length], [cbm_last, cbm_next],
                          linestyle = offsetsList[i].label,
                          color = offsetsList[i].color,
                          linewidth = ligature_linewidth)
                # Dashed/dotted ligatures occupy horizontal space.
                last_x = last_x + ligature_x_length
        ## Or plot offset
        else:
            vbm = offsetsList[i].vbm
            cbm = offsetsList[i].cbm
            ## Plot VBM
            plt.plot ([last_x, last_x + offset_x_length], [vbm, vbm],
                      linestyle = '-',
                      color = offsetsList[i].color,
                      linewidth = offset_linewidth)
            # Annotate the VBM energy just below its line.
            plt.annotate (("% .2f" % offsetsList[i].vbm), (last_x + offset_x_length/2, vbm - 0.03), horizontalalignment='center', verticalalignment='top', **PyplotConst.labelFont)
            ## Plot CBM
            plt.plot ([last_x, last_x + offset_x_length], [cbm, cbm],
                      linestyle = '-',
                      color = offsetsList[i].color,
                      linewidth = offset_linewidth)
            # Annotate the CBM energy just above its line.
            plt.annotate (("% .2f" % offsetsList[i].cbm), (last_x + offset_x_length/2, cbm + 0.02), horizontalalignment='center', verticalalignment='bottom', **PyplotConst.labelFont)
            # Material label centered inside the gap.
            plt.annotate (offsetsList[i].label, (last_x + offset_x_length/2, (vbm+cbm)/2),
                          horizontalalignment='center', verticalalignment='center', **PyplotConst.labelFont)
            last_x = last_x + offset_x_length
    ## A set of pyplot configurations is passed
    plt.margins (axes_margin)
    plt.rcParams.update(PyplotConst.params)
    ## Turn on the axis if requested
    if args.axis:
        plt.axis('on')
    else:
        plt.axis('off')
    plt.savefig (args.output)
    ## Finally, the plot is shown
    if args.show:
        plt.show()
def parseArgs():
    """
    Build the command-line interface and parse sys.argv.

    Returns the argparse.Namespace holding every positional and
    optional argument of the script.
    """
    cli = argparse.ArgumentParser(
        description='Easy script to create band alignments',
        epilog="Written by Daniel S. Koda (feb. 2017).",
        prog="band_offsets.py",
    )

    cli.add_argument('input_file', default='ALIGNMENTS',
                     help="ALIGNMENTS input file")
    cli.add_argument('-o', '--output', default='offsets.png',
                     help="output name for the generated files with its format. Default: offsets.png")
    cli.add_argument('-v', '--vacuum', type=float, default=0.0,
                     help="vacuum dipole step (Default: 0.0)")
    cli.add_argument('-q', '--quiet', action='store_true',
                     help="do not display text on the output window (default: False)")
    cli.add_argument('-x', '--axis', action='store_true',
                     help="turn on the axis for the plot (default: False)")
    cli.add_argument('-s', '--show', action='store_true',
                     help="show the band alignments before saving to file (default: False)")
    cli.add_argument('-n', '--number', type=int, default=2,
                     help="number of materials composing the interface (Default: 2)")

    return cli.parse_args()
def printRunDescription(args):
    '''
    Print a short, column-aligned summary of the options chosen for
    this run (input file, output file and vacuum step).
    '''
    pad = 20  # width of the left-justified label column
    print("input file:".ljust(pad) + str(args.input_file))
    print("output file:".ljust(pad) + str(args.output))
    print("vacuum step:".ljust(pad) + "% .2f" % args.vacuum)
def main():
    '''
    Entry point: parse the command line, read the ALIGNMENTS file and
    draw/save the band offsets it describes.
    '''
    args = parseArgs()

    if not args.quiet:
        # Small banner identifying the tool.
        for banner_line in ("*****************************",
                            " vaspirin v2.0: band_offsets ",
                            "*****************************"):
            print(banner_line)
        printRunDescription(args)

    ## Open the input file and creates an offsets list
    offsetsList = importAlignmentsList(args.input_file)

    ## Plots the artistic band offsets
    printOffsetsList(offsetsList, args)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main ()
|
gpl-3.0
|
bthirion/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
28
|
3697
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
kaichogami/scikit-learn
|
examples/applications/plot_outlier_detection_housing.py
|
28
|
5563
|
"""
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
|
bsd-3-clause
|
MalkIPP/openfisca-france-data
|
openfisca_france_data/sources/utils.py
|
4
|
2153
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pandas import HDFStore, read_csv
def csv2hdf5(csv_name, h5_name, dfname, option='frame'):
    """
    Convert a csv file to a dataframe stored in an hdf5 file.

    Parameters:
      csv_name: string
        csv file name
      h5_name : string
        hdf5 file name
      dfname : string
        dataframe name
      option : string, 'frame' or 'table', default to 'frame'
        storing type in the pytable
    """
    table = read_csv(csv_name)
    store = HDFStore(h5_name)
    if option == 'frame':
        store.put(dfname, table)
    elif option == 'table':  # for frame_table a la pytables
        object_cols = table.dtypes[table.dtypes == 'object']
        print(object_cols.index)
        try:
            store.append(dfname, table)
        # Py3 fix: the original used a bare ``except:`` and Python 2
        # print statements. Object-dtype columns cannot be appended to a
        # table store: drop them and retry.
        except Exception:
            print(table.get_dtype_counts())
            object_cols = table.dtypes[table.dtypes == 'object']
            for col in object_cols.index:
                # %-formatting keeps the exact output of the old
                # ``print 'removing object column :', col`` statement.
                print('removing object column : %s' % col)
                del table[col]
            store.append(dfname, table)
    print(store)
    store.close()
def test_hdf5(h5_name):
    """Print every key stored in the HDF5 file *h5_name*."""
    store = HDFStore(h5_name)
    try:
        for key in store.keys():
            # Py3 fix: print statement -> print() function.
            print(key)
    finally:
        # Close the store even if printing fails (original leaked it).
        store.close()
# Utility module: nothing to execute when run directly.
if __name__ == '__main__':
    pass
|
agpl-3.0
|
madmax983/h2o-3
|
h2o-py/tests/testdir_algos/kmeans/pyunit_DEPRECATED_prostateKmeans.py
|
2
|
1068
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
def prostateKmeans():
    """
    Fit K-Means with k = 5..8 on the prostate data using both H2O and
    scikit-learn, printing the resulting models/centers side by side.
    Assumes a pre-existing H2O cluster connection (localhost:54321).
    """
    # Connect to a pre-existing cluster
    # connect to localhost:54321

    #Log.info("Importing prostate.csv data...\n")
    prostate_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    #prostate.summary()

    prostate_sci = np.loadtxt(pyunit_utils.locate("smalldata/logreg/prostate_train.csv"), delimiter=',', skiprows=1)
    # Drop the first column (row ID) so both tools see the same features.
    prostate_sci = prostate_sci[:, 1:]

    for i in range(5, 9):
        #Log.info(paste("H2O K-Means with ", i, " clusters:\n", sep = ""))
        #Log.info(paste( "Using these columns: ", colnames(prostate.hex)[-1]) )
        prostate_km_h2o = h2o.kmeans(x=prostate_h2o[1:], k=i)
        prostate_km_h2o.show()

        prostate_km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
        prostate_km_sci.fit(prostate_sci)
        # Py3 fix: print statement -> print() function.
        print(prostate_km_sci.cluster_centers_)
# Run standalone under the pyunit harness, or directly on import
# (the h2o test runner imports this module to execute the test).
if __name__ == "__main__":
    pyunit_utils.standalone_test(prostateKmeans)
else:
    prostateKmeans()
|
apache-2.0
|
piotroxp/scibibscan
|
scib/lib/python3.5/site-packages/numpy/core/tests/test_multiarray.py
|
9
|
223106
|
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
    """Tests for the ndarray ``.flags`` attribute."""

    def setUp(self):
        self.a = np.arange(10)

    def test_writeable(self):
        mydict = locals()
        self.a.flags.writeable = False
        # Any assignment into a read-only array must raise ValueError.
        self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
        self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        self.a.flags.writeable = True
        # Re-enabling writeability makes assignment work again.
        self.a[0] = 5
        self.a[0] = 0

    def test_otherflags(self):
        # Expected default flags for a freshly created 1-D arange array.
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        assert_equal(self.a.flags.updateifcopy, False)

    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed bytewise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)

    def test_void_align(self):
        # Structured (void) dtypes should also report as aligned.
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)
class TestHash(TestCase):
    # see #3793
    def test_int(self):
        """numpy integer scalars must hash equal to the Python ints they equal."""
        for st, ut, s in [(np.int8, np.uint8, 8),
                          (np.int16, np.uint16, 16),
                          (np.int32, np.uint32, 32),
                          (np.int64, np.uint64, 64)]:
            # Check hashes across the whole representable bit range of
            # each signed/unsigned pair.
            for i in range(1, s):
                assert_equal(hash(st(-2**i)), hash(-2**i),
                             err_msg="%r: -2**%d" % (st, i))
                assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (st, i - 1))
                assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (st, i))

                # Unsigned types cover one extra bit; clamp i for them.
                i = max(i - 1, 1)
                assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                             err_msg="%r: 2**%d" % (ut, i - 1))
                assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                             err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
    """Tests for basic ndarray attributes (shape, strides, dtype, fill)."""

    def setUp(self):
        self.one = np.arange(10)
        self.two = np.arange(20).reshape(4, 5)
        self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)

    def test_attributes(self):
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4, 5))
        assert_equal(self.three.shape, (2, 5, 6))
        # Shape can be reassigned in place when sizes match.
        self.three.shape = (10, 3, 2)
        assert_equal(self.three.shape, (10, 3, 2))
        self.three.shape = (2, 5, 6)
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        # .base of a reshape result is the original flat array.
        assert_equal(self.two.base, np.arange(20))

    def test_dtypeattr(self):
        assert_equal(self.one.dtype, np.dtype(np.int_))
        assert_equal(self.three.dtype, np.dtype(np.float_))
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
        self.assertTrue(self.three.dtype.str[0] in '<>')
        assert_equal(self.one.dtype.str[1], 'i')
        assert_equal(self.three.dtype.str[1], 'f')

    def test_int_subclassing(self):
        # Regression test for https://github.com/numpy/numpy/pull/3526
        numpy_int = np.int_(0)

        if sys.version_info[0] >= 3:
            # On Py3k int_ should not inherit from int, because it's not fixed-width anymore
            assert_equal(isinstance(numpy_int, int), False)
        else:
            # Otherwise, it should inherit from int...
            assert_equal(isinstance(numpy_int, int), True)

            # ... and fast-path checks on C-API level should also work
            from numpy.core.multiarray_tests import test_int_subclass
            assert_equal(test_int_subclass(numpy_int), True)

    def test_stridesattr(self):
        x = self.one

        def make_array(size, offset, strides):
            # Build an ndarray view over x's buffer with explicit strides.
            return np.ndarray(size, buffer=x, dtype=int,
                              offset=offset*x.itemsize,
                              strides=strides*x.itemsize)

        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
        # Out-of-bounds stride/offset combinations must be rejected.
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(ValueError, make_array, 8, 3, 1)
        assert_equal(make_array(8, 3, 0), np.array([3]*8))
        # Check behavior reported in gh-2503:
        self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
        make_array(0, 0, 10)

    def test_set_stridesattr(self):
        x = self.one

        def make_array(size, offset, strides):
            try:
                r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
            # NOTE(review): bare except kept byte-identical; it converts
            # any construction failure into RuntimeError for assertRaises.
            except:
                raise RuntimeError(getexception())
            r.strides = strides = strides*x.itemsize
            return r

        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
        assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(RuntimeError, make_array, 8, 3, 1)
        # Check that the true extent of the array is used.
        # Test relies on as_strided base not exposing a buffer.
        x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))

        def set_strides(arr, strides):
            arr.strides = strides

        self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))

        # Test for offset calculations:
        x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
                                            shape=(10,), strides=(-1,))
        self.assertRaises(ValueError, set_strides, x[::-1], -1)
        a = x[::-1]
        a.strides = 1
        a[::2].strides = 2

    def test_fill(self):
        # fill(1) must match broadcast assignment for every numeric kind.
        for t in "?bhilqpBHILQPfdgFDGO":
            x = np.empty((3, 2, 1), t)
            y = np.empty((3, 2, 1), t)
            x.fill(1)
            y[...] = 1
            assert_equal(x, y)

    def test_fill_max_uint64(self):
        x = np.empty((3, 2, 1), dtype=np.uint64)
        y = np.empty((3, 2, 1), dtype=np.uint64)
        # Largest uint64 value must survive fill() without overflow.
        value = 2**64 - 1
        y[...] = value
        x.fill(value)
        assert_array_equal(x, y)

    def test_fill_struct_array(self):
        # Filling from a scalar
        x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
        x.fill(x[0])
        assert_equal(x['f1'][1], x['f1'][0])
        # Filling from a tuple that can be converted
        # to a scalar
        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
        x.fill((3.5, -2))
        assert_array_equal(x['a'], [3.5, 3.5])
        assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
    """Tests for np.array construction from nested sequences of arrays."""

    def test_array(self):
        d = np.ones(6)
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6)))

        d = np.ones(6)
        tgt = np.ones((2, 6))
        r = np.array([d, d])
        assert_equal(r, tgt)
        tgt[1] = 2
        r = np.array([d, d + 1])
        assert_equal(r, tgt)

        d = np.ones(6)
        r = np.array([[d, d]])
        assert_equal(r, np.ones((1, 2, 6)))

        d = np.ones(6)
        r = np.array([[d, d], [d, d]])
        assert_equal(r, np.ones((2, 2, 6)))

        d = np.ones((6, 6))
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6, 6)))

        # Ragged nesting: result is a length-2 object-like container.
        d = np.ones((6, ))
        r = np.array([[d, d + 1], d + 2])
        assert_equal(len(r), 2)
        assert_equal(r[0], [d, d + 1])
        assert_equal(r[1], d + 2)

        tgt = np.ones((2, 3), dtype=np.bool)
        tgt[0, 2] = False
        tgt[1, 0:2] = False
        r = np.array([[True, True, False], [False, False, True]])
        assert_equal(r, tgt)
        r = np.array([[True, False], [True, False], [False, True]])
        assert_equal(r, tgt.T)

    def test_array_empty(self):
        # np.array() with no argument is a TypeError.
        assert_raises(TypeError, np.array)

    def test_array_copy_false(self):
        d = np.array([1, 2, 3])
        # copy=False must return a view: writes are visible both ways.
        e = np.array(d, copy=False)
        d[1] = 3
        assert_array_equal(e, [1, 3, 3])
        e = np.array(d, copy=False, order='F')
        d[1] = 4
        assert_array_equal(e, [1, 4, 3])
        e[2] = 7
        assert_array_equal(d, [1, 4, 7])

    def test_array_copy_true(self):
        d = np.array([[1,2,3], [1, 2, 3]])
        # copy=True must decouple the two arrays.
        e = np.array(d, copy=True)
        d[0, 1] = 3
        e[0, 2] = -7
        assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
        assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
        e = np.array(d, copy=True, order='F')
        d[0, 1] = 5
        e[0, 2] = 7
        assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
        assert_array_equal(d, [[1, 5, 3], [1,2,3]])

    def test_array_cont(self):
        # Non-contiguous input: as*array helpers must return contiguous
        # results with the corresponding flags set.
        d = np.ones(10)[::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.ascontiguousarray(d).flags.f_contiguous)
        assert_(np.asfortranarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
        d = np.ones((10, 10))[::2,::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
    """Tests for broadcasting rules of in-place (``a[...] = b``) assignment."""

    def test_assignment_broadcasting(self):
        a = np.arange(6).reshape(2, 3)

        # Broadcasting the input to the output
        a[...] = np.arange(3)
        assert_equal(a, [[0, 1, 2], [0, 1, 2]])
        a[...] = np.arange(2).reshape(2, 1)
        assert_equal(a, [[0, 0, 0], [1, 1, 1]])

        # For compatibility with <= 1.5, a limited version of broadcasting
        # the output to the input.
        #
        # This behavior is inconsistent with NumPy broadcasting
        # in general, because it only uses one of the two broadcasting
        # rules (adding a new "1" dimension to the left of the shape),
        # applied to the output instead of an input. In NumPy 2.0, this kind
        # of broadcasting assignment will likely be disallowed.
        a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        # The other type of broadcasting would require a reduction operation.

        def assign(a, b):
            a[...] = b

        assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))

    def test_assignment_errors(self):
        # Address issue #2276
        class C:
            pass
        a = np.zeros(1)

        def assign(v):
            a[0] = v

        # Unconvertible objects and wrong-length sequences must raise.
        assert_raises((AttributeError, TypeError), assign, C())
        assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
    """Tests for dtype construction and byte-order comparison."""

    def test_construction(self):
        # Type-code strings and scalar types build equal dtypes.
        d1 = np.dtype('i4')
        assert_equal(d1, np.dtype(np.int32))
        d2 = np.dtype('f8')
        assert_equal(d2, np.dtype(np.float64))

    def test_byteorders(self):
        # Little- and big-endian variants of a dtype must not compare equal,
        # for both plain and structured dtypes.
        self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
        self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
    """Tests for indexing and assignment on zero-rank (0-d) arrays."""

    def setUp(self):
        # One numeric and one object-dtype 0-d array.
        self.d = np.array(0), np.array('x', object)

    def test_ellipsis_subscript(self):
        a, b = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(b[...], 'x')
        self.assertTrue(a[...].base is a)  # `a[...] is a` in numpy <1.9.
        self.assertTrue(b[...].base is b)  # `b[...] is b` in numpy <1.9.

    def test_empty_subscript(self):
        a, b = self.d
        # The empty tuple extracts the scalar from a 0-d array.
        self.assertEqual(a[()], 0)
        self.assertEqual(b[()], 'x')
        self.assertTrue(type(a[()]) is a.dtype.type)
        self.assertTrue(type(b[()]) is str)

    def test_invalid_subscript(self):
        a, b = self.d
        # Integer and array indices are invalid on a 0-d array.
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[0], b)
        self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
        self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)

    def test_ellipsis_subscript_assignment(self):
        a, b = self.d
        a[...] = 42
        self.assertEqual(a, 42)
        b[...] = ''
        self.assertEqual(b.item(), '')

    def test_empty_subscript_assignment(self):
        a, b = self.d
        a[()] = 42
        self.assertEqual(a, 42)
        b[()] = ''
        self.assertEqual(b.item(), '')

    def test_invalid_subscript_assignment(self):
        a, b = self.d

        def assign(x, i, v):
            x[i] = v

        self.assertRaises(IndexError, assign, a, 0, 42)
        self.assertRaises(IndexError, assign, b, 0, '')
        # Assigning a string into an int array via () must fail.
        self.assertRaises(ValueError, assign, a, (), '')

    def test_newaxis(self):
        a, b = self.d
        # Every np.newaxis adds one length-1 dimension.
        self.assertEqual(a[np.newaxis].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ...].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)

    def test_invalid_newaxis(self):
        a, b = self.d

        def subscript(x, i):
            x[i]

        self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)

    def test_constructor(self):
        # 0-d ndarray constructed directly, with and without a shared buffer.
        x = np.ndarray(())
        x[()] = 5
        self.assertEqual(x[()], 5)
        y = np.ndarray((), buffer=x)
        y[()] = 6
        self.assertEqual(x[()], 6)

    def test_output(self):
        # A 0-d output argument cannot receive a broadcast 1-d result.
        x = np.array(2)
        self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
    """Indexing behaviour of array scalars, plus overlapping assignment."""

    def setUp(self):
        # An array scalar (element extracted from a 1-d array), not an array.
        self.d = np.array([0, 1])[0]

    def test_ellipsis_subscript(self):
        a = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(a[...].shape, ())

    def test_empty_subscript(self):
        a = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(a[()].shape, ())

    def test_invalid_subscript(self):
        a = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)

    def test_invalid_subscript_assignment(self):
        a = self.d

        def assign(x, i, v):
            x[i] = v

        # Array scalars are immutable, so item assignment is a TypeError
        # (contrast with the 0-d array case, which raises IndexError).
        self.assertRaises(TypeError, assign, a, 0, 42)

    def test_newaxis(self):
        # Each np.newaxis adds one dimension of length 1.
        a = self.d
        self.assertEqual(a[np.newaxis].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ...].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)

    def test_invalid_newaxis(self):
        a = self.d

        def subscript(x, i):
            x[i]

        self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)

    def test_overlapping_assignment(self):
        # Assignment between overlapping views must behave as if the
        # right-hand side were fully evaluated before any write happens.

        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])

        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0, 0, 1, 2])

        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3, 2, 1, 0])

        a = np.arange(6).reshape(2, 3)
        a[::-1,:] = a[:, ::-1]
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])

        a = np.arange(6).reshape(2, 3)
        a[::-1, ::-1] = a[:, ::-1]
        assert_equal(a, [[3, 4, 5], [0, 1, 2]])

        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2, 3, 4, 3, 4])

        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0, 1, 0, 1, 2])

        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4, 3, 2, 3, 4])

        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0, 1, 2, 1, 0])

        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2, 3, 4, 3, 4])

        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
    """Array construction: the ``__array__`` protocol, conversion from
    strings, zero-initialisation, and dtype inference from sequences."""

    def test_from_attribute(self):
        # An __array__ method that returns None (not an ndarray) is rejected.
        class x(object):
            def __array__(self, dtype=None):
                pass

        self.assertRaises(ValueError, np.array, x())

    def test_from_string(self):
        # Numeric strings convert to every integer and float dtype.
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123', '123']
        result = np.array([123, 123], dtype=int)
        for type in types:
            msg = 'String conversion for %s' % type
            assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)

    def test_void(self):
        # An unsized void dtype is still kind 'V'.
        arr = np.array([], dtype='V')
        assert_equal(arr.dtype.kind, 'V')

    def test_zeros(self):
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((13,), dtype=dt)
            assert_equal(np.count_nonzero(d), 0)
            # true for ieee floats
            assert_equal(d.sum(), 0)
            assert_(not d.any())

        # Sub-array dtypes must be zero-initialised as well.
        d = np.zeros(2, dtype='(2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())

        d = np.zeros(2, dtype='4i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())

        d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
        assert_equal(np.count_nonzero(d), 0)

    @dec.slow
    def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((30 * 1024**2,), dtype=dt)
            assert_(not d.any())

    def test_zeros_obj(self):
        # test initialization from PyLong(0)
        d = np.zeros((13,), dtype=object)
        assert_array_equal(d, [0] * 13)
        assert_equal(np.count_nonzero(d), 0)

    def test_zeros_obj_obj(self):
        # Object fields with a sub-array shape are also zero-filled.
        d = np.zeros(10, dtype=[('k', object, 2)])
        assert_array_equal(d['k'], 0)

    def test_zeros_like_like_zeros(self):
        # test zeros_like returns the same as zeros
        for c in np.typecodes['All']:
            if c == 'V':
                continue
            d = np.zeros((3,3), dtype=c)
            assert_array_equal(np.zeros_like(d), d)
            assert_equal(np.zeros_like(d).dtype, d.dtype)
        # explicitly check some special cases
        d = np.zeros((3,3), dtype='S5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='U5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

        d = np.zeros((3,3), dtype='<i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

        d = np.zeros((3,3), dtype='<M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

        d = np.zeros((3,3), dtype='f4,f4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)

    def test_empty_unicode(self):
        # don't throw decode errors on garbage memory
        for i in range(5, 100, 5):
            d = np.empty(i, dtype='U')
            str(d)

    def test_sequence_non_homogenous(self):
        # Ints too large for any native dtype promote to object; mixing
        # real and complex promotes to complex.
        assert_equal(np.array([4, 2**80]).dtype, np.object)
        assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80] * 3).dtype, np.object)
        assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
        assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
        assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)

    @dec.skipif(sys.version_info[0] >= 3)
    def test_sequence_long(self):
        # Python 2 only: `long` values infer np.long, overflowing to object.
        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
        assert_equal(np.array([long(4), 2**80]).dtype, np.object)
        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
        assert_equal(np.array([2**80, long(4)]).dtype, np.object)

    def test_non_sequence_sequence(self):
        """Should not segfault.

        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.

        """
        class Fail(object):
            def __len__(self):
                return 1

            def __getitem__(self, index):
                raise ValueError()

        class Map(object):
            def __len__(self):
                return 1

            def __getitem__(self, index):
                raise KeyError()

        a = np.array([Map()])
        assert_(a.shape == (1,))
        assert_(a.dtype == np.dtype(object))
        assert_raises(ValueError, np.array, [Fail()])

    def test_no_len_object_type(self):
        # gh-5100, want object array from iterable object without len()
        class Point2:
            def __init__(self):
                pass

            def __getitem__(self, ind):
                if ind in [0, 1]:
                    return ind
                else:
                    raise IndexError()
        d = np.array([Point2(), Point2(), Point2()])
        assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
    """Structured (record) dtypes: sub-array field access, comparison,
    casting between layouts, multi-field views and field assignment."""

    def test_subarray_field_access(self):
        # A field whose type is itself a (2, 2) sub-array.
        a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
        a['a'] = np.arange(60).reshape(3, 5, 2, 2)

        # Since the subarray is always in C-order, a transpose
        # does not swap the subarray:
        assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))

        # In Fortran order, the subarray gets appended
        # like in all other cases, not prepended as a special case
        b = a.copy(order='F')
        assert_equal(a['a'].shape, b['a'].shape)
        assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)

    def test_subarray_comparison(self):
        # Check that comparisons between record arrays with
        # multi-dimensional field types work properly
        a = np.rec.fromrecords(
            [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
            dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
        b = a.copy()
        assert_equal(a == b, [True, True])
        assert_equal(a != b, [False, False])
        b[1].b = 'c'
        assert_equal(a == b, [True, False])
        assert_equal(a != b, [False, True])
        # Perturb one element of the sub-array field 'a' at a time.
        for i in range(3):
            b[0].a = a[0].a
            b[0].a[i] = 5
            assert_equal(a == b, [False, False])
            assert_equal(a != b, [True, True])
        # Perturb one element of the 2-D sub-array field 'c' at a time.
        for i in range(2):
            for j in range(2):
                b = a.copy()
                b[0].c[i, j] = 10
                assert_equal(a == b, [False, True])
                assert_equal(a != b, [True, False])

        # Check that broadcasting with a subarray works
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
        assert_equal(a == b, [[True, True, False], [False, False, True]])
        assert_equal(b == a, [[True, True, False], [False, False, True]])
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
        assert_equal(a == b, [[True, True, False], [False, False, True]])
        assert_equal(b == a, [[True, True, False], [False, False, True]])
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a == b, [[True, False, False], [False, False, True]])
        assert_equal(b == a, [[True, False, False], [False, False, True]])

        # Check that broadcasting Fortran-style arrays with a subarray work
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a == b, [[True, False, False], [False, False, True]])
        assert_equal(b == a, [[True, False, False], [False, False, True]])

        # Check that incompatible sub-array shapes don't result to broadcasting
        x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)
        x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)

        # Check that structured arrays that are different only in
        # byte-order work
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
        assert_equal(a == b, [False, True])

    def test_casting(self):
        # Check that casting a structured array to change its byte order
        # works
        a = np.array([(1,)], dtype=[('a', '<i4')])
        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
        b = a.astype([('a', '>i4')])
        assert_equal(b, a.byteswap().newbyteorder())
        assert_equal(a['a'][0], b['a'][0])

        # Check that equality comparison works on structured arrays if
        # they are 'equiv'-castable
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        assert_equal(a == b, [True, True])

        # Check that 'equiv' casting can reorder fields and change byte
        # order
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        c = a.astype(b.dtype, casting='equiv')
        assert_equal(a == c, [True, True])

        # Check that 'safe' casting can change byte order and up-cast
        # fields
        t = [('a', '<i8'), ('b', '>f8')]
        assert_(np.can_cast(a.dtype, t, casting='safe'))
        c = a.astype(t, casting='safe')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])

        # Check that 'same_kind' casting can change byte order and
        # change field widths within a "kind"
        t = [('a', '<i4'), ('b', '>f4')]
        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
        c = a.astype(t, casting='same_kind')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])

        # Check that casting fails if the casting rule should fail on
        # any of the fields
        t = [('a', '>i8'), ('b', '<f4')]
        assert_(not np.can_cast(a.dtype, t, casting='safe'))
        assert_raises(TypeError, a.astype, t, casting='safe')
        t = [('a', '>i2'), ('b', '<f8')]
        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
        assert_raises(TypeError, a.astype, t, casting='equiv')
        t = [('a', '>i8'), ('b', '<i2')]
        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
        assert_raises(TypeError, a.astype, t, casting='same_kind')
        assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
        assert_raises(TypeError, a.astype, b.dtype, casting='no')

        # Check that non-'unsafe' casting can't change the set of field names
        for casting in ['no', 'safe', 'equiv', 'same_kind']:
            t = [('a', '>i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))

    def test_objview(self):
        # Multi-field selection must not raise even with object fields.
        # https://github.com/numpy/numpy/issues/3286
        a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
        a[['a', 'b']]  # TypeError?

        # https://github.com/numpy/numpy/issues/3253
        dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
        dat2[['B', 'A']]  # TypeError?

    def test_setfield(self):
        # https://github.com/numpy/numpy/issues/3126
        struct_dt = np.dtype([('elem', 'i4', 5),])
        dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
        x = np.zeros(1, dt)
        x[0]['field'] = np.ones(10, dtype='i4')
        x[0]['struct'] = np.ones(1, dtype=struct_dt)
        assert_equal(x[0]['field'], np.ones(10, dtype='i4'))

    def test_setfield_object(self):
        # make sure object field assignment with ndarray value
        # on void scalar mimics setitem behavior
        b = np.zeros(1, dtype=[('x', 'O')])
        # next line should work identically to b['x'][0] = np.arange(3)
        b[0]['x'] = np.arange(3)
        assert_equal(b[0]['x'], np.arange(3))

        # check that the broadcasting check still works
        c = np.zeros(1, dtype=[('x', 'O', 5)])

        def testassign():
            c[0]['x'] = np.arange(3)

        assert_raises(ValueError, testassign)
class TestBool(TestCase):
    """Boolean arrays: scalar interning, sums, and count_nonzero."""

    def test_test_interning(self):
        # np.bool_ scalars are interned singletons: 0/False and 1/True
        # each map to one shared object.
        a0 = np.bool_(0)
        b0 = np.bool_(False)
        self.assertTrue(a0 is b0)
        a1 = np.bool_(1)
        b1 = np.bool_(True)
        self.assertTrue(a1 is b1)
        self.assertTrue(np.array([True])[0] is a1)
        self.assertTrue(np.array(True)[()] is a1)

    def test_sum(self):
        # sum() of an all-True array equals its size, including strided views.
        d = np.ones(101, dtype=np.bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)

        # 0xff bytes are truthy even though they are not equal to 1.
        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)

    def check_count_nonzero(self, power, length):
        # Enumerate every `power`-bit pattern laid out in an array of
        # `length` booleans and compare count_nonzero to a Python sum.
        powers = [2 ** i for i in range(length)]
        for i in range(2**power):
            l = [(i & x) != 0 for x in powers]
            a = np.array(l, dtype=np.bool)
            c = builtins.sum(l)
            self.assertEqual(np.count_nonzero(a), c)
            # The count must be unchanged when the underlying bytes hold
            # truthy values other than exactly 1.
            av = a.view(np.uint8)
            av *= 3
            self.assertEqual(np.count_nonzero(a), c)
            av *= 4
            self.assertEqual(np.count_nonzero(a), c)
            av[av != 0] = 0xFF
            self.assertEqual(np.count_nonzero(a), c)

    def test_count_nonzero(self):
        # check all 12 bit combinations in a length 17 array
        # covers most cases of the 16 byte unrolled code
        self.check_count_nonzero(12, 17)

    @dec.slow
    def test_count_nonzero_all(self):
        # check all combinations in a length 17 array
        # covers all cases of the 16 byte unrolled code
        self.check_count_nonzero(17, 17)

    def test_count_nonzero_unaligned(self):
        # prevent mistakes as e.g. gh-4060
        for o in range(7):
            a = np.zeros((18,), dtype=np.bool)[o+1:]
            a[:o] = True
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
            a = np.ones((18,), dtype=np.bool)[o+1:]
            a[:o] = False
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
    """Exercise ndarray.sort across dtypes, sort kinds and axes.

    Covers NaN ordering for floats/complex, byte-swapped buffers,
    string/unicode/object/structured/datetime64/timedelta64 arrays,
    and axis handling including empty multidimensional arrays.
    """
    # test ordering for floats and complex containing nans. It is only
    # necessary to check the lessthan comparison, so sorts that
    # only follow the insertion sort path are sufficient. We only
    # test doubles and complex doubles as the logic is the same.

    # check doubles: NaNs sort to the end, so the sorted result is the
    # reverse of the input constructed below.
    msg = "Test real sort order with nans"
    a = np.array([np.nan, 1, 0])
    b = np.sort(a)
    assert_equal(b, a[::-1], msg)
    # check complex: ordered lexicographically on (real, imag) with NaNs last.
    msg = "Test complex sort order with nans"
    a = np.zeros(9, dtype=np.complex128)
    a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
    a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
    b = np.sort(a)
    assert_equal(b, a[::-1], msg)

    # all c scalar sorts use the same code with different types
    # so it suffices to run a quick check with one type. The number
    # of sorted items must be greater than ~50 to check the actual
    # algorithm because quick and merge sort fall over to insertion
    # sort for small arrays.
    a = np.arange(101)
    b = a[::-1].copy()
    for kind in ['q', 'm', 'h']:
        msg = "scalar sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # test complex sorts. These use the same code as the scalars
    # but the compare function differs.
    ai = a*1j + 1
    bi = b*1j + 1
    for kind in ['q', 'm', 'h']:
        msg = "complex sort, real part == 1, kind=%s" % kind
        c = ai.copy()
        c.sort(kind=kind)
        assert_equal(c, ai, msg)
        c = bi.copy()
        c.sort(kind=kind)
        assert_equal(c, ai, msg)
    ai = a + 1j
    bi = b + 1j
    for kind in ['q', 'm', 'h']:
        msg = "complex sort, imag part == 1, kind=%s" % kind
        c = ai.copy()
        c.sort(kind=kind)
        assert_equal(c, ai, msg)
        c = bi.copy()
        c.sort(kind=kind)
        assert_equal(c, ai, msg)

    # test sorting of complex arrays requiring byte-swapping, gh-5441
    for endianess in '<>':
        for dt in np.typecodes['Complex']:
            arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
            c = arr.copy()
            c.sort()
            msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
            assert_equal(c, arr, msg)

    # test string sorts.
    s = 'aaaaaaaa'
    a = np.array([s + chr(i) for i in range(101)])
    b = a[::-1].copy()
    for kind in ['q', 'm', 'h']:
        msg = "string sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # test unicode sorts.
    s = 'aaaaaaaa'
    a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
    b = a[::-1].copy()
    for kind in ['q', 'm', 'h']:
        msg = "unicode sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # test object array sorts.
    a = np.empty((101,), dtype=np.object)
    a[:] = list(range(101))
    b = a[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "object sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # test record array sorts.
    dt = np.dtype([('f', float), ('i', int)])
    a = np.array([(i, i) for i in range(101)], dtype=dt)
    b = a[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "object sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # test datetime64 sorts.
    a = np.arange(0, 101, dtype='datetime64[D]')
    b = a[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "datetime64 sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # test timedelta64 sorts.
    a = np.arange(0, 101, dtype='timedelta64[D]')
    b = a[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "timedelta64 sort, kind=%s" % kind
        c = a.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)
        c = b.copy()
        c.sort(kind=kind)
        assert_equal(c, a, msg)

    # check axis handling. This should be the same for all type
    # specific sorts, so we only check it for one type and one kind
    a = np.array([[3, 2], [1, 0]])
    b = np.array([[1, 0], [3, 2]])
    c = np.array([[2, 3], [0, 1]])
    d = a.copy()
    d.sort(axis=0)
    assert_equal(d, b, "test sort with axis=0")
    d = a.copy()
    d.sort(axis=1)
    assert_equal(d, c, "test sort with axis=1")
    d = a.copy()
    d.sort()  # default axis is -1, i.e. the last axis
    assert_equal(d, c, "test sort with default axis")

    # check axis handling for multidimensional empty arrays
    a = np.array([])
    a.shape = (3, 2, 1, 0)
    for axis in range(-a.ndim, a.ndim):
        msg = 'test empty array sort with axis={0}'.format(axis)
        assert_equal(np.sort(a, axis=axis), a, msg)
    msg = 'test empty array sort with axis=None'
    assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
    """Sorting a structured/record array with the `order=` keyword."""
    # Test sorting an array with fields
    x1 = np.array([21, 32, 14])
    x2 = np.array(['my', 'first', 'name'])
    x3 = np.array([3.1, 4.5, 6.2])
    r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')

    # Sort on each field in turn; the other columns must follow.
    r.sort(order=['id'])
    assert_equal(r.id, np.array([14, 21, 32]))
    assert_equal(r.word, np.array(['name', 'my', 'first']))
    assert_equal(r.number, np.array([6.2, 3.1, 4.5]))

    r.sort(order=['word'])
    assert_equal(r.id, np.array([32, 21, 14]))
    assert_equal(r.word, np.array(['first', 'my', 'name']))
    assert_equal(r.number, np.array([4.5, 3.1, 6.2]))

    r.sort(order=['number'])
    assert_equal(r.id, np.array([21, 32, 14]))
    assert_equal(r.word, np.array(['my', 'first', 'name']))
    assert_equal(r.number, np.array([3.1, 4.5, 6.2]))

    # Deliberately pick the non-native byte order for 'col2' so the
    # field sort has to go through byte-swapped comparisons.
    if sys.byteorder == 'little':
        strtype = '>i2'
    else:
        strtype = '<i2'
    # NOTE(review): `strchar` is defined elsewhere in this file —
    # presumably 'S' or 'U' depending on Python version; confirm.
    mydtype = [('name', strchar + '5'), ('col2', strtype)]
    r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                 dtype=mydtype)
    r.sort(order='col2')
    assert_equal(r['col2'], [1, 3, 255, 258])
    assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                             dtype=mydtype))
def test_argsort(self):
    """Exercise ndarray.argsort across dtypes, sort kinds and axes.

    Mirrors test_sort: scalar/complex/string/unicode/object/structured/
    datetime64/timedelta64 inputs, byte-swapped buffers, axis handling,
    and stability of the mergesort ('m') kind.
    """
    # all c scalar argsorts use the same code with different types
    # so it suffices to run a quick check with one type. The number
    # of sorted items must be greater than ~50 to check the actual
    # algorithm because quick and merge sort fall over to insertion
    # sort for small arrays.
    a = np.arange(101)
    b = a[::-1].copy()
    for kind in ['q', 'm', 'h']:
        msg = "scalar argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), a, msg)
        assert_equal(b.copy().argsort(kind=kind), b, msg)

    # test complex argsorts. These use the same code as the scalars
    # but the compare function differs.
    ai = a*1j + 1
    bi = b*1j + 1
    for kind in ['q', 'm', 'h']:
        msg = "complex argsort, kind=%s" % kind
        assert_equal(ai.copy().argsort(kind=kind), a, msg)
        assert_equal(bi.copy().argsort(kind=kind), b, msg)
    ai = a + 1j
    bi = b + 1j
    for kind in ['q', 'm', 'h']:
        msg = "complex argsort, kind=%s" % kind
        assert_equal(ai.copy().argsort(kind=kind), a, msg)
        assert_equal(bi.copy().argsort(kind=kind), b, msg)

    # test argsort of complex arrays requiring byte-swapping, gh-5441
    for endianess in '<>':
        for dt in np.typecodes['Complex']:
            arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
            msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
            assert_equal(arr.argsort(),
                         np.arange(len(arr), dtype=np.intp), msg)

    # test string argsorts.
    s = 'aaaaaaaa'
    a = np.array([s + chr(i) for i in range(101)])
    b = a[::-1].copy()
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'm', 'h']:
        msg = "string argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test unicode argsorts.
    s = 'aaaaaaaa'
    a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'm', 'h']:
        msg = "unicode argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test object array argsorts.
    a = np.empty((101,), dtype=np.object)
    a[:] = list(range(101))
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'm', 'h']:
        msg = "object argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test structured array argsorts.
    dt = np.dtype([('f', float), ('i', int)])
    a = np.array([(i, i) for i in range(101)], dtype=dt)
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'm', 'h']:
        msg = "structured array argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test datetime64 argsorts.
    a = np.arange(0, 101, dtype='datetime64[D]')
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "datetime64 argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # test timedelta64 argsorts.
    a = np.arange(0, 101, dtype='timedelta64[D]')
    b = a[::-1]
    r = np.arange(101)
    rr = r[::-1]
    for kind in ['q', 'h', 'm']:
        msg = "timedelta64 argsort, kind=%s" % kind
        assert_equal(a.copy().argsort(kind=kind), r, msg)
        assert_equal(b.copy().argsort(kind=kind), rr, msg)

    # check axis handling. This should be the same for all type
    # specific argsorts, so we only check it for one type and one kind
    a = np.array([[3, 2], [1, 0]])
    b = np.array([[1, 1], [0, 0]])
    c = np.array([[1, 0], [1, 0]])
    assert_equal(a.copy().argsort(axis=0), b)
    assert_equal(a.copy().argsort(axis=1), c)
    assert_equal(a.copy().argsort(), c)
    # using None is known fail at this point
    #assert_equal(a.copy().argsort(axis=None, c)

    # check axis handling for multidimensional empty arrays
    a = np.array([])
    a.shape = (3, 2, 1, 0)
    for axis in range(-a.ndim, a.ndim):
        msg = 'test empty array argsort with axis={0}'.format(axis)
        assert_equal(np.argsort(a, axis=axis),
                     np.zeros_like(a, dtype=np.intp), msg)
    msg = 'test empty array argsort with axis=None'
    assert_equal(np.argsort(a, axis=None),
                 np.zeros_like(a.ravel(), dtype=np.intp), msg)

    # check that stable argsorts are stable: on an array of equal keys
    # the identity permutation must come back.
    r = np.arange(100)
    # scalars
    a = np.zeros(100)
    assert_equal(a.argsort(kind='m'), r)
    # complex
    a = np.zeros(100, dtype=np.complex)
    assert_equal(a.argsort(kind='m'), r)
    # string
    a = np.array(['aaaaaaaaa' for i in range(100)])
    assert_equal(a.argsort(kind='m'), r)
    # unicode
    a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
    assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
    """searchsorted must agree with the array's own sort order.

    Covers NaNs in float/complex input, byte-swapped buffers, empty and
    degenerate arrays, unaligned data, index resetting, and every
    type-specific binary search implementation.
    """
    # test for floats and complex containing nans. The logic is the
    # same for all float types so only test double types for now.
    # The search sorted routines use the compare functions for the
    # array type, so this checks if that is consistent with the sort
    # order.

    # check double: every element finds its own position.
    a = np.array([0, 1, np.nan])
    msg = "Test real searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='l')
    assert_equal(b, np.arange(3), msg)
    msg = "Test real searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='r')
    assert_equal(b, np.arange(1, 4), msg)
    # check double complex
    a = np.zeros(9, dtype=np.complex128)
    a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
    a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
    msg = "Test complex searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='l')
    assert_equal(b, np.arange(9), msg)
    msg = "Test complex searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='r')
    assert_equal(b, np.arange(1, 10), msg)
    # Explicit byte orders must both work.
    msg = "Test searchsorted with little endian, side='l'"
    a = np.array([0, 128], dtype='<i4')
    b = a.searchsorted(np.array(128, dtype='<i4'))
    assert_equal(b, 1, msg)
    msg = "Test searchsorted with big endian, side='l'"
    a = np.array([0, 128], dtype='>i4')
    b = a.searchsorted(np.array(128, dtype='>i4'))
    assert_equal(b, 1, msg)

    # Check 0 elements
    a = np.ones(0)
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 0])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 0, 0])
    a = np.ones(1)
    # Check 1 element
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 1])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 1, 1])
    # Check all elements equal
    a = np.ones(2)
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 2])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 2, 2])

    # Test searching unaligned array
    a = np.arange(10)
    aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
    unaligned = aligned[1:].view(a.dtype)  # offset by one byte
    unaligned[:] = a
    # Test searching unaligned array
    b = unaligned.searchsorted(a, 'l')
    assert_equal(b, a)
    b = unaligned.searchsorted(a, 'r')
    assert_equal(b, a + 1)
    # Test searching for unaligned keys
    b = a.searchsorted(unaligned, 'l')
    assert_equal(b, a)
    b = a.searchsorted(unaligned, 'r')
    assert_equal(b, a + 1)

    # Test smart resetting of binsearch indices
    a = np.arange(5)
    b = a.searchsorted([6, 5, 4], 'l')
    assert_equal(b, [5, 5, 4])
    b = a.searchsorted([6, 5, 4], 'r')
    assert_equal(b, [5, 5, 5])

    # Test all type specific binary search functions
    types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                     np.typecodes['Datetime'], '?O'))
    for dt in types:
        if dt == 'M':
            dt = 'M8[D]'
        if dt == '?':
            # booleans only have two distinct values
            a = np.arange(2, dtype=dt)
            out = np.arange(2)
        else:
            a = np.arange(0, 5, dtype=dt)
            out = np.arange(5)
        b = a.searchsorted(a, 'l')
        assert_equal(b, out)
        b = a.searchsorted(a, 'r')
        assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
    """Regression test for searchsorted on long unicode strings."""
    # Test searchsorted on unicode strings.

    # 1.6.1 contained a string length miscalculation in
    # arraytypes.c.src:UNICODE_compare() which manifested as
    # incorrect/inconsistent results from searchsorted.
    a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
                  'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
                 dtype=np.unicode)
    ind = np.arange(len(a))
    # Every element must locate itself, whether the key is iterated or
    # looked up by index.
    assert_equal([a.searchsorted(v, 'left') for v in a], ind)
    assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
    assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
    assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
    def test_searchsorted_with_sorter(self):
        # Exercise searchsorted's `sorter` argument: argument validation,
        # agreement with searching a pre-sorted copy, unaligned buffers,
        # the type-specific indirect binary search implementations, and
        # non-contiguous sorter arrays.
        a = np.array([5, 2, 1, 3, 4])
        s = np.argsort(a)
        # sorter must be an integer array of matching size
        assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
        assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
        # bounds check
        assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
        assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
        # searching with a sorter must agree with searching a sorted copy
        a = np.random.rand(300)
        s = a.argsort()
        b = np.sort(a)
        k = np.linspace(0, 1, 20)
        assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
        # repeated values: check both sides against hand-computed indices
        a = np.array([0, 1, 2, 3, 5]*20)
        s = a.argsort()
        k = [0, 1, 2, 3, 5]
        expected = [0, 20, 40, 60, 80]
        assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
        expected = [20, 40, 60, 80, 100]
        assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
        # Test searching unaligned array
        keys = np.arange(10)
        a = keys.copy()
        # NOTE(review): this shuffle is dead code -- `s` is overwritten by
        # the argsort on the next line.
        np.random.shuffle(s)
        s = a.argsort()
        # buffer offset by one byte forces an unaligned view of `a`
        aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
        unaligned = aligned[1:].view(a.dtype)
        # Test searching unaligned array
        unaligned[:] = a
        b = unaligned.searchsorted(keys, 'l', s)
        assert_equal(b, keys)
        b = unaligned.searchsorted(keys, 'r', s)
        assert_equal(b, keys + 1)
        # Test searching for unaligned keys
        unaligned[:] = keys
        b = a.searchsorted(unaligned, 'l', s)
        assert_equal(b, keys)
        b = a.searchsorted(unaligned, 'r', s)
        assert_equal(b, keys + 1)
        # Test all type specific indirect binary search functions
        types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                         np.typecodes['Datetime'], '?O'))
        for dt in types:
            if dt == 'M':
                dt = 'M8[D]'
            if dt == '?':
                a = np.array([1, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp in all platforms, to check for #4698
                s = np.array([1, 0], dtype=np.int16)
                out = np.array([1, 0])
            else:
                a = np.array([3, 4, 1, 2, 0], dtype=dt)
                # We want the sorter array to be of a type that is different
                # from np.intp in all platforms, to check for #4698
                s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
                out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
            b = a.searchsorted(a, 'l', s)
            assert_equal(b, out)
            b = a.searchsorted(a, 'r', s)
            assert_equal(b, out + 1)
        # Test non-contiguous sorter array
        a = np.array([3, 4, 1, 2, 0])
        # fill odd slots with -1 so a stray read of the gaps would fail the
        # bounds check; the strided view below uses only the even slots
        srt = np.empty((10,), dtype=np.intp)
        srt[1::2] = -1
        srt[::2] = [4, 2, 3, 0, 1]
        s = srt[::2]
        out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
        b = a.searchsorted(a, 'l', s)
        assert_equal(b, out)
        b = a.searchsorted(a, 'r', s)
        assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.partition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
    def test_partition(self):
        # Exhaustive behavior test for np.partition/np.argpartition with the
        # 'introselect' kind: argument validation, tiny arrays, sorted and
        # reverse-sorted input, duplicate elements, multiple kth values,
        # multi-dimensional axes, and in-place partitioning.
        d = np.arange(10)
        # kind must be a recognized string
        assert_raises(TypeError, np.partition, d, 2, kind=1)
        assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
        assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
        assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
        assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
        for k in ("introselect",):
            # degenerate sizes: empty and single-element arrays
            d = np.array([])
            assert_array_equal(np.partition(d, 0, kind=k), d)
            assert_array_equal(np.argpartition(d, 0, kind=k), d)
            d = np.ones((1))
            assert_array_equal(np.partition(d, 0, kind=k)[0], d)
            assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                               np.partition(d, 0, kind=k))
            # kth not modified
            kth = np.array([30, 15, 5])
            okth = kth.copy()
            np.partition(np.arange(40), kth)
            assert_array_equal(kth, okth)
            # all permutations of two elements (incl. duplicates)
            for r in ([2, 1], [1, 2], [1, 1]):
                d = np.array(r)
                tgt = np.sort(d)
                assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
                assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
                assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                                   np.partition(d, 0, kind=k))
                assert_array_equal(d[np.argpartition(d, 1, kind=k)],
                                   np.partition(d, 1, kind=k))
                # partitioning every suffix at 0 fully sorts the array
                for i in range(d.size):
                    d[i:].partition(0, kind=k)
                assert_array_equal(d, tgt)
            # representative permutations of three elements
            for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
                      [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
                d = np.array(r)
                tgt = np.sort(d)
                assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
                assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
                assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
                assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                                   np.partition(d, 0, kind=k))
                assert_array_equal(d[np.argpartition(d, 1, kind=k)],
                                   np.partition(d, 1, kind=k))
                assert_array_equal(d[np.argpartition(d, 2, kind=k)],
                                   np.partition(d, 2, kind=k))
                for i in range(d.size):
                    d[i:].partition(0, kind=k)
                assert_array_equal(d, tgt)
            # all-equal elements are already partitioned at any kth
            d = np.ones((50))
            assert_array_equal(np.partition(d, 0, kind=k), d)
            assert_array_equal(d[np.argpartition(d, 0, kind=k)],
                               np.partition(d, 0, kind=k))
            # sorted
            d = np.arange((49))
            self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
            self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
            assert_array_equal(d[np.argpartition(d, 5, kind=k)],
                               np.partition(d, 5, kind=k))
            assert_array_equal(d[np.argpartition(d, 15, kind=k)],
                               np.partition(d, 15, kind=k))
            # rsorted
            d = np.arange((47))[::-1]
            self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
            self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
            assert_array_equal(d[np.argpartition(d, 6, kind=k)],
                               np.partition(d, 6, kind=k))
            assert_array_equal(d[np.argpartition(d, 16, kind=k)],
                               np.partition(d, 16, kind=k))
            # negative kth is counted from the end (-6 == 41 for size 47)
            assert_array_equal(np.partition(d, -6, kind=k),
                               np.partition(d, 41, kind=k))
            assert_array_equal(np.partition(d, -16, kind=k),
                               np.partition(d, 31, kind=k))
            assert_array_equal(d[np.argpartition(d, -6, kind=k)],
                               np.partition(d, 41, kind=k))
            # median of 3 killer, O(n^2) on pure median 3 pivot quickselect
            # exercises the median of median of 5 code used to keep O(n)
            d = np.arange(1000000)
            x = np.roll(d, d.size // 2)
            mid = x.size // 2 + 1
            assert_equal(np.partition(x, mid)[mid], mid)
            d = np.arange(1000001)
            x = np.roll(d, d.size // 2 + 1)
            mid = x.size // 2 + 1
            assert_equal(np.partition(x, mid)[mid], mid)
            # max
            d = np.ones(10)
            d[1] = 4
            assert_equal(np.partition(d, (2, -1))[-1], 4)
            assert_equal(np.partition(d, (2, -1))[2], 1)
            assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
            assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
            # NaN sorts to the end, so it must land in the last slot
            d[1] = np.nan
            assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
            assert_(np.isnan(np.partition(d, (2, -1))[-1]))
            # equal elements
            d = np.arange((47)) % 7
            tgt = np.sort(np.arange((47)) % 7)
            np.random.shuffle(d)
            for i in range(d.size):
                self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
            assert_array_equal(d[np.argpartition(d, 6, kind=k)],
                               np.partition(d, 6, kind=k))
            assert_array_equal(d[np.argpartition(d, 16, kind=k)],
                               np.partition(d, 16, kind=k))
            for i in range(d.size):
                d[i:].partition(0, kind=k)
            assert_array_equal(d, tgt)
            # long run of duplicates straddling several kth values
            d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
                          7, 7, 7, 7, 7, 9])
            kth = [0, 3, 19, 20]
            assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
            assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
            d = np.array([2, 1])
            d.partition(0, kind=k)
            # out-of-range kth / axis must raise on both method and function
            assert_raises(ValueError, d.partition, 2)
            assert_raises(ValueError, d.partition, 3, axis=1)
            assert_raises(ValueError, np.partition, d, 2)
            assert_raises(ValueError, np.partition, d, 2, axis=1)
            assert_raises(ValueError, d.argpartition, 2)
            assert_raises(ValueError, d.argpartition, 3, axis=1)
            assert_raises(ValueError, np.argpartition, d, 2)
            assert_raises(ValueError, np.argpartition, d, 2, axis=1)
            # 2-d arrays: per-axis and flattened (axis=None) kth bounds
            d = np.arange(10).reshape((2, 5))
            d.partition(1, axis=0, kind=k)
            d.partition(4, axis=1, kind=k)
            np.partition(d, 1, axis=0, kind=k)
            np.partition(d, 4, axis=1, kind=k)
            np.partition(d, 1, axis=None, kind=k)
            np.partition(d, 9, axis=None, kind=k)
            d.argpartition(1, axis=0, kind=k)
            d.argpartition(4, axis=1, kind=k)
            np.argpartition(d, 1, axis=0, kind=k)
            np.argpartition(d, 4, axis=1, kind=k)
            np.argpartition(d, 1, axis=None, kind=k)
            np.argpartition(d, 9, axis=None, kind=k)
            assert_raises(ValueError, d.partition, 2, axis=0)
            assert_raises(ValueError, d.partition, 11, axis=1)
            assert_raises(TypeError, d.partition, 2, axis=None)
            assert_raises(ValueError, np.partition, d, 9, axis=1)
            assert_raises(ValueError, np.partition, d, 11, axis=None)
            assert_raises(ValueError, d.argpartition, 2, axis=0)
            assert_raises(ValueError, d.argpartition, 11, axis=1)
            assert_raises(ValueError, np.argpartition, d, 9, axis=1)
            assert_raises(ValueError, np.argpartition, d, 11, axis=None)
            # sweep every kth for several dtypes and sizes, 1-d and 2-d
            td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
                  for s in (9, 16)]
            for dt, s in td:
                aae = assert_array_equal
                at = self.assertTrue
                d = np.arange(s, dtype=dt)
                np.random.shuffle(d)
                d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                # NOTE(review): on Python 3 `map` is lazy, so this line does
                # not actually shuffle the rows -- Python 2 leftover.
                map(np.random.shuffle, d1)
                d0 = np.transpose(d1)
                for i in range(d.size):
                    p = np.partition(d, i, kind=k)
                    self.assertEqual(p[i], i)
                    # all before are smaller
                    assert_array_less(p[:i], p[i])
                    # all after are larger
                    assert_array_less(p[i], p[i + 1:])
                    aae(p, d[np.argpartition(d, i, kind=k)])
                    p = np.partition(d1, i, axis=1, kind=k)
                    aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
                    # array_less does not seem to work right
                    at((p[:, :i].T <= p[:, i]).all(),
                       msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
                    at((p[:, i + 1:].T > p[:, i]).all(),
                       msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
                    aae(p, d1[np.arange(d1.shape[0])[:, None],
                        np.argpartition(d1, i, axis=1, kind=k)])
                    p = np.partition(d0, i, axis=0, kind=k)
                    aae(p[i,:], np.array([i] * d1.shape[0],
                                         dtype=dt))
                    # array_less does not seem to work right
                    at((p[:i,:] <= p[i,:]).all(),
                       msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
                    at((p[i + 1:,:] > p[i,:]).all(),
                       msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
                    aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
                        np.arange(d0.shape[1])[None,:]])
                    # check inplace
                    dc = d.copy()
                    dc.partition(i, kind=k)
                    assert_equal(dc, np.partition(d, i, kind=k))
                    dc = d0.copy()
                    dc.partition(i, axis=0, kind=k)
                    assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
                    dc = d1.copy()
                    dc.partition(i, axis=1, kind=k)
                    assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
    def test_partition_iterative(self):
        # Partitioning with a sequence of kth values: bounds checking,
        # negative and unsorted kth, repeated kth, and 2-d axes.
        d = np.arange(17)
        kth = (0, 1, 2, 429, 231)
        # any out-of-range entry in the kth sequence must raise
        assert_raises(ValueError, d.partition, kth)
        assert_raises(ValueError, d.argpartition, kth)
        d = np.arange(10).reshape((2, 5))
        assert_raises(ValueError, d.partition, kth, axis=0)
        assert_raises(ValueError, d.partition, kth, axis=1)
        assert_raises(ValueError, np.partition, d, kth, axis=1)
        assert_raises(ValueError, np.partition, d, kth, axis=None)
        d = np.array([3, 4, 2, 1])
        p = np.partition(d, (0, 3))
        self.assert_partitioned(p, (0, 3))
        self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
        # negative kth values are equivalent to their positive counterparts
        assert_array_equal(p, np.partition(d, (-3, -1)))
        assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
        # partitioning at every index is a full sort
        d = np.arange(17)
        np.random.shuffle(d)
        d.partition(range(d.size))
        assert_array_equal(np.arange(17), d)
        np.random.shuffle(d)
        assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
        # test unsorted kth
        d = np.arange(17)
        np.random.shuffle(d)
        keys = np.array([1, 3, 8, -2])
        np.random.shuffle(d)
        p = np.partition(d, keys)
        self.assert_partitioned(p, keys)
        p = d[np.argpartition(d, keys)]
        self.assert_partitioned(p, keys)
        # the order of the kth values must not affect the result
        np.random.shuffle(keys)
        assert_array_equal(np.partition(d, keys), p)
        assert_array_equal(d[np.argpartition(d, keys)], p)
        # equal kth
        d = np.arange(20)[::-1]
        self.assert_partitioned(np.partition(d, [5]*4), [5])
        self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
                                [5]*4 + [6, 13])
        self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
        self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
                                [5]*4 + [6, 13])
        d = np.arange(12)
        np.random.shuffle(d)
        d1 = np.tile(np.arange(12), (4, 1))
        # NOTE(review): on Python 3 `map` is lazy, so this line does not
        # actually shuffle the rows -- Python 2 leftover.
        map(np.random.shuffle, d1)
        d0 = np.transpose(d1)
        kth = (1, 6, 7, -1)
        # row-wise (axis=1) partition agrees with argpartition-based gather
        p = np.partition(d1, kth, axis=1)
        pa = d1[np.arange(d1.shape[0])[:, None],
                d1.argpartition(kth, axis=1)]
        assert_array_equal(p, pa)
        for i in range(d1.shape[0]):
            self.assert_partitioned(p[i,:], kth)
        # column-wise (axis=0) partition agrees with argpartition-based gather
        p = np.partition(d0, kth, axis=0)
        pa = d0[np.argpartition(d0, kth, axis=0),
                np.arange(d0.shape[1])[None,:]]
        assert_array_equal(p, pa)
        for i in range(d0.shape[1]):
            self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
    def test_dot_override(self):
        # __numpy_ufunc__-based override of np.dot: an object returning a
        # value wins, one returning NotImplemented makes the call raise.
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        # (everything below the early return is intentionally dead code,
        # kept so the test can be re-enabled by deleting the return)
        return
        class A(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"
        class B(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented
        a = A()
        b = B()
        c = np.array([[1]])
        assert_equal(np.dot(a, b), "A")
        assert_equal(c.dot(a), "A")
        assert_raises(TypeError, np.dot, b, c)
        assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
    def test_ravel(self):
        # ravel() in 'C', 'F', 'A' and 'K' orders: which orders copy vs.
        # return a view, and how 'K' handles negative strides and
        # 1-sized axes with bogus strides.
        a = np.array([[0, 1], [2, 3]])
        assert_equal(a.ravel(), [0, 1, 2, 3])
        # C-contiguous input: ravel is a view, not a copy
        assert_(not a.ravel().flags.owndata)
        assert_equal(a.ravel('F'), [0, 2, 1, 3])
        assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
        assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
        assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
        assert_(not a.ravel(order='A').flags.owndata)
        assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
        assert_(not a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel(), a.reshape(-1))
        # Fortran-ordered input: 'A' and 'K' follow the memory layout
        a = np.array([[0, 1], [2, 3]], order='F')
        assert_equal(a.ravel(), [0, 1, 2, 3])
        assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
        assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
        assert_(not a.ravel(order='A').flags.owndata)
        assert_(not a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel(), a.reshape(-1))
        assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
        # reversed rows: non-contiguous, so all orders must copy
        a = np.array([[0, 1], [2, 3]])[::-1, :]
        assert_equal(a.ravel(), [2, 3, 0, 1])
        assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
        assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
        assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
        # 'K' doesn't reverse the axes of negative strides
        assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
        assert_(a.ravel(order='K').flags.owndata)
        # Test simple 1-d copy behaviour:
        a = np.arange(10)[::2]
        assert_(a.ravel('K').flags.owndata)
        assert_(a.ravel('C').flags.owndata)
        assert_(a.ravel('F').flags.owndata)
        # Not contiguous and 1-sized axis with non matching stride
        a = np.arange(2**3 * 2)[::2]
        a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
        strides = list(a.strides)
        strides[1] = 123
        a.strides = strides
        assert_(a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # contiguous and 1-sized axis with non matching stride works:
        a = np.arange(2**3)
        a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
        strides = list(a.strides)
        strides[1] = 123
        a.strides = strides
        assert_(np.may_share_memory(a.ravel(order='K'), a))
        assert_equal(a.ravel(order='K'), np.arange(2**3))
        # Test negative strides (not very interesting since non-contiguous):
        a = np.arange(4)[::-1].reshape(2, 2)
        assert_(a.ravel(order='C').flags.owndata)
        assert_(a.ravel(order='K').flags.owndata)
        assert_equal(a.ravel('C'), [3, 2, 1, 0])
        assert_equal(a.ravel('K'), [3, 2, 1, 0])
        # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
        a = np.array([[1]])
        a.strides = (123, 432)
        # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
        # them up on purpose:
        if np.ones(1).strides == (8,):
            assert_(np.may_share_memory(a.ravel('K'), a))
            assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
        for order in ('C', 'F', 'A', 'K'):
            # 0-d corner case:
            a = np.array(0)
            assert_equal(a.ravel(order), [0])
            assert_(np.may_share_memory(a.ravel(order), a))
        # Test that certain non-inplace ravels work right (mostly) for 'K':
        b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
        a = b[..., ::2]
        assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
        assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
        assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
        assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
        a = b[::2, ...]
        assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
        assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
        assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
        assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
    def test_swapaxes(self):
        # swapaxes over every (possibly negative) axis pair: shape, element
        # placement, view semantics, and non-contiguous input.
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
        idx = np.indices(a.shape)
        assert_(a.flags['OWNDATA'])
        b = a.copy()
        # check exceptions
        assert_raises(ValueError, a.swapaxes, -5, 0)
        assert_raises(ValueError, a.swapaxes, 4, 0)
        assert_raises(ValueError, a.swapaxes, 0, -5)
        assert_raises(ValueError, a.swapaxes, 0, 4)
        for i in range(-4, 4):
            for j in range(-4, 4):
                for k, src in enumerate((a, b)):
                    c = src.swapaxes(i, j)
                    # check shape
                    # (negative i/j index the shape list from the end, which
                    # matches how swapaxes interprets negative axes)
                    shape = list(src.shape)
                    shape[i] = src.shape[j]
                    shape[j] = src.shape[i]
                    assert_equal(c.shape, shape, str((i, j, k)))
                    # check array contents
                    i0, i1, i2, i3 = [dim-1 for dim in c.shape]
                    j0, j1, j2, j3 = [dim-1 for dim in src.shape]
                    assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
                                 c[idx[i0], idx[i1], idx[i2], idx[i3]],
                                 str((i, j, k)))
                    # check a view is always returned, gh-5260
                    assert_(not c.flags['OWNDATA'], str((i, j, k)))
                    # check on non-contiguous input array
                    # (feed the previous swapped view back in as `b`)
                    if k == 1:
                        b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
    def test_extension_incref_elide(self):
        # test extension (e.g. cython) calling PyNumber_* slots without
        # increasing the reference counts
        #
        # def incref_elide(a):
        #     d = input.copy() # refcount 1
        #     return d, d + d # PyNumber_Add without increasing refcount
        # (incref_elide is a C helper from numpy's private test-support
        # extension module; the `d + d` form must not be rewritten, since
        # the refcount-1 temporary is exactly what triggers elision)
        from numpy.core.multiarray_tests import incref_elide
        d = np.ones(5)
        orig, res = incref_elide(d)
        # the return original should not be changed to an inplace operation
        assert_array_equal(orig, d)
        assert_array_equal(res, d + d)
    def test_extension_incref_elide_stack(self):
        # scanning if the refcount == 1 object is on the python stack to check
        # that we are called directly from python is flawed as object may still
        # be above the stack pointer and we have no access to the top of it
        #
        # def incref_elide_l(d):
        #     return l[4] + l[4] # PyNumber_Add without increasing refcount
        # (incref_elide_l is a C helper from numpy's private test-support
        # extension module)
        from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwriten
        l = [1, 1, 1, 1, np.ones(5)]
        res = incref_elide_l(l)
        # the return original should not be changed to an inplace operation
        assert_array_equal(l[4], np.ones(5))
        assert_array_equal(res, l[4] + l[4])
    def test_ufunc_override_rop_precedence(self):
        # Check that __rmul__ and other right-hand operations have
        # precedence over __numpy_ufunc__
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        # (everything below the early return is intentionally dead code,
        # kept so the generator test can be re-enabled later)
        return

        # maps forward op -> (reflected op, matching ufunc or None,
        #                     whether an in-place variant exists)
        ops = {
            '__add__': ('__radd__', np.add, True),
            '__sub__': ('__rsub__', np.subtract, True),
            '__mul__': ('__rmul__', np.multiply, True),
            '__truediv__': ('__rtruediv__', np.true_divide, True),
            '__floordiv__': ('__rfloordiv__', np.floor_divide, True),
            '__mod__': ('__rmod__', np.remainder, True),
            '__divmod__': ('__rdivmod__', None, False),
            '__pow__': ('__rpow__', np.power, True),
            '__lshift__': ('__rlshift__', np.left_shift, True),
            '__rshift__': ('__rrshift__', np.right_shift, True),
            '__and__': ('__rand__', np.bitwise_and, True),
            '__xor__': ('__rxor__', np.bitwise_xor, True),
            '__or__': ('__ror__', np.bitwise_or, True),
            '__ge__': ('__le__', np.less_equal, False),
            '__gt__': ('__lt__', np.less, False),
            '__le__': ('__ge__', np.greater_equal, False),
            '__lt__': ('__gt__', np.greater, False),
            '__eq__': ('__eq__', np.equal, False),
            '__ne__': ('__ne__', np.not_equal, False),
        }

        class OtherNdarraySubclass(np.ndarray):
            pass

        class OtherNdarraySubclassWithOverride(np.ndarray):
            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))

        def check(op_name, ndsubclass):
            # build a class implementing op_name/rop_name plus a
            # __numpy_ufunc__ that fails loudly if invoked, then verify the
            # Python binop protocol wins over the ufunc override
            rop_name, np_op, has_iop = ops[op_name]

            if has_iop:
                iop_name = '__i' + op_name[2:]
                iop = getattr(operator, iop_name)

            if op_name == "__divmod__":
                op = divmod
            else:
                op = getattr(operator, op_name)

            # Dummy class
            def __init__(self, *a, **kw):
                pass

            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))

            def __op__(self, *other):
                return "op"

            def __rop__(self, *other):
                return "rop"

            if ndsubclass:
                bases = (np.ndarray,)
            else:
                bases = (object,)

            dct = {'__init__': __init__,
                   '__numpy_ufunc__': __numpy_ufunc__,
                   op_name: __op__}
            if op_name != rop_name:
                dct[rop_name] = __rop__

            cls = type("Rop" + rop_name, bases, dct)

            # Check behavior against both bare ndarray objects and a
            # ndarray subclasses with and without their own override
            obj = cls((1,), buffer=np.ones(1,))

            arr_objs = [np.array([1]),
                        np.array([2]).view(OtherNdarraySubclass),
                        np.array([3]).view(OtherNdarraySubclassWithOverride),
                        ]

            for arr in arr_objs:
                err_msg = "%r %r" % (op_name, arr,)

                # Check that ndarray op gives up if it sees a non-subclass
                if not isinstance(obj, arr.__class__):
                    assert_equal(getattr(arr, op_name)(obj),
                                 NotImplemented, err_msg=err_msg)

                # Check that the Python binops have priority
                assert_equal(op(obj, arr), "op", err_msg=err_msg)
                if op_name == rop_name:
                    assert_equal(op(arr, obj), "op", err_msg=err_msg)
                else:
                    assert_equal(op(arr, obj), "rop", err_msg=err_msg)

                # Check that Python binops have priority also for in-place ops
                if has_iop:
                    assert_equal(getattr(arr, iop_name)(obj),
                                 NotImplemented, err_msg=err_msg)
                    if op_name != "__pow__":
                        # inplace pow requires the other object to be
                        # integer-like?
                        assert_equal(iop(arr, obj), "rop", err_msg=err_msg)

                # Check that ufunc call __numpy_ufunc__ normally
                if np_op is not None:
                    assert_raises(AssertionError, np_op, arr, obj,
                                  err_msg=err_msg)
                    assert_raises(AssertionError, np_op, obj, arr,
                                  err_msg=err_msg)

        # Check all binary operations
        # (generator-style test: yields one check per op and base class)
        for op_name in sorted(ops.keys()):
            yield check, op_name, True
            yield check, op_name, False
    def test_ufunc_override_rop_simple(self):
        """Spell out how Python binary operators (__mul__/__rmul__/...)
        interact with ``__numpy_ufunc__`` for a plain object, an ndarray
        subclass, and a sub-subclass.

        NOTE(review): the early ``return`` below disables the whole test
        while ``__numpy_ufunc__`` is switched off for the 1.10 release
        (gh-5864); the body documents the intended behavior.
        """
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5864
        return
        # Check parts of the binary op overriding behavior in an
        # explicit test case that is easier to understand.
        class SomeClass(object):
            # Plain (non-ndarray) class: defines both the ufunc hook and
            # the Python operator protocol.
            def __numpy_ufunc__(self, *a, **kw):
                return "ufunc"
            def __mul__(self, other):
                return 123
            def __rmul__(self, other):
                return 321
            def __rsub__(self, other):
                return "no subs for me"
            def __gt__(self, other):
                return "yep"
            def __lt__(self, other):
                return "nope"
        class SomeClass2(SomeClass, np.ndarray):
            # ndarray subclass: handles multiply/bitwise_and itself,
            # delegates every other ufunc back to numpy on a plain view.
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                if ufunc is np.multiply or ufunc is np.bitwise_and:
                    return "ufunc"
                else:
                    inputs = list(inputs)
                    inputs[i] = np.asarray(self)
                    func = getattr(ufunc, method)
                    r = func(*inputs, **kw)
                    if 'out' in kw:
                        return r
                    else:
                        # Re-wrap the result in this subclass.
                        x = self.__class__(r.shape, dtype=r.dtype)
                        x[...] = r
                        return x
        class SomeClass3(SomeClass2):
            def __rsub__(self, other):
                return "sub for me"
        arr = np.array([0])
        obj = SomeClass()
        obj2 = SomeClass2((1,), dtype=np.int_)
        obj2[0] = 9
        obj3 = SomeClass3((1,), dtype=np.int_)
        obj3[0] = 4
        # obj is first, so should get to define outcome.
        assert_equal(obj * arr, 123)
        # obj is second, but has __numpy_ufunc__ and defines __rmul__.
        assert_equal(arr * obj, 321)
        # obj is second, but has __numpy_ufunc__ and defines __rsub__.
        assert_equal(arr - obj, "no subs for me")
        # obj is second, but has __numpy_ufunc__ and defines __lt__.
        assert_equal(arr > obj, "nope")
        # obj is second, but has __numpy_ufunc__ and defines __gt__.
        assert_equal(arr < obj, "yep")
        # Called as a ufunc, obj.__numpy_ufunc__ is used.
        assert_equal(np.multiply(arr, obj), "ufunc")
        # obj is second, but has __numpy_ufunc__ and defines __rmul__.
        arr *= obj
        assert_equal(arr, 321)
        # obj2 is an ndarray subclass, so CPython takes care of the same rules.
        assert_equal(obj2 * arr, 123)
        assert_equal(arr * obj2, 321)
        assert_equal(arr - obj2, "no subs for me")
        assert_equal(arr > obj2, "nope")
        assert_equal(arr < obj2, "yep")
        # Called as a ufunc, obj2.__numpy_ufunc__ is called.
        assert_equal(np.multiply(arr, obj2), "ufunc")
        # Also when the method is not overridden.
        assert_equal(arr & obj2, "ufunc")
        arr *= obj2
        assert_equal(arr, 321)
        obj2 += 33
        assert_equal(obj2[0], 42)
        assert_equal(obj2.sum(), 42)
        assert_(isinstance(obj2, SomeClass2))
        # Obj3 is subclass that defines __rsub__.  CPython calls it.
        assert_equal(arr - obj3, "sub for me")
        assert_equal(obj2 - obj3, "sub for me")
        # obj3 is a subclass that defines __rmul__.  CPython calls it.
        assert_equal(arr * obj3, 321)
        # But not here, since obj3.__rmul__ is obj2.__rmul__.
        assert_equal(obj2 * obj3, 123)
        # And of course, here obj3.__mul__ should be called.
        assert_equal(obj3 * obj2, 123)
        # obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
        # (and both are just ndarray.__radd__); see #4815.
        res = obj2 + obj3
        assert_equal(res, 46)
        assert_(isinstance(res, SomeClass2))
        # Since obj3 is a subclass, it should have precedence, like CPython
        # would give, even though obj2 has __numpy_ufunc__ and __radd__.
        # See gh-4815 and gh-5747.
        res = obj3 + obj2
        assert_equal(res, 46)
        assert_(isinstance(res, SomeClass3))
    def test_ufunc_override_normalize_signature(self):
        """The ``sig=`` alias must be normalized to ``signature=`` before
        being handed to ``__numpy_ufunc__`` (gh-5674).

        NOTE(review): disabled via the early ``return`` while
        ``__numpy_ufunc__`` is off for 1.10 (gh-5844).
        """
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        return
        # gh-5674
        class SomeClass(object):
            # Echo back the keyword dict the ufunc machinery passes in.
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                return kw
        a = SomeClass()
        kw = np.add(a, [1])
        assert_('sig' not in kw and 'signature' not in kw)
        kw = np.add(a, [1], sig='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
        kw = np.add(a, [1], signature='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
    """Checks for C-API helpers exposed via the multiarray test module."""
    def test_IsPythonScalar(self):
        # bytes, small int, arbitrary-precision int, float and str must
        # all be recognised as Python scalars.
        from numpy.core.multiarray_tests import IsPythonScalar
        for scalar in (b'foobar', 1, 2**80, 2., "a"):
            assert_(IsPythonScalar(scalar))
class TestSubscripting(TestCase):
    """Indexing a 1-D array with a scalar vs. an ellipsis tuple."""
    def test_test_zero_rank(self):
        arr = np.array([1, 2, 3])
        # A plain integer index yields a numpy integer scalar...
        self.assertTrue(isinstance(arr[0], np.int_))
        # ...which on Python 2 is also a builtin int.
        if sys.version_info[0] < 3:
            self.assertTrue(isinstance(arr[0], int))
        # An (index, Ellipsis) tuple instead yields a 0-d ndarray.
        self.assertTrue(type(arr[0, ...]) is np.ndarray)
class TestPickling(TestCase):
    """Pickle round-trips plus loading of historical pickle payloads.

    The hard-coded strings below are protocol-2 pickles produced by old
    numpy "version 0" (no version field) and "version 1" array formats;
    they must keep loading forever.
    """
    def test_roundtrip(self):
        # C-order, transposed (F-order) and structured arrays all survive
        # a dumps/loads round-trip unchanged.
        import pickle
        carray = np.array([[2, 9], [7, 0], [3, 8]])
        DATA = [
            carray,
            np.transpose(carray),
            np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
                                               ('c', float)])
        ]
        for a in DATA:
            assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
    def _loads(self, obj):
        # On Python 3 the latin1 encoding is required to read pickles
        # that were produced on Python 2.
        if sys.version_info[0] >= 3:
            return np.loads(obj, encoding='latin1')
        else:
            return np.loads(obj)
    # version 0 pickles, using protocol=2 to pickle
    # version 0 doesn't have a version field
    def test_version0_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = np.array([1, 2, 3, 4], dtype=np.int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version0_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version0_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    # version 1 pickles, using protocol=2 to pickle
    def test_version1_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = np.array([1, 2, 3, 4], dtype=np.int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version1_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version1_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_subarray_int_shape(self):
        # A structured dtype whose subarray shape pickled as a plain int
        # (not a tuple) must still unpickle correctly.
        s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
class TestFancyIndexing(TestCase):
    """Advanced (list/tuple/boolean-mask) indexing, reading and writing."""
    def test_list(self):
        # A one-element list index selects (and assigns) in place.
        arr = np.ones((1, 1))
        arr[:, [0]] = 2.0
        assert_array_equal(arr, np.array([[2.0]]))
        arr = np.ones((1, 1, 1))
        arr[:, :, [0]] = 2.0
        assert_array_equal(arr, np.array([[[2.0]]]))
    def test_tuple(self):
        # Same as test_list, with a tuple as the fancy index.
        arr = np.ones((1, 1))
        arr[:, (0,)] = 2.0
        assert_array_equal(arr, np.array([[2.0]]))
        arr = np.ones((1, 1, 1))
        arr[:, :, (0,)] = 2.0
        assert_array_equal(arr, np.array([[[2.0]]]))
    def test_mask(self):
        # A boolean mask selects exactly the True positions.
        values = np.array([1, 2, 3, 4])
        mask = np.array([0, 1, 0, 0], bool)
        assert_array_equal(values[mask], np.array([2]))
    def test_mask2(self):
        # A lower-rank boolean mask selects whole subarrays.
        grid = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = np.array([0, 1], bool)
        elem_mask = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        single_mask = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        assert_array_equal(grid[row_mask], np.array([[5, 6, 7, 8]]))
        assert_array_equal(grid[elem_mask], np.array([2, 5]))
        assert_array_equal(grid[single_mask], np.array([2]))
    def test_assign_mask(self):
        # Assignment through a mask touches only the selected items.
        values = np.array([1, 2, 3, 4])
        mask = np.array([0, 1, 0, 0], bool)
        values[mask] = 5
        assert_array_equal(values, np.array([1, 5, 3, 4]))
    def test_assign_mask2(self):
        # As above, with 1-D and 2-D masks over a 2-D array.
        template = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = np.array([0, 1], bool)
        elem_mask = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        single_mask = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        grid = template.copy()
        grid[row_mask] = 10
        assert_array_equal(grid, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
        grid = template.copy()
        grid[elem_mask] = 10
        assert_array_equal(grid, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
        grid = template.copy()
        grid[single_mask] = 10
        assert_array_equal(grid, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
    """Tests for ndarray.argmax / np.argmax."""
    # Table of (sequence, expected argmax index) pairs.  As the rows
    # below demonstrate: float/complex NaNs win the comparison and the
    # FIRST NaN's index is returned; datetime/timedelta NaTs are instead
    # skipped (behavior of the numpy version under test); complex values
    # compare lexicographically (real part first, then imaginary part).
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),
        # Assorted tests with NaTs
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 4),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 0),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 3),
        ([np.timedelta64('NaT', 's')] * 3, 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),
        ([False, False, False, False, True], 4),
        ([False, False, False, True, False], 3),
        ([True, False, False, False, False], 0),
        ([True, False, True, False, False], 0),
        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 3),
        #(['zz', 'a', 'aa', 'a'], 0),
        #(['aa', 'z', 'zz', 'a'], 2),
    ]
    def test_all(self):
        """argmax along every axis must pick the same elements as max
        (cross-checked via choose on the transposed array)."""
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amax = a.max(i)
            aargmax = a.argmax(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
    def test_combinations(self):
        """Run every (sequence, expected index) pair in nan_arr."""
        for arr, pos in self.nan_arr:
            assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
    def test_output_shape(self):
        """The out= argument must match the result shape exactly."""
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)
        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)
        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
        # A correctly-shaped out array receives the result.
        out = np.ones(10, dtype=np.int_)
        a.argmax(-1, out=out)
        assert_equal(out, a.argmax(-1))
    def test_argmax_unicode(self):
        """Regression test: argmax over a large unicode array."""
        d = np.zeros(6031, dtype='<U9')
        d[5942] = "as"
        assert_equal(d.argmax(), 5942)
    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmax and numpy.argmax support out/axis args
        a = np.random.normal(size=(2,3))
        #check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.zeros(2, dtype=int)
        assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
        assert_equal(out1, out2)
        #check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.zeros(3, dtype=int)
        assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
        assert_equal(out1, out2)
class TestArgmin(TestCase):
    """Tests for ndarray.argmin / np.argmin (mirror of TestArgmax)."""
    # Table of (sequence, expected argmin index) pairs.  As for argmax,
    # the rows show that float/complex NaNs win and the FIRST NaN's index
    # is returned, while datetime/timedelta NaTs are skipped (behavior of
    # the numpy version under test); complex values compare
    # lexicographically (real part first, then imaginary part).
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 0),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2014-11-20T12:20:59'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        # Assorted tests with NaTs
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 4),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 1),
        ([np.timedelta64('NaT', 's')] * 3, 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 2),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 0),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 1),
        ([True, True, True, True, False], 4),
        ([True, True, True, False, True], 3),
        ([False, True, True, True, True], 0),
        ([False, True, False, True, True], 0),
        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 0),
        #(['zz', 'a', 'aa', 'a'], 1),
        #(['aa', 'z', 'zz', 'a'], 3),
    ]
    def test_all(self):
        """argmin along every axis must pick the same elements as min
        (cross-checked via choose on the transposed array)."""
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amin = a.min(i)
            aargmin = a.argmin(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
    def test_combinations(self):
        """Run every (sequence, expected index) pair in nan_arr."""
        for arr, pos in self.nan_arr:
            assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
    def test_minimum_signed_integers(self):
        """argmin must find the most negative representable value for
        every signed integer width."""
        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
        assert_equal(np.argmin(a), 1)
        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
        assert_equal(np.argmin(a), 1)
        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
        assert_equal(np.argmin(a), 1)
        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
        assert_equal(np.argmin(a), 1)
    def test_output_shape(self):
        """The out= argument must match the result shape exactly."""
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)
        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)
        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
        # A correctly-shaped out array receives the result.
        out = np.ones(10, dtype=np.int_)
        a.argmin(-1, out=out)
        assert_equal(out, a.argmin(-1))
    def test_argmin_unicode(self):
        """Regression test: argmin over a large unicode array."""
        d = np.ones(6031, dtype='<U9')
        d[6001] = "0"
        assert_equal(d.argmin(), 6001)
    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmin and numpy.argmin support out/axis args
        a = np.random.normal(size=(2,3))
        #check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.ones(2, dtype=int)
        assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
        assert_equal(out1, out2)
        #check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.ones(3, dtype=int)
        assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
        assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
    """Broadcasting a scalar across a newaxis-expanded column."""
    def test_basic(self):
        base = np.array([0, -0.1, 0.1])
        # Multiplying the (3, 1) column view by a scalar must match the
        # plain 1-D product once flattened back.
        column = base[:, np.newaxis]
        scaled = 250 * column
        assert_almost_equal(scaled.ravel(), 250 * base)
class TestClip(TestCase):
    """ndarray.clip across dtype groups, byte orders, and record fields."""
    def _check_range(self, x, cmin, cmax):
        # Every element must lie inside [cmin, cmax].
        assert_(np.all(x >= cmin))
        assert_(np.all(x <= cmax))
    def _clip_type(self, type_group, array_max,
                   clip_min, clip_max, inplace=False,
                   expected_min=None, expected_max=None):
        """Clip random arrays of every dtype in np.sctypes[type_group],
        in both native and swapped byte order, in-place or out-of-place,
        and check the result range and resulting byte order.

        expected_min/expected_max override clip_min/clip_max for the
        range check (e.g. unsigned types clamp a negative clip_min to 0).
        """
        if expected_min is None:
            expected_min = clip_min
        if expected_max is None:
            expected_max = clip_max
        for T in np.sctypes[type_group]:
            # Exercise the native byte order plus the swapped one.
            if sys.byteorder == 'little':
                byte_orders = ['=', '>']
            else:
                byte_orders = ['<', '=']
            for byteorder in byte_orders:
                dtype = np.dtype(T).newbyteorder(byteorder)
                x = (np.random.random(1000) * array_max).astype(dtype)
                if inplace:
                    # In-place clip keeps the array's byte order.
                    x.clip(clip_min, clip_max, x)
                else:
                    # Out-of-place clip returns a native-order result.
                    x = x.clip(clip_min, clip_max)
                    byteorder = '='
                # Single-byte types report '|' (not applicable).
                if x.dtype.byteorder == '|':
                    byteorder = '|'
                assert_equal(x.dtype.byteorder, byteorder)
                self._check_range(x, expected_min, expected_max)
        return x
    def test_basic(self):
        """Float, signed int, and unsigned int groups, both in-place and
        out-of-place; a negative clip_min clamps to 0 for unsigned."""
        for inplace in [False, True]:
            self._clip_type(
                'float', 1024, -12.8, 100.2, inplace=inplace)
            self._clip_type(
                'float', 1024, 0, 0, inplace=inplace)
            self._clip_type(
                'int', 1024, -120, 100.5, inplace=inplace)
            self._clip_type(
                'int', 1024, 0, 0, inplace=inplace)
            self._clip_type(
                'uint', 1024, 0, 0, inplace=inplace)
            self._clip_type(
                'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
    def test_record_array(self):
        # Clipping a single field of a record array.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
        y = rec['x'].clip(-0.3, 0.5)
        self._check_range(y, -0.3, 0.5)
    def test_max_or_min(self):
        # clip with only one bound given (positionally or by keyword).
        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        x = val.clip(3)
        assert_(np.all(x >= 3))
        x = val.clip(min=3)
        assert_(np.all(x >= 3))
        x = val.clip(max=4)
        assert_(np.all(x <= 4))
class TestPutmask(object):
    """np.putmask across dtypes, mask sizes, byte orders and records."""
    def tst_basic(self, x, T, mask, val):
        np.putmask(x, mask, val)
        # Every selected element equals val cast to T; dtype unchanged.
        assert_(np.all(x[mask] == T(val)))
        assert_(x.dtype == T)
    def test_ip_types(self):
        # Run tst_basic for every numeric scalar type and several values.
        skip = (str, unicode, np.void, object)
        data = np.random.random(1000) * 100
        mask = data < 40
        for val in [-100, 0, 15]:
            for group in np.sctypes.values():
                for T in group:
                    if T not in skip:
                        yield self.tst_basic, data.copy().astype(T), T, mask, val
    def test_mask_size(self):
        # A mask whose size differs from the array's is rejected.
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
    def tst_byteorder(self, dtype):
        arr = np.array([1, 2, 3], dtype)
        np.putmask(arr, [True, False, True], -1)
        assert_array_equal(arr, [-1, 2, -1])
    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype
    def test_record_array(self):
        # Note mixed byteorder: writing one field leaves the others.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        np.putmask(rec['x'], [True, False], 10)
        for name, expected in (('x', [10, 5]), ('y', [2, 4]), ('z', [3, 3])):
            assert_array_equal(rec[name], expected)
        np.putmask(rec['y'], [True, False], 11)
        for name, expected in (('x', [10, 5]), ('y', [11, 4]), ('z', [3, 3])):
            assert_array_equal(rec[name], expected)
    def test_masked_array(self):
        # Placeholder: putmask interaction with np.ma masked arrays is
        # not exercised yet.
        pass
class TestTake(object):
    """ndarray.take across dtypes, index modes, and byte orders."""
    def tst_basic(self, x):
        # Taking every index along axis 0 reproduces the array.
        all_rows = list(range(x.shape[0]))
        assert_array_equal(x.take(all_rows, axis=0), x)
    def test_ip_types(self):
        # Run tst_basic for every numeric scalar type.
        skip = (str, unicode, np.void, object)
        data = np.random.random(24) * 100
        data.shape = 2, 3, 4
        for group in np.sctypes.values():
            for T in group:
                if T not in skip:
                    yield self.tst_basic, data.copy().astype(T)
    def test_raise(self):
        arr = np.random.random(24) * 100
        arr.shape = 2, 3, 4
        # Out-of-bounds indices raise in the default 'raise' mode;
        # valid negative indices still count from the end.
        assert_raises(IndexError, arr.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, arr.take, [-3], axis=0)
        assert_array_equal(arr.take([-1], axis=0)[0], arr[1])
    def test_clip(self):
        arr = np.random.random(24) * 100
        arr.shape = 2, 3, 4
        # 'clip' mode pins out-of-range indices to the valid range.
        assert_array_equal(arr.take([-1], axis=0, mode='clip')[0], arr[0])
        assert_array_equal(arr.take([2], axis=0, mode='clip')[0], arr[1])
    def test_wrap(self):
        arr = np.random.random(24) * 100
        arr.shape = 2, 3, 4
        # 'wrap' mode reduces indices modulo the axis length.
        assert_array_equal(arr.take([-1], axis=0, mode='wrap')[0], arr[1])
        assert_array_equal(arr.take([2], axis=0, mode='wrap')[0], arr[0])
        assert_array_equal(arr.take([3], axis=0, mode='wrap')[0], arr[1])
    def tst_byteorder(self, dtype):
        arr = np.array([1, 2, 3], dtype)
        assert_array_equal(arr.take([0, 2, 1]), [1, 3, 2])
    def test_ip_byteorder(self):
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype
    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        taken = rec.take([1])
        assert_(taken['x'] == 5.0 and taken['y'] == 4.0)
class TestLexsort(TestCase):
    """np.lexsort with multiple keys, including datetime/timedelta."""
    def test_basic(self):
        primary = [1, 2, 1, 3, 1, 5]
        secondary = [0, 4, 5, 6, 2, 3]
        expected = np.array([0, 4, 2, 1, 3, 5])
        # The LAST key passed to lexsort is the primary sort key.
        order = np.lexsort((secondary, primary))
        assert_array_equal(order, expected)
        # A 2-D key array behaves like a sequence of row keys.
        keys = np.vstack((secondary, primary))
        order = np.lexsort(keys)
        assert_array_equal(order, expected)
        assert_array_equal(keys[1][order], np.sort(keys[1]))
    def test_datetime(self):
        # Equal primary keys: the order comes from the secondary key,
        # for datetime64 and timedelta64 alike.
        for dtype in ('datetime64[D]', 'timedelta64[D]'):
            primary = np.array([0, 0, 0], dtype=dtype)
            secondary = np.array([2, 1, 0], dtype=dtype)
            order = np.lexsort((secondary, primary))
            assert_array_equal(order, np.array([2, 1, 0]))
class TestIO(object):
    """Test tofile, fromfile, tobytes, and fromstring"""
    def setUp(self):
        # A small complex array seeded with nan/inf specials, plus a
        # fresh temporary directory for the file-based round-trips.
        shape = (2, 4, 3)
        rand = np.random.random
        self.x = rand(shape) + rand(shape).astype(np.complex)*1j
        self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
        self.dtype = self.x.dtype
        self.tempdir = tempfile.mkdtemp()
        self.filename = tempfile.mktemp(dir=self.tempdir)
    def tearDown(self):
        # Remove the temporary directory and everything in it.
        shutil.rmtree(self.tempdir)
    def test_bool_fromstring(self):
        # Any non-zero token parses as True in text mode.
        v = np.array([True, False, True, False], dtype=np.bool_)
        y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
        assert_array_equal(v, y)
    def test_uint64_fromstring(self):
        # Values beyond int64 range must parse as uint64.
        d = np.fromstring("9923372036854775807 104783749223640",
                          dtype=np.uint64, sep=' ')
        e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
        assert_array_equal(d, e)
    def test_int64_fromstring(self):
        d = np.fromstring("-25041670086757 104783749223640",
                          dtype=np.int64, sep=' ')
        e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
        assert_array_equal(d, e)
    def test_empty_files_binary(self):
        # fromfile on an empty file yields an empty array (binary mode).
        f = open(self.filename, 'w')
        f.close()
        y = np.fromfile(self.filename)
        assert_(y.size == 0, "Array not empty")
    def test_empty_files_text(self):
        # Same for text mode (sep given).
        f = open(self.filename, 'w')
        f.close()
        y = np.fromfile(self.filename, sep=" ")
        assert_(y.size == 0, "Array not empty")
    def test_roundtrip_file(self):
        # tofile/fromfile through an open file object.
        f = open(self.filename, 'wb')
        self.x.tofile(f)
        f.close()
        # NB. doesn't work with flush+seek, due to use of C stdio
        f = open(self.filename, 'rb')
        y = np.fromfile(f, dtype=self.dtype)
        f.close()
        assert_array_equal(y, self.x.flat)
    def test_roundtrip_filename(self):
        # tofile/fromfile through a filename.
        self.x.tofile(self.filename)
        y = np.fromfile(self.filename, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)
    def test_roundtrip_binary_str(self):
        # tobytes/fromstring round-trip in C order and in F order.
        s = self.x.tobytes()
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)
        s = self.x.tobytes('F')
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flatten('F'))
    def test_roundtrip_str(self):
        # Text round-trip via str(); str() loses precision, and NaNs do
        # not compare equal, hence the split comparison.
        x = self.x.real.ravel()
        s = "@".join(map(str, x))
        y = np.fromstring(s, sep="@")
        # NB. str imbues less precision
        nan_mask = ~np.isfinite(x)
        assert_array_equal(x[nan_mask], y[nan_mask])
        assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
    def test_roundtrip_repr(self):
        # repr() keeps full precision, so the round-trip is exact.
        x = self.x.real.ravel()
        s = "@".join(map(repr, x))
        y = np.fromstring(s, sep="@")
        assert_array_equal(x, y)
    def test_file_position_after_fromfile(self):
        # gh-4118: the file position must be correct after fromfile,
        # for buffer sizes below, at, and above the default buffer size.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]
        for size in sizes:
            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.close()
            for mode in ['rb', 'r+b']:
                err_msg = "%d %s" % (size, mode)
                f = open(self.filename, mode)
                f.read(2)
                np.fromfile(f, dtype=np.float64, count=1)
                pos = f.tell()
                f.close()
                # 2 bytes read + one 8-byte float64.
                assert_equal(pos, 10, err_msg=err_msg)
    def test_file_position_after_tofile(self):
        # gh-4118: same check for tofile.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]
        for size in sizes:
            err_msg = "%d" % (size,)
            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.seek(10)
            f.write(b'12')
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
            f = open(self.filename, 'r+b')
            f.read(2)
            f.seek(0, 1)  # seek between read&write required by ANSI C
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10, err_msg=err_msg)
    def _check_from(self, s, value, **kw):
        """Parse s both via fromstring and via a file with fromfile, and
        compare both results against value."""
        y = np.fromstring(asbytes(s), **kw)
        assert_array_equal(y, value)
        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, **kw)
        assert_array_equal(y, value)
    def test_nan(self):
        # All accepted NaN spellings, including payload suffixes.
        self._check_from(
            "nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
            [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            sep=' ')
    def test_inf(self):
        # All accepted infinity spellings, with sign.
        self._check_from(
            "inf +inf -inf infinity -Infinity iNfInItY -inF",
            [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
            sep=' ')
    def test_numbers(self):
        self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
                         [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
    def test_binary(self):
        # Raw little-endian float32 bytes.
        self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
                         np.array([1, 2, 3, 4]),
                         dtype='<f4')
    @dec.slow  # takes > 1 minute on mechanical hard drive
    def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            pass
    def test_string(self):
        self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
    def test_counted_string(self):
        # count= limits how many items are parsed; -1 means all.
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
    def test_string_with_ws(self):
        # Trailing whitespace is tolerated.
        self._check_from('1 2  3     4   ', [1, 2, 3, 4], dtype=int, sep=' ')
    def test_counted_string_with_ws(self):
        self._check_from('1 2  3     4   ', [1, 2, 3], count=3, dtype=int,
                         sep=' ')
    def test_ascii(self):
        # Whitespace around the separator is ignored.
        self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
    def test_malformed(self):
        # Parsing stops at the first token that does not fully parse.
        self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
    def test_long_sep(self):
        # Multi-character separators are supported.
        self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
    def test_dtype(self):
        v = np.array([1, 2, 3, 4], dtype=np.int_)
        self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
    def test_dtype_bool(self):
        # can't use _check_from because fromstring can't handle True/False
        v = np.array([True, False, True, False], dtype=np.bool_)
        s = '1,0,-2.3,0'
        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
        assert_(y.dtype == '?')
        assert_array_equal(y, v)
    def test_tofile_sep(self):
        # Text-mode tofile with a separator uses repr-like formatting.
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.0,3.51,4.0')
    def test_tofile_format(self):
        # An explicit format string controls the text output.
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',', format='%.2f')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.00,3.51,4.00')
    def test_locale(self):
        # Re-run the parsing/formatting tests under a non-C locale to
        # catch locale-dependent number handling.
        in_foreign_locale(self.test_numbers)()
        in_foreign_locale(self.test_nan)()
        in_foreign_locale(self.test_inf)()
        in_foreign_locale(self.test_counted_string)()
        in_foreign_locale(self.test_ascii)()
        in_foreign_locale(self.test_malformed)()
        in_foreign_locale(self.test_tofile_sep)()
        in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
    """np.frombuffer round-trips for both byte orders and basic dtypes."""
    def tst_basic(self, buffer, expected, kwargs):
        assert_array_equal(np.frombuffer(buffer, **kwargs), expected)
    def test_ip_basic(self):
        # tobytes -> frombuffer must reproduce the data for float, int
        # and complex in little- and big-endian layouts.
        for byteorder in '<>':
            for scalar_type in (float, int, np.complex):
                dt = np.dtype(scalar_type).newbyteorder(byteorder)
                data = (np.random.random((4, 7)) * 5).astype(dt)
                yield self.tst_basic, data.tobytes(), data.flat, {'dtype': dt}
    def test_empty(self):
        # An empty buffer yields an empty array.
        yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
    """Tests for the ``.flat`` iterator, mainly writeable-flag propagation."""

    def setUp(self):
        a0 = np.arange(20.0)
        a = a0.reshape(4, 5)
        a0.shape = (4, 5)
        a.flags.writeable = False
        # a / b: read-only contiguous view and read-only strided view;
        # a0 / b0: their writeable counterparts sharing the same data.
        self.a = a
        self.b = a[::2, ::2]
        self.a0 = a0
        self.b0 = a0[::2, ::2]

    def test_contiguous(self):
        # Writing through .flat of a read-only array must raise ValueError
        # and leave the data untouched.
        testpassed = False
        try:
            self.a.flat[12] = 100.0
        except ValueError:
            testpassed = True
        assert testpassed
        assert self.a.flat[12] == 12.0

    def test_discontiguous(self):
        # Same check through a non-contiguous (strided) view.
        testpassed = False
        try:
            self.b.flat[4] = 100.0
        except ValueError:
            testpassed = True
        assert testpassed
        assert self.b.flat[4] == 12.0

    def test___array__(self):
        c = self.a.flat.__array__()
        d = self.b.flat.__array__()
        e = self.a0.flat.__array__()
        f = self.b0.flat.__array__()

        # The materialized arrays inherit writeability from the source.
        assert c.flags.writeable is False
        assert d.flags.writeable is False
        assert e.flags.writeable is True
        assert f.flags.writeable is True

        # Only the writeable, non-contiguous case needs an UPDATEIFCOPY
        # buffer (writes back into b0 when deallocated).
        assert c.flags.updateifcopy is False
        assert d.flags.updateifcopy is False
        assert e.flags.updateifcopy is False
        assert f.flags.updateifcopy is True
        assert f.base is self.b0
class TestResize(TestCase):
    """Tests for in-place ``ndarray.resize`` (reallocation + refcheck)."""

    def test_basic(self):
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        x.resize((5, 5))
        # Existing data survives in the first 9 flat elements ...
        assert_array_equal(x.flat[:9],
                np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        # ... and the newly grown tail is zero-filled.
        assert_array_equal(x[9:].flat, 0)

    def test_check_reference(self):
        # resize must refuse to reallocate while other references exist.
        x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        y = x
        self.assertRaises(ValueError, x.resize, (5, 1))
        del y  # avoid pyflakes unused variable warning.

    def test_int_shape(self):
        # A bare int is accepted as a 1-d shape.
        x = np.eye(3)
        x.resize(3)
        assert_array_equal(x, np.eye(3)[0,:])

    def test_none_shape(self):
        # resize(None) and resize() are no-ops.
        x = np.eye(3)
        x.resize(None)
        assert_array_equal(x, np.eye(3))
        x.resize()
        assert_array_equal(x, np.eye(3))

    def test_invalid_arguements(self):
        self.assertRaises(TypeError, np.eye(3).resize, 'hi')
        self.assertRaises(ValueError, np.eye(3).resize, -1)
        self.assertRaises(TypeError, np.eye(3).resize, order=1)
        self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')

    def test_freeform_shape(self):
        # Shape may also be given as separate positional ints.
        x = np.eye(3)
        x.resize(3, 2, 1)
        assert_(x.shape == (3, 2, 1))

    def test_zeros_appended(self):
        x = np.eye(3)
        x.resize(2, 3, 3)
        assert_array_equal(x[0], np.eye(3))
        assert_array_equal(x[1], np.zeros((3, 3)))

    def test_obj_obj(self):
        # check memory is initialized on resize, gh-4857
        a = np.ones(10, dtype=[('k', object, 2)])
        a.resize(15,)
        assert_equal(a.shape, (15,))
        assert_array_equal(a['k'][-5:], 0)
        assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
    """Structured-array (record) dtype tests: field naming and indexing.

    Several tests are defined conditionally because bytes/unicode field
    name rules differ between Python 2 and Python 3.
    """

    def test_field_rename(self):
        dt = np.dtype([('f', float), ('i', int)])
        dt.names = ['p', 'q']
        assert_equal(dt.names, ['p', 'q'])

    if sys.version_info[0] >= 3:
        def test_bytes_fields(self):
            # Bytes are not allowed in field names and not recognized in titles
            # on Py3
            assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
            assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])

            dt = np.dtype([((asbytes('a'), 'b'), int)])
            assert_raises(ValueError, dt.__getitem__, asbytes('a'))

            x = np.array([(1,), (2,), (3,)], dtype=dt)
            assert_raises(IndexError, x.__getitem__, asbytes('a'))

            y = x[0]
            assert_raises(IndexError, y.__getitem__, asbytes('a'))
    else:
        def test_unicode_field_titles(self):
            # Unicode field titles are added to field dict on Py2
            title = unicode('b')
            dt = np.dtype([((title, 'a'), int)])
            dt[title]
            dt['a']
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            x[title]
            x['a']
            y = x[0]
            y[title]
            y['a']

        def test_unicode_field_names(self):
            # Unicode field names are not allowed on Py2
            title = unicode('b')
            assert_raises(TypeError, np.dtype, [(title, int)])
            assert_raises(TypeError, np.dtype, [(('a', title), int)])

    def test_field_names(self):
        # Test unicode and 8-bit / byte strings can be used
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        is_py3 = sys.version_info[0] >= 3
        if is_py3:
            funcs = (str,)
            # byte string indexing fails gracefully
            assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
            assert_raises(IndexError, a.__getitem__, asbytes('f1'))
            assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
            assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
        else:
            funcs = (str, unicode)
        for func in funcs:
            b = a.copy()
            fn1 = func('f1')
            b[fn1] = 1
            assert_equal(b[fn1], 1)
            fnn = func('not at all')
            assert_raises(ValueError, b.__setitem__, fnn, 1)
            assert_raises(ValueError, b.__getitem__, fnn)
            b[0][fn1] = 2
            assert_equal(b[fn1], 2)
            # Subfield
            assert_raises(IndexError, b[0].__setitem__, fnn, 1)
            assert_raises(IndexError, b[0].__getitem__, fnn)
            # Subfield
            fn3 = func('f3')
            sfn1 = func('sf1')
            b[fn3][sfn1] = 1
            assert_equal(b[fn3][sfn1], 1)
            assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
            assert_raises(ValueError, b[fn3].__getitem__, fnn)
            # multiple Subfields
            fn2 = func('f2')
            b[fn2] = 3
            # multi-field indexing returns fields in the requested order
            assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
            assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
            # view of subfield view/copy
            assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
            view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
            assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
        # non-ascii unicode field indexing is well behaved
        if not is_py3:
            raise SkipTest('non ascii unicode field indexing skipped; '
                           'raises segfault on python 2.x')
        else:
            assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
            assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))

    def test_field_names_deprecation(self):

        def collect_warnings(f, *args, **kwargs):
            # Run f and return the warning categories it emitted.
            with warnings.catch_warnings(record=True) as log:
                warnings.simplefilter("always")
                f(*args, **kwargs)
            return [w.category for w in log]

        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        a['f1'][0] = 1
        a['f2'][0] = 2
        a['f3'][0] = (3,)
        b = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        b['f1'][0] = 1
        b['f2'][0] = 2
        b['f3'][0] = (3,)

        # All the different functions raise a warning, but not an error, and
        # 'a' is not modified:
        assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
                     [FutureWarning])
        assert_equal(a, b)
        # Views also warn
        subset = a[['f1', 'f2']]
        subset_view = subset.view()
        assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
                     [FutureWarning])
        # But the write goes through:
        assert_equal(subset['f1'][0], 10)
        # Only one warning per multiple field indexing, though (even if there
        # are multiple views involved):
        assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])

    def test_record_hash(self):
        # Read-only record scalars are hashable; equal contents hash equal
        # even across differently-named (but layout-compatible) dtypes.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        a.flags.writeable = False
        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
        b.flags.writeable = False
        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
        c.flags.writeable = False
        self.assertTrue(hash(a[0]) == hash(a[1]))
        self.assertTrue(hash(a[0]) == hash(b[0]))
        self.assertTrue(hash(a[0]) != hash(b[1]))
        self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])

    def test_record_no_hash(self):
        # Mutable record scalars are unhashable.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        self.assertRaises(TypeError, hash, a[0])

    def test_empty_structure_creation(self):
        # make sure these do not raise errors (gh-5631)
        np.array([()], dtype={'names': [], 'formats': [],
                              'offsets': [], 'itemsize': 12})
        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
                                              'offsets': [], 'itemsize': 12})
class TestView(TestCase):
    """Viewing a structured array's memory under a plain integer dtype."""

    def test_basic(self):
        rgba_dtype = [('r', np.int8), ('g', np.int8),
                      ('b', np.int8), ('a', np.int8)]
        pixels = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=rgba_dtype)
        # Byte order must be pinned to '<i4' explicitly: the packed
        # integers below assume a little-endian interpretation of the
        # four int8 fields.
        via_keyword = pixels.view(dtype='<i4')
        # Same view requested positionally must agree.
        via_positional = pixels.view('<i4')
        assert_array_equal(via_keyword, via_positional)
        assert_array_equal(via_keyword, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
    """Tests for ndarray.mean/var/std via the _mean/_var/_std helpers.

    Covers keepdims, out=, dtype promotion rules, ddof scaling, empty
    arrays (NaN + RuntimeWarning), exact values, and subclass
    preservation.
    """

    # The three statistics under test, exercised uniformly where possible.
    funcs = [_mean, _var, _std]

    def setUp(self):
        # Fixed seed so the random fixtures are reproducible.
        np.random.seed(range(3))
        self.rmat = np.random.random((4, 5))
        self.cmat = self.rmat + 1j * self.rmat
        # Decimal matrix exercising the object-dtype code path.
        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
        self.omat = self.omat.reshape(4, 5)

    def test_keepdims(self):
        mat = np.eye(3)
        for f in self.funcs:
            for axis in [0, 1]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.ndim == mat.ndim)
                assert_(res.shape[axis] == 1)
            for axis in [None]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.shape == (1, 1))

    def test_out(self):
        mat = np.eye(3)
        for f in self.funcs:
            out = np.zeros(3)
            tgt = f(mat, axis=1)
            res = f(mat, axis=1, out=out)
            assert_almost_equal(res, out)
            assert_almost_equal(res, tgt)
            # Mismatched out shapes must raise.
            out = np.empty(2)
            assert_raises(ValueError, f, mat, axis=1, out=out)
            out = np.empty((2, 2))
            assert_raises(ValueError, f, mat, axis=1, out=out)

    def test_dtype_from_input(self):
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']

        # object type: dtype is preserved, scalars come back as Decimal.
        for f in self.funcs:
            mat = np.array([[Decimal(1)]*3]*3)
            tgt = mat.dtype.type
            res = f(mat, axis=1).dtype.type
            assert_(res is tgt)
            # scalar case
            res = type(f(mat, axis=None))
            assert_(res is Decimal)

        # integer types: results are promoted to float64.
        for f in self.funcs:
            for c in icodes:
                mat = np.eye(3, dtype=c)
                tgt = np.float64
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

        # mean for float types: input dtype is preserved.
        for f in [_mean]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                tgt = mat.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

        # var, std for float types: complex inputs yield real results.
        for f in [_var, _std]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                # deal with complex types
                tgt = mat.real.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_dtype(self):
        mat = np.eye(3)

        # stats for integer types
        # FIXME:
        # this needs definition as there are lots places along the line
        # where type casting may take place.
        #for f in self.funcs:
        #    for c in np.typecodes['AllInteger']:
        #        tgt = np.dtype(c).type
        #        res = f(mat, axis=1, dtype=c).dtype.type
        #        assert_(res is tgt)
        #        # scalar case
        #        res = f(mat, axis=None, dtype=c).dtype.type
        #        assert_(res is tgt)

        # stats for float types: an explicit dtype= wins.
        for f in self.funcs:
            for c in np.typecodes['AllFloat']:
                tgt = np.dtype(c).type
                res = f(mat, axis=1, dtype=c).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None, dtype=c).dtype.type
                assert_(res is tgt)

    def test_ddof(self):
        # var(ddof) * (dim - ddof) equals the raw sum of squared
        # deviations, i.e. var(0) * dim, for every ddof.
        # BUG FIX: the assertions used to sit outside the loops (the
        # _var loop asserted nothing, the _std loop asserted only its
        # final iteration, twice); each iteration is now checked.
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        # std(ddof) scales with sqrt(dim - ddof) instead.
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= number of elements: result must not be negative and a
        # RuntimeWarning must be emitted.
        dim = self.rmat.shape[1]
        for f in [_var, _std]:
            for ddof in range(dim, dim + 2):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    res = f(self.rmat, axis=1, ddof=ddof)
                    assert_(not (res < 0).any())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        A = np.zeros((0, 3))
        for f in self.funcs:
            for axis in [0, None]:
                # Reducing over the empty axis yields NaN plus a warning.
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(A, axis=axis)).all())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                # Reducing over the non-empty axis gives an empty result.
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(A, axis=axis), np.zeros([]))

    def test_mean_values(self):
        # mean * number-of-elements must reproduce the sum.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * mat.shape[axis]
                assert_almost_equal(res, tgt)
            for axis in [None]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * np.prod(mat.shape)
                assert_almost_equal(res, tgt)

    def test_var_values(self):
        # var == E[x conj(x)] - E[x] conj(E[x])
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                msqr = _mean(mat * mat.conj(), axis=axis)
                mean = _mean(mat, axis=axis)
                tgt = msqr - mean * mean.conjugate()
                res = _var(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_std_values(self):
        # std == sqrt(var)
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                tgt = np.sqrt(_var(mat, axis=axis))
                res = _std(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_subclass(self):
        # Reductions must preserve ndarray subclasses and their attributes.
        class TestArray(np.ndarray):
            def __new__(cls, data, info):
                result = np.array(data)
                result = result.view(cls)
                result.info = info
                return result

            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')

        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        res = dat.mean(1)
        assert_(res.info == dat.info)
        res = dat.std(1)
        assert_(res.info == dat.info)
        res = dat.var(1)
        assert_(res.info == dat.info)
class TestVdot(TestCase):
    """np.vdot tests: scalar results, memory order, uncontiguous inputs."""

    def test_basic(self):
        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        dt_complex = np.typecodes['Complex']

        # test real: vdot of the identity with itself is 3 for every dtype,
        # and the result must be a scalar, not a 0-d array.
        a = np.eye(3)
        for dt in dt_numeric + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test complex: vdot conjugates its first argument, so
        # vdot(1j*I, 1j*I) is real 3.
        a = np.eye(3) * 1j
        for dt in dt_complex + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test boolean
        b = np.eye(3, dtype=np.bool)
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), True)

    def test_vdot_array_order(self):
        # C- and F-ordered operands must give identical results.
        a = np.array([[1, 2], [3, 4]], order='C')
        b = np.array([[1, 2], [3, 4]], order='F')
        res = np.vdot(a, a)

        # integer arrays are exact
        assert_equal(np.vdot(a, b), res)
        assert_equal(np.vdot(b, a), res)
        assert_equal(np.vdot(b, b), res)

    def test_vdot_uncontiguous(self):
        for size in [2, 1000]:
            # Different sizes match different branches in vdot.
            a = np.zeros((size, 2, 2))
            b = np.zeros((size, 2, 2))
            a[:, 0, 0] = np.arange(size)
            b[:, 0, 0] = np.arange(size) + 1
            # Make a and b uncontiguous:
            a = a[..., 0]
            b = b[..., 0]

            # All contiguity combinations must match the flattened result.
            assert_equal(np.vdot(a, b),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a, b.copy()),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a.copy(), b),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a.copy('F'), b),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a, b.copy('F')),
                         np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
    """np.dot tests: shape combinations, exact seeded values, out= rules,
    object arrays, and a BLAS (Accelerate sgemv) regression check.
    """

    def setUp(self):
        # Fixed seed; the tgt arrays below are the literal values produced
        # by these seeded operands.
        np.random.seed(128)
        self.A = np.random.rand(4, 2)
        self.b1 = np.random.rand(2, 1)
        self.b2 = np.random.rand(2)
        self.b3 = np.random.rand(1, 2)
        self.b4 = np.random.rand(4)
        self.N = 7  # decimal places used for comparisons

    def test_dotmatmat(self):
        A = self.A
        res = np.dot(A.transpose(), A)
        tgt = np.array([[1.45046013, 0.86323640],
                        [0.86323640, 0.84934569]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotmatvec(self):
        A, b1 = self.A, self.b1
        res = np.dot(A, b1)
        tgt = np.array([[0.32114320], [0.04889721],
                        [0.15696029], [0.33612621]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotmatvec2(self):
        A, b2 = self.A, self.b2
        res = np.dot(A, b2)
        tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat(self):
        A, b4 = self.A, self.b4
        res = np.dot(b4, A)
        tgt = np.array([1.23495091, 1.12222648])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat2(self):
        b3, A = self.b3, self.A
        res = np.dot(b3, A.transpose())
        tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecmat3(self):
        A, b4 = self.A, self.b4
        res = np.dot(A.transpose(), b4)
        tgt = np.array([1.23495091, 1.12222648])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecvecouter(self):
        b1, b3 = self.b1, self.b3
        res = np.dot(b1, b3)
        tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecvecinner(self):
        b1, b3 = self.b1, self.b3
        res = np.dot(b3, b1)
        tgt = np.array([[ 0.23129668]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotcolumnvect1(self):
        b1 = np.ones((3, 1))
        b2 = [5.3]
        res = np.dot(b1, b2)
        tgt = np.array([5.3, 5.3, 5.3])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotcolumnvect2(self):
        b1 = np.ones((3, 1)).transpose()
        b2 = [6.2]
        res = np.dot(b2, b1)
        tgt = np.array([6.2, 6.2, 6.2])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecscalar(self):
        np.random.seed(100)
        b1 = np.random.rand(1, 1)
        b2 = np.random.rand(1, 4)
        res = np.dot(b1, b2)
        tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_dotvecscalar2(self):
        np.random.seed(100)
        b1 = np.random.rand(4, 1)
        b2 = np.random.rand(1, 1)
        res = np.dot(b1, b2)
        tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
        assert_almost_equal(res, tgt, decimal=self.N)

    def test_all(self):
        # Exhaustively check 0-d/1-d/2-d operand combinations; dout lists
        # the expected result shape for each entry of product(dims, dims).
        dims = [(), (1,), (1, 1)]
        dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
        for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
            b1 = np.zeros(dim1)
            b2 = np.zeros(dim2)
            res = np.dot(b1, b2)
            tgt = np.zeros(dim)
            assert_(res.shape == tgt.shape)
            assert_almost_equal(res, tgt, decimal=self.N)

    def test_vecobject(self):
        # dot over object arrays uses the objects' own arithmetic; the
        # result must not depend on operand contiguity.
        class Vec(object):
            def __init__(self, sequence=None):
                if sequence is None:
                    sequence = []
                self.array = np.array(sequence)

            def __add__(self, other):
                out = Vec()
                out.array = self.array + other.array
                return out

            def __sub__(self, other):
                out = Vec()
                out.array = self.array - other.array
                return out

            def __mul__(self, other):  # with scalar
                out = Vec(self.array.copy())
                out.array *= other
                return out

            def __rmul__(self, other):
                return self*other

        U_non_cont = np.transpose([[1., 1.], [1., 2.]])
        U_cont = np.ascontiguousarray(U_non_cont)
        x = np.array([Vec([1., 0.]), Vec([0., 1.])])
        zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
        zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
        assert_equal(zeros[0].array, zeros_test[0].array)
        assert_equal(zeros[1].array, zeros_test[1].array)

    def test_dot_2args(self):
        from numpy.core.multiarray import dot

        a = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([[1, 0], [1, 1]], dtype=float)
        c = np.array([[3, 2], [7, 4]], dtype=float)

        d = dot(a, b)
        assert_allclose(c, d)

    def test_dot_3args(self):
        # The out= array must be reused, not copied or replaced.
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 32))
        for i in range(12):
            dot(f, v, r)
        # Repeated calls must not leak references to r.
        assert_equal(sys.getrefcount(r), 2)
        r2 = dot(f, v, out=None)
        assert_array_equal(r2, r)
        assert_(r is dot(f, v, out=r))

        v = v[:, 0].copy()  # v.shape == (16,)
        r = r[:, 0].copy()  # r.shape == (1024,)
        r2 = dot(f, v)
        assert_(r is dot(f, v, r))
        assert_array_equal(r2, r)

    def test_dot_3args_errors(self):
        # out= must match exactly: shape, contiguity, and dtype.
        from numpy.core.multiarray import dot

        np.random.seed(22)
        f = np.random.random_sample((1024, 16))
        v = np.random.random_sample((16, 32))

        r = np.empty((1024, 31))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((1024,))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((32,))
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((32, 1024))
        assert_raises(ValueError, dot, f, v, r)
        assert_raises(ValueError, dot, f, v, r.T)

        r = np.empty((1024, 64))
        assert_raises(ValueError, dot, f, v, r[:, ::2])
        assert_raises(ValueError, dot, f, v, r[:, :32])

        r = np.empty((1024, 32), dtype=np.float32)
        assert_raises(ValueError, dot, f, v, r)

        r = np.empty((1024, 32), dtype=int)
        assert_raises(ValueError, dot, f, v, r)

    def test_dot_array_order(self):
        a = np.array([[1, 2], [3, 4]], order='C')
        b = np.array([[1, 2], [3, 4]], order='F')
        res = np.dot(a, a)

        # integer arrays are exact
        assert_equal(np.dot(a, b), res)
        assert_equal(np.dot(b, a), res)
        assert_equal(np.dot(b, b), res)

    def test_dot_scalar_and_matrix_of_objects(self):
        # Ticket #2469
        arr = np.matrix([1, 2], dtype=object)
        desired = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.dot(arr, 3), desired)
        assert_equal(np.dot(3, arr), desired)

    def test_dot_override(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        return

        class A(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(object):
            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A()
        b = B()
        c = np.array([[1]])

        assert_equal(np.dot(a, b), "A")
        assert_equal(c.dot(a), "A")
        assert_raises(TypeError, np.dot, b, c)
        assert_raises(TypeError, c.dot, b)

    def test_accelerate_framework_sgemv_fix(self):
        # Regression test for a segfault in Apple's Accelerate sgemv with
        # misaligned/strided float32 operands.

        def aligned_array(shape, align, dtype, order='C'):
            # Allocate an array whose data pointer sits at the requested
            # byte alignment.
            d = dtype(0)
            N = np.prod(shape)
            tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
            address = tmp.__array_interface__["data"][0]
            for offset in range(align):
                if (address + offset) % align == 0:
                    break
            tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
            return tmp.reshape(shape, order=order)

        def as_aligned(arr, align, dtype, order='C'):
            aligned = aligned_array(arr.shape, align, dtype, order)
            aligned[:] = arr[:]
            return aligned

        def assert_dot_close(A, X, desired):
            assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)

        m = aligned_array(100, 15, np.float32)
        s = aligned_array((100, 100), 15, np.float32)
        np.dot(s, m)  # this will always segfault if the bug is present

        testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
        for align, m, n, a_order in testdata:
            # Calculation in double precision
            A_d = np.random.rand(m, n)
            X_d = np.random.rand(n)
            desired = np.dot(A_d, X_d)
            # Calculation with aligned single precision
            A_f = as_aligned(A_d, align, np.float32, order=a_order)
            X_f = as_aligned(X_d, align, np.float32)
            assert_dot_close(A_f, X_f, desired)
            # Strided A rows
            A_d_2 = A_d[::2]
            desired = np.dot(A_d_2, X_d)
            A_f_2 = A_f[::2]
            assert_dot_close(A_f_2, X_f, desired)
            # Strided A columns, strided X vector
            A_d_22 = A_d_2[:, ::2]
            X_d_2 = X_d[::2]
            desired = np.dot(A_d_22, X_d_2)
            A_f_22 = A_f_2[:, ::2]
            X_f_2 = X_f[::2]
            assert_dot_close(A_f_22, X_f_2, desired)
            # Check the strides are as expected
            if a_order == 'F':
                assert_equal(A_f_22.strides, (8, 8 * m))
            else:
                assert_equal(A_f_22.strides, (8 * n, 8))
            assert_equal(X_f_2.strides, (8,))
            # Strides in A rows + cols only
            X_f_2c = as_aligned(X_f_2, align, np.float32)
            assert_dot_close(A_f_22, X_f_2c, desired)
            # Strides just in A cols
            A_d_12 = A_d[:, ::2]
            desired = np.dot(A_d_12, X_d_2)
            A_f_12 = A_f[:, ::2]
            assert_dot_close(A_f_12, X_f_2c, desired)
            # Strides in A cols and X
            assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
    """Common tests for '@' operator and numpy.matmul.

    Do not derive from TestCase to avoid nose running it.
    Subclasses supply the callable under test as the ``matmul`` attribute.
    """
    # Should work with these types. Will want to add
    # "O" at some point
    types = "?bhilqBHILQefdgFDG"

    def test_exceptions(self):
        # Shape-incompatible and scalar operands must raise ValueError
        # for every supported dtype.
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1,)),              # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)

    def test_shapes(self):
        # Stacked-matrix broadcasting of the leading dimensions.
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())

    def test_result_types(self):
        # The result dtype matches the operand dtype for every type code.
        mat = np.ones((1,1))
        vec = np.ones((1,))
        for dt in self.types:
            m = mat.astype(dt)
            v = vec.astype(dt)
            for arg in [(m, v), (v, m), (m, m)]:
                res = self.matmul(*arg)
                assert_(res.dtype == dt)

            # vector vector returns scalars
            res = self.matmul(v, v)
            assert_(type(res) is np.dtype(dt).type)

    def test_vector_vector_values(self):
        vec = np.array([1, 2])
        tgt = 5
        for dt in self.types[1:]:
            v1 = vec.astype(dt)
            res = self.matmul(v1, v1)
            assert_equal(res, tgt)

        # boolean type
        vec = np.array([True, True], dtype='?')
        res = self.matmul(vec, vec)
        assert_equal(res, True)

    def test_vector_matrix_values(self):
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([7, 10])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(v, m1)
            assert_equal(res, tgt1)
            res = self.matmul(v, m2)
            assert_equal(res, tgt2)

        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)

        res = self.matmul(vec, mat1)
        assert_equal(res, tgt1)
        res = self.matmul(vec, mat2)
        assert_equal(res, tgt2)

    def test_matrix_vector_values(self):
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([5, 11])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(m1, v)
            assert_equal(res, tgt1)
            res = self.matmul(m2, v)
            assert_equal(res, tgt2)

        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)

        # BUG FIX: previously called matmul(vec, mat) — a copy-paste from
        # the vector-matrix test above. The expected values are unchanged
        # (mat1 is symmetric), but matrix @ vector is now actually tested.
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)

    def test_matrix_matrix_values(self):
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.array([[1, 0], [1, 1]])
        mat12 = np.stack([mat1, mat2], axis=0)
        mat21 = np.stack([mat2, mat1], axis=0)
        tgt11 = np.array([[7, 10], [15, 22]])
        tgt12 = np.array([[3, 2], [7, 4]])
        tgt21 = np.array([[1, 2], [4, 6]])
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        for dt in self.types[1:]:
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            m12 = mat12.astype(dt)
            m21 = mat21.astype(dt)

            # matrix @ matrix
            res = self.matmul(m1, m2)
            assert_equal(res, tgt12)
            res = self.matmul(m2, m1)
            assert_equal(res, tgt21)

            # stacked @ matrix
            res = self.matmul(m12, m1)
            assert_equal(res, tgt11_21)

            # matrix @ stacked
            res = self.matmul(m1, m12)
            assert_equal(res, tgt11_12)

            # stacked @ stacked
            res = self.matmul(m12, m21)
            assert_equal(res, tgt12_21)

        # boolean type
        m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
        m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
        m12 = np.stack([m1, m2], axis=0)
        m21 = np.stack([m2, m1], axis=0)
        tgt11 = m1
        tgt12 = m1
        tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)

        # matrix @ matrix
        res = self.matmul(m1, m2)
        assert_equal(res, tgt12)
        res = self.matmul(m2, m1)
        assert_equal(res, tgt21)

        # stacked @ matrix
        res = self.matmul(m12, m1)
        assert_equal(res, tgt11_21)

        # matrix @ stacked
        res = self.matmul(m1, m12)
        assert_equal(res, tgt11_12)

        # stacked @ stacked
        res = self.matmul(m12, m21)
        assert_equal(res, tgt12_21)

    def test_numpy_ufunc_override(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        return

        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A([1, 2])
        b = B([1, 2])
        c = np.ones(2)
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
    # Run the common matmul test suite against the np.matmul function.
    matmul = np.matmul

    def test_out_arg(self):
        # out= must be honored both positionally and as a keyword, and
        # reject unsafe casts.
        a = np.ones((2, 2), dtype=np.float)
        b = np.ones((2, 2), dtype=np.float)
        tgt = np.full((2,2), 2, dtype=np.float)

        # test as positional argument
        msg = "out positional argument"
        out = np.zeros((2, 2), dtype=np.float)
        self.matmul(a, b, out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test as keyword argument
        msg = "out keyword argument"
        out = np.zeros((2, 2), dtype=np.float)
        self.matmul(a, b, out=out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test out with not allowed type cast (safe casting)
        # einsum and cblas raise different error types, so
        # use Exception.
        msg = "out argument with illegal cast"
        out = np.zeros((2, 2), dtype=np.int32)
        assert_raises(Exception, self.matmul, a, b, out=out)

        # skip following tests for now, cblas does not allow non-contiguous
        # outputs and consistency with dot would require same type,
        # dimensions, subtype, and c_contiguous.

        # test out with allowed type cast
        # msg = "out argument with allowed cast"
        # out = np.zeros((2, 2), dtype=np.complex128)
        # self.matmul(a, b, out=out)
        # assert_array_equal(out, tgt, err_msg=msg)

        # test out non-contiguous
        # msg = "out argument with non-contiguous layout"
        # c = np.zeros((2, 2, 2), dtype=np.float)
        # self.matmul(a, b, out=c[..., 0])
        # assert_array_equal(c, tgt, err_msg=msg)
# The '@' operator (and therefore operator.matmul / the '@=' syntax used
# below) only exists on Python 3.5+, so these tests are version-gated.
if sys.version_info[:2] >= (3, 5):
    class TestMatmulOperator(MatmulCommon, TestCase):
        # Run the common matmul test suite against the '@' operator.
        import operator
        matmul = operator.matmul

        def test_array_priority_override(self):
            # A high-priority non-ndarray operand wins on both sides.
            class A(object):
                __array_priority__ = 1000

                def __matmul__(self, other):
                    return "A"

                def __rmatmul__(self, other):
                    return "A"

            a = A()
            b = np.ones(2)
            assert_equal(self.matmul(a, b), "A")
            assert_equal(self.matmul(b, a), "A")

    def test_matmul_inplace():
        # It would be nice to support in-place matmul eventually, but for now
        # we don't have a working implementation, so better just to error out
        # and nudge people to writing "a = a @ b".
        a = np.eye(3)
        b = np.eye(3)
        assert_raises(TypeError, a.__imatmul__, b)
        import operator
        assert_raises(TypeError, operator.imatmul, a, b)
        # we avoid writing the token `exec` so as not to crash python 2's
        # parser
        exec_ = getattr(builtins, "exec")
        assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
    """np.inner tests for object arrays and a self-product regression."""

    def test_inner_scalar_and_matrix_of_objects(self):
        # Ticket #4482
        arr = np.matrix([1, 2], dtype=object)
        desired = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.inner(arr, 3), desired)
        assert_equal(np.inner(3, arr), desired)

    def test_vecself(self):
        # Ticket 844.
        # Inner product of a vector with itself segfaults or give
        # meaningless result
        a = np.zeros(shape=(1, 80), dtype=np.float64)
        p = np.inner(a, a)
        assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
    """str/repr summarization ('...') of large arrays.

    NOTE: the expected strings encode the legacy (pre-1.14) array2string
    layout, including the trailing comma after the ellipsis.
    """

    def test_1d(self):
        A = np.arange(1001)
        strA = '[   0    1    2 ...,  998  999 1000]'
        assert_(str(A) == strA)

        reprA = 'array([   0,    1,    2, ...,  998,  999, 1000])'
        assert_(repr(A) == reprA)

    def test_2d(self):
        A = np.arange(1002).reshape(2, 501)
        strA = '[[   0    1    2 ...,  498  499  500]\n' \
               ' [ 501  502  503 ...,  999 1000 1001]]'
        assert_(str(A) == strA)

        reprA = 'array([[   0,    1,    2, ...,  498,  499,  500],\n' \
                '       [ 501,  502,  503, ...,  999, 1000, 1001]])'
        assert_(repr(A) == reprA)
class TestChoose(TestCase):
    """np.choose with 1-d and 2-d choice arrays, including broadcasting."""

    def setUp(self):
        # Two constant choice arrays (all 2s and all 3s) in 1-d and 2-d
        # form, plus an index array selecting between them.
        self.x = np.full((3,), 2, dtype=int)
        self.y = np.full((3,), 3, dtype=int)
        self.x2 = np.full((2, 3), 2, dtype=int)
        self.y2 = np.full((2, 3), 3, dtype=int)
        self.ind = [0, 0, 1]

    def test_basic(self):
        chosen = np.choose(self.ind, (self.x, self.y))
        assert_equal(chosen, [2, 2, 3])

    def test_broadcast1(self):
        # 1-d index broadcast against 2-d choices.
        chosen = np.choose(self.ind, (self.x2, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    def test_broadcast2(self):
        # Mixed-rank choices broadcast to a common shape.
        chosen = np.choose(self.ind, (self.x, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
# Padding-mode codes accepted by the C-level neighborhood iterator
# exercised in TestNeighborhoodIter below.
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
    """Tests for the C-level neighborhood iterator.

    The iteration itself happens inside the ``test_neighborhood_iterator``
    helper; each test compares the sequence of neighborhoods it yields
    against hand-written expected arrays, for the various out-of-bounds
    fill modes in ``NEIGH_MODE``.

    Fixes vs. the previous version:
    * the dtype check in ``_test_mirror`` asserted on a non-empty list,
      which is always true — it now checks every element;
    * ``np.float`` (deprecated alias of the builtin ``float``, removed in
      NumPy 1.24) is replaced by the builtin — same float64 dtype.
    """

    # Simple, 2d tests
    def _test_simple2d(self, dt):
        # Test zero, one and constant padding for a simple data type.
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
                                       NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple2d(self):
        self._test_simple2d(float)

    def test_simple2d_object(self):
        self._test_simple2d(Decimal)

    def _test_mirror2d(self, dt):
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    def test_mirror2d(self):
        self._test_mirror2d(float)

    def test_mirror2d_object(self):
        self._test_mirror2d(Decimal)

    # Simple, 1d tests
    def _test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple_float(self):
        self._test_simple(float)

    def test_simple_object(self):
        self._test_simple(Decimal)

    # Test mirror modes
    def _test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        # BUG FIX: the old check asserted truthiness of a non-empty list,
        # which always passed; actually verify every neighborhood's dtype.
        self.assertTrue(all(i.dtype == dt for i in l))
        assert_array_equal(l, r)

    def test_mirror(self):
        self._test_mirror(float)

    def test_mirror_object(self):
        self._test_mirror(Decimal)

    # Circular mode
    def _test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    def test_circular(self):
        self._test_circular(float)

    def test_circular_object(self):
        self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
    """Tests for stacking one neighborhood iterator on top of another via
    the C helper ``test_neighborhood_iterator_oob``: the inner iterator's
    out-of-bounds values are supplied by the outer iterator, each with its
    own fill mode (first bounds/mode pair = outer, second pair = inner)."""

    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
                                           [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)

    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of circular
        # (comment fixed: this section uses circular, not mirror, padding)
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array (comment fixed: this case uses circular padding)
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
class TestWarnings(object):
    """Warning behavior of ndarray item assignment."""

    def test_complex_warning(self):
        # Assigning complex data into a real-valued array discards the
        # imaginary part; with ComplexWarning escalated to an error the
        # assignment must raise and leave the target array untouched.
        real = np.array([1, 2])
        cplx = np.array([1-2j, 1+2j])

        with warnings.catch_warnings():
            warnings.simplefilter("error", np.ComplexWarning)
            assert_raises(np.ComplexWarning, real.__setitem__, slice(None), cplx)
            assert_equal(real, [1, 2])
class TestMinScalarType(object):
    """np.min_scalar_type must pick the smallest dtype able to hold a
    given Python integer (preferring unsigned kinds for positive values)."""

    def _assert_min_type(self, value, typename):
        # Each boundary value should map to exactly the named dtype.
        assert_equal(np.dtype(typename), np.min_scalar_type(value))

    def test_usigned_shortshort(self):
        self._assert_min_type(2**8 - 1, 'uint8')

    def test_usigned_short(self):
        self._assert_min_type(2**16 - 1, 'uint16')

    def test_usigned_int(self):
        self._assert_min_type(2**32 - 1, 'uint32')

    def test_usigned_longlong(self):
        self._assert_min_type(2**63 - 1, 'uint64')

    def test_object(self):
        # Values beyond the uint64 range fall back to object dtype.
        self._assert_min_type(2**64, 'O')
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
    """Translation of PEP 3118 buffer-format strings into numpy dtypes
    via the private helper ``_dtype_from_pep3118``."""

    def _check(self, spec, wanted):
        # Build the expected dtype; a trailing ('', ...) entry in a list
        # spec stands for unnamed trailing padding, so blank the last
        # field name to match what the parser produces.
        dt = np.dtype(wanted)
        if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
            if wanted[-1][0] == '':
                names = list(dt.names)
                names[-1] = ''
                dt.names = tuple(names)
        assert_equal(_dtype_from_pep3118(spec), dt,
                     err_msg="spec %r != dtype %r" % (spec, wanted))

    def test_native_padding(self):
        # Native ('@') specs insert alignment padding before 'i'; the
        # standard-size ('=') spec must take offsets literally.
        align = np.dtype('i').alignment
        for j in range(8):
            if j == 0:
                s = 'bi'
            else:
                s = 'b%dxi' % j
            self._check('@'+s, {'f0': ('i1', 0),
                                'f1': ('i', align*(1 + j//align))})
            self._check('='+s, {'f0': ('i1', 0),
                                'f1': ('i', 1+j)})

    def test_native_padding_2(self):
        # Native padding should work also for structs and sub-arrays
        self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
        self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})

    def test_trailing_padding(self):
        # Trailing padding should be included, *and*, the item size
        # should match the alignment if in aligned mode
        align = np.dtype('i').alignment

        def VV(n):
            # Void type rounded up to the next alignment boundary.
            return 'V%d' % (align*(1 + (n-1)//align))

        self._check('ix', [('f0', 'i'), ('', VV(1))])
        self._check('ixx', [('f0', 'i'), ('', VV(2))])
        self._check('ixxx', [('f0', 'i'), ('', VV(3))])
        self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
        self._check('i7x', [('f0', 'i'), ('', VV(7))])

        # Unaligned ('^') mode: padding bytes are counted literally.
        self._check('^ix', [('f0', 'i'), ('', 'V1')])
        self._check('^ixx', [('f0', 'i'), ('', 'V2')])
        self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
        self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
        self._check('^i7x', [('f0', 'i'), ('', 'V7')])

    def test_native_padding_3(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'),
                    ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)

        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                    ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)

    def test_padding_with_array_inside_struct(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
                    ('d', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)

    def test_byteorder_inside_struct(self):
        # The byte order after @T{=i} should be '=', not '@'.
        # Check this by noting the absence of native alignment.
        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
                                 'f1': ('i', 5)})

    def test_intra_padding(self):
        # Natively aligned sub-arrays may require some internal padding
        align = np.dtype('i').alignment

        def VV(n):
            return 'V%d' % (align*(1 + (n-1)//align))

        self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
    """Exercise the PEP 3118 buffer protocol exposed by ndarray:
    memoryview round-trips, exported format strings, shapes/strides,
    struct and sub-array layouts, and byte-order handling."""

    def _check_roundtrip(self, obj):
        # Export obj through a memoryview and re-import it; the
        # non-owning view (asarray) and the copy (array) must both
        # preserve dtype, shape and contents.
        obj = np.asarray(obj)
        x = memoryview(obj)
        y = np.asarray(x)
        y2 = np.array(x)
        assert_(not y.flags.owndata)
        assert_(y2.flags.owndata)

        assert_equal(y.dtype, obj.dtype)
        assert_equal(y.shape, obj.shape)
        assert_array_equal(obj, y)

        assert_equal(y2.dtype, obj.dtype)
        assert_equal(y2.shape, obj.shape)
        assert_array_equal(obj, y2)

    def test_roundtrip(self):
        x = np.array([1, 2, 3, 4, 5], dtype='i4')
        self._check_roundtrip(x)

        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
        self._check_roundtrip(x)

        # Non-contiguous slice of a 3-d array.
        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
        self._check_roundtrip(x)

        # One field of every buffer-exportable scalar kind.
        dt = [('a', 'b'),
              ('b', 'h'),
              ('c', 'i'),
              ('d', 'l'),
              ('dx', 'q'),
              ('e', 'B'),
              ('f', 'H'),
              ('g', 'I'),
              ('h', 'L'),
              ('hx', 'Q'),
              ('i', np.single),
              ('j', np.double),
              ('k', np.longdouble),
              ('ix', np.csingle),
              ('jx', np.cdouble),
              ('kx', np.clongdouble),
              ('l', 'S4'),
              ('m', 'U4'),
              ('n', 'V3'),
              ('o', '?'),
              ('p', np.half),
              ]
        x = np.array(
                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
                dtype=dt)
        self._check_roundtrip(x)

        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='>i2')
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='<i2')
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='>i4')
        self._check_roundtrip(x)

        x = np.array([1, 2, 3], dtype='<i4')
        self._check_roundtrip(x)

        # check long long can be represented as non-native
        x = np.array([1, 2, 3], dtype='>q')
        self._check_roundtrip(x)

        # Native-only data types can be passed through the buffer interface
        # only in native byte order
        if sys.byteorder == 'little':
            x = np.array([1, 2, 3], dtype='>g')
            assert_raises(ValueError, self._check_roundtrip, x)
            x = np.array([1, 2, 3], dtype='<g')
            self._check_roundtrip(x)
        else:
            x = np.array([1, 2, 3], dtype='>g')
            self._check_roundtrip(x)
            x = np.array([1, 2, 3], dtype='<g')
            assert_raises(ValueError, self._check_roundtrip, x)

    def test_roundtrip_half(self):
        # Boundary and special values of the float16 format.
        half_list = [
            1.0,
            -2.0,
            6.5504 * 10**4,  # (max half precision)
            2**-14,  # ~= 6.10352 * 10**-5 (minimum positive normal)
            2**-24,  # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
            0.0,
            -0.0,
            float('+inf'),
            float('-inf'),
            0.333251953125,  # ~= 1/3
        ]

        x = np.array(half_list, dtype='>e')
        self._check_roundtrip(x)
        x = np.array(half_list, dtype='<e')
        self._check_roundtrip(x)

    def test_roundtrip_single_types(self):
        # Every registered scalar type, in native and both forced byte
        # orders (where representable).
        for typ in np.typeDict.values():
            dtype = np.dtype(typ)

            if dtype.char in 'Mm':
                # datetimes cannot be used in buffers
                continue
            if dtype.char == 'V':
                # skip void
                continue

            x = np.zeros(4, dtype=dtype)
            self._check_roundtrip(x)

            if dtype.char not in 'qQgG':
                dt = dtype.newbyteorder('<')
                x = np.zeros(4, dtype=dt)
                self._check_roundtrip(x)

                dt = dtype.newbyteorder('>')
                x = np.zeros(4, dtype=dt)
                self._check_roundtrip(x)

    def test_roundtrip_scalar(self):
        # Issue #4015.
        self._check_roundtrip(0)

    def test_export_simple_1d(self):
        x = np.array([1, 2, 3, 4, 5], dtype='i')
        y = memoryview(x)
        assert_equal(y.format, 'i')
        assert_equal(y.shape, (5,))
        assert_equal(y.ndim, 1)
        assert_equal(y.strides, (4,))
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 4)

    def test_export_simple_nd(self):
        x = np.array([[1, 2], [3, 4]], dtype=np.float64)
        y = memoryview(x)
        assert_equal(y.format, 'd')
        assert_equal(y.shape, (2, 2))
        assert_equal(y.ndim, 2)
        assert_equal(y.strides, (16, 8))
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 8)

    def test_export_discontiguous(self):
        x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
        y = memoryview(x)
        assert_equal(y.format, 'f')
        assert_equal(y.shape, (3, 3))
        assert_equal(y.ndim, 2)
        assert_equal(y.strides, (36, 4))
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 4)

    def test_export_record(self):
        # A struct dtype must export the full T{...} format string with
        # correct per-field byte-order/alignment markers.
        dt = [('a', 'b'),
              ('b', 'h'),
              ('c', 'i'),
              ('d', 'l'),
              ('dx', 'q'),
              ('e', 'B'),
              ('f', 'H'),
              ('g', 'I'),
              ('h', 'L'),
              ('hx', 'Q'),
              ('i', np.single),
              ('j', np.double),
              ('k', np.longdouble),
              ('ix', np.csingle),
              ('jx', np.cdouble),
              ('kx', np.clongdouble),
              ('l', 'S4'),
              ('m', 'U4'),
              ('n', 'V3'),
              ('o', '?'),
              ('p', np.half),
              ]
        x = np.array(
                [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    asbytes('aaaa'), 'bbbb', asbytes('   '), True, 1.0)],
                dtype=dt)
        y = memoryview(x)
        assert_equal(y.shape, (1,))
        assert_equal(y.ndim, 1)
        assert_equal(y.suboffsets, EMPTY)

        sz = sum([np.dtype(b).itemsize for a, b in dt])
        # The expected format depends on whether 'l' is 32- or 64-bit.
        if np.dtype('l').itemsize == 4:
            assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
        else:
            assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
        # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
        if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
            assert_equal(y.strides, (sz,))
        assert_equal(y.itemsize, sz)

    def test_export_subarray(self):
        x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
        y = memoryview(x)
        assert_equal(y.format, 'T{(2,2)i:a:}')
        assert_equal(y.shape, EMPTY)
        assert_equal(y.ndim, 0)
        assert_equal(y.strides, EMPTY)
        assert_equal(y.suboffsets, EMPTY)
        assert_equal(y.itemsize, 16)

    def test_export_endian(self):
        # Only the non-native order needs an explicit marker in the format.
        x = np.array([1, 2, 3], dtype='>i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, '>i')
        else:
            assert_equal(y.format, 'i')

        x = np.array([1, 2, 3], dtype='<i')
        y = memoryview(x)
        if sys.byteorder == 'little':
            assert_equal(y.format, 'i')
        else:
            assert_equal(y.format, '<i')

    def test_export_flags(self):
        # Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
        assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))

    def test_padding(self):
        # Structs with explicit field offsets (trailing padding) must
        # still round-trip.
        for j in range(8):
            x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
            self._check_roundtrip(x)

    def test_reference_leak(self):
        # Exporting/importing a buffer must not leak a reference to
        # numpy.core._internal.
        count_1 = sys.getrefcount(np.core._internal)
        a = np.zeros(4)
        b = memoryview(a)
        c = np.asarray(b)
        count_2 = sys.getrefcount(np.core._internal)
        assert_equal(count_1, count_2)
        del c  # avoid pyflakes unused variable warning.

    def test_padded_struct_array(self):
        # Aligned structs (plain, with sub-array, with nested struct).
        dt1 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
        self._check_roundtrip(x1)

        dt2 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
                align=True)
        x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
        self._check_roundtrip(x2)

        dt3 = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                    ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
        self._check_roundtrip(x3)

    def test_relaxed_strides(self):
        # Test that relaxed strides are converted to non-relaxed
        c = np.ones((1, 10, 10), dtype='i8')

        # Check for NPY_RELAXED_STRIDES_CHECKING:
        if np.ones((10, 1), order="C").flags.f_contiguous:
            c.strides = (-1, 80, 8)

        assert memoryview(c).strides == (800, 80, 8)

        # Writing C-contiguous data to a BytesIO buffer should work
        fd = io.BytesIO()
        fd.write(c.data)

        fortran = c.T
        assert memoryview(fortran).strides == (8, 80, 800)

        arr = np.ones((1, 10))
        if arr.flags.f_contiguous:
            shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
            assert_(strides[0] == 8)
            arr = np.ones((10, 1), order='F')
            shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
            assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
    """Deleting ndarray or flags attributes must raise AttributeError
    rather than crash (regression test for ticket #2046)."""

    def test_multiarray_writable_attributes_deletion(self):
        """ticket #2046, should not seqfault, raise AttributeError"""
        arr = np.ones(2)
        for name in ('shape', 'strides', 'data', 'dtype', 'real', 'imag',
                     'flat'):
            assert_raises(AttributeError, delattr, arr, name)

    def test_multiarray_not_writable_attributes_deletion(self):
        arr = np.ones(2)
        for name in ("ndim", "flags", "itemsize", "size", "nbytes", "base",
                     "ctypes", "T", "__array_interface__", "__array_struct__",
                     "__array_priority__", "__array_finalize__"):
            assert_raises(AttributeError, delattr, arr, name)

    def test_multiarray_flags_writable_attribute_deletion(self):
        flags = np.ones(2).flags
        for name in ('updateifcopy', 'aligned', 'writeable'):
            assert_raises(AttributeError, delattr, flags, name)

    def test_multiarray_flags_not_writable_attribute_deletion(self):
        flags = np.ones(2).flags
        for name in ("contiguous", "c_contiguous", "f_contiguous", "fortran",
                     "owndata", "fnc", "forc", "behaved", "carray", "farray",
                     "num"):
            assert_raises(AttributeError, delattr, flags, name)
def test_array_interface():
    """np.array() on objects exposing ``__array_interface__``: scalar
    coercion (via __float__) and the handling of the 'shape' entry."""
    # Test scalar coercion within the array interface
    class Foo(object):
        def __init__(self, value):
            self.value = value
            # Minimal interface: only 'typestr'; the tests below add and
            # override 'shape' on this same dict.
            self.iface = {'typestr': '=f8'}

        def __float__(self):
            return float(self.value)

        @property
        def __array_interface__(self):
            return self.iface

    f = Foo(0.5)
    # With no shape entry the object coerces like a scalar.
    assert_equal(np.array(f), 0.5)
    assert_equal(np.array([f]), [0.5])
    assert_equal(np.array([f, f]), [0.5, 0.5])
    assert_equal(np.array(f).dtype, np.dtype('=f8'))
    # Test various shape definitions
    f.iface['shape'] = ()
    assert_equal(np.array(f), 0.5)
    # shape=None is invalid and must raise.
    f.iface['shape'] = None
    assert_raises(TypeError, np.array, f)
    f.iface['shape'] = (1, 1)
    assert_equal(np.array(f), [[0.5]])
    # A shape inconsistent with a scalar source must raise.
    f.iface['shape'] = (2,)
    assert_raises(ValueError, np.array, f)

    # test scalar with no shape
    class ArrayLike(object):
        # Borrow a real 0-d array's interface dict wholesale.
        array = np.array(1)
        __array_interface__ = array.__array_interface__
    assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
    """Deleting items of a flat iterator is unsupported: it must raise
    TypeError and nothing else (in particular, it must not crash)."""
    it = np.ones(3).flat
    try:
        del it[1]
        del it[1:2]
    except TypeError:
        # Expected: flatiter does not support item deletion.
        pass
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any other exception is a regression.
        raise AssertionError
def test_scalar_element_deletion():
    # Fields of a structured (void) scalar cannot be deleted.
    rec = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
    assert_raises(ValueError, rec[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
    """Drive the PyDataMem event-hook machinery; the real assertions live
    in the C code (multiarray/multiarray_tests.c.src)."""

    def test_mem_seteventhook(self):
        test_pydatamem_seteventhook_start()
        # Trigger one allocation/free pair large enough to bypass the
        # small-array cache in ctors.c so the hook actually fires.
        scratch = np.zeros(1000)
        del scratch
        test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
    """Drive the C-level map iterator via ``test_inplace_increment``; the
    real checks happen in multiarray/multiarray_tests.c.src."""

    def test_mapiter(self):
        # 2-d case: fancy index hits (1,0) twice, (2,2) and (0,3); the
        # duplicate position must accumulate both increments of 50.
        mat = np.arange(12).reshape((3, 4)).astype(float)
        rows = [1, 1, 2, 0]
        cols = [0, 0, 2, 3]
        test_inplace_increment(mat, (rows, cols), [50, 50, 30, 16])
        assert_equal(mat, [[0.00, 1., 2.0, 19.],
                           [104., 5., 6.0, 7.0],
                           [8.00, 9., 40., 11.]])

        # 1-d case with an ndarray index.
        vec = np.arange(6).astype(float)
        test_inplace_increment(vec, (np.array([1, 2, 0]),), [50, 4, 100.1])
        assert_equal(vec, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
    """Element fetch through the C helper ``test_as_c_array`` for 1-, 2-
    and 3-d double arrays."""

    def test_1darray(self):
        data = np.arange(24, dtype=np.double)
        assert_equal(data[3], test_as_c_array(data, 3))

    def test_2darray(self):
        data = np.arange(24, dtype=np.double).reshape(3, 8)
        assert_equal(data[2, 4], test_as_c_array(data, 2, 4))

    def test_3darray(self):
        data = np.arange(24, dtype=np.double).reshape(2, 3, 4)
        assert_equal(data[1, 2, 3], test_as_c_array(data, 1, 2, 3))
class TestConversion(TestCase):
    """Relational operators between Python scalars and 0-d arrays across
    integer and float dtypes, including signed/unsigned mixes."""

    def test_array_scalar_relational_operation(self):
        # All integer dtypes against Python ints and against each other.
        for code1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=code1), "type %s failed" % (code1,))
            assert_(not 1 < np.array(0, dtype=code1),
                    "type %s failed" % (code1,))

            for code2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=code1) > np.array(0, dtype=code2),
                        "type %s and %s failed" % (code1, code2))
                assert_(not np.array(1, dtype=code1) < np.array(0, dtype=code2),
                        "type %s and %s failed" % (code1, code2))

        # Unsigned integers: -1 must compare below (and unequal to) any
        # positive unsigned value rather than wrapping around.
        for code1 in 'BHILQP':
            assert_(-1 < np.array(1, dtype=code1), "type %s failed" % (code1,))
            assert_(not -1 > np.array(1, dtype=code1),
                    "type %s failed" % (code1,))
            assert_(-1 != np.array(1, dtype=code1), "type %s failed" % (code1,))

            # Unsigned vs signed comparisons.
            for code2 in 'bhilqp':
                assert_(np.array(1, dtype=code1) > np.array(-1, dtype=code2),
                        "type %s and %s failed" % (code1, code2))
                assert_(not np.array(1, dtype=code1) < np.array(-1, dtype=code2),
                        "type %s and %s failed" % (code1, code2))
                assert_(np.array(1, dtype=code1) != np.array(-1, dtype=code2),
                        "type %s and %s failed" % (code1, code2))

        # Signed integers and floats.
        for code1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=code1), "type %s failed" % (code1,))
            assert_(not 1 < np.array(-1, dtype=code1),
                    "type %s failed" % (code1,))
            assert_(-1 == np.array(-1, dtype=code1), "type %s failed" % (code1,))

            for code2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=code1) > np.array(-1, dtype=code2),
                        "type %s and %s failed" % (code1, code2))
                assert_(not np.array(1, dtype=code1) < np.array(-1, dtype=code2),
                        "type %s and %s failed" % (code1, code2))
                assert_(np.array(-1, dtype=code1) == np.array(-1, dtype=code2),
                        "type %s and %s failed" % (code1, code2))
class TestWhere(TestCase):
    """Behavior of np.where(condition, x, y): scalar/array broadcasting,
    strided views, dtype promotion, object arrays and byte-swapped inputs.

    Fix: the deprecated aliases ``np.bool`` and ``np.int`` (removed in
    NumPy 1.24) are replaced by ``np.bool_`` and the builtin ``int``;
    the dtypes involved are unchanged.
    """

    def test_basic(self):
        # One representative dtype per numeric kind.
        dts = [np.bool_, np.int16, np.int32, np.int64, np.double,
               np.complex128, np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=np.bool_)
            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            # Strided and reversed views must select identically.
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])

    def test_exotic(self):
        # object
        assert_array_equal(np.where(True, None, None), np.array(None))
        # zero sized
        m = np.array([], dtype=bool).reshape(0, 3)
        b = np.array([], dtype=np.float64).reshape(0, 3)
        assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))

        # object cast
        d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
                      0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
                      1.267, 0.229, -1.39, 0.487])
        nan = float('NaN')
        e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
                      'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
                     dtype=object)
        m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1,
                      0, 0], dtype=bool)

        # np.where must match the equivalent masked assignment.
        r = e[:]
        r[np.where(m)] = d[np.where(m)]
        assert_array_equal(np.where(m, d, e), r)

        r = e[:]
        r[np.where(~m)] = d[np.where(~m)]
        assert_array_equal(np.where(m, e, d), r)

        assert_array_equal(np.where(m, e, e), e)

        # minimal dtype result with NaN scalar (e.g required by pandas)
        d = np.array([1., 2.], dtype=np.float32)
        e = float('NaN')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('-Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        # also check upcast
        e = float(1e150)
        assert_equal(np.where(True, d, e).dtype, np.float64)

    def test_ndim(self):
        c = [True, False]
        # Broadcast the 1-d condition against (2, 25) operands.
        a = np.zeros((2, 25))
        b = np.ones((2, 25))
        r = np.where(np.array(c)[:, np.newaxis], a, b)
        assert_array_equal(r[0], a[0])
        assert_array_equal(r[1], b[0])

        # Transposed operands: the condition applies along the last axis.
        a = a.T
        b = b.T
        r = np.where(c, a, b)
        assert_array_equal(r[:, 0], a[:, 0])
        assert_array_equal(r[:, 1], b[:, 0])

    def test_dtype_mix(self):
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        a = np.uint32(1)
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)

        a = a.astype(np.float32)
        b = b.astype(np.int64)
        assert_equal(np.where(c, a, b), r)

        # non bool mask
        c = c.astype(int)
        c[c != 0] = 34242324
        assert_equal(np.where(c, a, b), r)
        # invert
        tmpmask = c != 0
        c[c == 0] = 41247212
        c[tmpmask] = 0
        assert_equal(np.where(c, b, a), r)

    def test_foreign(self):
        # Byte-swapped (non-native) dtypes must select identically.
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        a = np.ones(1, dtype='>i4')
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)

        b = b.astype('>f8')
        assert_equal(np.where(c, a, b), r)

        a = a.astype('<i4')
        assert_equal(np.where(c, a, b), r)

        c = c.astype('>i4')
        assert_equal(np.where(c, a, b), r)

    def test_error(self):
        # Shape mismatches must raise rather than broadcast incorrectly.
        c = [True, True]
        a = np.ones((4, 5))
        b = np.ones((5, 5))
        assert_raises(ValueError, np.where, c, a, a)
        assert_raises(ValueError, np.where, c[0], a, b)

    def test_string(self):
        # gh-4778 check strings are properly filled with nulls
        a = np.array("abc")
        b = np.array("x" * 753)
        assert_equal(np.where(True, a, b), "abc")
        assert_equal(np.where(False, b, a), "abc")

        # check native datatype sized strings
        a = np.array("abcd")
        b = np.array("x" * 8)
        assert_equal(np.where(True, a, b), "abcd")
        assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
    """sys.getsizeof() on ndarrays must account for the owned data buffer."""

    def test_empty_array(self):
        # Even an empty array carries a positive header size.
        assert_(sys.getsizeof(np.array([])) > 0)

    def check_array(self, dtype):
        # The reported size must exceed the raw payload for any length.
        elem_size = dtype(0).itemsize
        for length in (10, 50, 100, 500):
            arr = np.arange(length, dtype=dtype)
            assert_(sys.getsizeof(arr) > length * elem_size)

    def test_array_int32(self):
        self.check_array(np.int32)

    def test_array_int64(self):
        self.check_array(np.int64)

    def test_array_float32(self):
        self.check_array(np.float32)

    def test_array_float64(self):
        self.check_array(np.float64)

    def test_view(self):
        # A view does not own its buffer, so it reports a smaller size.
        base = np.ones(100)
        assert_(sys.getsizeof(base[...]) < sys.getsizeof(base))

    def test_reshape(self):
        # A reshaped copy owns its buffer and reports at least as much.
        base = np.ones(100)
        assert_(sys.getsizeof(base) <
                sys.getsizeof(base.reshape(100, 1, 1).copy()))

    def test_resize(self):
        # In-place resize must shrink and grow the reported footprint.
        arr = np.ones(100)
        before = sys.getsizeof(arr)
        arr.resize(50)
        assert_(before > sys.getsizeof(arr))
        arr.resize(150)
        assert_(before < sys.getsizeof(arr))

    def test_error(self):
        # __sizeof__ takes no arguments.
        arr = np.ones(100)
        assert_raises(TypeError, arr.__sizeof__, "a")
class TestHashing(TestCase):
    """ndarrays are mutable and therefore unhashable; both hash() and the
    Hashable ABC must agree on that."""

    def test_arrays_not_hashable(self):
        # hash() must raise because ndarray.__hash__ is None.
        x = np.ones(3)
        assert_raises(TypeError, hash, x)

    def test_collections_hashable(self):
        # collections.Hashable was removed in Python 3.10; use the
        # collections.abc location (available since Python 3.3).
        x = np.array([])
        self.assertFalse(isinstance(x, collections.abc.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
    """Checks for ``numpy.core._internal._view_is_safe``: a view of a
    structured dtype is only allowed when object pointers (and padding
    that might hide them) line up between the two dtypes."""

    def test_view_safety(self):
        psize = np.dtype('p').itemsize

        # creates dtype but with extra character code - for missing 'p' fields
        def mtype(s):
            n, offset, fields = 0, 0, []
            for c in s.split(','):  # subarrays won't work
                if c != '-':
                    fields.append(('f{0}'.format(n), c, offset))
                    n += 1
                # '-' consumes pointer-sized space without declaring a field.
                offset += np.dtype(c).itemsize if c != '-' else psize

            names, formats, offsets = zip(*fields)
            return np.dtype({'names': names, 'formats': formats,
                             'offsets': offsets, 'itemsize': offset})

        # test nonequal itemsizes with objects:
        # these should succeed:
        _view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))
        _view_is_safe(np.dtype('O,O'), np.dtype('O,O,O'))
        # these should fail:
        assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O'))
        assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p'))
        assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O'))

        # test nonequal itemsizes with missing fields:
        # these should succeed:
        _view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
        _view_is_safe(np.dtype('p,p'), np.dtype('p,p,p'))
        # these should fail:
        assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
        assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
        assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))

        # scans through positions at which we can view a type
        def scanView(d1, otype):
            goodpos = []
            for shift in range(d1.itemsize - np.dtype(otype).itemsize+1):
                d2 = np.dtype({'names': ['f0'], 'formats': [otype],
                               'offsets': [shift], 'itemsize': d1.itemsize})
                try:
                    _view_is_safe(d1, d2)
                except TypeError:
                    pass
                else:
                    goodpos.append(shift)
            return goodpos

        # test partial overlap with object field
        assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'),
                     [0] + list(range(2*psize, 3*psize+1)))
        assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'),
                     [psize, 4*psize, 5*psize])

        # test partial overlap with missing field
        assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
                     [0] + list(range(2*psize, 3*psize+1)))

        # test nested structures with objects:
        nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')])
        assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
        assert_equal(scanView(nestedO, 'O'), [2*psize])

        # test nested structures with missing fields:
        nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
        assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])

        # test subarrays with objects
        subarrayO = np.dtype('p,(2,3)O,p')
        assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
        assert_equal(scanView(subarrayO, 'O'),
                     list(range(psize, 6*psize+1, psize)))

        #test dtype with overlapping fields
        overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'],
                               'formats': ['p', 'p', 'p', 'p'],
                               'offsets': [0, 1, 3*psize-1, 3*psize],
                               'itemsize': 4*psize})
        assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
    # This will go away when __array_priority__ is settled, meanwhile
    # it serves to check unintended changes.
    op = operator
    # Every binary operator whose dispatch should honor __array_priority__
    # in mixed-type expressions.
    binary_ops = [
        op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
        op.ge, op.lt, op.le, op.ne, op.eq
        ]

    if sys.version_info[0] < 3:
        # Classic division only exists on Python 2.
        binary_ops.append(op.div)

    class Foo(np.ndarray):
        # Subclass with priority above plain ndarray.
        __array_priority__ = 100.

        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)

    class Bar(np.ndarray):
        # Slightly higher priority than Foo, so Bar should win in
        # Foo-vs-Bar mixed operations.
        __array_priority__ = 101.

        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)

    class Other(object):
        # Non-ndarray with the highest priority: all forward and
        # reflected operators return an Other instance.
        __array_priority__ = 1000.

        def _all(self, other):
            return self.__class__()

        __add__ = __radd__ = _all
        __sub__ = __rsub__ = _all
        __mul__ = __rmul__ = _all
        __pow__ = __rpow__ = _all
        __div__ = __rdiv__ = _all
        __mod__ = __rmod__ = _all
        __truediv__ = __rtruediv__ = _all
        __floordiv__ = __rfloordiv__ = _all
        __and__ = __rand__ = _all
        __xor__ = __rxor__ = _all
        __or__ = __ror__ = _all
        __lshift__ = __rlshift__ = _all
        __rshift__ = __rrshift__ = _all
        __eq__ = _all
        __ne__ = _all
        __gt__ = _all
        __ge__ = _all
        __lt__ = _all
        __le__ = _all

    def test_ndarray_subclass(self):
        # Higher-priority subclass wins regardless of operand order.
        a = np.array([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)

    def test_ndarray_other(self):
        # Non-ndarray with high priority wins against plain ndarray.
        a = np.array([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)

    def test_subclass_subclass(self):
        # Between two subclasses the higher priority (Bar) wins.
        a = self.Foo([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)

    def test_subclass_other(self):
        a = self.Foo([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
    """Truth-value semantics of single-element string arrays.

    A one-element string array is falsey iff the element is empty or
    consists solely of whitespace and NUL characters; any other character
    (even after embedded NULs) makes it truthy.

    Uses ``np.str_`` instead of the former ``np.str``: the bare builtin
    alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, while
    ``np.str_`` produces the identical dtype on both Python 2 and 3.
    """
    def test_empty_bstring_array_is_falsey(self):
        self.assertFalse(np.array([''], dtype=np.str_))
    def test_whitespace_bstring_array_is_falsey(self):
        a = np.array(['spam'], dtype=np.str_)
        a[0] = '  \0\0'
        self.assertFalse(a)
    def test_all_null_bstring_array_is_falsey(self):
        a = np.array(['spam'], dtype=np.str_)
        a[0] = '\0\0\0\0'
        self.assertFalse(a)
    def test_null_inside_bstring_array_is_truthy(self):
        # a non-space character after an embedded NUL must count
        a = np.array(['spam'], dtype=np.str_)
        a[0] = ' \0 \0'
        self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
    """Truth-value semantics of single-element unicode arrays.

    Mirrors TestBytestringArrayNonzero for the '<U' dtype: falsey iff the
    element is empty or only whitespace/NULs.

    Uses ``np.unicode_`` instead of the former ``np.unicode``: the bare
    builtin alias was deprecated in NumPy 1.20 and removed in NumPy 1.24,
    while ``np.unicode_`` keeps the identical dtype on Python 2 and 3.
    (On Python-3-only code bases, ``np.str_`` is the equivalent spelling.)
    """
    def test_empty_ustring_array_is_falsey(self):
        self.assertFalse(np.array([''], dtype=np.unicode_))
    def test_whitespace_ustring_array_is_falsey(self):
        a = np.array(['eggs'], dtype=np.unicode_)
        a[0] = '  \0\0'
        self.assertFalse(a)
    def test_all_null_ustring_array_is_falsey(self):
        a = np.array(['eggs'], dtype=np.unicode_)
        a[0] = '\0\0\0\0'
        self.assertFalse(a)
    def test_null_inside_ustring_array_is_truthy(self):
        # a non-space character after an embedded NUL must count
        a = np.array(['eggs'], dtype=np.unicode_)
        a[0] = ' \0 \0'
        self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
|
mit
|
GeoscienceAustralia/sifra
|
tests/test_input_model_excel_file.py
|
1
|
11983
|
import os
import unittest as ut
import pandas as pd
import logging
rootLogger = logging.getLogger(__name__)
rootLogger.setLevel(logging.CRITICAL)
class TestReadingExcelFile(ut.TestCase):
    """Structural validation of every model definition workbook.

    ``setUp`` discovers all ``*.xlsx`` files stored under a ``models``
    directory anywhere in the repository; the individual tests then verify
    that each workbook has the required sheets, the required columns, and
    sensible cell value types.
    """

    # Text types accepted for string-valued cells.  The original code used
    # ``isinstance(x, unicode or str)``, which (a) evaluates to just
    # ``unicode`` (first truthy operand) and (b) raises NameError on
    # Python 3 where ``unicode`` does not exist.  This tuple works on both.
    try:
        _text_types = (str, unicode)  # noqa: F821 -- Python 2 only
    except NameError:  # Python 3: ``unicode`` no longer exists
        _text_types = (str,)

    def setUp(self):
        """Collect the paths of all model ``.xlsx`` files in the repo."""
        self.project_root_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__)))
        self.model_xlsx_files = []
        for root, dir_names, file_names in os.walk(self.project_root_dir):
            for file_name in file_names:
                # model workbooks live under a "models" directory
                if "models" in root and ".xlsx" in file_name:
                    self.model_xlsx_files.append(
                        os.path.join(root, file_name))
        # every model workbook must provide at least these sheets
        self.required_sheets = ['component_list',
                                'component_connections',
                                'supply_setup',
                                'output_setup',
                                'comp_type_dmg_algo',
                                'damage_state_def']

    def test_folder_structure(self):
        """The test fixture folders must exist in the repository."""
        # Checks for the core "models" / "simulation_setup" folders were
        # deliberately disabled; only the test fixtures are required here.
        self.assertTrue(
            os.path.isdir(
                os.path.join(self.project_root_dir, "tests", "models")),
            "test models folder not found at " + self.project_root_dir + "!"
        )
        self.assertTrue(
            os.path.isdir(
                os.path.join(self.project_root_dir,
                             "tests", "simulation_setup")),
            "test simulation setup folder not found at "
            + self.project_root_dir + "!"
        )

    def test_model_files_exists(self):
        """Every discovered model file path must point at a real file."""
        for model_file in self.model_xlsx_files:
            self.assertTrue(
                os.path.isfile(model_file),
                "Model excel file not found on path at" + model_file + " !"
            )

    def test_required_sheets_exist(self):
        """Each workbook must contain all required sheets."""
        for model_file in self.model_xlsx_files:
            rootLogger.info(model_file)
            # sheet_name=None loads all sheets as a dict keyed by name
            df = pd.read_excel(model_file, None)
            # the required sheets must be a subset of those present
            self.assertTrue(set(self.required_sheets) <= set(df.keys()),
                            "Required sheet name not found!")

    def test_reading_data_from_component_list(self):
        """The component_list sheet must be parseable."""
        for model_file in self.model_xlsx_files:
            component_list = pd.read_excel(model_file,
                                           sheet_name='component_list',
                                           header=0,
                                           skiprows=0,
                                           index_col=None,
                                           skipinitialspace=True)
            # NOTE(review): len() always returns an int, so this assertion
            # only proves that the sheet parses without raising.
            self.assertTrue(
                isinstance(len(component_list.index.tolist()), int))

    def test_reading_data_from_component_connections(self):
        """component_connections must have the required columns/types."""
        required_col_names = ['origin', 'destination',
                              'link_capacity', 'weight']
        for model_file in self.model_xlsx_files:
            component_connections = pd.read_excel(
                model_file,
                sheet_name='component_connections',
                header=0,
                skiprows=0,
                index_col=None,
                skipinitialspace=True)
            self.assertTrue(
                set(required_col_names) <=
                set(component_connections.columns.values.tolist()),
                "Required column name not found!")
            for index, connection_values in component_connections.iterrows():
                self.assertTrue(isinstance(connection_values['origin'],
                                           self._text_types))
                self.assertTrue(isinstance(
                    float(connection_values['link_capacity']), float))
                self.assertTrue(isinstance(
                    float(connection_values['weight']), float))
                self.assertTrue(isinstance(connection_values['destination'],
                                           self._text_types))

    def test_reading_data_from_supply_setup(self):
        """supply_setup must have the required columns and value types."""
        # the index column ('input_node') is checked separately below
        required_col_names = ['input_capacity',
                              'capacity_fraction',
                              'commodity_type']
        for model_file in self.model_xlsx_files:
            supply_setup = pd.read_excel(model_file,
                                         sheet_name='supply_setup',
                                         index_col=0,
                                         header=0,
                                         skiprows=0,
                                         skipinitialspace=True)
            self.assertTrue(
                set(required_col_names) <= set(supply_setup.columns.tolist()),
                "Required column name not found!" +
                "col expected: " + str(required_col_names) +
                "col supplied: " +
                str(supply_setup.columns.values.tolist()) + '\n' +
                "file name : " + model_file)
            for index, supply_values in supply_setup.iterrows():
                self.assertTrue(isinstance(
                    float(supply_values['input_capacity']), float))
                self.assertTrue(isinstance(
                    float(supply_values['capacity_fraction']), float))
                self.assertTrue(isinstance(supply_values['commodity_type'],
                                           self._text_types))
                # ``index`` is the scalar input_node label; the original
                # code checked ``index[0]`` -- its first character only.
                self.assertTrue(isinstance(index, self._text_types))

    def test_reading_data_from_output_setup(self):
        """output_setup must have required columns, types and capacity."""
        # the index column ('output_node') is ignored here
        required_col_names = ['production_node',
                              'output_node_capacity',
                              'capacity_fraction',
                              'priority']
        for model_file in self.model_xlsx_files:
            output_setup = pd.read_excel(model_file,
                                         sheet_name='output_setup',
                                         header=0,
                                         skiprows=0,
                                         skipinitialspace=True)
            self.assertTrue(
                set(required_col_names) <= set(output_setup.columns.tolist()),
                "Required column name not found!" + '\n' +
                "col expected: " + str(required_col_names) + '\n' +
                "col supplied: " +
                str(output_setup.columns.values.tolist()) + '\n' +
                "file name : " + model_file)
            # the total output capacity must be positive
            self.assertTrue(output_setup['output_node_capacity'].sum() > 0)
            for index, output_values in output_setup.iterrows():
                self.assertTrue(isinstance(output_values['production_node'],
                                           self._text_types))
                self.assertTrue(isinstance(
                    float(output_values['output_node_capacity']), float))
                self.assertTrue(isinstance(
                    float(output_values['capacity_fraction']), float))
                self.assertTrue(isinstance(
                    int(output_values['priority']), int))

    def test_reading_data_from_comp_type_dmg_algo(self):
        """comp_type_dmg_algo must use known damage functions and types."""
        # Arbitrary extra columns may supply parameters for specific
        # damage functions; only these columns are mandatory.
        required_col_names = ['is_piecewise',
                              'damage_function',
                              'damage_ratio',
                              'functionality',
                              'recovery_function',
                              'recovery_mean',
                              'recovery_std']
        for model_file in self.model_xlsx_files:
            comp_type_dmg_algo = pd.read_excel(model_file,
                                               sheet_name='comp_type_dmg_algo',
                                               index_col=[0, 1, 2],
                                               header=0,
                                               skiprows=0,
                                               skipinitialspace=True)
            self.assertTrue(
                set(required_col_names) <=
                set(comp_type_dmg_algo.columns.tolist()),
                "Required column name not found!" + '\n' +
                "col expected: " + str(required_col_names) + '\n' +
                "col supplied: " +
                str(comp_type_dmg_algo.columns.values.tolist()) + '\n' +
                "file name : " + model_file)
            # damage/recovery response functions currently implemented
            possible_values_of_damage_function = ["StepFunc",
                                                  "LogNormalCDF",
                                                  "Lognormal",
                                                  "NormalCDF",
                                                  "ConstantFunction",
                                                  "Level0Response",
                                                  "Level0Recovery",
                                                  "PiecewiseFunction",
                                                  "RecoveryFunction"]
            for index, damage_state in comp_type_dmg_algo.iterrows():
                # index == (id, component_type, damage_state)
                self.assertTrue(isinstance(index[1], self._text_types))
                self.assertTrue(isinstance(index[2], self._text_types))
                self.assertTrue(
                    str(damage_state['damage_function']) in
                    set(possible_values_of_damage_function),
                    "Required damage_function name not found!" + '\n' +
                    "damage_function expected names: " +
                    str(possible_values_of_damage_function) + '\n' +
                    "damage_function name supplied: " +
                    str(damage_state['damage_function']) + '\n' +
                    "file name : " + model_file
                )
                self.assertTrue(isinstance(damage_state['is_piecewise'],
                                           self._text_types))
                self.assertTrue(isinstance(damage_state['damage_function'],
                                           self._text_types))
                self.assertTrue(isinstance(
                    float(damage_state['damage_ratio']), float))
                self.assertTrue(isinstance(
                    float(damage_state['functionality']), float))
                self.assertTrue(isinstance(
                    float(damage_state['recovery_mean']), float))
                self.assertTrue(isinstance(
                    float(damage_state['recovery_std']), float))
                # TODO: damage_state['fragility_source'] is not used in code

    def test_reading_data_from_damage_state_def(self):
        """damage_state_def index levels must be text."""
        for model_file in self.model_xlsx_files:
            damage_state_def = pd.read_excel(model_file,
                                             sheet_name='damage_state_def',
                                             index_col=[0, 1],
                                             header=0,
                                             skiprows=0,
                                             skipinitialspace=True)
            for index, damage_def in damage_state_def.iterrows():
                self.assertTrue(isinstance(index[0], self._text_types),
                                type(index[0]))
                self.assertTrue(isinstance(index[1], self._text_types),
                                str(index[1]))
            # TODO: excel files are not yet in a standard form; checks on
            # 'damage_state_definition' and 'fragility_source' are disabled.
if __name__ == "__main__":
ut.main()
|
apache-2.0
|
Vimos/scikit-learn
|
sklearn/model_selection/tests/test_validation.py
|
7
|
42247
|
"""Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve"""
    def __init__(self, n_max_train_sizes):
        # largest training-set size the curve will be evaluated at
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None
    def fit(self, X_subset, y_subset=None):
        # remember the exact array object so score() can tell train from test
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self
    def predict(self, X):
        raise NotImplementedError
    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        if self._is_training_data(X):
            return 2. - float(self.train_sizes) / self.n_max_train_sizes
        else:
            return float(self.train_sizes) / self.n_max_train_sizes
    def _is_training_data(self, X):
        # identity check: only the very array passed to fit() counts as train
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit"""
    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator,
              self).__init__(n_max_train_sizes)
        self.x = None
    def _is_training_data(self, X):
        # NOTE(review): membership test against the first sample seen by
        # partial_fit; for 2-D X this relies on numpy's `in` semantics --
        # it matches any row containing that sample's values.
        return self.x in X
    def partial_fit(self, X, y=None, **params):
        # accumulate the total number of samples seen across batches
        self.train_sizes += X.shape[0]
        self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve"""
    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param
    def fit(self, X_subset, y_subset):
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self
    def predict(self, X):
        raise NotImplementedError
    def score(self, X=None, y=None):
        # train score == param, test score == 1 - param (by construction)
        return self.param if self._is_training_data(X) else 1 - self.param
    def _is_training_data(self, X):
        return X is self.X_subset
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""
    def __init__(self, a=0, allow_nd=False):
        self.a = a
        self.allow_nd = allow_nd
    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): lowercase ``y`` below is the module-level
            # fixture, NOT the ``Y`` argument -- the check is against the
            # full label set; using ``Y`` would break on folds that miss
            # a class.  Deliberate coupling with the test fixtures.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            # ``P_sparse`` is the module-level fixture defined below
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self
    def predict(self, T):
        # "prediction" is just the first feature column
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]
    def score(self, X=None, Y=None):
        # score depends only on the parameter ``a``, not on the data
        return 1. / (1 + np.abs(self.a))
    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below.
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
    # Smoke/consistency tests for cross_val_score over a range of mock
    # parameter values, dense/sparse X, multioutput y, lists and 3-d input.
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cross_val_score(clf, X, y2)
        # MockClassifier.score depends only on ``a``, so every fold score
        # must equal the single score value
        assert_array_equal(scores, clf.score(X, y2))
        # test with multioutput y
        multioutput_y = np.column_stack([y2, y2[::-1]])
        scores = cross_val_score(clf, X_sparse, multioutput_y)
        assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
        scores = cross_val_score(clf, X_sparse, y2)
        assert_array_equal(scores, clf.score(X_sparse, y2))
        # test with multioutput y
        scores = cross_val_score(clf, X_sparse, multioutput_y)
        assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cross_val_score(clf, X.tolist(), y2.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cross_val_score(clf, X, y2.tolist())
    # an unknown scoring string must raise
    assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cross_val_score(clf, X_3d, y2)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
    # Check if ValueError (when groups is None) propagates to cross_val_score
    # and cross_val_predict
    # And also check if groups is correctly passed to the cv object
    X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
    clf = SVC(kernel="linear")
    group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
                 GroupShuffleSplit()]
    for cv in group_cvs:
        assert_raise_message(ValueError,
                             "The 'groups' parameter should not be None.",
                             cross_val_score, estimator=clf, X=X, y=y, cv=cv)
        assert_raise_message(ValueError,
                             "The 'groups' parameter should not be None.",
                             cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas not installed: only the mock types are exercised
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        # 3 fold cross val is used so we need atleast 3 samples per class
        X_df, y_ser = InputFeatureType(X), TargetType(y2)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        # CheckingClassifier asserts the input types are preserved per fold
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    # test that cross_val_score works with boolean masks
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    kfold = KFold(5)
    scores_indices = cross_val_score(svm, X, y, cv=kfold)
    kfold = KFold(5)
    cv_masks = []
    for train, test in kfold.split(X, y):
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: append the boolean masks.  The original appended the
        # integer index arrays ``(train, test)``, so the masks were dead
        # code and the mask code path was never exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
    # mask-based CV must give identical scores to index-based CV
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cross_val_score(svm, linear_kernel, y)
    # a precomputed linear kernel must match the built-in linear kernel
    svm = SVC(kernel="linear")
    score_linear = cross_val_score(svm, X, y)
    assert_array_almost_equal(score_precomputed, score_linear)
    # test with callable
    svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
    score_callable = cross_val_score(svm, X, y)
    assert_array_almost_equal(score_precomputed, score_callable)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    # fit_params values (arrays, sparse matrices and plain objects) must be
    # forwarded unchanged to the estimator's fit() in every fold.
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                          shape=(10, 1))
    P_sparse = coo_matrix(np.eye(5))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    # a custom scorer must be invoked exactly once per fold
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # default CV is 3-fold, hence 3 scorer invocations
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    # an object without fit() must raise TypeError
    class BrokenEstimator:
        pass
    assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cross_val_score(clf, iris.data, iris.target,
                                scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cross_val_score(clf, iris.data, iris.target,
                                scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cross_val_score(reg, X, y, cv=5,
                                     scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    # permutation_test_score: real labels give high score / low p-value,
    # permuted (random) labels give low score / high p-value.
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = StratifiedKFold(2)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    # constant groups must not change the result
    score_group, _, pvalue_group = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        groups=np.ones(y.size), random_state=0)
    assert_true(score_group == score)
    assert_true(pvalue_group == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = StratifiedKFold(2)
    score_group, _, pvalue_group = permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", groups=np.ones(y.size), random_state=0)
    assert_true(score_group == score)
    assert_true(pvalue_group == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # (# correct - # wrong) / n, in [-1, 1]
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
                y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    # labels are unrelated to X, so the score must not beat chance
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
    # per-fold precision under three multilabel averaging schemes
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    # cross_val_predict must agree with a naive per-fold fit/predict loop
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = KFold()
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = LeaveOneOut()
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # sparse input: zero out sub-median entries, convert to COO
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))
    # unsupervised estimator (KMeans), no y needed
    preds = cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    # a CV whose test folds do not partition the samples must raise
    class BadCV():
        def split(self, X, y=None, groups=None):
            for i in range(4):
                yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
    # prediction shapes for dense/sparse/multioutput/list/3-d inputs
    iris = load_iris()
    X, y = iris.data, iris.target
    X_sparse = coo_matrix(X)
    multioutput_y = np.column_stack([y, y[::-1]])
    clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> atleast 3 samples per class
    # Smoke test
    predictions = cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (150,))
    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, multioutput_y)
    assert_equal(predictions.shape, (150, 2))
    predictions = cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (150,))
    # test with multioutput y
    predictions = cross_val_predict(clf, X_sparse, multioutput_y)
    assert_array_equal(predictions.shape, (150, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas not installed: only the mock types are exercised
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y2)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
    # sparse fit_params must be sliced and forwarded per fold
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
def test_learning_curve():
n_samples = 30
n_splits = 3
X, y = make_classification(n_samples=n_samples, n_features=1,
n_informative=1, n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=KFold(n_splits=n_splits),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
# Test a custom cv splitter that can iterate only once
with warnings.catch_warnings(record=True) as w:
train_sizes2, train_scores2, test_scores2 = learning_curve(
estimator, X, y,
cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores2, train_scores)
assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    """verbose=1 must report progress on stdout."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockImprovingEstimator(20)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        learning_curve(mock, X, y, cv=3, verbose=1)
    finally:
        # Always restore stdout, even if learning_curve raised.
        captured = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = saved_stdout
    assert("[learning_curve]" in captured)
def test_learning_curve_incremental_learning_not_possible():
    """Requesting incremental learning without partial_fit must raise."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    mock = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, mock, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    """Incremental (partial_fit) learning curve, with and without shuffle."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockIncrementalImprovingEstimator(20)
    for do_shuffle in (False, True):
        sizes, fit_scores, eval_scores = learning_curve(
            mock, X, y, cv=3, exploit_incremental_learning=True,
            train_sizes=np.linspace(0.1, 1.0, 10), shuffle=do_shuffle)
        assert_array_equal(sizes, np.linspace(2, 20, 10))
        assert_array_almost_equal(fit_scores.mean(axis=1),
                                  np.linspace(1.9, 1.0, 10))
        assert_array_almost_equal(eval_scores.mean(axis=1),
                                  np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    """Incremental learning curve must also work with y=None."""
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockIncrementalImprovingEstimator(20)
    sizes, fit_scores, eval_scores = learning_curve(
        mock, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(fit_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(eval_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    """Batch refits and the partial_fit path must produce the same curve."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    sizes = np.linspace(0.2, 1.0, 5)
    clf = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    sizes_inc, tr_inc, te_inc = \
        learning_curve(
            clf, X, y, train_sizes=sizes,
            cv=3, exploit_incremental_learning=True)
    sizes_batch, tr_batch, te_batch = \
        learning_curve(
            clf, X, y, cv=3, train_sizes=sizes,
            exploit_incremental_learning=False)
    assert_array_equal(sizes_inc, sizes_batch)
    assert_array_almost_equal(tr_inc.mean(axis=1), tr_batch.mean(axis=1))
    assert_array_almost_equal(te_inc.mean(axis=1), te_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    """Out-of-range train_sizes (as fractions or counts) must raise."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockImprovingEstimator(20)
    # zero size, zero fraction, fraction > 1, and counts outside [1, 20]
    for bad_sizes in ([0, 1], [0.0, 1.0], [0.1, 1.1], [0, 20], [1, 21]):
        assert_raises(ValueError, learning_curve, mock, X, y, cv=3,
                      train_sizes=bad_sizes)
def test_learning_curve_remove_duplicate_sample_sizes():
    """Duplicate absolute train sizes are dropped with a RuntimeWarning."""
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockImprovingEstimator(2)
    # fractions of 2 samples -> absolute sizes [1, 1, 2] -> deduped [1, 2]
    sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, mock, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    """learning_curve accepts a splitter object (KFold) as cv."""
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockImprovingEstimator(20)
    splitter = KFold(n_splits=3)
    sizes, fit_scores, eval_scores = learning_curve(
        mock, X, y, cv=splitter, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(fit_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(eval_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
    """shuffle=True with grouped CV; regression test for PR #7506."""
    # Following test case was designed this way to verify the code
    # changes made in pull request: #7506.
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
                 [17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18]])
    y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
    groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
    # Splits on these groups fail without shuffle as the first iteration
    # of the learning curve doesn't contain label 4 in the training set.
    estimator = PassiveAggressiveClassifier(shuffle=False)
    cv = GroupKFold(n_splits=2)
    # Batch path: the expected means below are pinned to random_state=2.
    train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
        estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
        groups=groups, shuffle=True, random_state=2)
    assert_array_almost_equal(train_scores_batch.mean(axis=1),
                              np.array([0.75, 0.3, 0.36111111]))
    assert_array_almost_equal(test_scores_batch.mean(axis=1),
                              np.array([0.36111111, 0.25, 0.25]))
    # Without shuffle the first curve step misses class 4 and must raise.
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
                  train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
    # Incremental (partial_fit) path must agree with the batch path.
    train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
        estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
        groups=groups, shuffle=True, random_state=2,
        exploit_incremental_learning=True)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
def test_validation_curve():
    """validation_curve sweeps a parameter and must not emit warnings."""
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_grid = np.linspace(0, 1, 10)
    with warnings.catch_warnings(record=True) as caught:
        tr_scores, te_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_grid, cv=2
        )
    if len(caught) > 0:
        raise RuntimeError("Unexpected warning: %r" % caught[0].message)
    # The mock scores `param` on the train side and 1 - `param` on test.
    assert_array_almost_equal(tr_scores.mean(axis=1), param_grid)
    assert_array_almost_equal(te_scores.mean(axis=1), 1 - param_grid)
def test_validation_curve_cv_splits_consistency():
    """cv.split must be re-invoked for every parameter setting."""
    n_samples = 100
    n_splits = 5
    X, y = make_classification(n_samples=100, random_state=0)
    one_shot_scores = validation_curve(
        SVC(kernel='linear', random_state=0), X, y,
        'C', [0.1, 0.1, 0.2, 0.2],
        cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless `split`
    # is called once per parameter, settings 1 and 2 (same C=0.1) would
    # see different folds and disagree.
    assert_array_almost_equal(
        *np.vsplit(np.hstack(one_shot_scores)[(0, 2, 1, 3), :], 2))
    shuffled_scores = validation_curve(
        SVC(kernel='linear', random_state=0), X, y,
        'C', [0.1, 0.1, 0.2, 0.2],
        cv=KFold(n_splits=n_splits, shuffle=True))
    # Same consistency check with a shuffling KFold: the two C=0.1 runs
    # must use identical train/test folds.
    assert_array_almost_equal(
        *np.vsplit(np.hstack(shuffled_scores)[(0, 2, 1, 3), :], 2))
    plain_scores = validation_curve(
        SVC(kernel='linear', random_state=0), X, y,
        'C', [0.1, 0.1, 0.2, 0.2],
        cv=KFold(n_splits=n_splits))
    # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
    assert_array_almost_equal(np.array(plain_scores), np.array(one_shot_scores))
def test_check_is_permutation():
    """_check_is_permutation must reject missing and duplicate indices."""
    rng = np.random.RandomState(0)
    perm = np.arange(100)
    rng.shuffle(perm)
    assert_true(_check_is_permutation(perm, 100))
    # Removing one element leaves an index unused.
    assert_false(_check_is_permutation(np.delete(perm, 23), 100))
    perm[0] = 23  # introduces a duplicate value
    assert_false(_check_is_permutation(perm, 100))
    # Check if the additional duplicate indices are caught
    assert_false(_check_is_permutation(np.hstack((perm, 0)), 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    dense_preds = cross_val_predict(classif, X, y, cv=10)
    sparse_preds = cross_val_predict(classif, csr_matrix(X), csr_matrix(y),
                                     cv=10).toarray()
    assert_array_almost_equal(sparse_preds, dense_preds)
def check_cross_val_predict_with_method(est):
    """Compare cross_val_predict against a naive manual CV loop for the
    decision_function / predict_proba / predict_log_proba methods of *est*.
    """
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=0)
    classes = len(set(y))
    kfold = KFold()
    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        predictions = cross_val_predict(est, X, y, method=method)
        assert_equal(len(predictions), len(y))
        expected_predictions = np.zeros([len(y), classes])
        func = getattr(est, method)
        # Naive loop (should be same as cross_val_predict):
        # refit on each train split, collect outputs on the test split.
        for train, test in kfold.split(X, y):
            est.fit(X[train], y[train])
            expected_predictions[test] = func(X[test])
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold)
        assert_array_almost_equal(expected_predictions, predictions)
        # Test alternative representations of y: shifted integers and
        # strings must all map to the same prediction matrix.
        predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
                                           cv=kfold)
        assert_array_equal(predictions, predictions_y1)
        predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
                                           cv=kfold)
        assert_array_equal(predictions, predictions_y2)
        predictions_ystr = cross_val_predict(est, X, y.astype('str'),
                                             method=method, cv=kfold)
        assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
    """Run the prediction-method checks on a plain LogisticRegression."""
    check_cross_val_predict_with_method(LogisticRegression())
def test_gridsearchcv_cross_val_predict_with_method():
    """Prediction methods must also work when wrapped in GridSearchCV."""
    searcher = GridSearchCV(LogisticRegression(random_state=42),
                            {'C': [0.1, 1]},
                            cv=2)
    check_cross_val_predict_with_method(searcher)
def get_expected_predictions(X, y, cv, classes, est, method):
    """Naively refit *est* on each CV train split and collect the outputs
    of *method* on the corresponding test split, as cross_val_predict
    is expected to do.

    Parameters
    ----------
    X, y : arrays of samples and integer-encoded targets.
    cv : cross-validation splitter exposing split(X, y).
    classes : int, total number of classes (width of the output matrix).
    est : classifier exposing fit(), classes_ and the requested method.
    method : str, e.g. 'decision_function' or 'predict_proba'.

    Returns
    -------
    ndarray of shape (len(y), classes), each row filled from the fold in
    which that sample appeared in the test split; columns for classes
    absent from a fold stay zero.
    """
    expected_predictions = np.zeros([len(y), classes])
    func = getattr(est, method)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        expected_predictions_ = func(X[test])
        # To avoid 2 dimensional indexing
        exp_pred_test = np.zeros((len(test), classes))
        # BUG FIX: compare strings with `==`, not `is` -- identity only
        # worked via CPython string interning (SyntaxWarning on 3.8+).
        if method == 'decision_function' and len(est.classes_) == 2:
            # Binary decision_function returns a single column; place it
            # in the positive-class slot.
            exp_pred_test[:, est.classes_[-1]] = expected_predictions_
        else:
            exp_pred_test[:, est.classes_] = expected_predictions_
        expected_predictions[test] = exp_pred_test
    return expected_predictions
def test_cross_val_predict_class_subset():
    """cross_val_predict methods when individual folds miss some classes,
    and with unordered labels -- compared against the naive loop."""
    X = np.arange(8).reshape(4, 2)
    y = np.array([0, 0, 1, 2])
    classes = 3
    kfold3 = KFold(n_splits=3)
    kfold4 = KFold(n_splits=4)
    le = LabelEncoder()
    methods = ['decision_function', 'predict_proba', 'predict_log_proba']
    for method in methods:
        est = LogisticRegression()
        # Test with n_splits=3
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold3)
        # Runs a naive loop (should be same as cross_val_predict):
        expected_predictions = get_expected_predictions(X, y, kfold3, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
        # Test with n_splits=4
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold4)
        expected_predictions = get_expected_predictions(X, y, kfold4, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
        # Testing unordered labels
        y = [1, 1, -4, 6]
        predictions = cross_val_predict(est, X, y, method=method,
                                        cv=kfold3)
        # Encode to 0..n_classes-1 so the naive loop can index columns.
        y = le.fit_transform(y)
        expected_predictions = get_expected_predictions(X, y, kfold3, classes,
                                                        est, method)
        assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
    """A scorer returning a scalar np.memmap must be accepted; a
    non-scalar memmap must still raise ValueError."""
    # Ensure a scalar score of memmap type is accepted
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    # Back the memmaps with a small throwaway file (16 bytes = 2 float64).
    tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    tf.write(b'Hello world!!!!!')
    tf.close()
    scores = np.memmap(tf.name, dtype=np.float64)        # 1-d, non-scalar
    score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)  # scalar
    try:
        cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
        # non-scalar should still fail
        assert_raises(ValueError, cross_val_score, clf, X, y,
                      scoring=lambda est, X, y: scores)
    finally:
        # Best effort to release the mmap file handles before deleting the
        # backing file under Windows
        scores, score = None, None
        for _ in range(3):
            try:
                os.unlink(tf.name)
                break
            except WindowsError:
                # File still mapped by the OS; retry after a short pause.
                sleep(1.)
def test_permutation_test_score_pandas():
    # check permutation_test_score doesn't destroy pandas dataframe
    combos = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        combos.append((Series, DataFrame))
    except ImportError:
        pass  # pandas is optional; MockDataFrame alone covers the code path
    for TargetType, InputFeatureType in combos:
        # X dataframe, y series
        iris = load_iris()
        frame = InputFeatureType(iris.data)
        series = TargetType(iris.target)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        permutation_test_score(clf, frame, series)
|
bsd-3-clause
|
B3AU/waveTree
|
examples/cluster/plot_lena_compress.py
|
8
|
2198
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
bit-depth, 512 x 512 sized image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
# NOTE: the module docstring above is printed at runtime; keep it intact.
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import pylab as pl
from sklearn import cluster
# Number of gray levels to quantize the image down to.
n_clusters = 5
np.random.seed(0)
try:
    lena = sp.lena()
except AttributeError:
    # Newer versions of scipy have lena in misc
    from scipy import misc
    lena = misc.lena()
X = lena.reshape((-1, 1))  # We need an (n_sample, n_feature) array
# Cluster the pixel intensities into n_clusters representative values.
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
# (each pixel is replaced by its cluster's center intensity)
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
pl.figure(1, figsize=(3, 2.2))
pl.imshow(lena, cmap=pl.cm.gray, vmin=vmin, vmax=256)
# compressed lena
pl.figure(2, figsize=(3, 2.2))
pl.imshow(lena_compressed, cmap=pl.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
# (baseline: quantize with equally spaced bins instead of k-means)
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1])  # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
pl.figure(3, figsize=(3, 2.2))
pl.imshow(regular_lena, cmap=pl.cm.gray, vmin=vmin, vmax=vmax)
# histogram
# (solid lines: k-means bin edges; dashed: equal-width bin edges)
pl.figure(4, figsize=(3, 2.2))
pl.clf()
pl.axes([.01, .01, .98, .98])
pl.hist(X, bins=256, color='.5', edgecolor='.5')
pl.yticks(())
pl.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
    pl.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
    pl.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
pl.show()
|
bsd-3-clause
|
geledek/mrec
|
mrec/item_similarity/knn.py
|
3
|
3868
|
"""
Brute-force k-nearest neighbour recommenders
intended to provide evaluation baselines.
"""
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from recommender import ItemSimilarityRecommender
class KNNRecommender(ItemSimilarityRecommender):
    """
    Abstract base class for k-nn recommenders. You must supply an
    implementation of the compute_all_similarities() method.

    Parameters
    ==========
    k : int
        The number of nearest neighbouring items to retain
    """

    def __init__(self,k):
        self.k = k

    def compute_similarities(self,dataset,j):
        """Return a weight vector for item j, keeping only its top-k
        most similar items (self-similarity excluded)."""
        interactions = dataset.X
        item_col = dataset.fast_get_col(j)
        scores = self.compute_all_similarities(interactions, item_col)
        scores[j] = 0  # an item must never recommend itself
        # indices of the k highest-scoring items, best first
        top_k = scores.argsort()[::-1][:self.k]
        weights = np.zeros(interactions.shape[1])
        weights[top_k] = scores[top_k]
        return weights

    def compute_all_similarities(self,A,a):
        """
        Compute similarity scores between item vector a
        and all the rows of A.

        Parameters
        ==========
        A : scipy.sparse.csr_matrix
            Matrix of item vectors.
        a : array_like
            The item vector to be compared to each row of A.

        Returns
        =======
        similarities : numpy.ndarray
            Vector of similarity scores.
        """
        pass
class DotProductKNNRecommender(KNNRecommender):
    """
    Similarity between two items is their dot product
    (i.e. cooccurrence count if input data is binary).
    """

    def compute_all_similarities(self,A,a):
        # sparse dot of every item column with a, densified to a 1-d vector
        products = A.T.dot(a)
        return products.toarray().flatten()

    def __str__(self):
        return 'DotProductKNNRecommender(k={0})'.format(self.k)
class CosineKNNRecommender(KNNRecommender):
    """
    Similarity between two items is their cosine distance.
    """

    def compute_all_similarities(self,A,a):
        # cosine of the query column against every column of A
        sims = cosine_similarity(A.T, a.T)
        return sims.flatten()

    def __str__(self):
        return 'CosineKNNRecommender(k={0})'.format(self.k)
if __name__ == '__main__':
    # Demo script (Python 2): exercise the knn recommenders on a tiny
    # MatrixMarket dataset held in memory.
    # use knn models like this:
    import random
    import StringIO
    from mrec import load_fast_sparse_matrix
    random.seed(0)
    print 'loading test data...'
    data = """\
%%MatrixMarket matrix coordinate real general
3 5 9
1 1 1
1 2 1
1 3 1
1 4 1
2 2 1
2 3 1
2 5 1
3 3 1
3 4 1
"""
    print data
    dataset = load_fast_sparse_matrix('mm',StringIO.StringIO(data))
    num_users,num_items = dataset.shape
    model = CosineKNNRecommender(k=2)
    num_samples = 2
    def output(i,j,val):
        # convert back to 1-indexed
        print '{0}\t{1}\t{2:.3f}'.format(i+1,j+1,val)
    print 'computing some item similarities...'
    print 'item\tsim\tweight'
    # if we want we can compute these individually without calling fit()
    for i in random.sample(xrange(num_items),num_samples):
        for j,weight in model.get_similar_items(i,max_similar_items=2,dataset=dataset):
            output(i,j,weight)
    print 'learning entire similarity matrix...'
    # more usually we just call train() on the entire dataset
    model = CosineKNNRecommender(k=2)
    model.fit(dataset)
    print 'making some recommendations...'
    print 'user\trec\tscore'
    for u in random.sample(xrange(num_users),num_samples):
        for i,score in model.recommend_items(dataset.X,u,max_items=10):
            output(u,i,score)
    print 'making batch recommendations...'
    recs = model.batch_recommend_items(dataset.X)
    for u in xrange(num_users):
        for i,score in recs[u]:
            output(u,i,score)
    print 'making range recommendations...'
    # recommend only for users in [start, end)
    for start,end in [(0,2),(2,3)]:
        recs = model.range_recommend_items(dataset.X,start,end)
        for u in xrange(start,end):
            for i,score in recs[u-start]:
                output(u,i,score)
|
bsd-3-clause
|
DarkEnergyScienceCollaboration/Twinkles
|
python/desc/twinkles/analyseICat.py
|
2
|
1865
|
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
def readPhoSimInstanceCatalog(fname,
                              names=['obj', 'SourceID', 'RA', 'DEC', 'MAG_NORM',\
                                     'SED_NAME', 'REDSHIFT', 'GAMMA1',\
                                     'GAMMA2', 'MU', 'DELTA_RA', 'DELTA_DEC',\
                                     'SOURCE_TYPE', 'DUST_REST_NAME',\
                                     'Av', 'Rv', 'Dust_Lab_Name', 'EBV']):
    """
    read the phoSimInstanceCatalog and return the contents

    Parameters
    ----------
    fname : mandatory, string
        filename of the phosim instance catalog
    names : a list of column names matching the number of columns

    Returns
    -------
    A `pandas.DataFrame` with the phosim Instance Catalog with metadata
    accessed as a dictionary through the meta attribute of the return.
    """
    # read the header into a metadata list, and get number of lines to skip
    # for catalog
    metalines = []
    linenum = 0
    with open(fname) as f:
        for line in f:
            if line.startswith('object'):
                # BUG FIX: stop at the first object record instead of
                # `continue`-ing over it.  `pd.read_csv(skiprows=linenum)`
                # skips a *prefix* of the file, so linenum must count only
                # the leading header lines; with the old `continue`, any
                # non-object line appearing after the records was counted
                # too, and an object row was silently skipped instead.
                break
            metalines.append(line)
            linenum += 1
    # process the headers into a metadata list
    meta = metadataFromLines(metalines)
    # read the catalog into a dataframe
    df = pd.read_csv(fname, skiprows=linenum, names=names, sep='\s+')
    df.meta = meta
    return df
def metadataFromLines(lines):
    """
    Process the phosim header lines into a metadata dictionary.

    Parameters
    ----------
    lines : iterable of str
        each of the form "<keyword> <numeric value>"

    Returns
    -------
    dict mapping each keyword to float(value).
    """
    info = [line.split() for line in lines]
    # BUG FIX: `np.float` was a deprecated alias of the builtin `float`
    # and was removed in NumPy 1.24; the builtin is numerically identical.
    meta = {key: float(value) for key, value in info}
    return meta
if __name__ == "__main__":
meta, df = readPhoSimInstanceCatalog('/Users/rbiswas/src/LSST/sims_catUtils/examples/SNOnlyPhoSimCatalog.dat')
print(df.head())
|
mit
|
MMKrell/pyspace
|
pySPACE/resources/dataset_defs/performance_result.py
|
1
|
75741
|
""" Tabular listing data sets, parameters and a huge number of performance metrics
Store and load the performance results of an operation from a csv file,
select subsets of this results or for create various kinds of plots
**Special Static Methods**
:merge_performance_results:
Merge result*.csv files when classification fails or is aborted.
:repair_csv:
Wrapper function for whole csv repair process when classification
fails or is aborted.
"""
from itertools import cycle
try: # import packages for plotting
import pylab
import matplotlib.pyplot
import matplotlib
# uncomment for nice latex output
# pylab.rc('text', usetex=True)
# font = {'family': 'serif',
# 'size': 14}
# pylab.rc('font', **font)
except:
pass
try: # import packages for plotting error bars
import scipy.stats
except:
pass
from collections import defaultdict
import numpy
import os
import glob
# imports for storing
import yaml
import warnings
import logging
# tools
import pySPACE.tools.csv_analysis as csv_analysis
from pySPACE.tools.conversion import python2yaml
# base class
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.tools.filesystem import get_author
# roc imports
import cPickle # load roc points
from operator import itemgetter
class PerformanceResultSummary(BaseDataset):
""" Classification performance results summary
For the identifiers some syntax rules hold to make some distinction:
1. Parameters/Variables start and end with `__`.
These identifiers define the processing differences of the entries.
Altogether the corresponding values build a unique key of each row.
2. Normal metrics start with a Big letter and
continue normally with small letters except AUC.
3. Meta metrics like training metrics, LOO metrics or soft metrics
start with small letters defining the category followed by a
`-` and continue with the detailed metric name.
4. Meta information like chosen optimal parameters can be
separated from metrics and variables using `~~`
at beginning and end of the information name.
This class can load a result tabular (namely the results.csv file) using
the factory method :func:`from_csv`.
Furthermore, the method :func:`project_onto` allows to select a subset of the
result collection where a parameter takes on a certain value.
The class contains various methods for plotting the loaded results.
These functions are used by the analysis operation and by the interactive
analysis GUI.
Mainly result collections are loaded for
:mod:`~pySPACE.missions.operations.comp_analysis`,
:mod:`~pySPACE.missions.operations.analysis` and
as best alternative with the :mod:`~pySPACE.run.gui.performance_results_analysis`.
They can be build e.g. with the :mod:`~pySPACE.missions.nodes.sink.classification_performance_sink` nodes,
with :ref:`MMLF <tutorial_interface_to_mmlf>` or with
:class:`~pySPACE.missions.operations.weka_classification.WekaClassificationOperation`.
The metrics as result of :mod:`~pySPACE.missions.nodes.sink.classification_performance_sink` nodes
are calculated in the :mod:`~pySPACE.resources.dataset_defs.metric` dataset module.
.. todo:: Access in result collection via indexing ndarray with one
dimension for each parameter.
Entries are indexes in list. So the corresponding values
can be accessed very fast.
.. todo:: Faster, memory efficient loading is needed. Pickling or new data
structure?
The class constructor expects the following **arguments**:
:data: A dictionary that contains a mapping from an attribute
(e.g. accuracy) to a list of values taken by this attribute.
An entry is the entirety of all i-th values over all dict-values
:tmp_pathlist:
List of files to be deleted after successful storing
When constructed via `from_multiple_csv` all included csv files
can be deleted after the collection is stored.
Therefore the parameter `delete` has to be active.
(*optional, default:None*)
:delete:
Switch for deleting files in `tmp_pathlist` after collection is stored.
(*optional, default: False*)
:Author: Mario M. Krell (mario.krell@dfki.de)
"""
    def __init__(self, data=None, dataset_md=None, dataset_dir=None,
                 csv_filename=None, **kwargs):
        """Build the summary either from an in-memory dict, a single csv
        file, or by merging all results_* csv files in *dataset_dir*."""
        super(PerformanceResultSummary, self).__init__()
        if csv_filename and not dataset_dir: # csv_filename is expected to be a path
            dataset_dir=""
        self.delete = False
        self.tmp_pathlist = None
        if dataset_md != None:
            self.meta_data.update(dataset_md)
        if data != None:
            # data handed in directly takes precedence over any files
            self.data = data
        elif dataset_dir != None: # load data
            if csv_filename != None:
                # maybe it's not results.csv but it's definitely only one file
                self.data = PerformanceResultSummary.from_csv(os.path.join(dataset_dir,
                                                                           csv_filename))
            elif os.path.isfile(os.path.join(dataset_dir,"results.csv")):
                # delegate to from_csv_method
                csv_file_path = os.path.join(dataset_dir,"results.csv")
                self.data = PerformanceResultSummary.from_csv(csv_file_path)
            else: # multiple csv_files
                # merged files are remembered so store() can delete them later
                self.data, self.tmp_pathlist = \
                    PerformanceResultSummary.from_multiple_csv(dataset_dir)
                self.delete = True
            # update meta data
            try:
                splits = max(map(int,self.data["__Key_Fold__"]))
                runs = max(map(int,self.data["__Key_Run__"]))+1
            except:
                warnings.warn('Splits and runs not available!')
            else:
                self.meta_data.update({"splits": splits, "runs": runs})
        else: # we have a problem
            self._log("Result tabular could not be created - data is missing!",
                      level=logging.CRITICAL)
            warnings.warn("Result tabular could not be created - data is missing!")
            self.data = {}
        # modifier for getting general box plots in Gui
        if not self.data.has_key('None'):
            self.data['None'] = ['All'] * len(self.data.values()[0])
        self.identifiers = self.data.keys()
        # indexed version of the data
        self.data_dict = None
        self.transform()
@staticmethod
def from_csv(csv_file_path):
""" Loading data from the csv file located under *csv_file_path* """
# # pickle loading
# try:
# if csv_file_path.endswith("pickle"):
# f = open(csv_file_path, "rb")
# elif csv_file_path.endswith("csv"):
# f = open(csv_file_path[:-3] + "pickle", 'rb')
# res=cPickle.load(f)
# f.close()
# return res
# except IOError:
# pass
data_dict = csv_analysis.csv2dict(csv_file_path)
PerformanceResultSummary.translate_weka_key_schemes(data_dict)
# # save better csv version
# f = open(csv_file_path[:-3] + "pickle", "wb")
# f.write(cPickle.dumps(res, protocol=2))
# f.close()
return data_dict
@staticmethod
def from_multiple_csv(input_dir):
""" All csv files in the only function parameter 'input_dir' are
combined to just one result collection
Deleting of files will be done in the store method, *after*
the result is stored successfully.
"""
# A list of all result files (one per classification process)
pathlist = glob.glob(os.path.join(input_dir,
"results_*"))
if len(pathlist) == 0:
warnings.warn(
'No files in the format "results_*" found for merging results!')
return
result_dict = None
# For all result files of the WEKA processes or hashed files
for input_file_name in pathlist:
# first occurrence
if result_dict is None:
result_dict = csv_analysis.csv2dict(input_file_name)
PerformanceResultSummary.transfer_Key_Dataset_to_parameters(
result_dict, input_file_name)
else:
result = csv_analysis.csv2dict(input_file_name)
PerformanceResultSummary.transfer_Key_Dataset_to_parameters(
result, input_file_name)
csv_analysis.extend_dict(result_dict,result,
retain_unique_items=True)
PerformanceResultSummary.translate_weka_key_schemes(result_dict)
return (result_dict, pathlist)
def transform(self):
""" Fix format problems like floats in metric columns and tuples instead of column lists """
for key in self.get_metrics():
if not type(self.data[key][0]) == float:
try:
l = [float(value) if not value == "" else 0
for value in self.data[key]]
self.data[key] = l
except:
warnings.warn("Metric %s has entry %s not of type float."%(
key,str(value)
))
for key in self.identifiers:
if not type(self.data[key]) == tuple:
self.data[key] = tuple(self.data[key])
    @staticmethod
    def merge_traces(input_dir):
        """ Merge and store the classification trace files in directory tree

        The collected results are stored in a common file in the *input_dir*.
        """
        import cPickle
        traces = dict()
        long_traces = dict()
        # optimistically also merge long_ traces until one is missing
        save_long_traces = True
        sorted_keys = None
        # save merged files to delete them later
        merged_files = []
        for dir_path, dir_names, files in os.walk(input_dir):
            for filename in files:
                if filename.startswith("trace_sp"):
                    pass
                else:
                    continue
                # assumes layout .../<main_directory>/persistency_runX/<leaf>
                # -- TODO confirm against the operation's output structure
                main_directory = dir_path.split(os.sep)[-3]
                # needed in transfer_Key_Dataset_to_parameters
                temp_key_dict = defaultdict(list)
                # add a temporal Key_Dataset, deleted in next step
                temp_key_dict["Key_Dataset"] = [main_directory]
                # read parameters from key dataset
                PerformanceResultSummary.transfer_Key_Dataset_to_parameters(
                    temp_key_dict,
                    input_file_name=os.path.join(dir_path, filename))
                key_dict = dict([(key,value[0]) for key, value in
                                 temp_key_dict.items()])
                # add run/split identifiers
                split_number = int(filename[8:-7]) # from trace_spX.pickle
                key_dict["__Key_Fold__"] = split_number
                # from persistency_runX
                run_number = int(dir_path.split(os.sep)[-2][15:])
                key_dict["__Key_Run__"] = run_number
                # transfer keys to hashable tuple of values
                # the keys should always be the same
                if sorted_keys is None:
                    sorted_keys = sorted(key_dict.keys())
                    traces["parameter_keys"] = sorted_keys
                    long_traces["parameter_keys"] = sorted_keys
                identifier = []
                for key in sorted_keys:
                    identifier.append(key_dict[key])
                # load the actual classification trace
                trace = cPickle.load(open(dir_path + os.sep + filename, 'rb'))
                traces[tuple(identifier)] = trace
                merged_files.append(dir_path + os.sep + filename)
                if save_long_traces:
                    try:
                        trace = cPickle.load(open(dir_path + os.sep +"long_"+ filename, 'rb'))
                        long_traces[tuple(identifier)] = trace
                        merged_files.append(dir_path + os.sep +"long_"+ filename)
                    except IOError:
                        # one missing long trace disables long merging for good
                        save_long_traces = False
        # clean up
        if sorted_keys is not None:
            # write the merged trace collections, then delete the originals
            name = 'traces.pickle'
            result_file = open(os.path.join(input_dir, name), "wb")
            result_file.write(cPickle.dumps(traces, protocol=2))
            result_file.close()
            if save_long_traces:
                name = 'long_traces.pickle'
                result_file = open(os.path.join(input_dir, name), "wb")
                result_file.write(cPickle.dumps(long_traces, protocol=2))
                result_file.close()
            for temp_file in merged_files:
                os.remove(temp_file)
@staticmethod
def translate_weka_key_schemes(data_dict):
""" Data dict is initialized as 'defaultdict(list)' and
so the append function will work on non existing keys.
"""
if not data_dict.has_key("Key_Scheme"):
return
for i,value in data_dict["Key_scheme"].iter():
# Some special cases
# For these cases we rewrite the value to be meaningful
# Important parts of "Key_Scheme_Options" will be added to "Key_Scheme"
# Furthermore we introduce numerous new variables to benchmark
value = value.split(".")[-1]
if value == "SMO":
options = data_dict["Key_Scheme_options"][i]
options = options.split()
data_dict["__Classifier_Type__"].append(value)
for token in options:
# Search kernel type
if token.count("supportVector") >=1:
kernel_type = token.split(".")[-1]
data_dict["Kernel_Type"].append(kernel_type)
break
# Search complexity
for index, token in enumerate(options):
if token.count("-C") >=1:
complexity = options[index + 1]
data_dict["__Complexity__"].append(complexity)
# Add to value the complexity
value += " C=%s"
break
if kernel_type == 'PolyKernel':
# Search exponent in options of PolyKernel
exponent = options[options.index("-E") + 1]
if "\\" in exponent:
exponent = exponent.split("\\")[0]
#Add Kernel Type and Exponent to value
data_dict["__Kernel_Exponent__"].append(exponent)
if not exponent == "0":
value += " %s Exp=%s" % (kernel_type, exponent)
else:
value += " linear"
# unimportant parameter
data_dict["__Kernel_Gamma__"].append(0.0)
elif kernel_type == 'RBFKernel':
# Search gamma in options of RBFKernel
gamma = options[options.index("-G") + 1]
if "\\" in gamma:
gamma = gamma.split("\\")[0]
data_dict["__Kernel_Gamma__"].append(gamma)
value += " %s G=%s" % (kernel_type, gamma)
# unimportant parameter
data_dict["__Kernel_Exponent__"].append(0.0)
else:
#TODO: Warning: unknown kernel
data_dict["__Kernel_Exponent__"].append(0.0)
data_dict["__Kernel_Gamma__"].append(0.0)
# parameters used additionally in libsvm
data_dict["__Kernel_Offset__"].append(0.0)
data_dict["__Kernel_Weight__"].append(0.0)
# LibSVM works the same way as SMO and comes with WEKA.
# For NodeChainOperations a better version is integrated in C++
# It has more options, especially to weight the classes, to make oversampling unnecessary
# When using nonlinear kernels,
# one should consider the influence of the offset and for polynomial k. the scaling factor gamma.
elif value == "LibSVM":
options = data_dict["Key_Scheme_options"][i]
weight = options.split("-W")[-1]
options = options.split()
for index, token in enumerate(options):
if token.count("-S") >=1:
# 0 -- C-SVC
# 1 -- nu-SVC
# 2 -- one-class SVM
# 3 -- epsilon-SVR
# 4 -- nu-SVR
classifier = options[index + 1]
if classifier == "0":
classifier ="C_CVC"
data_dict["__Classifier_Type__"].append(classifier)
value += " %s" % (classifier)
elif token.count("-K") >=1:
# 0 -- linear: u'*v
# 1 -- polynomial: (gamma*u'*v + coef0)^degree
# 2 -- radial basis function: exp(-gamma*|u-v|^2)
# 3 -- sigmoid: tanh(gamma*u'*v + coef0)
kernel = options[index + 1]
if kernel == "0":
kernel = "linear"
elif kernel == "1":
kernel = "polynomial"
elif kernel == "2":
kernel = "RBF"
elif kernel == "3":
kernel = "sigmoid"
data_dict["__Kernel_Type__"].append(kernel)
value += " %s" % (kernel)
elif token.count("-C") >=1:
complexity = options[index + 1]
data_dict["__Complexity__"].append(complexity)
value += " C=%s" % (complexity)
elif token.count("-D") >=1:
degree = options[index + 1]
data_dict["__Kernel_Exponent__"].append(degree)
if not degree == "0":
value += " Exp=%s" % (degree)
elif token.count("-G") >=1:
gamma = options[index + 1]
data_dict["__Kernel_Gamma__"].append(gamma)
if not gamma == "0.0":
value += " G=%s" % (gamma)
elif token.count("-R") >=1:
coef0 = options[index + 1]
data_dict["__Kernel_Offset__"].append(coef0)
if not coef0 == "0.0":
value += " c0=%s" % (coef0)
elif token.count("W")>=1:
if "\\" in weight:
weight = weight.split("\\\"")[1]
data_dict["__Kernel_Weight__"].append(weight)
if not weight == "1.0 1.0":
value += " W=%s" % (weight)
else:
# TODO: Warning: unknown classifier
# All parameters of the two integrated classifier to make analysis operation compatible with other classifiers
data_dict["__Kernel_Type__"].append(value)
data_dict["__Complexity__"].append(0.0)
data_dict["__Kernel_Exponent__"].append(0.0)
data_dict["__Kernel_Gamma__"].append(0.0)
data_dict["__Kernel_Offset__"].append(0.0)
data_dict["__Kernel_Weight__"].append(0.0)
del data_dict["Key_Scheme"]
## Done
@staticmethod
def merge_performance_results(input_dir, delete_files=False):
"""Merge result*.csv files when classification fails or is aborted.
Use function with the pathname where the csv-files are stored.
E.g., merge_performance_results('/Users/seeland/collections/20100812_11_18_58')
**Parameters**
:input_dir:
Contains a string with the path where csv files are stored.
:delete_files:
controls if the csv-files will be removed after merging has finished
(optional, default: False)
:Author: Mario Krell
:Created: 2011/09/21
"""
collection = PerformanceResultSummary(dataset_dir=input_dir)
collection.delete = delete_files
collection.store(input_dir)
@staticmethod
def repair_csv(path, num_splits=None, default_dict=None, delete_files=True):
"""Wrapper function for whole csv repair process when classification fails
or is aborted.
This function performs merge_performance_results, reporting and reconstruction of missing
conditions, and a final merge. As a result two files are written:
results.csv and repaired_results.csv to the path specified.
**Parameters**
:path:
String containing the path where the classification results are
stored. This path is also used for storing the resulting csv files.
:num_splits:
Number of splits used for classification. If not specified
this information is read out from the csv file of the merge_performance_results
procedure.
(optional, default: None)
:default_dict:
A dictionary specifying default values for missing
conditions. This dictionary can e.g. be constructed using
empty_dict(csv_dict) and subsequent modification, e.g.
default_dict['Metric'].append(0). This parameter is used in
reconstruct_failures.
(optional, default: None)
:delete_files:
Controls if unnecessary files are deleted by merge_performance_results and
check_op_libSVM.
(optional, default: True)
:Author: Mario Krell, Sirko Straube
:Created: 2010/11/09
"""
PerformanceResultSummary.merge_performance_results(path, delete_files=delete_files)
filename= path + '/results.csv'
csv_dict = csv_analysis.csv2dict(filename)
if not num_splits:
num_splits = int(max(csv_dict['__Key_Fold__']))
oplist= csv_analysis.check_op_libSVM(path, delete_file=delete_files)
failures = csv_analysis.report_failures(oplist, num_splits)
final_dict= csv_analysis.reconstruct_failures(csv_dict, failures,
num_splits, default_dict=default_dict)
csv_analysis.dict2csv(path + '/repaired_results.csv', final_dict)
    def store(self, result_dir, name = "results", s_format = "csv", main_metric="Balanced_accuracy"):
        """ Stores this collection in the directory *result_dir*.

        In contrast to *dump* this method stores the collection
        not in a single file but as a whole directory structure with meta
        information etc.

        **Parameters**

            :result_dir: The directory in which the collection will be stored
            :name: The name of the file in which the result file is stored.

                   (*optional, default: 'results'*)
            :s_format: The format in which the actual data sets should be stored.
                       Only 'csv' is supported; anything else is logged as
                       critical and nothing is written.

                       (*optional, default: 'csv'*)
            :main_metric: Name of the metric used for the shortened stored file.
                          If no metric is given, no shortened version is stored.

                          (*optional, default: 'Balanced_accuracy'*)
        """
        author = get_author()
        # Update the meta data
        self.update_meta_data({"type" : "result",
                               "storage_format": s_format,
                               "author" : author})
        # file name in which the operation's results will be stored
        output_file_name = os.path.join(result_dir,name + "." + s_format)
        self._log("\tWriting results to %s ..." % output_file_name)
        if s_format == "csv":
            #Store meta data
            BaseDataset.store_meta_data(result_dir,self.meta_data)
            # drop the artificial 'None' column before writing (no-op if absent)
            self.data.pop("None",False)
            csv_analysis.dict2csv(output_file_name, self.data)
            # Additionally store a shortened "short_<name>.csv" restricted to
            # varying variables, the main metric and confusion matrix counts.
            if main_metric in self.identifiers:
                reduced_data = dict()
                for key in self.get_variables():
                    try:
                        # keep only variables that actually vary over the rows
                        if len(list(set(self.data[key]))) > 1:
                            reduced_data[key] = self.data[key]
                    except TypeError:
                        # unhashable entries: compare YAML representations instead
                        if len(list(set([python2yaml(item) for item in self.data[key]]))) > 1:
                            reduced_data[key] = self.data[key]
                reduced_data[main_metric] = self.data[main_metric]
                metric_list = ["True_positives","True_negatives","False_negatives","False_positives"]
                for metric in [x for x in self.data.keys() if x in metric_list]:
                    reduced_data[metric]=self.data[metric]
                output_file_name = os.path.join(result_dir,"short_"+name + "." + s_format)
                csv_analysis.dict2csv(output_file_name, reduced_data)
        else:
            self._log("The format %s is not supported!"%s_format, level=logging.CRITICAL)
            return
        # Remove the merged per-process result files if requested earlier.
        if self.delete:
            for temp_result_file in self.tmp_pathlist:
                os.remove(temp_result_file)
    @staticmethod
    def transfer_Key_Dataset_to_parameters(data_dict, input_file_name=None):
        """ Decompose the Weka 'Key_Dataset' column into parameter columns

        Each 'Key_Dataset' entry either encodes '{key#value}' components
        separated by '}{' (parsed in the else branch), or — when no '}{' is
        present — the dataset hash is recovered from *input_file_name* and
        the parameters are read from the corresponding metadata.yaml.
        'Key_Dataset' is removed afterwards and the dict returned.
        """
        if not data_dict.has_key("Key_Dataset"):
            return data_dict
        for key_dataset in data_dict["Key_Dataset"]:
            if not "}{" in key_dataset and not input_file_name is None:
                # Derive the hash from the csv file name: the part after
                # 'test_' (or 'train_') without the 4-character extension.
                hash_name = input_file_name.split("test_")
                if len(hash_name) > 1:
                    hash_name = hash_name[-1][:-4]
                else:
                    hash_name = input_file_name.split("train_")[-1][:-4]
                # hash_name = input_file_name.split("_")[-1][:-4]
                result_folder_name = os.path.dirname(input_file_name)
                # Parameters come from the per-dataset metadata file.
                # NOTE(review): yaml.load without Loader — unsafe on
                # untrusted metadata files.
                with open(os.path.join(result_folder_name, hash_name, "metadata.yaml")) as metadata_file:
                    metadata = yaml.load(metadata_file)
                parameter_settings = metadata.get("parameter_setting", {})
                hide_parameters = metadata.get("hide_parameters", [])
                # Initialize the new columns on first use only.
                if not "__Dataset__" in data_dict:
                    data_dict["__Dataset__"] = []
                    data_dict["__hash__"] = []
                    for key in parameter_settings:
                        if key not in hide_parameters:
                            data_dict[key] = []
                data_dict["__Dataset__"].append(
                    metadata["input_collection_name"].strip(os.sep).split(
                        os.sep)[-1].strip("'}{").split("}{")[0])
                for key in parameter_settings:
                    if key not in hide_parameters:
                        data_dict[key].append(parameter_settings[key])
                data_dict["__hash__"].append(hash_name.strip("}{"))
            else:
                components = (key_dataset.strip("}{")).split("}{")
                for index, attribute in enumerate(components):
                    if index >= 1:
                        # for compatibility with old data: index 1 might be the
                        # specification file name
                        if index == 1 and not ("#" in attribute):
                            # NOTE(review): because of the ``continue`` below,
                            # this "__Template__" value is never appended to
                            # data_dict — confirm whether dropping it is
                            # intended.
                            attribute_key = "__Template__"
                            attribute_value = attribute
                            continue
                        try:
                            attribute_key, attribute_value = attribute.split("#")
                        except ValueError:
                            warnings.warn("\tValueError when splitting attributes!")
                            print "ValueError in result collection when splitting attributes."
                            continue
                    elif index == 0:
                        # first component is always the dataset name
                        attribute_key = "__Dataset__"
                        attribute_value = attribute
                    data_dict[attribute_key].append(attribute_value)
        del data_dict["Key_Dataset"]
        return data_dict
def project_onto(self, proj_parameter, proj_values):
""" Project result collection onto a subset that fulfills all criteria
Project the result collection onto the rows where the parameter
*proj_parameter* takes on the value *proj_value*.
"""
if type(proj_values) != list:
proj_values = [proj_values]
projected_dict = defaultdict(list)
entries_added = False
for i in range(len(self.data[proj_parameter])):
if self.data[proj_parameter][i] in proj_values:
entries_added = True
for column_key in self.identifiers:
# will leave projection column in place if there are
# still different values for this parameter
if column_key == proj_parameter:
if len(proj_values) == 1: continue
projected_dict[column_key].append(self.data[column_key][i])
# If the projected_dict is empty we continue
if not entries_added:
return
return PerformanceResultSummary(projected_dict)
def get_gui_metrics(self):
""" Returns the columns in data that correspond to metrics for visualization.
This excludes 'Key_Dataset' and gui variables of the tabular,
"""
metrics = []
variables = self.get_gui_variables()
for key in self.identifiers:
if not(key in variables) or key in ['Key_Dataset']:
metrics.append(key)
# Add variables, that can be interpreted as metrics
if type(key) is str and \
(key in ['__Num_Retained_Features__',
'__Num_Eliminated_Sensors__']
or key.startswith("~") or "Pon" in key) \
and len(list(set(self.data[key]))) > 1 \
and not (key in metrics):
metrics.append(key)
return metrics
def get_metrics(self):
""" Returns the columns in data that are real metrics """
metrics = []
variables = self.get_variables()
for key in self.identifiers:
if not type(key) is str:
warnings.warn("Wrong key (%s) provided with type %s."
% (str(key), type(key)))
elif not(key in variables) and not key.startswith("~") and \
not key == "None":
metrics.append(key)
# Add variables, that can be interpreted as metrics
if key in ['__Num_Retained_Features__',
'__Num_Eliminated_Sensors__']:
metrics.append(key)
return metrics
def get_gui_variables(self):
""" Returns the column headings that correspond to 'variables' to be visualized in the Gui """
variables = []
for key in self.identifiers:
if not type(key) is str:
warnings.warn("Wrong key (%s) provided with type %s."
% (str(key), type(key)))
# special key to get box plots without parameter dependencies
elif (key == 'None' or (
(key in ['__Dataset__', 'Kernel_Weight', 'Complexity',
'Kernel_Exponent', 'Kernel_Gamma', 'Kernel_Offset',
'Classifier_Type', 'Kernel_Type', 'Key_Scheme',
'Key_Run', 'Key_Fold', 'Run', 'Split']
or key.startswith('__')
or key.startswith('~'))
and len(list(set(self.data[key]))) > 1)):
variables.append(key)
return variables
def get_variables(self):
""" Variables are marked with '__'
Everything else are metrics, meta metrics, or processing information.
"""
variables = []
for key in self.identifiers:
if not type(key) is str:
warnings.warn("Wrong key (%s) provided with type %s."
% (str(key), type(key)))
elif key.startswith('__'):
variables.append(key)
return variables
def get_parameter_values(self, parameter):
""" Returns the values that *parameter* takes on in the data """
return set(self.data[parameter])
def get_nominal_parameters(self, parameters):
""" Returns a generator over the nominal parameters in *parameters*
.. note:: Nearly same code as in *get_numeric_parameters*.
Changes in this method should be done also to this method.
"""
for parameter in parameters:
try:
# Try to create a float of the first value of the parameter
[float(value) for value in self.data[parameter]]
# No exception and enough entities thus a numeric attribute
if len(set(self.data[parameter])) >= 5:
continue
else:
yield parameter
except ValueError:
# This is not a numeric parameter, treat it as nominal
yield parameter
except KeyError:
# This exception should inform the user about wrong parameters
# in his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" is not contained in the PerformanceResultSummary')
except IndexError:
# This exception informs the user about wrong parameters in
# his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" has no values.')
def get_numeric_parameters(self, parameters):
""" Returns a generator over the numeric parameters in *parameters*
.. note:: Nearly same code as in *get_nominal_parameters*.
Changes in this method should be done also to this method.
"""
for parameter in parameters:
try:
# Try to create a float of the first value of the parameter
float(self.data[parameter][0])
# No exception and enough entities thus a numeric attribute
if len(set(self.data[parameter]))>=5:
yield parameter
else:
continue
except ValueError:
# This is not a numeric parameter, treat it as nominal
continue
except KeyError:
#"This exception should inform the user about wrong parameters
# in his YAML file."
import warnings
warnings.warn('The parameter "' + parameter
+ '" is not contained in the PerformanceResultSummary')
except IndexError:
#This exception informs the user about wrong parameters in
# his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" has no values.')
def dict2tuple(self,dictionary):
""" Return dictionary values sorted by key names """
keys=sorted(dictionary.keys())
l=[]
for key in keys:
l.append(dictionary[key])
return tuple(l)
def get_indexed_data(self):
""" Take the variables and create a dictionary with variable entry tuples as keys """
# index keys
self.variables = sorted(self.get_variables())
# other keys
keys = [key for key in self.identifiers if not key in self.variables]
# final dictionary
data_dict = {}
for i in range(len(self.data[self.variables[0]])):
var_dict = {}
perf_dict = {}
# read out variable values
for variable in self.variables:
value = self.data[variable][i]
var_dict[variable] = value
perf_dict[variable] = value
# read out the rest
for key in keys:
perf_dict[key] = self.data[key][i]
# save it into dictionary by mapping values to tuple as key/index
data_dict[self.dict2tuple(var_dict)] = perf_dict
return data_dict
def get_performance_entry(self, search_dict):
""" Get the line in the data, which corresponds to the `search_dict` """
search_tuple = self.dict2tuple(search_dict)
if self.data_dict is None:
self.data_dict = self.get_indexed_data()
return self.data_dict.get(search_tuple,None)
    def plot_numeric(self, axes, x_key, y_key, conditions=[]):
        """ Creates a plot of the y_key for the given numeric parameter x_key.

        A function that allows to create a plot that visualizes the effect
        of differing one variable onto a second one (e.g. the effect of
        differing the number of features onto the accuracy).

        **Expected arguments**

            :axes: The axes into which the plot is written
            :x_key: The key of the dictionary whose values should be used as
                    values for the x-axis (the independent variable)
            :y_key: The key of the dictionary whose values should be used as
                    values for the y-axis, i.e. the dependent variable.
                    A 'w1#k1#w2#k2' string denotes the weighted cost
                    function w1*data[k1] + w2*data[k2].
            :conditions: A list of functions that need to be fulfilled in order to
                         use one entry in the plot. Each function has to take two
                         arguments: The data dictionary containing all entries and
                         the index of the entry that should be checked. Each condition
                         must return a boolean value.

                         NOTE: mutable default argument; harmless here since
                         the list is never modified.

        Returns the figure name '<y_key>_<x_key>'.
        """
        colors = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k', 'brown', 'gray'])
        linestyles = cycle(['-']*9 + ['--']*9 + [':']*9 + ['-.']*9)
        # x value -> list of y values, grouped per curve key
        curves = defaultdict(lambda : defaultdict(list))
        for i in range(len(self.data[x_key])):
            # Check is this particular entry should be used
            if not all(condition(self.data, i) for condition in conditions):
                continue
            # Get the value of the independent variable for this entry
            x_value = float(self.data[x_key][i])
            # Attach the corresponding value to the respective partition
            if y_key.count("#") == 0:
                y_value = float(self.data[y_key][i])
            else: # A weighted cost function
                weight1, value_key1, weight2, value_key2 = y_key.split("#")
                y_value = float(weight1) * float(self.data[value_key1][i]) \
                            + float(weight2) * float(self.data[value_key2][i])
            curves[y_key][x_value].append(y_value)
        for y_key, curve in curves.iteritems():
            curve_x = []
            curve_y = []
            # sort by x so the error-bar line is drawn left to right
            for x_value, y_values in sorted(curve.iteritems()):
                curve_x.append(x_value)
                curve_y.append(y_values)
            # Create an error bar plot: mean with standard error per x value
            axes.errorbar(curve_x, map(numpy.mean, curve_y),
                          yerr=map(scipy.stats.sem, curve_y),
                          elinewidth = 1, capsize = 5, label=y_key,
                          color = colors.next(), linestyle=linestyles.next())
        axes.set_xlabel(x_key)
        if y_key.count("#") == 0:
            axes.set_ylabel(y_key.strip("_").replace("_", " "))
        else:
            axes.set_ylabel("%s*%s+%s*%s" % tuple(y_key.split("#")))
        # display nearly invisible lines in the back for better orientation
        axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                        alpha=0.5)
        axes.set_axisbelow(True)
        # Return figure name
        return "_".join([y_key, x_key])
    def plot_numeric_vs_numeric(self, axes, axis_keys, value_key, scatter=True):
        """ Contour plot of the value_key for the two numeric parameters axis_keys.

        A function that allows to create a contour plot that visualizes the effect
        of differing two variables on a third one (e.g. the effect of differing
        the lower and upper cutoff frequency of a bandpass filter onto
        the accuracy).

        **Parameters**

            :axes: The axes into which the plot is written
            :axis_keys: The two keys of the dictionary that are assumed to have \
                        an effect on a third variable (the dependent variable)
            :value_key: The dependent variables whose values determine the \
                        color of the contour plot
            :scatter: Plot nearly invisible dots behind the real data points.

                      (*optional, default: True*)

        Returns the figure name, or None if either axis only takes a
        single value (no 2-d contour possible).
        """
        assert(len(axis_keys) == 2)

        # Determine a sorted list of the values taken on by the axis keys:
        x_values = set([float(value) for value in self.data[axis_keys[0]]])
        x_values = sorted(list(x_values))
        y_values = set([float(value) for value in self.data[axis_keys[1]]])
        y_values = sorted(list(y_values))
        #Done
        # We cannot create a contour plot if one dimension is only 1d
        if len(x_values) == 1 or len(y_values) == 1:
            return
        # Create a meshgrid of them
        X, Y = pylab.meshgrid(x_values, y_values)
        # Determine the average value taken on by the dependent variable
        # for each combination of the the two source variables
        Z = numpy.zeros((len(x_values),len(y_values)))
        counter = numpy.zeros((len(x_values),len(y_values)))
        for i in range(len(self.data[axis_keys[0]])):
            x_value = float(self.data[axis_keys[0]][i])
            y_value = float(self.data[axis_keys[1]][i])
            if value_key.count("#") == 0:
                performance_value = float(self.data[value_key][i])
            else: # A weighted cost function
                weight1, value_key1, weight2, value_key2 = value_key.split("#")
                performance_value = float(weight1) * float(self.data[value_key1][i]) \
                                        + float(weight2) * float(self.data[value_key2][i])
            Z[x_values.index(x_value), y_values.index(y_value)] += performance_value
            counter[x_values.index(x_value), y_values.index(y_value)] += 1
        # NOTE: parameter combinations that never occur divide 0/0 here and
        # end up as NaN cells in the contour plot.
        Z = Z / counter
        # Create the plot for this specific dependent variable
        cf = axes.contourf(X, Y, Z.T, 100)
        axes.get_figure().colorbar(cf)
        if scatter:
            axes.scatter(X,Y,marker='.',facecolors='None', alpha=0.1)
        axes.set_xlabel(axis_keys[0].strip("_").replace("_", " "))
        axes.set_ylabel(axis_keys[1].strip("_").replace("_", " "))
        axes.set_xlim(min(x_values), max(x_values))
        axes.set_ylim(min(y_values), max(y_values))
        if value_key.count("#") == 0:
            axes.set_title(value_key.strip("_").replace("_", " "))
        else:
            axes.set_title("%s*%s+%s*%s" % tuple(value_key.split("#")))
        # Return figure name
        return "%s_%s_vs_%s" % (value_key, axis_keys[0].strip("_").replace("_", " "), axis_keys[1].strip("_").replace("_", " "))
    def plot_numeric_vs_nominal(self, axes, numeric_key, nominal_key, value_key,
                                dependent_BA_plot=False, relative_plot=False, minimal=False):
        """ Plot for comparison of several different values of a nominal parameter with mean and standard error

        A function that allows to create a plot that visualizes the effect of
        varying one numeric parameter onto the performance for several
        different values of a nominal parameter.

        **Parameters**

            :axes: The axes into which the plot is written
            :numeric_key: The numeric parameter whose effect (together with the
                          nominal parameter) onto the dependent variable should
                          be investigated.
            :nominal_key: The nominal parameter whose effect (together with the
                          numeric parameter) onto the dependent variable should
                          be investigated.
            :value_key: The dependent variable whose values determine the
                        color of the contour plot
            :dependent_BA_plot:
                If the `value_key` contains *time* or *iterations*
                and this variable is True, the value is replaced by
                *Balanced_Accuracy* and the `nominal_key` by the `value_key`.
                The point in the graph are constructed by averaging
                over the old `nominal parameter`.

                (*optional, default: False*)

            :relative_plot:
                The first `nominal_key` value (alphabetic ordering) is chosen and the other
                parameters are averaged relative to this parameter, to show
                by which factor they change the metric.
                Therefore a clean tabular is needed with only relevant
                variables correctly named and where each parameter is compared
                with the other. Relative plots and dependent_BA plots can be combined.

                (*optional, default: False*)

            :minimal:
                Do not plot labels and legends.

                (*optional, default: False*)
        """
        colors = cycle(['b','r', 'g', 'c', 'm', 'y', 'k', 'brown', 'gray','orange'])
        linestyles = cycle(['-']*10 + ['-.']*10 + [':']*10 + ['--']*10)
        # guard value for divisions by a zero reference
        eps=10**(-6)
        # Determine a mapping from the value of the nominal value to a mapping
        # from the value of the numeric value to the achieved performance:
        # nominal -> (numeric -> performance)
        if (("time" in value_key) or ("Time" in value_key) or ("iterations" in value_key)) and dependent_BA_plot:
            dependent_key = value_key
            value_key = "Balanced_accuracy"
        else:
            dependent_key = False
            # relative plots only make sense together with a dependent key
            relative_plot = False
        if relative_plot:
            # alphabetically first nominal value serves as the reference
            rel_par = sorted(list(set(self.data[nominal_key])))[0]
            rel_vars = self.get_variables()
        curves = defaultdict(lambda: defaultdict(list))
        for i in range(len(self.data[nominal_key])):
            curve_key = self.data[nominal_key][i]
            parameter_value = float(self.data[numeric_key][i])
            if value_key.count("#") == 0:
                performance_value = float(self.data[value_key][i])
            else: # A weighted cost function
                weight1, value_key1, weight2, value_key2 = value_key.split("#")
                performance_value = \
                    float(weight1) * float(self.data[value_key1][i]) \
                    + float(weight2) * float(self.data[value_key2][i])
            if relative_plot:
                if curve_key == rel_par:
                    # the reference curve is constant 1 by construction
                    factor = 1
                    performance_value = 1
                    if dependent_key:
                        dependent_factor = self.data[dependent_key][i]
                else:
                    # look up the matching reference row (same variable
                    # values, but the reference nominal value)
                    rel_vars_dict = dict()
                    for var in rel_vars:
                        rel_vars_dict[var] = self.data[var][i]
                    rel_vars_dict[nominal_key] = rel_par
                    rel_data = self.get_performance_entry(rel_vars_dict)
                    if value_key.count("#") == 0:
                        try:
                            factor = float(rel_data[value_key])
                        except TypeError,e:
                            # rel_data is None: no reference row was found
                            print rel_data
                            print value_key
                            print rel_vars_dict
                            print rel_vars_dict.keys()
                            raise(e)
                    else: # A weighted cost function
                        weight1, value_key1, weight2, value_key2 = value_key.split("#")
                        factor = float(weight1) * float(rel_data[value_key1]) \
                            + float(weight2) * float(rel_data[value_key2])
                    dependent_factor = rel_data.get(dependent_key,1)
                    # replace zero references by eps to avoid division by zero
                    if dependent_factor == 0:
                        dependent_factor = eps
                        warnings.warn("Dependent key %s got zero value in reference %s."%(
                            str(dependent_key),rel_par
                        ))
                    if factor == 0:
                        factor = eps
                        warnings.warn("Value key %s got zero value in reference %s."%(
                            str(value_key),rel_par
                        ))
            else:
                factor = 1
                dependent_factor = 1
            if not dependent_key:
                curves[curve_key][parameter_value].append(performance_value/factor)
            else:
                # keep (metric, dependent value) pairs for later averaging
                curves[curve_key][parameter_value].append((performance_value/factor,float(self.data[dependent_key][i])/float(dependent_factor)))
        # Iterate over all values of the nominal parameter and create one curve
        # in the plot showing the mapping from numeric parameter to performance
        # for this particular value of the nominal parameter
        for curve_key, curve in sorted(curves.iteritems()):
            curve_key = curve_key.strip("_").replace("_", " ")
            x_values = []
            y_values = []
            y_errs = []
            x_errs = []
            for x_value, y_value in sorted(curve.iteritems()):
                if not dependent_key:
                    x_values.append(x_value)
                    # Plot the mean of all values of the performance for this
                    # particular combination of nominal and numeric parameter
                    y_values.append(pylab.mean(y_value))
                    y_errs.append(scipy.stats.sem(y_value))
                    x_errs = None
                else:
                    # calculate mean and standard deviation
                    # of metric and dependent parameter values and
                    # use the dependent parameter as x_value
                    # and the metric as y_value
                    mean = numpy.mean(y_value,axis=0)
                    metric_mean = mean[0]
                    time_mean = mean[1]
                    sem = scipy.stats.sem(y_value,axis=0)
                    metric_sem = sem[0]
                    time_sem = sem[1]
                    x_values.append(time_mean)
                    y_values.append(metric_mean)
                    x_errs.append(time_sem)
                    y_errs.append(metric_sem)
            # few points: thick markers (and optional labels);
            # many points: thin lines without caps
            if len(x_values)<101:
                if minimal:
                    axes.errorbar(
                        x_values, y_values, xerr = x_errs, yerr=y_errs,
                        # label=curve_key,
                        color=colors.next(), linestyle=linestyles.next(),
                        # lw=2, elinewidth=0.8, capsize=3,marker='x')
                        lw=4, elinewidth=0.8, capsize=3,marker='x')
                else:
                    axes.errorbar(
                        x_values, y_values, xerr = x_errs, yerr=y_errs,
                        label=curve_key,
                        color=colors.next(), linestyle=linestyles.next(),
                        lw=2, elinewidth=0.8, capsize=3,marker='x')
            else:
                axes.errorbar(x_values, y_values, xerr = x_errs, yerr=y_errs,
                              label=curve_key,
                              color = colors.next(), linestyle=linestyles.next(),
                              lw=1, elinewidth=0.04,capsize=1)
        # adapt axis titles to the transformations performed above
        if dependent_key:
            numeric_key = dependent_key.strip("_") + " averaged dependent on " + numeric_key.strip("_")
        if relative_plot:
            value_key = value_key.strip("_")+" relative to "+ rel_par
        if minimal:
            axes.get_xaxis().set_visible(False)
            axes.get_yaxis().set_visible(False)
        else:
            axes.set_xlabel(numeric_key.strip("_").replace("_", " "))
            if value_key.count("#") == 0:
                axes.set_ylabel(value_key.strip("_").replace("_", " "))
            else:
                axes.set_ylabel("%s*%s+%s*%s" % tuple(value_key.split("#")))
            # display nearly invisible lines in the back for better orientation
            axes.yaxis.grid(True, linestyle='-', which='major',
                            color='lightgrey', alpha=0.5)
            axes.set_axisbelow(True)
            # NOTE: the first FontProperties assignment is immediately
            # overwritten; only size='small' takes effect.
            prop = matplotlib.font_manager.FontProperties(size='xx-small')
            prop = matplotlib.font_manager.FontProperties(size='small')
            if not nominal_key=="None":
                lg=axes.legend(prop=prop, loc=0,fancybox=True,title=nominal_key.strip("_").replace("_", " "))
                lg.get_frame().set_facecolor('0.90')
                lg.get_frame().set_alpha(.3)
            # axes.set_xscale('log')
        # Return figure name
        return "%s_%s_vs_%s" % (value_key, nominal_key, numeric_key)
    def plot_nominal(self, axes, x_key, y_key):
        """ Creates a boxplot of the y_key for the given nominal parameter x_key.

        A function that allows to create a plot that visualizes the effect
        of differing one nominal variable onto a second one (e.g. the effect of
        differing the classifier onto the accuracy).

        **Expected arguments**

            :axes: The axes into which the plot is written
            :x_key: The key of the dictionary whose values should be used as
                    values for the x-axis (the independent variables)
            :y_key: The key of the dictionary whose values should be used as
                    values for the y-axis, i.e. the dependent variable.
                    A 'w1#k1#w2#k2' string denotes the weighted cost
                    function w1*data[k1] + w2*data[k2].

        Returns the figure name '<y_key>_<x_key>'.
        """
        # Create the plot for this specific dependent variable:
        # collect all y values per nominal x value
        values = defaultdict(list)
        for i in range(len(self.data[x_key])):
            parameter_value = self.data[x_key][i]
            if y_key.count("#") == 0:
                performance_value = float(self.data[y_key][i])
            else: # A weighted cost function
                weight1, y_key1, weight2, y_key2 = y_key.split("#")
                performance_value = float(weight1) * float(self.data[y_key1][i]) \
                                        + float(weight2) * float(self.data[y_key2][i])
            values[parameter_value].append(performance_value)
        # one (label, values) pair per box, reverse-alphabetical order
        values = sorted(values.items(), reverse=True)
        # the bottom of the subplots of the figure (room for rotated labels)
        axes.figure.subplots_adjust(bottom = 0.3)
        axes.boxplot(map(lambda x: x[1], values))
        axes.set_xticklabels(map(lambda x: x[0], values))
        matplotlib.pyplot.setp(axes.get_xticklabels(), rotation=-90)
        matplotlib.pyplot.setp(axes.get_xticklabels(), size='small')
        axes.set_xlabel(x_key.replace("_", " "))
        # display nearly invisible lines in the back for better orientation
        axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                        alpha=0.5)
        axes.set_axisbelow(True)
        if y_key.count("#") == 0:
            axes.set_ylabel(y_key.replace("_", " "))
        else:
            axes.set_ylabel("%s*%s+%s*%s" % tuple(y_key.split("#")))
        # Return figure name
        return "%s_%s" % (y_key, x_key)
def plot_nominal_vs_nominal(self, axes, nominal_key1, nominal_key2, value_key):
    """ Plot comparison of several different values of two nominal parameters

    A function that allows to create a plot that visualizes the effect of
    varying one nominal parameter onto the performance for several
    different values of another nominal parameter.

    **Expected arguments**

      :axes: The axes into which the plot is written
      :nominal_key1: The name of the first nominal parameter whose effect
                     shall be investigated. This parameter determines the
                     x-axis.
      :nominal_key2: The second nominal parameter. This parameter will be
                     represented by a different color per value.
      :value_key: The name of the dependent variable whose values
                  determines the y-values in the plot. May also be a
                  weighted cost function "w1#key1#w2#key2".

    Returns the figure name "<value_key>_<nominal_key1>_vs_<nominal_key2>".

    NOTE: Python 2 code (`iteritems`, list-returning `zip`/`map`/`range`).
    """
    from matplotlib.patches import Polygon, Rectangle
    # boxColors = ['b','r', 'g', 'c', 'm', 'y', 'k', 'brown', 'gray']
    boxColors = ['steelblue', 'burlywood', 'crimson', 'olive', 'cadetblue',
                 'cornflowerblue', 'darkgray', 'darkolivegreen',
                 'goldenrod', 'lightcoral', 'lightsalmon', 'lightseagreen',
                 'lightskyblue', 'lightslategray', 'mediumseagreen',
                 'mediumturquoise', 'mediumvioletred', 'navy', 'orange',
                 'tan', 'teal', 'yellowgreen']
    # Gathering of the data: plot_data[nom1_value][nom2_value] -> list of
    # performance values observed for that parameter combination.
    plot_data = defaultdict(lambda: defaultdict(list))
    for i in range(len(self.data[nominal_key2])):
        nom1_key = self.data[nominal_key1][i]
        nom2_key = self.data[nominal_key2][i]
        if value_key.count("#") == 0:
            performance_value = float(self.data[value_key][i])
        else:  # A weighted cost function encoded as "w1#key1#w2#key2"
            weight1, value_key1, weight2, value_key2 = value_key.split("#")
            performance_value = \
                float(weight1) * float(self.data[value_key1][i]) \
                + float(weight2) * float(self.data[value_key2][i])
        plot_data[nom1_key][nom2_key].append(performance_value)
    # Prepare data for boxplots: one flat list of per-combination value
    # lists, ordered by descending nom1 value and ascending nom2 value.
    box_data = []
    nom1_keys = []
    for nom1_key, curve in sorted(plot_data.iteritems(), reverse=True):
        x_values = []
        y_values = []
        nom1_keys.append(nom1_key)
        for x_value, y_values in sorted(curve.iteritems()):
            box_data.append(y_values)
    # Make sure we always have enough colors available
    # NOTE(review): assumes every nom1 value has the same set of nom2
    # values; nom2_keys is taken from the last nom1_key only -- confirm.
    nom2_keys = sorted(plot_data[nom1_key].keys())
    while len(nom2_keys) > len(boxColors):
        boxColors += boxColors
    # the bottom of the subplots of the figure (room for rotated labels)
    axes.figure.subplots_adjust(bottom=0.3)
    # position the boxes in the range of +-0.25 around {1,2,3,...}
    box_positions = []
    for i in range(len(nom1_keys)):
        if len(nom2_keys) > 1:
            box_positions.extend([i+1 - .25 + a*.5/(len(nom2_keys)-1)
                                  for a in range(len(nom2_keys))])
        else:
            box_positions.extend([i+1])
    # actual plotting; width of the boxes:
    w = .5 if len(nom2_keys) == 1 else .35/(len(nom2_keys)-1)
    bp = axes.boxplot(box_data, positions=box_positions, widths=w)
    # design of boxplot components
    matplotlib.pyplot.setp(bp['boxes'], color='black')
    matplotlib.pyplot.setp(bp['whiskers'], color='black')
    matplotlib.pyplot.setp(bp['fliers'], color='grey', marker='+', mew=1.5)
    # use the nom1 keys as x-labels
    axes.set_xticks([i+1 for i in range(len(nom1_keys))], minor=False)
    axes.set_xticklabels(nom1_keys)
    matplotlib.pyplot.setp(axes.get_xticklabels(), rotation=-90)
    matplotlib.pyplot.setp(axes.get_xticklabels(), size='small')
    axes.set_xlabel(nominal_key1.replace("_", " "))
    # Now fill the boxes with desired colors by superposing polygons
    numBoxes = len(nom1_keys)*len(nom2_keys)
    medians = range(numBoxes)  # py2: list, filled by index below
    # get all box coordinates
    for i in range(numBoxes):
        box = bp['boxes'][i]
        boxX = []
        boxY = []
        # first 5 vertices describe the box outline
        for j in range(5):
            boxX.append(box.get_xdata()[j])
            boxY.append(box.get_ydata()[j])
        boxCoords = zip(boxX, boxY)
        # cycle through predefined colors (one color per nom2 value)
        k = i % len(nom2_keys)
        # draw polygon
        boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
        axes.add_patch(boxPolygon)
        # Now draw the median lines back over what we just filled in
        med = bp['medians'][i]
        medianX = []
        medianY = []
        for j in range(2):
            medianX.append(med.get_xdata()[j])
            medianY.append(med.get_ydata()[j])
        axes.plot(medianX, medianY, 'k')
        medians[i] = medianY[0]
    # Draw a legend by hand. As the legend is hand made, it is not easily
    # possible to change it's location or size - sorry for inconvenience.
    # width of the axes and xy-position of legend element #offset
    dxy = [axes.get_xlim()[1]-axes.get_xlim()[0],
           axes.get_ylim()[1]-axes.get_ylim()[0]]
    xy = lambda offset: [axes.get_xlim()[0] + .8*dxy[0],
                         axes.get_ylim()[0] + .03*dxy[1]
                         + .05*dxy[1]*offset]
    # Background rectangle for the legend.
    rect = Rectangle([xy(0)[0]-.02*dxy[0], xy(0)[1]-.02*dxy[1]],
                     .2*dxy[0], (.05*(len(nom2_keys)+1)+0.0175)*dxy[1],
                     facecolor='lightgrey', fill=True, zorder=5)
    # legend "title"
    axes.text(xy(len(nom2_keys))[0]+.03*dxy[0], xy(len(nom2_keys))[1]+.005*dxy[1],
              nominal_key2.strip("_").replace("_", " "),
              color='black', weight='roman', size='small', zorder=6)
    axes.add_patch(rect)
    # rect and text for each nom2-Value (drawn bottom-up, hence reversed
    # color/key indexing)
    for key in range(len(nom2_keys)):
        rect = Rectangle(xy(key), .05*dxy[0], .035*dxy[1],
                         facecolor=boxColors[len(nom2_keys)-key-1], zorder=6)
        axes.add_patch(rect)
        axes.text(xy(key)[0]+.06*dxy[0], xy(key)[1]+.005*dxy[1],
                  nom2_keys[len(nom2_keys)-key-1],
                  color='black', weight='roman', size='small', zorder=6)
    # Add a horizontal grid to the plot
    axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                    alpha=0.5)
    axes.set_axisbelow(True)
    if value_key.count("#") == 0:
        axes.set_ylabel(value_key.strip("_").replace("_", " "))
    else:
        axes.set_ylabel("%s*%s+%s*%s" % tuple(value_key.split("#")))
    # display nearly invisible lines in the back for better orientation
    axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                    alpha=0.5)
    axes.set_axisbelow(True)
    # Return figure name
    return "%s_%s_vs_%s" % (value_key, nominal_key1, nominal_key2)
def plot_histogram(self, axes, metric, numeric_parameters, nominal_parameters,
                   average_runs = True):
    """ Plots a histogram of the values the given metric takes on in data

    Plots histogram for *metric* in which each parameter combination from
    *numeric_parameters* and *nominal_parameters* corresponds
    to one value (if *average_runs* == True) or each run corresponds
    to one value (if *average_runs* == False).

    The plot is written into *axes*. Returns the figure name
    "<metric>_histogram".

    NOTE: Python 2 code (`map` returns a list, `dict.itervalues`); also
    relies on module-level `pylab` for the current y-limit.
    """
    if average_runs == False:
        # One histogram entry per run: just take the raw metric column.
        metric_values = map(float, self.data[metric])
    else:
        # Merge all parameters in one list
        parameters = list(numeric_parameters)
        parameters.extend(nominal_parameters)
        # Sort metric values according to the parameterization for the
        # specific value
        all_values = defaultdict(list)
        for i in range(len(self.data[metric])):
            # Key = tuple of this row's values for every parameter;
            # all runs with identical parameterization share one key.
            key = tuple(self.data[parameter][i] for parameter in parameters)
            all_values[key].append(float(self.data[metric][i]))
        # Combine the mean value of the metric for each parameter
        # combination
        metric_values = [numpy.mean(value)
                         for value in all_values.itervalues()]
    # Plot and store the histogram
    axes.hist(metric_values, histtype='stepfilled', align='left')
    axes.set_ylim((0, pylab.ylim()[1]))
    axes.set_xlabel(metric if average_runs == False
                    else "Mean %s" % metric)
    axes.set_ylabel('Occurrences')
    # Return figure name
    return "%s_histogram" % metric
###############################################################################
class ROCCurves(object):
    """ Class for plotting ROC curves

    Loads pickled ROC point lists from a persistency directory tree
    (see _load_all_curves) and renders averaged or raw ROC curves.

    NOTE: Python 2 code (`iterator.next()`, `dict.iteritems`, `cPickle`).
    """

    def __init__(self, base_path):
        # All (parametrization, roc_curve) pairs found below *base_path*.
        self.roc_curves = self._load_all_curves(base_path)
        # Endless color cycle so each curve/parameter value gets a color.
        self.colors = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k', 'brown', 'gray'])

    def is_empty(self):
        """ Return whether there are no loaded ROC curves """
        return len(self.roc_curves) == 0

    def plot(self, axis, selected_variable, projection_parameter, fpcost=1.0,
             fncost=1.0, collection=None):
        """Plot mean ROC curves (one per value of *selected_variable*).

        Curves are first restricted to parametrizations matching
        *projection_parameter*, grouped by the value of
        *selected_variable* ("Global" if None/absent), interpolated on a
        common FPR grid, averaged, and drawn with SEM error bars plus a
        centroid marker on the identity axis. *fpcost*/*fncost* control
        the iso-cost grid lines drawn in the background.
        """
        # Draw cost grid into the background
        for cost in numpy.linspace(0.0, fpcost+fncost, 25):
            axis.plot([0.0, 1.0], [1-cost/fncost, 1-(cost-fpcost)/fncost],
                      c='gray', lw=0.5)
        # # If we do not average:
        # if selected_variable == None:
        #     # Delegate to plot_all method
        #     return self.plot_all(axis, projection_parameter, collection)
        # Draw an additional "axis" (the identity) to show skew/centroid of
        # ROC curves
        axis.plot([0.0, 1.0], [0.0, 1.0], c='k', lw=2)
        for k in numpy.linspace(0.0, 1.0, 11):
            axis.plot([k+0.01, k-0.01], [k-0.01, k+0.01], c='k', lw=1)
        # Create a color dict (lazily assigns a fresh color per new key)
        color_dict = defaultdict(lambda : self.colors.next())
        # Some helper function
        def create_roc_function(roc_curve):
            """ Create a function mapping FPR onto TPR for the given roc_curve
            """
            def roc_function(query_fpr):
                """ Map FPR onto TPR using linear interpolation on ROC curve."""
                if query_fpr == 0.0: return 0.0 # Avoid division by zero
                last_fpr, last_tpr = 0.0, 0.0
                for fpr, tpr in roc_curve:
                    if fpr >= query_fpr:
                        return (query_fpr - last_fpr) / (fpr - last_fpr) * \
                                    (tpr - last_tpr) + last_tpr
                    last_fpr, last_tpr = fpr, tpr
                # Past the last point: clamp to the final TPR.
                return tpr
            return roc_function

        def create_weight_function(x_values, mean_curve):
            """
            Creates a function that computes the orthogonal distance of the ROC
            curve from the identity axis at an arbitrary (k,k)
            """
            def weight_function(k):
                """
                Creates a function that computes the orthogonal distance of the
                ROC curve from the identity axis at (k,k)
                """
                if k == 0.0: return 0.0 # Avoid division by zero
                for fpr, tpr in zip(x_values, mean_curve):
                    if 0.5 * fpr + 0.5 * tpr >= k:
                        return 2 * (0.5 * fpr - 0.5 * tpr)**2
                return 0.0
            return weight_function
        # Create mapping parameterization -> ROC functions
        roc_fct_dict = defaultdict(list)
        for parametrization, roc_curve in self._project_onto_subset(
                self.roc_curves, projection_parameter):
            # Group curves by the selected variable's value; everything
            # falls into "Global" when no variable is selected.
            key = parametrization[selected_variable] \
                if selected_variable is not None and selected_variable \
                    in parametrization.keys() else "Global"
            roc_fct_dict[key].append(create_roc_function(roc_curve))
        # Iterate over all parametrization and average ROC functions and compute
        # centroid
        for param, roc_fcts in roc_fct_dict.iteritems():
            x_values = numpy.linspace(0.0, 1.0, 500)
            roc_values = []
            for x in x_values:
                roc_values.append([roc_fct(x) for roc_fct in roc_fcts])
            mean_curve = map(numpy.mean, roc_values)
            # Compute centroid of the mean ROC curve over the identity axis
            weight_fct = create_weight_function(x_values, mean_curve)
            k_values = numpy.linspace(0.0, 1.0, 100)
            weights = [weight_fct(k) for k in numpy.linspace(0.0, 1.0, 100)]
            centroid = sum(k_values[i]*weights[i] for i in range(len(k_values))) \
                            / sum(weights)
            if selected_variable == None:
                color = self.colors.next()
            else:
                color = color_dict[param]
            axis.plot(x_values, mean_curve, c=color,
                      label=str(param).replace("_", " ").strip())
            # SEM error bars every 25th grid point to avoid clutter.
            axis.errorbar(x_values[::25], mean_curve[::25],
                          yerr=map(scipy.stats.sem, roc_values)[::25],
                          c=color, fmt='.')
            axis.plot([centroid], [centroid],
                      c=color, marker='h')
        axis.set_xlabel("False positive rate")
        axis.set_ylabel("True positive rate")
        axis.set_xlim(0.0, 1.0)
        axis.set_ylim(0.0, 1.0)
        axis.legend(loc=0)
        if selected_variable is not None:
            axis.set_title(str(selected_variable).replace("_", " ").strip())

    def plot_all(self, axis, projection_parameter, collection=None):
        """ Plot all loaded ROC curves after projecting onto subset. """
        # Iterate over all ROC curves for parametrization that are selected
        # by projection_parameter.
        for parametrization, roc_curve in self._project_onto_subset(self.roc_curves,
                                                                    projection_parameter):
            color = self.colors.next()
            axis.plot(map(itemgetter(0), roc_curve), map(itemgetter(1), roc_curve),
                      c=color)
        # fpr = eval(collection.data['False_positive_rate'][0])
        # tpr = eval(collection.data['True_positive_rate'][0])
        # axis.scatter([fpr], [tpr], c='k', s=50)
        axis.set_xlabel("False positive rate")
        axis.set_ylabel("True positive rate")
        axis.set_xlim(0.0, 1.0)
        axis.set_ylim(0.0, 1.0)
        axis.legend(loc=0)

    def _load_all_curves(self, dir):
        """ Load all ROC curves located in the persistency dirs below *dir*

        Directory names of the form "{dataset}{key#value}..." are parsed
        into a parametrization dict; each run/split pickle found below
        contributes one (parametrization, roc_curve) pair.
        """
        all_roc_curves = []
        for subdir in [name for name in os.listdir(dir)
                       if os.path.isdir(os.path.join(dir, name))]:
            if not subdir.startswith("{"): continue
            parametrization = {}
            tokens = subdir.strip("}{").split("}{")
            parametrization["__Dataset__"] = tokens[0]
            for token in tokens[1:]:
                # TODO if anything else then node chain template
                # has no # this will fail;
                # delete as soon as no more data with node chain templates
                # in folder names circulate
                if '#' not in token:
                    parametrization["__Template__"] = token
                    continue
                key, value = token.split("#")
                try:
                    # NOTE: eval on directory-name fragments -- only safe
                    # for trusted persistency trees.
                    value = eval(value)
                except:
                    pass
                parametrization[key] = value
            for run_dir in glob.glob(dir + os.sep + subdir
                                     + os.sep + "persistency_run*"):
                run = eval(run_dir.split("persistency_run")[1])
                for split_file in glob.glob(run_dir + os.sep + "PerformanceSinkNode"
                                            + os.sep + "roc_points_sp*.pickle"):
                    split = eval(split_file.split("roc_points_sp")[1].strip(".pickle"))
                    # Copy the base parametrization and tag it with this
                    # run/split so curves stay distinguishable.
                    rs_parametrization = dict(parametrization)
                    rs_parametrization["__Key_Run__"] = run
                    rs_parametrization["__Run__"] = "__Run_"+str(run)
                    rs_parametrization["__Key_Fold__"] = split
                    rs_parametrization["__Split__"] = "__Split_"+str(split)
                    # NOTE(review): file handle from open() is never
                    # closed explicitly.
                    roc_curves = cPickle.load(open(split_file, 'r'))
                    all_roc_curves.append((rs_parametrization, roc_curves[0]))
        return all_roc_curves

    def _project_onto_subset(self, roc_curves, constraints):
        """ Retain only roc_curves that fulfill the given constraints. """
        for parametrization, roc_curve in roc_curves:
            # Check constraints: every constraint key must be present and
            # its value must be among the allowed constraint values.
            constraints_fulfilled = True
            for constraint_key, constraint_values in constraints.iteritems():
                if not constraint_key in parametrization or not \
                        parametrization[constraint_key] in constraint_values:
                    constraints_fulfilled = False
                    break
            if constraints_fulfilled:
                yield (parametrization, roc_curve)
|
gpl-3.0
|
irockafe/revo_healthcare
|
src/project_fxns/rt_window_prediction.py
|
1
|
13555
|
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.utils import shuffle
from scipy import interp
import pickle
# My code
import visualization.viz as viz
def get_tpr_fpr(X, y, clf, cross_val):
    '''
    GOAL:
        Collect the per-fold statistics needed to draw an ROC curve.
    INPUT:
        X - (samples x features) numpy array
        y - (samples,) numpy array of class labels
        clf - classifier with fit()/predict_proba() (scikit-learn style)
        cross_val - iterable of (train, test) index arrays; must expose
            an `n_iter` attribute for the progress report
    OUTPUT:
        tpr_list - list of TPR arrays, each interpolated onto mean_fpr
        auc_list - list of per-fold AUC values
        mean_fpr - the common FPR grid (100 points on [0, 1])
    '''
    start_time = time.time()
    mean_fpr = np.linspace(0, 1, 100)
    tpr_list = []
    auc_list = []
    for fold_idx, (train_idx, test_idx) in enumerate(cross_val):
        # Fit on the training fold, score the held-out fold.
        clf.fit(X[train_idx], y[train_idx])
        scores = clf.predict_proba(X[test_idx])[:, 1]
        fpr, tpr, _ = roc_curve(y[test_idx], scores)
        # Interpolate onto the shared FPR grid and pin the origin at 0.
        interpolated_tpr = interp(mean_fpr, fpr, tpr)
        interpolated_tpr[0] = 0.0
        tpr_list.append(interpolated_tpr)
        auc_list.append(auc(fpr, tpr))
        # Progress report every 10 folds.
        if fold_idx % 10 == 0:
            print('{perc}% done! {time}s elapsed'.format(
                perc=100*float(fold_idx)/cross_val.n_iter,
                time=(time.time() - start_time))
            )
    return tpr_list, auc_list, mean_fpr
def roc_curve_cv(X, y, clf, cross_val, color='blue',
                 path='/home/irockafe/Desktop/roc.pdf',
                 save=False, plot=False,
                 ):
    '''
    PURPOSE:
        Creates a cross-validated ROC curve plot.
    INPUT:
        X - (samples x features) numpy array
        y - (samples,) numpy array of class labels
        clf - classifier to use
        cross_val - cross-validation strategy to use
        color - color to plot ROC curve
        path - where to dump a pdf of the cross-validated ROC curve
        save - if True, write the figure to *path* as pdf
        plot - if True, show the figure interactively
    OUTPUT:
        the matplotlib-like plot object returned by viz.roc_curve
    '''
    tpr_list, auc_list, mean_fpr = get_tpr_fpr(X, y, clf, cross_val)
    # Fixed: was a Python-2-only `print` statement; function form works
    # in both interpreters.
    print('Plottttt!')
    my_plt = viz.roc_curve(tpr_list, mean_fpr, auc_list,
                           cross_val, path, color=color)
    # Fixed: the `save` flag was previously accepted but never acted on
    # (compare roc_curve_cv_null, which does honor it).
    if save:
        my_plt.savefig(path, format='pdf')
    if plot:
        my_plt.show()
    return my_plt
def roc_curve_cv_null(X, y, clf, cross_val, num_shuffles=5, color='black',
                      path='/home/irockafe/Desktop.roc.pdf',
                      save=False, plot=True,
                      ):
    '''
    PURPOSE:
        Creates a null ROC curve by shuffling class labels.
    INPUT:
        X - (samples x features) numpy array
        y - (samples,) numpy array of class labels
        clf - classifier to use (scikit)
        cross_val - cross-validation strategy to use (scikit crossvalidation
            object); its label attributes are overwritten with the
            shuffled labels on each round
        num_shuffles - number of times to shuffle class labels (must be >= 1)
        color - color to plot the ROC curve
        path - where to dump a pdf of the cross-validated ROC curve
            NOTE(review): default looks like a typo ('Desktop.roc.pdf'
            instead of 'Desktop/roc.pdf') -- kept for backward
            compatibility.
        save - if True, write the figure to *path* as pdf
        plot - if True, show the figure interactively
    OUTPUT:
        the matplotlib-like plot object returned by viz.roc_curve
    '''
    # Fixed: the original first ran get_tpr_fpr on the UNSHUFFLED labels
    # and discarded the (expensive) result -- that wasted call is removed.
    null_tpr_list = []
    null_auc_list = []
    mean_fpr_null = None
    for i in range(0, num_shuffles):
        # Iterate through the shuffled y vals and repeat with appropriate
        # params; retain the auc vals for final plotting of distribution.
        y_shuffle = shuffle(y)
        cross_val.y = y_shuffle
        cross_val.y_indices = y_shuffle
        # Fixed: the original counted AGREEMENTS ((y == ...).sum()) while
        # claiming to report differences, and the message was missing a
        # space ("andshuffle").
        print('Number of differences b/t original and '
              'shuffle: {num}'.format(num=(y != y_shuffle).sum()))
        # Get auc values for number of iterations
        mean_tpr_null, auc_list_null, mean_fpr_null = get_tpr_fpr(X,
                                                                  y_shuffle,
                                                                  clf,
                                                                  cross_val)
        null_tpr_list += mean_tpr_null
        null_auc_list += auc_list_null
    my_plt = viz.roc_curve(null_tpr_list, mean_fpr_null, null_auc_list,
                           cross_val, path, color=color)
    if save:
        my_plt.savefig(path, format='pdf')
    if plot:
        my_plt.show()
    return my_plt
def rt_slice(df, rt_bounds):
    '''
    PURPOSE:
        Given a (features x samples) dataframe with 'mz' and 'rt'
        columns, keep only the feature rows whose retention time lies
        strictly between the two bounds.
    INPUT:
        df - pandas dataframe feature table with 'mz' and 'rt' column
            headers (i.e. from xcms), sample columns, and one feature
            per row
        rt_bounds - (left_bound, right_bound) of the retention-time slice
    OUTPUT:
        Feature table restricted to the rt window (still includes the
        'rt' and 'mz' columns).
    '''
    lower, upper = rt_bounds
    in_window = (df['rt'] > lower) & (df['rt'] < upper)
    return df.loc[in_window]
def plot_mz_rt(df, rt_bounds, path='/home/irockafe/Desktop/poop.pdf',
               bins=100):
    '''
    PURPOSE: Plot the mz/rt space of your dataset,
        along with histograms of the rt and mz presence.
    INPUT:
        df - a pandas dataframe feature table,
            containing columns labeled 'rt' and 'mz', along with
            the sample columns
        rt_bounds - iterable containing retention time bounds
            for a particular slice: (left bound, right bound)
        path - where to save the figure to
        bins - number of histogram bins for both marginals
    OUTPUT:
        a pdf file showing mz/rt points and the rt-bound
    '''
    # the data to plot
    x = df['rt']
    y = df['mz']
    # Fixed: these were Python-2-only `print` statements.
    print(np.max(x))
    print(np.max(y))
    nullfmt = NullFormatter()  # no labels on the marginal axes

    # definitions for the axes (scatter + two marginal histograms)
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    rect_histy = [left_h, bottom, 0.2, height]

    # start with a rectangular Figure
    plt.figure(1, figsize=(10, 10))
    axScatter = plt.axes(rect_scatter)
    axHistx = plt.axes(rect_histx)
    axHisty = plt.axes(rect_histy)
    # no labels
    axHistx.xaxis.set_major_formatter(nullfmt)
    axHisty.yaxis.set_major_formatter(nullfmt)

    # the scatter plot:
    axScatter.scatter(x, y, s=1)
    # pad the limits by 50 units on each side
    x_min = np.min(x) - 50
    x_max = np.max(x) + 50
    axScatter.set_xlim(x_min, x_max)
    y_min = np.min(y) - 50
    y_max = np.max(y) + 50
    axScatter.set_ylim(y_min, y_max)

    # Add vertical lines marking the rt window to scatter and rt histogram
    axScatter.axvline(x=rt_bounds[0], lw=2, color='r', alpha=0.5)
    axScatter.axvline(x=rt_bounds[1], lw=2, color='r', alpha=0.5)
    axHistx.axvline(x=rt_bounds[0], lw=2, color='r', alpha=0.5)
    axHistx.axvline(x=rt_bounds[1], lw=2, color='r', alpha=0.5)

    # marginal histograms share the scatter's limits
    axHistx.hist(x.dropna(), bins=bins)
    axHisty.hist(y.dropna(), bins=bins, orientation='horizontal')
    axHistx.set_xlim(axScatter.get_xlim())
    axHisty.set_ylim(axScatter.get_ylim())

    axScatter.set_ylabel('m/z', fontsize=30)
    axScatter.set_xlabel('Retention Time', fontsize=30)
    axHistx.set_ylabel('# of Features', fontsize=20)
    axHisty.set_xlabel('# of Features', fontsize=20)
    plt.savefig(path,
                format='pdf')
    plt.show()
def slice_and_predict(df, y, rt_window, not_samples, rf_estimators=1000,
                      n_iter=10, test_size=0.3, random_state=1,
                      mzrt_path='/home/irockafe/Desktop/poop.pdf',
                      roc_path='/home/irockafe/Desktop/roc.pdf',
                      ):
    '''
    PURPOSE: Slice mass-spec run into time-windows, then using
        that subset, try to predict outcome.
    INPUT:
        df -
            pandas dataframe, from xcms, that includes columns with 'mz'
            and 'rt'. Do all your preprocessing before this function!
            (sample-thresholds, normalization, etc)
        y -
            class-labels encoded by scikit LabelEncoder.
            Make sure their order matches the dataframe's
        rt_window -
            (left_bound, right_bound) of retention times
        not_samples -
            a list of column headers to be removed from df before
            converting to feature table e.g. ['mz', 'rt', 'mzmin']
        rf_estimators -
            Number of trees in random forest
        n_iter -
            Number of cross-validation iterations
        test_size -
            fraction of samples to be held back as test set
        random_state -
            numpy random state
        mzrt_path -
            where file showing mzrt files will be saved
        roc_path -
            where file showing roc curve will be saved
    OUTPUT:
        pdf files
            - showing mzrt space, with histograms of density
            - showing cross-validated ROC curves
        auc_vals -
            The auc values from cross-validation (this way you can plot
            stuff)
    '''
    # plot selection
    plot_mz_rt(df, rt_window, path=mzrt_path)
    # Get slice and convert to feature table
    df_slice = rt_slice(df, rt_window)
    # remove columns with adduct info, extranneous stuff: mz, rt, etc
    samples_list = df_slice.columns.difference(not_samples)
    # convert to samples x features
    df_slice_processed = df_slice[samples_list].T
    # Fixed: .as_matrix() was removed in pandas >= 1.0; .values is the
    # long-standing equivalent.
    X_slice = df_slice_processed.values
    # Fixed: Python-2-only `print` statements.
    print('slice shape {0}'.format(X_slice.shape))
    print('y shape {0}'.format(y.shape))
    # Run RF (dead self-assignments like `n_iter = n_iter` removed)
    cross_val_rf = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                          random_state=random_state)
    clf_rf = RandomForestClassifier(n_estimators=rf_estimators,
                                    random_state=random_state)
    tpr_vals, auc_vals, mean_fpr = roc_curve_cv(X_slice, y, clf_rf,
                                                cross_val_rf,
                                                save=True, path=roc_path)
    return auc_vals
def make_sliding_window(min_val, max_val, width, step):
    '''
    PURPOSE: Create a sliding window given min, max, window width
        and step-size.
    INPUT:
        min_val: Minimum value
        max_val: Maximum value
        width: Window width
        step: stepsize for each window (must be <= width)
    OUTPUT:
        list of tuples [(left, right), (left2, right2), ...]
        giving the bounds of each window; windows whose right edge would
        exceed max_val are excluded.
    RAISES:
        ValueError if step > width.
    '''
    if step > width:
        raise ValueError("Your step should be less than "
                         "or equal to the width of the window")
    # Fixed: the original popped elements from the list while iterating
    # it with enumerate(), which skips entries when several consecutive
    # windows overflow max_val (and breaks on Python 3, where zip() is
    # not a list). Build the filtered list directly instead.
    left_bounds = np.arange(min_val, max_val, step)
    return [(left, left + width) for left in left_bounds
            if left + width <= max_val]
def sliding_rt_window_aucs(X_df, y, sliding_window, not_samples,
                           rf_trees=500, n_iter=3, test_size=0.3,
                           output_path='/home/irockafe/Desktop/'):
    '''
    PURPOSE: Run slice_and_predict() over each retention-time window and
        collect the cross-validated AUC values.
    INPUT:
        X_df - pandas dataframe feature table ('mz'/'rt' columns + samples)
        y - class labels aligned with the sample columns of X_df
        sliding_window - list of (left, right) rt bounds, e.g. from
            make_sliding_window()
        not_samples - column headers that are not sample columns
        rf_trees - number of trees in the random forest
        n_iter - cross-validation iterations per window
        test_size - fraction of samples held out per iteration
        output_path - directory for per-window pdf plots and the pickled
            AUC array
    OUTPUT:
        all_aucs - (n_windows x n_iter) numpy array of AUC values; also
            pickled to output_path + 'auc_vals.pkl'
    '''
    all_aucs = np.full([len(sliding_window), n_iter], np.nan)
    # Fixed: loop variable was named `rt_slice`, shadowing the
    # module-level rt_slice() function.
    for i, rt_window in enumerate(sliding_window):
        # Fixed: Python-2-only `print` statements.
        print('RT plot {0}'.format(rt_window))
        auc_vals = slice_and_predict(X_df, y, rt_window,
                                     not_samples, rf_estimators=rf_trees,
                                     n_iter=n_iter, test_size=test_size,
                                     random_state=1,
                                     mzrt_path=(output_path +
                                                '/mzrt_window_%i.pdf' % i),
                                     roc_path=(output_path +
                                               'roc_window_%i.pdf' % i))
        # add aucs vals to array
        all_aucs[i] = auc_vals
        print('\n\n\n'+'-'*50+'NEXT ROUND'+'-'*50+'\n\n\n')
    # write aucs to file; context manager closes the handle (the original
    # leaked an open file object)
    with open(output_path+'auc_vals.pkl', 'wb') as handle:
        pickle.dump(all_aucs, handle)
    return all_aucs
def plot_auc_vs_rt(auc_vals, sliding_window, df,
                   path, plot=True, save=True):
    '''
    PURPOSE: Plot mean AUC (with std-dev error bars) against the midpoint
        of each retention-time window, with a histogram of feature
        retention times above the scatter.
    INPUT:
        auc_vals - iterable of per-window AUC value collections
        sliding_window - iterable of (left, right) rt bounds, aligned
            with auc_vals
        df - feature table with an 'rt' column (for the histogram)
        path - where to save the figure (pdf)
        save - if True, write the figure to *path*
        plot - if True, show the figure interactively
    '''
    # x = window midpoints; y = mean/std of the AUCs per window
    x = [np.mean(i) for i in sliding_window]
    y_mean = [np.mean(i) for i in auc_vals]
    y_std = [np.std(i) for i in auc_vals]

    # Set up the figure: scatter with a histogram panel on top
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left + width + 0.02
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    # Start with rect figure
    plt.figure(1, figsize=(10, 10))
    axScatter = plt.axes(rect_scatter)
    axHistx = plt.axes(rect_histx)
    # no labels on the histogram's x-axis
    nullfmt = NullFormatter()
    axHistx.xaxis.set_major_formatter(nullfmt)

    # scatter plot with error bars
    axScatter.scatter(x, y_mean, 100)
    axScatter.errorbar(x, y_mean, yerr=y_std,
                       fmt='o', capsize=5)
    # Histogram of feature retention times
    axHistx.hist(df['rt'], bins=100)
    # Labels
    axScatter.set_ylabel('AUC', fontsize=30)
    axScatter.set_xlabel('Retention Time', fontsize=30)
    axScatter.set_ylim([0.5, 1.0])
    axHistx.set_ylabel('# of Features', fontsize=20)
    # Fixed: the save/plot flags were previously accepted but ignored --
    # savefig/show ran unconditionally. Defaults preserve old behavior.
    if save:
        plt.savefig(path, format='pdf')
    if plot:
        plt.show()
#
|
mit
|
sergiohr/NeuroDB
|
neurodb/cluster.py
|
1
|
16460
|
'''
Created on Jul 16, 2015
@author: sergio
'''
import numpy as np
import ctypes
import numpy.ctypeslib as npct
import matplotlib.pyplot as plt
import psycopg2
import time
import neurodb.neodb.core
from math import e, pow
from scipy.optimize import leastsq
import neurodb
import random
from sklearn.cluster import KMeans, AgglomerativeClustering, MiniBatchKMeans
from neurodb.cfsfdp import libcd
import db
import multiprocessing as mp
import neurodb.features
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import time
output = mp.Queue()
def compare_array(a, b):
    '''Return True if sequences *a* and *b* are element-wise identical.

    Prints the first mismatching pair (or a length-mismatch notice) and
    returns False on the first difference; prints "iguales" and returns
    True when everything matches.
    '''
    # Fixed: the original indexed b with a's indices, so it raised
    # IndexError when b was shorter and silently ignored extra elements
    # when b was longer.
    if len(a) != len(b):
        print("length mismatch: {0} vs {1}".format(len(a), len(b)))
        return False
    for i in range(len(a)):
        if a[i] != b[i]:
            # Fixed: Python-2-only `print a[i], b[i]` statement; output
            # format ("x y") is unchanged.
            print("{0} {1}".format(a[i], b[i]))
            return False
    print("iguales")
    return True
def show_features(nodo, index, centers = None):
    """3D scatter plot of the first three feature components per cluster.

    Fetches columns p1/p2/p3 from the `features` DB table for the feature
    ids in *nodo*, colors each point by its cluster label from *index*,
    and optionally highlights the features at the *centers* indices with
    large labeled markers.

    :nodo: iterable of feature-table row ids
    :index: per-feature cluster labels, aligned with *nodo*
    :centers: optional indices into *nodo* marking cluster centers
    """
    # Hard-coded dev database credentials.
    username = 'postgres'
    password = 'postgres'
    host = '172.16.162.128'
    dbname = 'demo'
    connection = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
    cursor = connection.cursor()
    qcolor = ['red', 'blue', 'green', 'black', 'yellow', 'white']
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for k in np.unique(index):
        # Collect the feature ids belonging to cluster k.
        subnodo = []
        for q in range(len(nodo)):
            if index[q] == k:
                subnodo.append(nodo[q])
        # Build "id=.. or id=.. or .." WHERE clause; the trailing " or "
        # (5 chars) is stripped afterwards.
        # NOTE(review): SQL built by string concatenation -- fine for
        # internal numeric ids, unsafe for untrusted input.
        query = """select p1, p2, p3 from features where """
        condition = ""
        for f in subnodo:
            condition = condition + "id=%s or "%(f)
        condition = condition[:len(condition)-5]
        query = query + condition
        cursor.execute(query)
        results = cursor.fetchall()
        x = []
        y = []
        z = []
        for i in results:
            x.append(i[0])
            y.append(i[1])
            z.append(i[2])
        ax.scatter(x, y, z, color=qcolor[int(k)])
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    if centers != None:
        qcolor = ['red', 'blue', 'green', 'black', 'yellow', 'white']
        k = 0
        for c in centers:
            # Look up the center feature's coordinates and draw it as a
            # large labeled marker.
            query = """select p1, p2, p3 from features where id=%s"""%(nodo[c])
            cursor.execute(query)
            results = cursor.fetchall()
            x = []
            y = []
            z = []
            #for i in range(len(results)):
            x.append(results[0][0])
            y.append(results[0][1])
            z.append(results[0][2])
            #print "c:%s p1:%s p2:%s p3:%s"%(nodo[c], results[0][0], results[0][1], results[0][2])
            ax.scatter(x, y, z, s=200, marker='o', color=qcolor[k])
            ax.text(results[0][0],results[0][1],results[0][2],str(c))
            k=k+1
    plt.show()
def ajuste(local_density, coeficientes):
    """Evaluate the polynomial given by *coeficientes* at each value.

    :local_density: iterable of x-values
    :coeficientes: polynomial coefficients, lowest order first
        (numpy.polynomial convention)
    :returns: numpy array of fitted y-values, same length as input
    """
    fitted = np.empty(len(local_density))
    for idx, density in enumerate(local_density):
        fitted[idx] = np.polynomial.polynomial.polyval(density, coeficientes)
    return fitted
def show_selection(rho, delta, plot = True):
    """Pick cluster-center candidates from a (rho, delta) decision graph.

    Indices whose delta exceeds, by more than two standard deviations,
    both a least-squares reference line and a hand-constructed descending
    line are returned as candidate centers. Optionally plots the graph
    with both threshold lines.
    """
    n = len(rho)
    # Low-density cutoff: 10% of the largest local density.
    density_cutoff = int(rho.max() * 0.1)
    # Mean of delta (explicit accumulation, as in the original numerics).
    delta_sum = 0
    for value in delta:
        delta_sum = delta_sum + value
    delta_mean = delta_sum / n
    # Replace deltas of low-density points by the mean so they do not
    # skew the linear fit.
    fit_target = np.copy(delta)
    for idx in range(len(fit_target)):
        if rho[idx] < density_cutoff:
            fit_target[idx] = delta_mean
    # Halve the largest delta so it does not dominate the fit.
    peak_idx = fit_target.argmax()
    peak_value = fit_target[peak_idx]
    fit_target[peak_idx] = peak_value / 2
    # First reference line: degree-1 least-squares fit.
    fit_coeffs, fit_stats = np.polynomial.polynomial.polyfit(
        rho, fit_target, 1, full=True)
    line1 = np.zeros(n)
    for idx in range(n):
        line1[idx] = np.polynomial.polynomial.polyval(rho[idx], fit_coeffs)
    sigma1 = (fit_stats[0][0] / float(n)) ** 0.5
    # Restore the halved peak.
    fit_target[peak_idx] = peak_value
    # Second reference line through (rho_min, delta at rho_min) and
    # (0.1 * rho_max, 0).
    x_lo = rho.min()
    x_hi = rho.max() * 0.1
    y_lo = delta[rho.argmin()]
    slope = (0 - y_lo) / (x_hi - x_lo)
    line2 = ajuste(rho, [-x_lo * slope + y_lo, slope])
    # Candidate centers: above both lines by more than 2 sigma.
    centers = [idx for idx in range(n)
               if delta[idx] > line1[idx] + 2 * sigma1
               and delta[idx] > line2[idx] + 2 * sigma1]
    if plot:
        plt.plot(rho, delta, 'bo')
        plt.plot(rho, line1, 'r')
        plt.plot(rho, line1 + 2 * sigma1, 'g')
        plt.plot(rho, line2 + 2 * sigma1, 'g')
        plt.show()
    return centers
class DPClustering():
def __init__(self, points=3, percentage_dc=1.8, kernel="gaussian", threading = "multi", nnodos = 4):
if threading not in ["multi", "serial"]:
raise StandardError("""Parameter threading must be contains 'multi' or 'serial'.""")
self.connect = "dbname=demo host=172.16.162.128 user=postgres password=postgres"
self.threading = threading
self.points = points
self.percentage_dc = percentage_dc
self.kernel = kernel
self.nnodos = nnodos
def fitSpikes(self, spike_ids = None, recordingchannel_id = None):
results = []
spike_ids_cp = np.copy(spike_ids)
if spike_ids != None :
if self.threading == "multi":
results = self.__process_multi(spike_ids)
if self.threading == "serial":
if self.nnodos == 1:
spike_ids, labels = self.__process_serial(spike_ids)
#Lo que entra no es lo que sale de esta funcion, corregir
return labels
else:
results = self.__process_serial(spike_ids)
if results != []:
templates = []
ids = []
for x in results:
templates.append(x[0])
ids.append(x[1])
smod = np.float(1.5)
features_ids = self.__insertFeaturesTemplate(templates, ids)
features_ids = np.array(features_ids, np.float64)
nspikes = len(features_ids)
rho = np.empty(nspikes)
delta = np.empty(nspikes)
id_spikes = np.empty(nspikes)
cluster_index = np.empty(len(features_ids))
dc = libcd.getDC(self.connect, features_ids, id_spikes, len(features_ids), np.float(2.0), self.points)
libcd.dpClustering(features_ids, len(features_ids), dc, self.points, "gaussian", id_spikes, cluster_index, rho, delta, smod)
#cent = show_selection(rho, delta, plot=False)
#show_features(features_ids, np.ones(len(features_ids)), cent)
# Cuando se hace una consulta a la base no se devuelve los ids ordenados segun la consulta
spikes = neurodb.features.getFromDB(features_id=features_ids, column='extra')
labels = []
for j in range(len(spike_ids_cp)):
flag = 0
for k in range(len(spikes)):
if spike_ids_cp[j] in spikes[k][1]:
labels.append(cluster_index[k])
flag = 1
if flag == 0:
labels.append(0)
neurodb.features.removeOnDB(features_id=features_ids)
self.__saveLabelsMulti(spike_ids, labels)
return np.array(labels)
def __select_nodes(self, spikes):
spikes_cp = list(spikes)
random.shuffle(spikes_cp)
len_spikes = len(spikes_cp)
len_nodo = np.ceil(float(len_spikes)/float(self.nnodos))
nodos = []
for i in range(self.nnodos):
nodo = []
j = 0
while(spikes_cp != [] and j<len_nodo):
nodo.append(spikes_cp.pop())
j = j + 1
nodos.append(nodo)
return nodos
def __process_multi(self, spikes):
output = mp.Queue()
nodos = self.__select_nodes(spikes)
process = []
for i in range(self.nnodos):
process.append(mp.Process(target=self.__clustering, args=(nodos[i], self.points, output)))
for p in process:
p.start()
results = [output.get() for p in process]
out=[]
for r in results:
for t in r:
out.append(t)
for p in process:
p.join()
return out
def __process_serial(self, spikes):
output = mp.Queue()
nodos = self.__select_nodes(spikes)
results = []
if (self.nnodos == 1):
spikes_id, labels = self.__clustering(spikes, self.points, output)
return spikes_id, labels
for nodo in nodos:
self.__clustering(nodo, self.points, output)
result = output.get()
results.append(result)
out=[]
for r in results:
for t in r:
out.append(t)
return out
def __clustering(self, nodo, points, output):
username = 'postgres'
password = 'postgres'
host = '172.16.162.128'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
smod = np.float(2.5)
connect = "dbname=demo host=172.16.162.128 user=postgres password=postgres"
spikes_id = np.array(nodo, np.float64)
nspikes = len(nodo)
rho = np.empty(nspikes)
delta = np.empty(nspikes)
nneigh = np.empty(nspikes)
centers = np.empty(nspikes)
cluster_index = np.empty(nspikes)
features = neurodb.features.getFeaturesFromSpikes(nodo, connection=dbconn)
dc = libcd.getDC(connect, features, spikes_id, nspikes, np.float(1.8), points)
libcd.dpClustering(features, nspikes, dc, points, "gaussian", spikes_id, cluster_index, rho, delta, smod)
#plt.plot(delta[rho.argsort()])
#plt.show()
#show_selection(rho, delta)
#show_features(features, cluster_index)
if (self.nnodos == 1):
return spikes_id, cluster_index
#plt.plot(rho, delta, 'o')
#plt.show()
templates = []
spikes = []
out = []
for i in range(1, int(cluster_index.max())+1):
template = np.zeros(64, np.float64)
gspikes = []
k = 0
for j in range(nspikes):
if cluster_index[j] == i:
spike = neurodb.neodb.core.spikedb.get_from_db(dbconn, id = int(spikes_id[j]))
signal = spike[0].waveform
template = template + signal
gspikes.append(spikes_id[j])
k = k + 1
template = template/k
out.append((template, gspikes))
dbconn.close()
output.put(out)
def __insertFeaturesTemplate(self, templates, spike_ids):
    """Project cluster templates onto 10 PCA components and persist them.

    Parameters
    ----------
    templates : sequence
        Averaged waveforms, one per cluster (at least 10 required, since
        PCA with n_components=10 needs that many samples).
    spike_ids : sequence
        Per-template payload stored as a binary blob in the `extra` column.

    Returns
    -------
    list of the newly created FEATURES row ids, in input order.

    Raises StandardError (Python 2 built-in) when fewer than 10 templates
    are supplied.
    """
    username = 'postgres'
    password = 'postgres'
    host = '172.16.162.128'
    dbname = 'demo'
    if (len(templates) < 10):
        raise StandardError("The amount of templates is not enough for calculating PCA. Templates: %s"%(len(templates)))
    pca = PCA(n_components=10)
    transf = pca.fit_transform(templates)
    dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
    #spike_ids = np.float16(spike_ids)
    cursor = dbconn.cursor()
    ids = []
    i = 0
    # One parameterized INSERT per projected template; RETURNING id lets us
    # collect the generated primary keys without a second query.
    for x in transf:
        query = """INSERT INTO FEATURES (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, extra)
        VALUES (%s, %s,%s, %s,%s,
        %s,%s, %s,%s, %s, %s) RETURNING id"""
        cursor.execute(query, [x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
                               x[8], x[9], psycopg2.Binary(np.float32(spike_ids[i]))])
        id_of_new_row = cursor.fetchone()[0]
        ids.append(id_of_new_row)
        i = i+1
    dbconn.commit()
    # NOTE(review): dbconn is never closed here, unlike the sibling methods.
    return ids
def __saveLabelsMulti(self, spikes, labels):
    """Persist spike labels in parallel across 14 worker processes.

    The spike/label arrays are split into 14 contiguous chunks, the last
    chunk absorbing the remainder; each chunk is written by __saveLabels
    in its own process.  Blocks until every worker has finished.
    """
    n_workers = 14
    chunk = int(len(spikes) / n_workers)
    workers = []
    for w in range(n_workers - 1):
        lo, hi = w * chunk, w * chunk + chunk
        workers.append(mp.Process(target=self.__saveLabels,
                                  args=(spikes[lo:hi], labels[lo:hi])))
    # Final worker takes everything from the last full chunk boundary on.
    tail = (n_workers - 1) * chunk
    workers.append(mp.Process(target=self.__saveLabels,
                              args=(spikes[tail:len(spikes)], labels[tail:len(spikes)])))
    for w in workers:
        w.start()
    for w in workers:
        w.join()
def __saveLabels2(self, spikes, labels):
    """Persist one label per spike with individual UPDATE statements.

    Slower fallback for __saveLabels: issues len(spikes) UPDATEs, one per
    (spike, label) pair, and commits once at the end.

    Parameters
    ----------
    spikes : sequence
        Spike ids matched against features.id_spike.
    labels : sequence
        Cluster labels aligned with `spikes`; cast to int for consistency
        with __saveLabels.
    """
    username = 'postgres'
    password = 'postgres'
    host = '172.16.162.128'
    dbname = 'demo'
    dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
    try:
        cursor = dbconn.cursor()
        for spike, label in zip(spikes, labels):
            # Parameterized query: the original interpolated raw values into
            # the SQL text, which is fragile and injection-prone.
            cursor.execute("UPDATE features SET label=%s WHERE id_spike=%s",
                           (int(label), spike))
        dbconn.commit()
    finally:
        # Always release the connection, even if an UPDATE fails.
        dbconn.close()
def __saveLabels(self, spikes, labels):
    """Persist all spike labels with a single CASE-based UPDATE.

    Builds one statement of the form
        UPDATE features SET label = CASE id_spike WHEN 'a' THEN 1 ... END
        WHERE id_spike in (a, b, ...)
    so the whole batch is written in one round trip.

    NOTE(review): values are interpolated directly into the SQL text rather
    than passed as query parameters — safe only while spike ids come from
    our own database; worth parameterizing.
    """
    username = 'postgres'
    password = 'postgres'
    host = '172.16.162.128'
    dbname = 'demo'
    dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
    cursor = dbconn.cursor()
    query = "UPDATE features SET label="
    case = "CASE id_spike "
    ids = ""
    # Accumulate one WHEN clause per spike and the IN(...) id list in step.
    for i in range(len(spikes)):
        case = case + "WHEN '%s' THEN %s "%(spikes[i], int(labels[i]))
        ids = ids + "%s, "%(spikes[i])
    case = case + "END "
    ids = ids[:len(ids)-2]  # drop the trailing ", "
    query = query + case
    query = query + "WHERE id_spike in (%s)"%ids
    #print query
    cursor.execute(query)
    dbconn.commit()
    dbconn.close()
if __name__ == '__main__':
    # Smoke-test driver: cluster the spikes of one channel of one recording
    # session with DPClustering.  The plotting code at the bottom is kept,
    # commented out, for manual inspection of the resulting clusters.
    connect = "dbname=demo host=172.16.162.128 user=postgres password=postgres"
    id_project = 19
    #id_session = "84" #5768 spikes
    #id_session = "94" #74394 spikes
    id_session = "98" #2800 spikes
    channel = "1"
    points = 3    # feature dimensions used by the clustering library
    n_nodos = 20  # number of nodes the spike set is split across
    if db.NDB == None:
        db.connect()
    color = ['bo', 'ro', 'go', 'co', 'ko', 'mo', 'b^', 'r^', 'g^', 'c^', 'k^', 'm^']
    centers = []
    rho = np.array([], np.float64)
    delta = np.array([], np.float64)
    ncenters = 0
    project = neurodb.project.get_from_db(id_project)
    session = project.get_session(int(id_session))
    channels = session.get_channels()
    # Locate the requested channel number within the session's channel list;
    # NOTE(review): if no channel matches, `rc` below is unbound.
    for ch in channels:
        if ch['channel']==int(channel):
            rc = session.get_channel(ch['id'])
    spikes = rc.get_spikes()
    np.set_printoptions(threshold=np.nan)
    dp = DPClustering(points=points, percentage_dc=2, kernel="gaussian", threading = "serial", nnodos = n_nodos)
    labels = dp.fitSpikes(spikes)
    #print labels
    # Manual inspection: plot each cluster's spikes and averaged template.
    # for i in range(0, int(labels.max())+1):
    #     count = 0
    #     template = np.zeros(64, np.float64)
    #     plt.subplot(1,int(labels.max())+1,i+1)
    #     for j in range(len(spikes)):
    #         if labels[j] == i:
    #             spike = neurodb.neodb.core.spikedb.get_from_db(db.NDB, id = int(spikes[j]))
    #             signal = spike[0].waveform
    #             template = template + signal
    #             plt.plot(signal, 'b')
    #             count = count + 1
    #     if count != 0:
    #         plt.plot(template/count, 'r')
    #     plt.title("Cluster " + str(i) + ": # " + str(count))
    #
    # plt.show()
    #
    # pass
|
gpl-3.0
|
Garrett-R/scikit-learn
|
sklearn/utils/tests/test_validation.py
|
12
|
7588
|
"""Tests for input validation functions"""
from tempfile import NamedTemporaryFile
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from itertools import product
from sklearn.utils import as_float_array, check_array
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.utils.validation import has_fit_parameter
def test_as_float_array():
    """as_float_array: dtype promotion, copy semantics, memory layout."""
    X = np.ones((3, 10), dtype=np.int32) + np.arange(10, dtype=np.int32)
    # int32 input converts to float32
    np.testing.assert_equal(as_float_array(X, copy=False).dtype, np.float32)
    # int64 input promotes to float64 and never aliases the original
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    assert_true(as_float_array(X, False) is not X)
    np.testing.assert_equal(X2.dtype, np.float64)
    # already-float input with copy=False is returned untouched
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Fortran ordering survives the conversion
    assert_true(np.isfortran(as_float_array(np.asfortranarray(X), copy=True)))
    # copy=True must yield an independent buffer for assorted inputs
    for M in (np.matrix(np.arange(5)),
              sp.csc_matrix(np.arange(5)).toarray(),
              sparse_random_matrix(10, 10, density=0.10).toarray()):
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
def test_np_matrix():
    """Confirm that input validation code does not return np.matrix"""
    X = np.arange(12).reshape(3, 4)
    # dense, matrix and sparse inputs must all come back as plain ndarrays
    for candidate in (X, np.matrix(X), sp.csc_matrix(X)):
        assert_false(isinstance(as_float_array(candidate), np.matrix))
def test_memmap():
    """Confirm that input validation code doesn't copy memory mapped arrays"""
    with NamedTemporaryFile(prefix='sklearn-test') as tmp:
        M = np.memmap(tmp, shape=100, dtype=np.float32)
        M[:] = 0
        # Each validator must return a view onto M: writes through the
        # returned array have to be visible in the memmap itself.
        for validate in (check_array, np.asarray,
                         lambda x: as_float_array(x, copy=False)):
            X = validate(M)
            X[:] = 1
            assert_array_equal(X.ravel(), M)
            X[:] = 0
def test_ordering():
    """Check that ordering is enforced correctly by validation utilities.

    We need to check each validation utility, because a 'copy' without
    'order=K' will kill the ordering.
    """
    X = np.ones((10, 5))
    for A in (X, X.T):
        for copy in (True, False):
            for order, flag in (('C', 'C_CONTIGUOUS'), ('F', 'F_CONTIGUOUS')):
                B = check_array(A, order=order, copy=copy)
                assert_true(B.flags[flag])
            if copy:
                assert_false(A is B)
    # Sparse: reversing .data makes it non-contiguous; check_array with
    # order='C' must restore contiguity whether or not it copies.
    X = sp.csr_matrix(X)
    X.data = X.data[::-1]
    assert_false(X.data.flags['C_CONTIGUOUS'])
    for copy in (True, False):
        Y = check_array(X, accept_sparse='csr', copy=copy, order='C')
        assert_true(Y.data.flags['C_CONTIGUOUS'])
def test_check_array():
    """Exercise check_array: sparse rejection, ndim, finiteness, dtype/order
    enforcement, copy semantics, sparse-format conversion and array-likes."""
    # accept_sparse == None
    # raise error on sparse inputs
    X = [[1, 2], [3, 4]]
    X_csr = sp.csr_matrix(X)
    assert_raises(TypeError, check_array, X_csr)
    # ensure_2d
    X_array = check_array([0, 1, 2])
    assert_equal(X_array.ndim, 2)
    X_array = check_array([0, 1, 2], ensure_2d=False)
    assert_equal(X_array.ndim, 1)
    # don't allow ndim > 3
    X_ndim = np.arange(8).reshape(2, 2, 2)
    assert_raises(ValueError, check_array, X_ndim)
    check_array(X_ndim, allow_nd=True)  # doesn't raise
    # force_all_finite
    X_inf = np.arange(4).reshape(2, 2).astype(np.float)
    X_inf[0, 0] = np.inf
    assert_raises(ValueError, check_array, X_inf)
    check_array(X_inf, force_all_finite=False)  # no raise
    # nan check
    X_nan = np.arange(4).reshape(2, 2).astype(np.float)
    X_nan[0, 0] = np.nan
    assert_raises(ValueError, check_array, X_nan)
    # BUG FIX: the original re-checked X_inf here, so the NaN array was
    # never exercised with force_all_finite=False.
    check_array(X_nan, force_all_finite=False)  # no raise
    # dtype and order enforcement.
    X_C = np.arange(4).reshape(2, 2).copy("C")
    X_F = X_C.copy("F")
    X_int = X_C.astype(np.int)
    X_float = X_C.astype(np.float)
    Xs = [X_C, X_F, X_int, X_float]
    dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
    orders = ['C', 'F', None]
    copys = [True, False]
    for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
        X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if order == 'C':
            assert_true(X_checked.flags['C_CONTIGUOUS'])
            assert_false(X_checked.flags['F_CONTIGUOUS'])
        elif order == 'F':
            assert_true(X_checked.flags['F_CONTIGUOUS'])
            assert_false(X_checked.flags['C_CONTIGUOUS'])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and
                    X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
                    and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
                assert_true(X is X_checked)
    # allowed sparse != None
    X_csc = sp.csc_matrix(X_C)
    X_coo = X_csc.tocoo()
    X_dok = X_csc.todok()
    X_int = X_csc.astype(np.int)
    X_float = X_csc.astype(np.float)
    Xs = [X_csc, X_coo, X_dok, X_int, X_float]
    accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
    for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
                                                 copys):
        X_checked = check_array(X, dtype=dtype, accept_sparse=accept_sparse,
                                copy=copy)
        if dtype is not None:
            assert_equal(X_checked.dtype, dtype)
        else:
            assert_equal(X_checked.dtype, X.dtype)
        if X.format in accept_sparse:
            # no change if allowed
            assert_equal(X.format, X_checked.format)
        else:
            # got converted to the first allowed format
            assert_equal(X_checked.format, accept_sparse[0])
        if copy:
            assert_false(X is X_checked)
        else:
            # doesn't copy if it was already good
            if (X.dtype == X_checked.dtype and X.format == X_checked.format):
                assert_true(X is X_checked)
    # other input formats
    # convert lists to arrays
    X_dense = check_array([[1, 2], [3, 4]])
    assert_true(isinstance(X_dense, np.ndarray))
    # raise on too deep lists
    assert_raises(ValueError, check_array, X_ndim.tolist())
    check_array(X_ndim.tolist(), allow_nd=True)  # doesn't raise
    # convert weird stuff to arrays
    X_no_array = NotAnArray(X_dense)
    result = check_array(X_no_array)
    assert_true(isinstance(result, np.ndarray))
def test_has_fit_parameter():
    """has_fit_parameter detects a sample_weight arg on classes/instances."""
    assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
    assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
    # works on both the class and an instance
    for estimator in (SVR, SVR()):
        assert_true(has_fit_parameter(estimator, "sample_weight"))
|
bsd-3-clause
|
devanshdalal/scikit-learn
|
examples/datasets/plot_random_dataset.py
|
348
|
2254
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)

import matplotlib.pyplot as plt

from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles

# One figure, six panels, each showing a different generator configuration.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)

plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)

plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
                             n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

# Isotropic Gaussian blobs (unrelated to make_classification above).
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

# Classes are concentric quantile shells of a single Gaussian.
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)

plt.show()
|
bsd-3-clause
|
saintdragon2/python-3-lecture-2015
|
sinsojael_final/2nd_presentation/8조/Carculator2.py
|
1
|
4919
|
__author__ = 'winseven'
from pylab import *
from tkinter import *
import math
import numpy as np
import matplotlib.pyplot as plt
# Event-handling functions
def enter(btn):
    """Handle a calculator button press.

    'C' clears the entry, '=' evaluates the current expression and replaces
    it with the result, and any other button text is appended to the entry.
    """
    if btn == 'C':
        ent.delete(0, END)
    elif btn == '=':
        # SECURITY: eval() executes arbitrary Python typed into the entry.
        # Tolerable for a local toy calculator, never for untrusted input.
        try:
            ans = eval(ent.get())
        except Exception:
            # A malformed expression used to crash the Tk callback; show a
            # marker instead so the user can clear it with 'C'.
            ans = 'Error'
        ent.delete(0, END)
        ent.insert(0, ans)
    else:
        ent.insert(END, btn)
def quit():
    """Destroy the root window and leave the Tk mainloop.

    NOTE: shadows the builtin quit(); kept because existing bindings use
    this name.
    """
    root.destroy()
    root.quit()
# Window / plot construction
def sing():
    """Plot sin(x) for x in [0, 10) sampled every 0.01."""
    step = 0.01
    xs = [i * step for i in range(0, 1000)]
    plt.plot(xs, [math.sin(x) for x in xs])
    plt.show()
def cosg():
    """Plot cos(x) for x in [0, 10) sampled every 0.01."""
    step = 0.01
    xs = [i * step for i in range(0, 1000)]
    plt.plot(xs, [math.cos(x) for x in xs])
    plt.show()
def tang():
    """Plot tan(x) on (0, 10) with the y-axis clipped to [-8, 8]."""
    xs = np.arange(0.01, 10., 0.04)
    plt.figure()
    plt.plot(xs, np.tan(xs))
    plt.ylim(-8, 8)
    plt.axhline(color="gray", zorder=-1)  # mark y = 0
    plt.show()
#def lng():
#x = arange(8.0,10.0,0.01)
#plot(x,log(x),'-')
#grid(True)
#xlabel('x')
#ylabel('y')
#plt.axis([-1, 10, -4.0, 3.0])
#show()
def logg():
    """Plot log10(x); non-positive x gives NaN/-inf and is not drawn."""
    xs = arange(-5.0, 7.0, 0.01)
    plot(xs, log10(xs), '-')
    grid(True)
    ylabel('y')
    xlabel('x')
    plt.axis([-2, 7, -2.0, 4.0])
    show()
def eg():
    """Plot e**x over [-4, 10) with the view clipped to y <= 2000."""
    xs = arange(-4.0, 10.0, 0.001)
    plot(xs, exp(xs), '-')
    grid(True)
    ylabel('y')
    xlabel('x')
    plt.axis([-1, 11, -3.0, 2000.0])
    show()
def ag():
    """Plot 2**x over [-4, 10) with the view clipped to y <= 1010."""
    xs = arange(-4.0, 10.0, 0.001)
    plot(xs, exp2(xs), '-')
    grid(True)
    ylabel('y')
    xlabel('x')
    plt.axis([-1, 11, -3.0, 1010.0])
    show()
def help():
    """Open a window listing how to compute each function value by hand.

    NOTE: shadows the builtin help(); the menu binding relies on this name.
    """
    master = Tk()
    master.title('도움말')
    messages = [
        "함수값 구하는 방법",
        "sin값 구하기: math.sin(n)[n은 각도]",
        "cos값 구하기: math.cos(n)[n은 각도]",
        "tan값 구하기: math.tan(n)[n은 각도]",
        "cosec값 구하기: 1/math.sin(n)[n은 각도]",
        "sec값 구하기: 1/math.cos(n)[n은 각도]",
        "cot값 구하기: 1/math.tan(n)[n은 각도]",
        "e^x값 구하기: math.exp(n)[n은 지수]",
        "ln값 구하기: math.log(n)[n은 진수]",
        "a^x값 구하기: math.expa(n)[a는 밑,n은 진수]",
        "log값 구하기: math.loga(n)[a는 밑,n은 진수]",
        "라디안으로 삼각함수값 구하기:",
        "ex)sin(1/2*Ø): math.sin(math.radians(90))",
        "ex)cos(0): math.cos(math.radians(0))",
        "ex)tan(1/4*Ø): math.tan(math.radians(45))",
    ]
    # Messages sit on odd rows 1, 3, ..., 29 with a blank spacer label on
    # each even row in between, matching the original hand-written layout.
    for idx, text in enumerate(messages):
        Label(master, text=text).grid(row=2 * idx + 1)
        if idx < len(messages) - 1:
            Label(master, text="").grid(row=2 * idx + 2)
    mainloop( )
root=Tk()
root.title('계산기')
menubar = Menu(root)
# Graph menu: one entry per plottable function.
graphmenu = Menu(menubar, tearoff=0)
graphmenu.add_command(label='sin', command=sing)
graphmenu.add_separator()
graphmenu.add_command(label='cos', command=cosg)
graphmenu.add_separator()
graphmenu.add_command(label='tan', command=tang)
graphmenu.add_separator()
graphmenu.add_command(label='log', command=logg)
graphmenu.add_separator()
graphmenu.add_command(label='e', command=eg)
graphmenu.add_separator()
#graphmenu.add_command(label='ln_g',command=lng)
#graphmenu.add_separator()
graphmenu.add_command(label='a^x_g',command=ag)
# Function-value menu: opens the help window.
valuemenu = Menu(menubar, tearoff=0)
valuemenu.add_command(label='Help',command=help)
menubar.add_cascade(label='F_Grape', menu=graphmenu)
menubar.add_cascade(label='F_Value', menu=valuemenu)
# Entry widget showing the expression being built / the result.
ent=Entry(root,width = 45)
ent.insert(0, ' ')
ent.pack(pady=5)
# Button grid: each string in `buttons` is one COLUMN of keys; every
# character becomes a button wired to enter() with itself as argument.
buttons = ['1qaz(', '2wsx)', '3edc[', '4rfv]','5tgb<','6yhn>','7ujm+','8ik,-','9ol.*','0p:%/','C^"&=']
for col in buttons:
    frm=Frame(root)
    frm.configure(bg="SkyBlue1")
    frm.pack(side=LEFT)
    for row in col :
        # `char=row` binds the current character at definition time
        # (avoids the late-binding closure pitfall).
        btn=Button(frm, text=row, background="yellow2", command=(lambda char=row: enter(char)))
        btn.pack(fill=X, padx=5, pady=5)
# Start the application.
root.config(menu=menubar,background="SkyBlue1")
root.mainloop()
|
mit
|
vikashvverma/machine-learning
|
mlbasic/Supervised/Project/visuals.py
|
3
|
5396
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import f1_score, accuracy_score
def distribution(data, transformed = False):
    """
    Visualization code for displaying skewed distributions of features

    Parameters
    ----------
    data : DataFrame-like with 'capital-gain' and 'capital-loss' columns.
    transformed : bool
        Switches the figure title to indicate log-transformed input;
        the plotting itself is identical either way.
    """
    # Create figure
    fig = pl.figure(figsize = (11,5));

    # Skewed feature plotting: one histogram per feature, side by side
    for i, feature in enumerate(['capital-gain','capital-loss']):
        ax = fig.add_subplot(1, 2, i+1)
        ax.hist(data[feature], bins = 25, color = '#00A0A0')
        ax.set_title("'%s' Feature Distribution"%(feature), fontsize = 14)
        ax.set_xlabel("Value")
        ax.set_ylabel("Number of Records")
        # Clip the y-axis at 2000 and relabel the top tick as ">2000"
        ax.set_ylim((0, 2000))
        ax.set_yticks([0, 500, 1000, 1500, 2000])
        ax.set_yticklabels([0, 500, 1000, 1500, ">2000"])

    # Plot aesthetics
    if transformed:
        fig.suptitle("Log-transformed Distributions of Continuous Census Data Features", \
            fontsize = 16, y = 1.03)
    else:
        fig.suptitle("Skewed Distributions of Continuous Census Data Features", \
            fontsize = 16, y = 1.03)

    fig.tight_layout()
    fig.show()
def evaluate(results, accuracy, f1):
    """
    Visualization code to display results of various learners.

    inputs:
      - results: dict mapping learner name -> list of three stat dicts
        (one per training-set size) as produced by 'train_predict()'
      - accuracy: The score for the naive predictor
      - f1: The score for the naive predictor
    """
    # Create figure: 2 rows x 3 cols of panels
    fig, ax = pl.subplots(2, 3, figsize = (11,7))

    # Constants
    bar_width = 0.3
    colors = ['#A00000','#00A0A0','#00A000']

    # Super loop to plot four panels of data:
    # k indexes the learner (bar group offset), j the metric (panel),
    # i the training-set size (bar position within a panel).
    for k, learner in enumerate(results.keys()):
        for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
            for i in np.arange(3):

                # Creative plot code
                ax[j//3, j%3].bar(i+k*bar_width, results[learner][i][metric], width = bar_width, color = colors[k])
                ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45])
                ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
                ax[j//3, j%3].set_xlabel("Training Set Size")
                ax[j//3, j%3].set_xlim((-0.1, 3.0))

    # Add unique y-labels
    ax[0, 0].set_ylabel("Time (in seconds)")
    ax[0, 1].set_ylabel("Accuracy Score")
    ax[0, 2].set_ylabel("F-score")
    ax[1, 0].set_ylabel("Time (in seconds)")
    ax[1, 1].set_ylabel("Accuracy Score")
    ax[1, 2].set_ylabel("F-score")

    # Add titles
    ax[0, 0].set_title("Model Training")
    ax[0, 1].set_title("Accuracy Score on Training Subset")
    ax[0, 2].set_title("F-score on Training Subset")
    ax[1, 0].set_title("Model Predicting")
    ax[1, 1].set_title("Accuracy Score on Testing Set")
    ax[1, 2].set_title("F-score on Testing Set")

    # Add horizontal dashed lines marking the naive predictor's scores
    ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
    ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
    ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
    ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')

    # Set y-limits for score panels
    ax[0, 1].set_ylim((0, 1))
    ax[0, 2].set_ylim((0, 1))
    ax[1, 1].set_ylim((0, 1))
    ax[1, 2].set_ylim((0, 1))

    # Create patches for the legend (one color swatch per learner)
    patches = []
    for i, learner in enumerate(results.keys()):
        patches.append(mpatches.Patch(color = colors[i], label = learner))
    pl.legend(handles = patches, bbox_to_anchor = (-.80, 2.53), \
        loc = 'upper center', borderaxespad = 0., ncol = 3, fontsize = 'x-large')

    # Aesthetics
    pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10)
    pl.tight_layout()
    pl.show()
def feature_plot(importances, X_train, y_train):
    """Plot the five most important features and their cumulative weight.

    Parameters
    ----------
    importances : array of per-feature importance weights, aligned with
        X_train.columns.
    X_train : DataFrame whose column names label the bars.
    y_train : unused here; kept for a uniform plotting-helper signature.
    """
    # Display the five most important features (descending by weight)
    indices = np.argsort(importances)[::-1]
    columns = X_train.columns.values[indices[:5]]
    values = importances[indices][:5]

    # Create the plot: per-feature weight plus running cumulative sum
    fig = pl.figure(figsize = (9,5))
    pl.title("Normalized Weights for First Five Most Predictive Features", fontsize = 16)
    pl.bar(np.arange(5), values, width = 0.6, align="center", color = '#00A000', \
          label = "Feature Weight")
    pl.bar(np.arange(5) - 0.3, np.cumsum(values), width = 0.2, align = "center", color = '#00A0A0', \
          label = "Cumulative Feature Weight")
    pl.xticks(np.arange(5), columns)
    pl.xlim((-0.5, 4.5))
    pl.ylabel("Weight", fontsize = 12)
    pl.xlabel("Feature", fontsize = 12)

    pl.legend(loc = 'upper center')
    pl.tight_layout()
    pl.show()
|
mit
|
seckcoder/lang-learn
|
python/sklearn/examples/decomposition/plot_image_denoising.py
|
1
|
5769
|
"""
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of Lena using online :ref:`DictionaryLearning` and various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer from the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print __doc__

from time import time

import pylab as pl
import numpy as np

from scipy.misc import lena

from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

###############################################################################
# Load Lena image and extract patches
# NOTE: this example is Python 2 code (print statements, integer division).

lena = lena() / 256.0

# downsample for higher speed (2x2 block averaging)
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape

# Distort the right half of the image with additive Gaussian noise
print 'Distorting image...'
distorted = lena.copy()
distorted[:, height / 2:] += 0.075 * np.random.randn(width, height / 2)

# Extract all reference patches from the left half of the image
print 'Extracting reference patches...'
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height / 2], patch_size)
data = data.reshape(data.shape[0], -1)
# standardize patches feature-wise before dictionary learning
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print 'done in %.2fs.' % (time() - t0)

###############################################################################
# Learn the dictionary from reference patches

print 'Learning the dictionary... '
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print 'done in %.2fs.' % dt

# Show the 100 learned atoms on a 10x10 grid
pl.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    pl.subplot(10, 10, i + 1)
    pl.imshow(comp.reshape(patch_size), cmap=pl.cm.gray_r,
              interpolation='nearest')
    pl.xticks(())
    pl.yticks(())
pl.suptitle('Dictionary learned from Lena patches\n' +
            'Train time %.1fs on %d patches' % (dt, len(data)),
            fontsize=16)
pl.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
    """Show an image beside its signed difference from a reference image."""
    pl.figure(figsize=(5, 3.3))
    # Left panel: the image itself.
    pl.subplot(1, 2, 1)
    pl.title('Image')
    pl.imshow(image, vmin=0, vmax=1, cmap=pl.cm.gray, interpolation='nearest')
    pl.xticks(())
    pl.yticks(())
    # Right panel: the residual, annotated with its Frobenius norm.
    pl.subplot(1, 2, 2)
    residual = image - reference
    pl.title('Difference (norm: %.2f)' % np.sqrt(np.sum(residual ** 2)))
    pl.imshow(residual, vmin=-0.5, vmax=0.5, cmap=pl.cm.PuOr,
              interpolation='nearest')
    pl.xticks(())
    pl.yticks(())
    pl.suptitle(title, size=16)
    pl.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print 'Extracting noisy patches... '
t0 = time()
data = extract_patches_2d(distorted[:, height / 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print 'done in %.2fs.' % (time() - t0)
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print title, '... '
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height / 2:] = reconstruct_from_patches_2d(
patches, (width, height / 2))
dt = time() - t0
print 'done in %.2fs.' % dt
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
pl.show()
|
unlicense
|
precedenceguo/mxnet
|
example/kaggle-ndsb1/training_curves.py
|
52
|
1879
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val vs epoch plot
import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str, default="log_tr_va",
                    help='the path of log file')
args = parser.parse_args()

# Raw strings so the regex escapes (\s, \d, \.) reach re.compile verbatim
# instead of relying on Python leaving unknown string escapes alone.
TR_RE = re.compile(r'.*?]\sTrain-accuracy=([\d\.]+)')
VA_RE = re.compile(r'.*?]\sValidation-accuracy=([\d\.]+)')

# Read the log once; `with` closes the handle (the original used
# open(...).read(), leaking the file object).
with open(args.log_file) as f:
    log = f.read()

log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))

plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
         label="Train accuracy")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
         label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0,1])
plt.show()
|
apache-2.0
|
hrjn/scikit-learn
|
sklearn/neighbors/base.py
|
28
|
30649
|
"""Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
# Metrics each neighbor-search backend accepts.  BallTree / KDTree publish
# their own supported lists; brute force dispatches through
# sklearn.metrics.pairwise and therefore also accepts the scipy metric
# names enumerated below.
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
                     kd_tree=KDTree.valid_metrics,
                     # The following list comes from the
                     # sklearn.metrics.pairwise doc string
                     brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
                            ['braycurtis', 'canberra', 'chebyshev',
                             'correlation', 'cosine', 'dice', 'hamming',
                             'jaccard', 'kulsinski', 'mahalanobis',
                             'matching', 'minkowski', 'rogerstanimoto',
                             'russellrao', 'seuclidean', 'sokalmichener',
                             'sokalsneath', 'sqeuclidean',
                             'yule', 'wminkowski']))

# Sparse input is only supported by the brute-force backend (the tree
# structures require dense arrays), via the pairwise distance functions.
VALID_METRICS_SPARSE = dict(ball_tree=[],
                            kd_tree=[],
                            brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
    """Validate a ``weights`` argument and return it unchanged.

    Accepted values are None, 'uniform', 'distance', or any callable;
    anything else raises ValueError.
    """
    is_valid = weights in (None, 'uniform', 'distance') or callable(weights)
    if not is_valid:
        raise ValueError("weights not recognized: should be 'uniform', "
                         "'distance', or a callable function")
    return weights
def _get_weights(dist, weights):
    """Get the weights from an array of distances and a parameter ``weights``

    Parameters
    ===========
    dist : ndarray
        The input distances
    weights : {'uniform', 'distance' or a callable}
        The kind of weighting used

    Returns
    ========
    weights_arr : array of the same shape as ``dist``
        if ``weights == 'uniform'``, then returns None
    """
    if weights in (None, 'uniform'):
        # uniform weighting is signalled by None so callers can skip work
        return None
    elif weights == 'distance':
        # if user attempts to classify a point that was zero distance from one
        # or more training points, those training points are weighted as 1.0
        # and the other points as 0.0
        if dist.dtype is np.dtype(object):
            # object dtype: rows may be variable-length sequences, so they
            # are inverted one row at a time
            for point_dist_i, point_dist in enumerate(dist):
                # check if point_dist is iterable
                # (ex: RadiusNeighborClassifier.predict may set an element of
                # dist to 1e-6 to represent an 'outlier')
                if hasattr(point_dist, '__contains__') and 0. in point_dist:
                    dist[point_dist_i] = point_dist == 0.
                else:
                    dist[point_dist_i] = 1. / point_dist
        else:
            with np.errstate(divide='ignore'):
                dist = 1. / dist
            # rows with an exact (zero-distance) match: those matches get
            # weight 1 and every other neighbor in the row gets weight 0
            inf_mask = np.isinf(dist)
            inf_row = np.any(inf_mask, axis=1)
            dist[inf_row] = inf_mask[inf_row]
        return dist
    elif callable(weights):
        return weights(dist)
    else:
        raise ValueError("weights not recognized: should be 'uniform', "
                         "'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Base class for nearest neighbors estimators."""

    @abstractmethod
    def __init__(self):
        pass

    def _init_params(self, n_neighbors=None, radius=None,
                     algorithm='auto', leaf_size=30, metric='minkowski',
                     p=2, metric_params=None, n_jobs=1):
        # Shared constructor body for all neighbors estimators: store the
        # public parameters verbatim, then validate the algorithm/metric
        # combination eagerly so a misconfiguration fails at construction.
        self.n_neighbors = n_neighbors
        self.radius = radius
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.metric = metric
        self.metric_params = metric_params
        self.p = p
        self.n_jobs = n_jobs

        if algorithm not in ['auto', 'brute',
                             'kd_tree', 'ball_tree']:
            raise ValueError("unrecognized algorithm: '%s'" % algorithm)

        if algorithm == 'auto':
            if metric == 'precomputed':
                alg_check = 'brute'
            else:
                # ball_tree supports the widest set of metrics, so 'auto'
                # is validated against it.
                alg_check = 'ball_tree'
        else:
            alg_check = algorithm

        if callable(metric):
            if algorithm == 'kd_tree':
                # callable metric is only valid for brute force and ball_tree
                raise ValueError(
                    "kd_tree algorithm does not support callable metric '%s'"
                    % metric)
        elif metric not in VALID_METRICS[alg_check]:
            raise ValueError("Metric '%s' not valid for algorithm '%s'"
                             % (metric, algorithm))

        if self.metric_params is not None and 'p' in self.metric_params:
            # metric_params['p'] takes precedence over the constructor's p.
            # NOTE(review): SyntaxWarning is an odd category for a usage
            # warning -- confirm this matches the project's conventions.
            warnings.warn("Parameter p is found in metric_params. "
                          "The corresponding parameter from __init__ "
                          "is ignored.", SyntaxWarning, stacklevel=3)
            effective_p = metric_params['p']
        else:
            effective_p = self.p

        # NOTE(review): the check is `< 1`, so p == 1 is accepted even
        # though the message says "greater than one".
        if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
            raise ValueError("p must be greater than one for minkowski metric")

        # Fitted state; populated by _fit().
        self._fit_X = None
        self._tree = None
        self._fit_method = None

    def _fit(self, X):
        # Resolve the effective metric and its parameters, then index X
        # with the requested (or automatically chosen) algorithm.
        if self.metric_params is None:
            self.effective_metric_params_ = {}
        else:
            self.effective_metric_params_ = self.metric_params.copy()

        effective_p = self.effective_metric_params_.get('p', self.p)
        if self.metric in ['wminkowski', 'minkowski']:
            self.effective_metric_params_['p'] = effective_p

        self.effective_metric_ = self.metric
        # For minkowski distance, use more efficient methods where available
        if self.metric == 'minkowski':
            p = self.effective_metric_params_.pop('p', 2)
            if p < 1:
                raise ValueError("p must be greater than one "
                                 "for minkowski metric")
            elif p == 1:
                self.effective_metric_ = 'manhattan'
            elif p == 2:
                self.effective_metric_ = 'euclidean'
            elif p == np.inf:
                self.effective_metric_ = 'chebyshev'
            else:
                self.effective_metric_params_['p'] = p

        # Allow fitting directly on another fitted estimator or on a
        # pre-built tree: reuse the existing index instead of rebuilding.
        if isinstance(X, NeighborsBase):
            self._fit_X = X._fit_X
            self._tree = X._tree
            self._fit_method = X._fit_method
            return self

        elif isinstance(X, BallTree):
            self._fit_X = X.data
            self._tree = X
            self._fit_method = 'ball_tree'
            return self

        elif isinstance(X, KDTree):
            self._fit_X = X.data
            self._tree = X
            self._fit_method = 'kd_tree'
            return self

        X = check_array(X, accept_sparse='csr')

        n_samples = X.shape[0]
        if n_samples == 0:
            raise ValueError("n_samples must be greater than 0")

        if issparse(X):
            # Trees cannot index sparse input; fall back to brute force.
            if self.algorithm not in ('auto', 'brute'):
                warnings.warn("cannot use tree with sparse input: "
                              "using brute force")
            if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
                raise ValueError("metric '%s' not valid for sparse input"
                                 % self.effective_metric_)
            self._fit_X = X.copy()
            self._tree = None
            self._fit_method = 'brute'
            return self

        self._fit_method = self.algorithm
        self._fit_X = X

        if self._fit_method == 'auto':
            # A tree approach is better for small number of neighbors,
            # and KDTree is generally faster when available
            if ((self.n_neighbors is None or
                 self.n_neighbors < self._fit_X.shape[0] // 2) and
                    self.metric != 'precomputed'):
                if self.effective_metric_ in VALID_METRICS['kd_tree']:
                    self._fit_method = 'kd_tree'
                else:
                    self._fit_method = 'ball_tree'
            else:
                self._fit_method = 'brute'

        if self._fit_method == 'ball_tree':
            self._tree = BallTree(X, self.leaf_size,
                                  metric=self.effective_metric_,
                                  **self.effective_metric_params_)
        elif self._fit_method == 'kd_tree':
            self._tree = KDTree(X, self.leaf_size,
                                metric=self.effective_metric_,
                                **self.effective_metric_params_)
        elif self._fit_method == 'brute':
            self._tree = None
        else:
            raise ValueError("algorithm = '%s' not recognized"
                             % self.algorithm)

        # Validated here (after indexing) rather than in _init_params so
        # that set_params-style changes are also caught.
        if self.n_neighbors is not None:
            if self.n_neighbors <= 0:
                raise ValueError(
                    "Expected n_neighbors > 0. Got %d" %
                    self.n_neighbors
                )

        return self

    @property
    def _pairwise(self):
        # For cross-validation routines to split data correctly
        return self.metric == 'precomputed'
class KNeighborsMixin(object):
    """Mixin for k-neighbors searches"""

    def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
        """Finds the K-neighbors of a point.

        Returns indices of and distances to the neighbors of each point.

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.

        n_neighbors : int
            Number of neighbors to get (default is the value
            passed to the constructor).

        return_distance : boolean, optional. Defaults to True.
            If False, distances will not be returned

        Returns
        -------
        dist : array
            Array representing the lengths to points, only present if
            return_distance=True

        ind : array
            Indices of the nearest points in the population matrix.

        Examples
        --------
        In the following example, we construct a NeighborsClassifier
        class from an array representing our data set and ask who's
        the closest point to [1,1,1]

        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(n_neighbors=1)
        >>> neigh.fit(samples) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
        (array([[ 0.5]]), array([[2]]...))

        As you can see, it returns [[0.5]], and [[2]], which means that the
        element is at distance 0.5 and is the third element of samples
        (indexes start at 0). You can also query for multiple points:

        >>> X = [[0., 1., 0.], [1., 0., 1.]]
        >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
        array([[1],
               [2]]...)
        """
        if self._fit_method is None:
            raise NotFittedError("Must fit neighbors before querying.")

        if n_neighbors is None:
            n_neighbors = self.n_neighbors

        if X is not None:
            query_is_train = False
            X = check_array(X, accept_sparse='csr')
        else:
            # Querying the training data itself: each sample's first
            # neighbor would be the sample, which is stripped below.
            query_is_train = True
            X = self._fit_X
            # Include an extra neighbor to account for the sample itself being
            # returned, which is removed later
            n_neighbors += 1

        train_size = self._fit_X.shape[0]
        if n_neighbors > train_size:
            raise ValueError(
                "Expected n_neighbors <= n_samples, "
                " but n_samples = %d, n_neighbors = %d" %
                (train_size, n_neighbors)
            )
        n_samples, _ = X.shape
        sample_range = np.arange(n_samples)[:, None]

        n_jobs = _get_n_jobs(self.n_jobs)
        if self._fit_method == 'brute':
            # for efficiency, use squared euclidean distances
            if self.effective_metric_ == 'euclidean':
                dist = pairwise_distances(X, self._fit_X, 'euclidean',
                                          n_jobs=n_jobs, squared=True)
            else:
                dist = pairwise_distances(
                    X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
                    **self.effective_metric_params_)

            # Partial sort: only the n_neighbors smallest entries matter.
            neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
            neigh_ind = neigh_ind[:, :n_neighbors]
            # argpartition doesn't guarantee sorted order, so we sort again
            neigh_ind = neigh_ind[
                sample_range, np.argsort(dist[sample_range, neigh_ind])]

            if return_distance:
                if self.effective_metric_ == 'euclidean':
                    # Undo the squared-distance optimization above.
                    result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
                else:
                    result = dist[sample_range, neigh_ind], neigh_ind
            else:
                result = neigh_ind

        elif self._fit_method in ['ball_tree', 'kd_tree']:
            if issparse(X):
                raise ValueError(
                    "%s does not work with sparse matrices. Densify the data, "
                    "or set algorithm='brute'" % self._fit_method)
            # Split the query rows into even slices and run the tree query
            # in parallel threads; the tree is shared (read-only).
            result = Parallel(n_jobs, backend='threading')(
                delayed(self._tree.query, check_pickle=False)(
                    X[s], n_neighbors, return_distance)
                for s in gen_even_slices(X.shape[0], n_jobs)
            )
            if return_distance:
                dist, neigh_ind = tuple(zip(*result))
                result = np.vstack(dist), np.vstack(neigh_ind)
            else:
                result = np.vstack(result)
        else:
            raise ValueError("internal: _fit_method not recognized")

        if not query_is_train:
            return result
        else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e
            # the sample itself.
            if return_distance:
                dist, neigh_ind = result
            else:
                neigh_ind = result

            sample_mask = neigh_ind != sample_range

            # Corner case: When the number of duplicates are more
            # than the number of neighbors, the first NN will not
            # be the sample, but a duplicate.
            # In that case mask the first duplicate.
            dup_gr_nbrs = np.all(sample_mask, axis=1)
            sample_mask[:, 0][dup_gr_nbrs] = False

            neigh_ind = np.reshape(
                neigh_ind[sample_mask], (n_samples, n_neighbors - 1))

            if return_distance:
                dist = np.reshape(
                    dist[sample_mask], (n_samples, n_neighbors - 1))
                return dist, neigh_ind
            return neigh_ind

    def kneighbors_graph(self, X=None, n_neighbors=None,
                         mode='connectivity'):
        """Computes the (weighted) graph of k-Neighbors for points in X

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.

        n_neighbors : int
            Number of neighbors for each sample.
            (default is value passed to the constructor).

        mode : {'connectivity', 'distance'}, optional
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are Euclidean distance between points.

        Returns
        -------
        A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
            n_samples_fit is the number of samples in the fitted data
            A[i, j] is assigned the weight of edge that connects i to j.

        Examples
        --------
        >>> X = [[0], [3], [1]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(n_neighbors=2)
        >>> neigh.fit(X) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> A = neigh.kneighbors_graph(X)
        >>> A.toarray()
        array([[ 1.,  0.,  1.],
               [ 0.,  1.,  1.],
               [ 1.,  0.,  1.]])

        See also
        --------
        NearestNeighbors.radius_neighbors_graph
        """
        if n_neighbors is None:
            n_neighbors = self.n_neighbors

        # kneighbors does the None handling.
        if X is not None:
            X = check_array(X, accept_sparse='csr')
            n_samples1 = X.shape[0]
        else:
            n_samples1 = self._fit_X.shape[0]

        n_samples2 = self._fit_X.shape[0]
        # Every row has exactly n_neighbors nonzeros, so indptr is regular.
        n_nonzero = n_samples1 * n_neighbors
        A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)

        # construct CSR matrix representation of the k-NN graph
        if mode == 'connectivity':
            A_data = np.ones(n_samples1 * n_neighbors)
            A_ind = self.kneighbors(X, n_neighbors, return_distance=False)

        elif mode == 'distance':
            A_data, A_ind = self.kneighbors(
                X, n_neighbors, return_distance=True)
            A_data = np.ravel(A_data)

        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity" '
                'or "distance" but got "%s" instead' % mode)

        kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
                                      shape=(n_samples1, n_samples2))

        return kneighbors_graph
class RadiusNeighborsMixin(object):
    """Mixin for radius-based neighbors searches"""

    def radius_neighbors(self, X=None, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.

        Return the indices and distances of each point from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.

        The result points are *not* necessarily sorted by distance to their
        query point.

        Parameters
        ----------
        X : array-like, (n_samples, n_features), optional
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.

        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).

        return_distance : boolean, optional. Defaults to True.
            If False, distances will not be returned

        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Array representing the distances to each point, only present if
            return_distance=True. The distance values are computed according
            to the ``metric`` constructor parameter.

        ind : array, shape (n_samples,) of arrays
            An array of arrays of indices of the approximate nearest points
            from the population matrix that lie within a ball of size
            ``radius`` around the query points.

        Examples
        --------
        In the following example, we construct a NeighborsClassifier
        class from an array representing our data set and ask who's
        the closest point to [1, 1, 1]:

        >>> import numpy as np
        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(radius=1.6)
        >>> neigh.fit(samples) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> rng = neigh.radius_neighbors([[1., 1., 1.]])
        >>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
        [ 1.5  0.5]
        >>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
        [1 2]

        The first array returned contains the distances to all points which
        are closer than 1.6, while the second array returned contains their
        indices.  In general, multiple points can be queried at the same time.

        Notes
        -----
        Because the number of neighbors of each point is not necessarily
        equal, the results for multiple query points cannot be fit in a
        standard data array.
        For efficiency, `radius_neighbors` returns arrays of objects, where
        each object is a 1D array of indices or distances.
        """
        if self._fit_method is None:
            raise NotFittedError("Must fit neighbors before querying.")

        if X is not None:
            query_is_train = False
            X = check_array(X, accept_sparse='csr')
        else:
            # Querying the training data itself; the self-neighbor of each
            # sample is stripped out at the end.
            query_is_train = True
            X = self._fit_X

        if radius is None:
            radius = self.radius

        n_samples = X.shape[0]
        if self._fit_method == 'brute':
            # for efficiency, use squared euclidean distances
            if self.effective_metric_ == 'euclidean':
                dist = pairwise_distances(X, self._fit_X, 'euclidean',
                                          n_jobs=self.n_jobs, squared=True)
                # Compare squared distances against the squared radius.
                radius *= radius
            else:
                dist = pairwise_distances(X, self._fit_X,
                                          self.effective_metric_,
                                          n_jobs=self.n_jobs,
                                          **self.effective_metric_params_)

            neigh_ind_list = [np.where(d <= radius)[0] for d in dist]

            # See https://github.com/numpy/numpy/issues/5456
            # if you want to understand why this is initialized this way.
            neigh_ind = np.empty(n_samples, dtype='object')
            neigh_ind[:] = neigh_ind_list

            if return_distance:
                dist_array = np.empty(n_samples, dtype='object')
                if self.effective_metric_ == 'euclidean':
                    # Undo the squared-distance optimization above.
                    dist_list = [np.sqrt(d[neigh_ind[i]])
                                 for i, d in enumerate(dist)]
                else:
                    dist_list = [d[neigh_ind[i]]
                                 for i, d in enumerate(dist)]
                dist_array[:] = dist_list

                results = dist_array, neigh_ind
            else:
                results = neigh_ind

        elif self._fit_method in ['ball_tree', 'kd_tree']:
            if issparse(X):
                raise ValueError(
                    "%s does not work with sparse matrices. Densify the data, "
                    "or set algorithm='brute'" % self._fit_method)

            results = self._tree.query_radius(X, radius,
                                              return_distance=return_distance)
            if return_distance:
                # query_radius returns (ind, dist); the public contract here
                # is (dist, ind), so swap.
                results = results[::-1]
        else:
            raise ValueError("internal: _fit_method not recognized")

        if not query_is_train:
            return results
        else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e
            # the sample itself.
            if return_distance:
                dist, neigh_ind = results
            else:
                neigh_ind = results

            for ind, ind_neighbor in enumerate(neigh_ind):
                mask = ind_neighbor != ind

                neigh_ind[ind] = ind_neighbor[mask]
                if return_distance:
                    dist[ind] = dist[ind][mask]

        if return_distance:
            return dist, neigh_ind
        return neigh_ind

    def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
        """Computes the (weighted) graph of Neighbors for points in X

        Neighborhoods are restricted the points at a distance lower than
        radius.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features], optional
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.

        radius : float
            Radius of neighborhoods.
            (default is the value passed to the constructor).

        mode : {'connectivity', 'distance'}, optional
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are Euclidean distance between points.

        Returns
        -------
        A : sparse matrix in CSR format, shape = [n_samples, n_samples]
            A[i, j] is assigned the weight of edge that connects i to j.

        Examples
        --------
        >>> X = [[0], [3], [1]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(radius=1.5)
        >>> neigh.fit(X) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> A = neigh.radius_neighbors_graph(X)
        >>> A.toarray()
        array([[ 1.,  0.,  1.],
               [ 0.,  1.,  0.],
               [ 1.,  0.,  1.]])

        See also
        --------
        kneighbors_graph
        """
        if X is not None:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])

        n_samples2 = self._fit_X.shape[0]
        if radius is None:
            radius = self.radius

        # construct CSR matrix representation of the NN graph
        if mode == 'connectivity':
            A_ind = self.radius_neighbors(X, radius,
                                          return_distance=False)
            A_data = None
        elif mode == 'distance':
            dist, A_ind = self.radius_neighbors(X, radius,
                                                return_distance=True)
            A_data = np.concatenate(list(dist))
        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity", '
                'or "distance" but got %s instead' % mode)

        # Rows are ragged, so the CSR indptr is built from the per-row
        # neighbor counts.
        n_samples1 = A_ind.shape[0]
        n_neighbors = np.array([len(a) for a in A_ind])
        A_ind = np.concatenate(list(A_ind))
        if A_data is None:
            A_data = np.ones(len(A_ind))
        A_indptr = np.concatenate((np.zeros(1, dtype=int),
                                   np.cumsum(n_neighbors)))

        return csr_matrix((A_data, A_ind, A_indptr),
                          shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
    def fit(self, X, y):
        """Fit the model using X as training data and y as target values

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training data. If array or matrix, shape [n_samples, n_features],
            or [n_samples, n_samples] if metric='precomputed'.

        y : {array-like, sparse matrix}
            Target values, array of float values, shape = [n_samples]
            or [n_samples, n_outputs]
        """
        # A pre-built tree is taken as-is; anything else is validated first.
        is_prebuilt_tree = isinstance(X, (KDTree, BallTree))
        if not is_prebuilt_tree:
            X, y = check_X_y(X, y, "csr", multi_output=True)
        self._y = y
        return self._fit(X)
class SupervisedIntegerMixin(object):
    def fit(self, X, y):
        """Fit the model using X as training data and y as target values

        Encodes class labels into integer indices (stored in ``self._y``)
        and records the distinct classes in ``self.classes_`` before
        delegating the actual indexing to ``self._fit``.

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training data. If array or matrix, shape [n_samples, n_features],
            or [n_samples, n_samples] if metric='precomputed'.

        y : {array-like, sparse matrix}
            Target values of shape = [n_samples] or [n_samples, n_outputs]
        """
        if not isinstance(X, (KDTree, BallTree)):
            X, y = check_X_y(X, y, "csr", multi_output=True)

        if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
            if y.ndim != 1:
                warnings.warn("A column-vector y was passed when a 1d array "
                              "was expected. Please change the shape of y to "
                              "(n_samples, ), for example using ravel().",
                              DataConversionWarning, stacklevel=2)

            self.outputs_2d_ = False
            # Work on a 2-D view so single- and multi-output share one path.
            y = y.reshape((-1, 1))
        else:
            self.outputs_2d_ = True

        check_classification_targets(y)
        self.classes_ = []
        # BUGFIX: np.int was a deprecated alias for the builtin int
        # (removed in NumPy 1.24); the builtin is semantically identical.
        self._y = np.empty(y.shape, dtype=int)
        for k in range(self._y.shape[1]):
            # Encode each output column as indices into its sorted classes.
            classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes)

        if not self.outputs_2d_:
            # Flatten back for the common single-output case.
            self.classes_ = self.classes_[0]
            self._y = self._y.ravel()

        return self._fit(X)
class UnsupervisedMixin(object):
    def fit(self, X, y=None):
        """Fit the model using X as training data

        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training data. If array or matrix, shape [n_samples, n_features],
            or [n_samples, n_samples] if metric='precomputed'.
        """
        # ``y`` exists only for scikit-learn pipeline API compatibility and
        # is deliberately ignored.
        return self._fit(X)
|
bsd-3-clause
|
kathleenleeper/bibmetrics
|
genderDistribution.py
|
1
|
5038
|
# Script calculates the percent of authors in a database with male, female, unisex, or unassigned names. Will count multiple authors once; accuracy of gender assignment has been validated by a (not-particuarly random) set of 100 names. cu
#system functions
from __future__ import division
import os
import sys
from datetime import datetime #system date
import csv
import argparse
from collections import Counter
#gendering
from genderComputer.genderComputer import GenderComputer
#parsing
import bibtexparser as b #module for bibtex parsing, obviously
from bibtexparser.bparser import BibTexParser #add customization
from bibtexparser.customization import *
#plot stuff; transitioning to seaborn asap
import numpy as np
import seaborn
def startUp(bib):
    """Parse a bibtex file and return author/gender tallies.

    :param bib: path to the bibtex file to analyse
    :returns: Counter of tallies as produced by getDBCounts
    """
    # Dead code removed: an unused `today = datetime.today()` and a
    # no-op `bib = bib` self-assignment.
    records = parseFile(bib)
    data = getDBCounts(records)
    return data
def customizations(record):
    """Use some functions delivered by the library

    :param record: a record
    :returns: -- customized record
    """
    # NOTE: ``type`` here is *not* the builtin -- the star import from
    # bibtexparser.customization shadows it with a record-customization
    # function (it normalizes the record's 'type' field).  An explicit
    # import would make this much clearer; TODO confirm intent.
    record = type(record)
    record = doi(record)
    record = convert_to_unicode(record)
    record = author(record)
    return record
def parseFile(bib_file):
    """parse the bib file

    :param bib_file: bibtex file to be parsed
    :returns: -- a bibtex file object
    """
    # Configure the parser up front, then stream the file through it.
    parser = BibTexParser()
    parser.homogenize = True
    parser.customization = customizations  # defined above
    with open(bib_file) as handle:
        return b.load(handle, parser=parser)
def clean_tex(s):
    """Strip TeX grouping braces from *s*.

    Slightly lossy on purpose: every '{' and '}' is removed regardless
    of context.
    """
    for brace in ("{", "}"):
        s = s.replace(brace, "")
    return s
### the workhorse ###
def getDBCounts(data):
    """Tally author genders for every entry in a parsed bibtex database.

    :param data: bibtexparser database object with an ``entries`` list
    :returns: Counter holding integer tallies ('authorCount', 'men',
        'women', 'unisex', 'unavailable') and list-valued fields for
        problem entries ('no_author', 'no_title', 'no_gender')
    """
    # The Counter doubles as a plain record: the list-valued fields are
    # never counted, only appended to.
    c = Counter({"authorCount": 0,
                 "unisex": 0,
                 "women": 0,
                 "men": 0,
                 "unavailable": 0,
                 "no_author": [],
                 "no_title": [],
                 "no_gender": []
                 })
    # name -> gender resolver, built once per call
    gc = GenderComputer(os.path.abspath('genderComputer/nameLists'))

    def _countGenders(authors, c, gc=gc):  # inner fn: faster local lookup
        for author in authors:
            # TODO: feed affiliation in as a starting place to look for
            # name assignment
            gender = gc.resolveGender(clean_tex(author), None)
            if gender == 'male':
                c["men"] += 1
            elif gender == 'female':
                c["women"] += 1
            elif gender == 'unisex':
                c["unisex"] += 1
            else:
                c["unavailable"] += 1
                c["no_gender"].append(author)

    def _countJournals(entry=""):
        # placeholder: journal counting (regex-based) not implemented yet
        return

    ##############################
    ### the actual processing  ###
    for entry in data.entries:  # for each paper processed
        # ``title`` is None when the entry lacks a title (the append
        # expression evaluates to None); the entry is logged either way.
        title = clean_tex(entry["title"]) if "title" in entry else c["no_title"].append(entry)
        if "author" in entry:
            authors = entry["author"]
            c["authorCount"] = c["authorCount"] + len(authors)
            # BUGFIX: _countGenders previously ran unconditionally, raising
            # a NameError when the very first entry had no authors and
            # silently re-counting the previous entry's authors otherwise.
            _countGenders(authors, c)
        else:
            c["no_author"].append(title)
        _countJournals(c)
    return c
if __name__ == '__main__':
    # CLI entry point (Python 2 print syntax): the first command-line
    # argument is the path to the bibtex file to analyse.
    d = startUp(sys.argv[1])
    print "\ntotal authors found: {}".format(d['authorCount'])
    print "assigned men: {}".format(d['men'])
    print "assigned women: {}".format(d['women'])
    print "assigned unisex: {}".format(d['unisex'])
    print "unassigned: {}".format(d['unavailable'])
# """
# TODO: more maintained gender calculator?
# TODO: turn hacky stats into a function; decide if I want this to be a script or more interactive
#
# ##########################
# ###Old Stuff###
#
# for key in stats:
# value = stats[key]
# percent = value/auCount*100 #probably should fix so it can't break if dividing by zero
# percents[key] = percent
#
#
# print stats
# print percents
# print auCount
#
# plt.bar(range(len(stats)), percents.values(), align='center', color="#2aa198")
# plt.xticks(range(len(percents)), percents.keys(), color="#657b83")
# plt.xlabel('Genders' + '\n' + '(plot generated ' + 'May 14 2015' +')', color="#073642")
# plt.ylabel('"""', color="#073642")
#
#
# #plt.show()
# """"
|
gpl-3.0
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/manifold/tests/test_t_sne.py
|
3
|
31389
|
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import cosine_distances
# Shared fixture: a 10x10 grid of points covering the unit square,
# flattened to shape (100, 2).
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.hstack([
    xx.ravel().reshape(-1, 1),
    yy.ravel().reshape(-1, 1),
])
def test_gradient_descent_stops():
    # Test stopping conditions of gradient descent.
    class ObjectiveSmallGradient:
        # Fake objective: decreasing error, constant tiny gradient.
        def __init__(self):
            self.it = -1

        def __call__(self, _):
            self.it += 1
            return (10 - self.it) / 10.0, np.array([1e-5])

    def flat_function(_):
        # Constant error with a non-negligible gradient: never "improves".
        return 0.0, np.ones(1)

    # Gradient norm: stop as soon as the gradient drops below min_grad_norm.
    # stdout is captured because verbose=2 prints the stopping reason.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
            n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=1e-5, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 1.0)
    assert_equal(it, 0)
    assert("gradient norm" in out)

    # Maximum number of iterations without improvement
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            flat_function, np.zeros(1), 0, n_iter=100,
            n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=0.0, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 0.0)
    assert_equal(it, 11)
    assert("did not make any progress" in out)

    # Maximum number of iterations
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
            n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=0.0, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 0.0)
    assert_equal(it, 10)
    assert("Iteration 10" in out)
def test_binary_search():
    # Test if the binary search finds Gaussians with desired perplexity.
    random_state = check_random_state(0)
    distances = random_state.randn(50, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    desired_perplexity = 25.0
    P = _binary_search_perplexity(distances, None, desired_perplexity,
                                  verbose=0)
    # Avoid log(0) when computing the entropy below.
    P = np.maximum(P, np.finfo(np.double).eps)
    # Perplexity of row i is exp(entropy of P[i]); its mean should match
    # the requested value.
    mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
                               for i in range(P.shape[0])])
    assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
    # Binary perplexity search approximation.
    # Should be approximately equal to the slow method when we use
    # all points as neighbors.
    n_samples = 500
    desired_perplexity = 25.0
    random_state = check_random_state(0)
    distances = random_state.randn(n_samples, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    P1 = _binary_search_perplexity(distances, None, desired_perplexity,
                                   verbose=0)

    # Test that when we use all the neighbors the results are identical
    k = n_samples
    # Drop column 0 (self, distance 0) by slicing 1:k.
    neighbors_nn = np.argsort(distances, axis=1)[:, 1:k].astype(np.int64)
    # NOTE(review): the comprehension variable ``k`` shadows the outer
    # ``k`` here (and below); harmless in Python 3 but confusing.
    distances_nn = np.array([distances[k, neighbors_nn[k]]
                             for k in range(n_samples)])
    P2 = _binary_search_perplexity(distances_nn, neighbors_nn,
                                   desired_perplexity, verbose=0)
    P_nn = np.array([P1[k, neighbors_nn[k]] for k in range(n_samples)])
    assert_array_almost_equal(P_nn, P2, decimal=4)

    # Test that the highest P_ij are the same when few neighbors are used
    for k in np.linspace(80, n_samples, 5):
        k = int(k)
        topn = k * 10  # check the top 10 * k entries out of k * k entries
        neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
        distances_nn = np.array([distances[k, neighbors_nn[k]]
                                 for k in range(n_samples)])
        P2k = _binary_search_perplexity(distances_nn, neighbors_nn,
                                        desired_perplexity, verbose=0)
        idx = np.argsort(P1.ravel())[::-1]
        P1top = P1.ravel()[idx][:topn]
        idx = np.argsort(P2k.ravel())[::-1]
        P2top = P2k.ravel()[idx][:topn]
        assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
    # Binary perplexity search should be stable.
    # The binary_search_perplexity had a bug wherein the P array
    # was uninitialized, leading to sporadically failing tests.
    k = 10
    n_samples = 100
    random_state = check_random_state(0)
    distances = random_state.randn(n_samples, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    last_P = None
    neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
    # Run 100 times with identical inputs; every run must agree with the
    # first (copies are passed so the inputs are never mutated in place).
    for _ in range(100):
        P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
                                      3, verbose=0)
        P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
        # Convert the sparse matrix to a dense one for testing
        P1 = P1.toarray()
        if last_P is None:
            last_P = P
            last_P1 = P1
        else:
            assert_array_almost_equal(P, last_P, decimal=4)
            assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    # Analytic gradient vs numeric gradient: check_grad returns the norm
    # of their difference, which should vanish.
    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5)
def test_trustworthiness():
    # Test trustworthiness score.
    random_state = check_random_state(0)

    # Affine transformation: neighborhoods are perfectly preserved.
    X = random_state.randn(100, 2)
    assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)

    # Randomly shuffled: most neighborhoods are destroyed.
    X = np.arange(100).reshape(-1, 1)
    X_embedded = X.copy()
    random_state.shuffle(X_embedded)
    assert_less(trustworthiness(X, X_embedded), 0.6)

    # Completely different: exact score pinned for a tiny known case.
    X = np.arange(5).reshape(-1, 1)
    X_embedded = np.array([[0], [2], [4], [1], [3]])
    assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
    # Embeddings from both solvers and both init schemes should roughly
    # preserve nearest neighbors.
    rng = check_random_state(0)
    n_components = 2
    X = rng.randn(50, n_components).astype(np.float32)
    for init in ('random', 'pca'):
        for method in ('exact', 'barnes_hut'):
            embedding = TSNE(n_components=n_components, init=init,
                             random_state=0, method=method).fit_transform(X)
            assert_greater(trustworthiness(X, embedding, n_neighbors=1), 0.9)
def test_optimization_minimizes_kl_divergence():
    """More gradient-descent iterations must not increase KL divergence."""
    random_state = check_random_state(0)
    X, _ = make_blobs(n_features=3, random_state=random_state)
    kl_divergences = []
    for n_iter in (250, 300, 350):
        model = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                     n_iter=n_iter, random_state=0)
        model.fit_transform(X)
        kl_divergences.append(model.kl_divergence_)
    # Each longer run must do at least as well as the previous one.
    for previous, current in zip(kl_divergences, kl_divergences[1:]):
        assert_less_equal(current, previous)
def test_fit_csr_matrix():
    # X can be a sparse matrix.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    # Use the seeded RNG (not the global np.random) so the sparsity
    # pattern — and hence the test — is deterministic.  The original
    # called np.random.randint here, defeating the seeded random_state.
    X[(random_state.randint(0, 100, 50),
       random_state.randint(0, 2, 50))] = 0.0
    X_csr = sp.csr_matrix(X)
    tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                random_state=0, method='exact')
    X_embedded = tsne.fit_transform(X_csr)
    # Sparse input should still yield a high-trustworthiness embedding.
    assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
                        decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
    # Nearest neighbors should be preserved approximately when the input
    # is a precomputed squared-euclidean distance matrix.
    rng = check_random_state(0)
    for seed in range(3):
        X = rng.randn(100, 2)
        D = squareform(pdist(X), "sqeuclidean")
        embedding = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                         early_exaggeration=2.0, metric="precomputed",
                         random_state=seed, verbose=0).fit_transform(D)
        t = trustworthiness(D, embedding, n_neighbors=1,
                            precomputed=True)
        assert t > .95
def test_early_exaggeration_too_small():
    # An early_exaggeration factor below 1 is invalid and must raise.
    model = TSNE(early_exaggeration=0.99)
    assert_raises_regexp(ValueError, "early_exaggeration .*",
                         model.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
    # Fewer than 200 gradient-descent iterations must be rejected.
    model = TSNE(n_iter=199)
    assert_raises_regexp(ValueError, "n_iter .*", model.fit_transform,
                         np.array([[0.0]]))
def test_non_square_precomputed_distances():
    # metric='precomputed' requires a square distance matrix.
    model = TSNE(metric="precomputed")
    assert_raises_regexp(ValueError, ".* square distance matrix",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_non_positive_precomputed_distances():
    # Precomputed distances must be non-negative for both solvers.
    bad_dist = np.array([[0., -1.], [1., 0.]])
    for method in ('barnes_hut', 'exact'):
        model = TSNE(metric="precomputed", method=method)
        assert_raises_regexp(ValueError, "All distances .*precomputed.*",
                             model.fit_transform, bad_dist)
def test_non_positive_computed_distances():
    # A callable metric that returns negative distances must be rejected.
    model = TSNE(metric=lambda x, y: -1, method='exact')
    X = np.array([[0.0, 0.0], [1.0, 1.0]])
    assert_raises_regexp(ValueError, "All distances .*metric given.*",
                         model.fit_transform, X)
def test_init_not_available():
    # 'init' accepts only 'pca', 'random', or a numpy array.
    model = TSNE(init="not available")
    assert_raises_regexp(ValueError,
                         "'init' must be 'pca', 'random', or a numpy array",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_init_ndarray():
    # An ndarray passed as 'init' is used as the starting embedding.
    embedding = TSNE(init=np.zeros((100, 2))).fit_transform(np.ones((100, 5)))
    assert_array_equal(np.zeros((100, 2)), embedding)
def test_init_ndarray_precomputed():
    # ndarray init combined with metric='precomputed' must fit cleanly,
    # in particular without a FutureWarning from _fit.
    model = TSNE(init=np.zeros((100, 2)), metric="precomputed")
    model.fit(np.zeros((100, 100)))
def test_distance_not_available():
    # An unknown metric name must be rejected by both solvers, each with
    # its own error message.
    X = np.array([[0.0], [1.0]])
    model = TSNE(metric="not available", method='exact')
    assert_raises_regexp(ValueError, "Unknown metric not available.*",
                         model.fit_transform, X)
    model = TSNE(metric="not available", method='barnes_hut')
    assert_raises_regexp(ValueError, "Metric 'not available' not valid.*",
                         model.fit_transform, X)
def test_method_not_available():
    # 'method' must be either 'barnes_hut' or 'exact'.
    model = TSNE(method='not available')
    assert_raises_regexp(ValueError, "'method' must be 'barnes_hut' or ",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_angle_out_of_range_checks():
    # 'angle' must lie within [0, 1]; values outside raise ValueError.
    X = np.array([[0.0], [1.0]])
    for bad_angle in (-1, -1e-6, 1 + 1e-6, 2):
        model = TSNE(angle=bad_angle)
        assert_raises_regexp(ValueError, "'angle' must be between 0.0 - 1.0",
                             model.fit_transform, X)
def test_pca_initialization_not_compatible_with_precomputed_kernel():
    # init='pca' cannot be combined with metric='precomputed'.
    model = TSNE(metric="precomputed", init="pca")
    assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
                                     "used with metric=\"precomputed\".",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_n_components_range():
    # The barnes_hut solver supports at most three output dimensions.
    model = TSNE(n_components=4, method="barnes_hut")
    assert_raises_regexp(ValueError, "'n_components' should be .*",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
    # The ``early_exaggeration`` parameter must change the embedding.
    rng = check_random_state(0)
    n_components = 2
    X = rng.randn(25, n_components).astype(np.float32)
    for method in ('exact', 'barnes_hut'):
        def embed(exaggeration):
            # Everything but early_exaggeration is held fixed.
            model = TSNE(n_components=n_components, perplexity=1,
                         learning_rate=100.0, init="pca", random_state=0,
                         method=method, early_exaggeration=exaggeration)
            return model.fit_transform(X)
        assert not np.allclose(embed(1.0), embed(10.0))
def test_n_iter_used():
    # The ``n_iter`` parameter must control how many iterations run.
    rng = check_random_state(0)
    n_components = 2
    X = rng.randn(25, n_components).astype(np.float32)
    for method in ('exact', 'barnes_hut'):
        for n_iter in (251, 500):
            model = TSNE(n_components=n_components, perplexity=1,
                         learning_rate=0.5, init="random", random_state=0,
                         method=method, early_exaggeration=1.0,
                         n_iter=n_iter)
            model.fit_transform(X)
            # n_iter_ is zero-based, hence the off-by-one.
            assert model.n_iter_ == n_iter - 1
def test_answer_gradient_two_points():
    # Test the tree with only a single set of children.
    #
    # These tests & answers have been checked against the reference
    # implementation by LvdM.
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
    pos_output = np.array([[-4.961291e-05, -1.072243e-04],
                           [9.259460e-05, 2.702024e-04]])
    neighbors = np.array([[1],
                          [0]])
    # Expected Barnes-Hut gradient values from the reference (LvdM)
    # implementation; compared to 4 decimal places by _run_answer_test.
    grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
                            [2.37012478e-05, 6.29044398e-05]])
    _run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
    # Four points tests the tree with multiple levels of children.
    #
    # These tests & answers have been checked against the reference
    # implementation by LvdM.
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
                          [5.0, 2.0], [7.3, 2.2]])
    pos_output = np.array([[6.080564e-05, -7.120823e-05],
                           [-1.718945e-04, -4.000536e-05],
                           [-2.271720e-04, 8.663310e-05],
                           [-1.032577e-04, -3.582033e-05]])
    # Each row lists the 3 neighbors of the corresponding point.
    neighbors = np.array([[1, 2, 3],
                          [0, 2, 3],
                          [1, 0, 3],
                          [1, 2, 0]])
    # Expected Barnes-Hut gradient from the reference implementation.
    grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
                            [-5.81526851e-05, 7.80976444e-06],
                            [4.24275173e-08, -3.69569698e-08],
                            [-2.58720939e-09, 7.52706374e-09]])
    _run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
    # Test the kwargs option skip_num_points.
    #
    # Skip num points should make it such that the Barnes_hut gradient
    # is not calculated for indices below skip_num_point.
    # Aside from skip_num_points=2 and the first two gradient rows
    # being set to zero, these data points are the same as in
    # test_answer_gradient_four_points()
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
                          [5.0, 2.0], [7.3, 2.2]])
    pos_output = np.array([[6.080564e-05, -7.120823e-05],
                           [-1.718945e-04, -4.000536e-05],
                           [-2.271720e-04, 8.663310e-05],
                           [-1.032577e-04, -3.582033e-05]])
    neighbors = np.array([[1, 2, 3],
                          [0, 2, 3],
                          [1, 0, 3],
                          [1, 2, 0]])
    # First two rows are zero: those points are skipped.
    grad_output = np.array([[0.0, 0.0],
                            [0.0, 0.0],
                            [4.24275173e-08, -3.69569698e-08],
                            [-2.58720939e-09, 7.52706374e-09]])
    # Positional args: verbose=False, perplexity=0.1, skip_num_points=2.
    _run_answer_test(pos_input, pos_output, neighbors, grad_output,
                     False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
                     verbose=False, perplexity=0.1, skip_num_points=0):
    """Compare the Barnes-Hut t-SNE gradient against a precomputed answer.

    Parameters
    ----------
    pos_input : array, shape (n_samples, 2)
        Original positions from which joint probabilities are computed.
    pos_output : array, shape (n_samples, 2)
        Embedded positions at which the gradient is evaluated.
    neighbors : array
        Unused: the neighbor indices actually fed to the gradient come
        from the CSR representation of the joint probabilities below.
    grad_output : array, shape (n_samples, 2)
        Expected gradient, compared to 4 decimal places.
    verbose : bool
        Verbosity forwarded to _joint_probabilities.
    perplexity : float
        Perplexity forwarded to _joint_probabilities.
    skip_num_points : int
        The gradient is not computed for points with index below this.
    """
    distances = pairwise_distances(pos_input).astype(np.float32)
    args = distances, perplexity, verbose
    pos_output = pos_output.astype(np.float32)
    pij_input = _joint_probabilities(*args)
    pij_input = squareform(pij_input).astype(np.float32)
    grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
    from scipy.sparse import csr_matrix
    P = csr_matrix(pij_input)
    neighbors = P.indices.astype(np.int64)
    indptr = P.indptr.astype(np.int64)
    # BUG FIX: forward the caller's ``skip_num_points`` instead of the
    # previous hard-coded 0, which made test_skip_num_points_gradient
    # exercise nothing.
    _barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
    assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
    # With verbose=2 the optimizer must log its progress to stdout.
    rng = check_random_state(0)
    model = TSNE(verbose=2)
    X = rng.randn(5, 2)
    captured = StringIO()
    original_stdout = sys.stdout
    sys.stdout = captured
    try:
        model.fit_transform(X)
    finally:
        # Restore stdout even if fitting raises.
        sys.stdout = original_stdout
        out = captured.getvalue()
        captured.close()
    for fragment in ("[t-SNE]",
                     "nearest neighbors...",
                     "Computed conditional probabilities",
                     "Mean sigma",
                     "early exaggeration"):
        assert fragment in out
def test_chebyshev_metric():
    # Metrics that cannot be squared must be accepted (issue #3526).
    rng = check_random_state(0)
    X = rng.randn(5, 2)
    TSNE(metric="chebyshev").fit_transform(X)
def test_reduction_to_one_component():
    # Reduction to a single output dimension must work (issue #4154).
    rng = check_random_state(0)
    X = rng.randn(5, 2)
    embedding = TSNE(n_components=1).fit(X).embedding_
    assert np.all(np.isfinite(embedding))
def test_no_sparse_on_barnes_hut():
    # Barnes-Hut must reject sparse input with a TypeError.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    # Use the seeded RNG rather than the global np.random so the
    # sparsity pattern is reproducible (the original used np.random).
    X[(random_state.randint(0, 100, 50),
       random_state.randint(0, 2, 50))] = 0.0
    X_csr = sp.csr_matrix(X)
    tsne = TSNE(n_iter=199, method='barnes_hut')
    assert_raises_regexp(TypeError, "A sparse matrix was.*",
                         tsne.fit_transform, X_csr)
def test_64bit():
    # Both solvers must accept float64 input.  The Cython implementation
    # is single precision only, so the embedding dtype is always float32
    # regardless of the input dtype.
    rng = check_random_state(0)
    for method in ('barnes_hut', 'exact'):
        for dtype in (np.float32, np.float64):
            X = rng.randn(50, 2).astype(dtype)
            model = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                         random_state=0, method=method, verbose=0)
            embedding = model.fit_transform(X)
            assert embedding.dtype == np.float32
def test_barnes_hut_angle():
    # When Barnes-Hut's angle=0 this corresponds to the exact method.
    angle = 0.0
    perplexity = 10
    n_samples = 100
    for n_components in [2, 3]:
        n_features = 5
        degrees_of_freedom = float(n_components - 1.0)
        random_state = check_random_state(0)
        # Build a symmetric, non-negative pseudo-distance matrix with a
        # zero diagonal.
        distances = random_state.randn(n_samples, n_features)
        distances = distances.astype(np.float32)
        distances = abs(distances.dot(distances.T))
        np.fill_diagonal(distances, 0.0)
        params = random_state.randn(n_samples, n_components)
        # Exact path: KL divergence on the dense probability matrix.
        P = _joint_probabilities(distances, perplexity, verbose=0)
        kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom,
                                              n_samples, n_components)
        # Barnes-Hut path: use all n_samples - 1 neighbors so that with
        # angle=0 it must reproduce the exact result.
        k = n_samples - 1
        bt = BallTree(distances)
        distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
        neighbors_nn = neighbors_nn[:, 1:]  # drop each point's self-match
        distances_nn = np.array([distances[i, neighbors_nn[i]]
                                 for i in range(n_samples)])
        # Sanity check that the re-gathered neighbor distances agree with
        # what the BallTree query returned.
        assert np.all(distances[0, neighbors_nn[0]] == distances_nn[0]),\
            abs(distances[0, neighbors_nn[0]] - distances_nn[0])
        P_bh = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                       perplexity, verbose=0)
        kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom,
                                           n_samples, n_components,
                                           angle=angle, skip_num_points=0,
                                           verbose=0)
        # NOTE(review): the gradients (grad_exact / grad_bh) are computed
        # but not compared here; only probabilities and KL values are.
        P = squareform(P)
        P_bh = P_bh.toarray()
        assert_array_almost_equal(P_bh, P, decimal=5)
        assert_almost_equal(kl_exact, kl_bh, decimal=3)
@skip_if_32bit
def test_n_iter_without_progress():
    # Use a dummy negative n_iter_without_progress and check output on stdout
    random_state = check_random_state(0)
    X = random_state.randn(100, 10)
    for method in ["barnes_hut", "exact"]:
        # The huge learning rate makes progress impossible, so with a
        # negative patience the optimizer should stop immediately.
        tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8,
                    random_state=0, method=method, n_iter=351, init="random")
        # Private knobs: check progress every iteration and skip the
        # early-exaggeration phase entirely.
        tsne._N_ITER_CHECK = 1
        tsne._EXPLORATION_N_ITER = 0
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            tsne.fit_transform(X)
        finally:
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = old_stdout
        # The output needs to contain the value of n_iter_without_progress
        assert_in("did not make any progress during the "
                  "last -1 episodes. Finished.", out)
def test_min_grad_norm():
    # Make sure that the parameter min_grad_norm is used correctly
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    min_grad_norm = 0.002
    tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
                random_state=0, method='exact')
    # Capture the verbose optimizer output so gradient norms can be
    # parsed from it afterwards.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    lines_out = out.split('\n')
    # extract the gradient norm from the verbose output
    gradient_norm_values = []
    for line in lines_out:
        # When the computation is Finished just an old gradient norm value
        # is repeated that we do not need to store
        if 'Finished' in line:
            break
        start_grad_norm = line.find('gradient norm')
        if start_grad_norm >= 0:
            # Strip everything before and after the numeric value.
            line = line[start_grad_norm:]
            line = line.replace('gradient norm = ', '').split(' ')[0]
            gradient_norm_values.append(float(line))
    # Compute how often the gradient norm is smaller than min_grad_norm
    gradient_norm_values = np.array(gradient_norm_values)
    n_smaller_gradient_norms = \
        len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
    # The gradient norm can be smaller than min_grad_norm at most once,
    # because in the moment it becomes smaller the optimization stops
    assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
    # Ensures that the accessible kl_divergence matches the computed value
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    tsne = TSNE(n_iter_without_progress=2, verbose=2,
                random_state=0, method='exact')
    # Capture the verbose output; the last reported "error = ..." is the
    # KL divergence at the final iteration.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    # The output needs to contain the accessible kl_divergence as the error at
    # the last iteration
    for line in out.split('\n')[::-1]:
        if 'Iteration' in line:
            # Parse "... error = <value>, ..." out of the log line.
            _, _, error = line.partition('error = ')
            if error:
                error, _, _ = error.partition(',')
                break
    assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
def check_uniform_grid(method, seeds=[0, 1, 2], n_iter=1000):
    """Make sure that TSNE can approximately recover a uniform 2D grid
    Due to ties in distances between point in X_2d_grid, this test is platform
    dependent for ``method='barnes_hut'`` due to numerical imprecision.
    Also, t-SNE is not assured to converge to the right solution because bad
    initialization can lead to convergence to bad local minimum (the
    optimization problem is non-convex). To avoid breaking the test too often,
    we re-run t-SNE from the final point when the convergence is not good
    enough.
    """
    # X_2d_grid is a module-level fixture defined elsewhere in this file.
    for seed in seeds:
        tsne = TSNE(n_components=2, init='random', random_state=seed,
                    perplexity=20, n_iter=n_iter, method=method)
        Y = tsne.fit_transform(X_2d_grid)
        # try_name labels assertion failures with the method/seed combo.
        try_name = "{}_{}".format(method, seed)
        try:
            assert_uniform_grid(Y, try_name)
        except AssertionError:
            # If the test fails a first time, re-run with init=Y to see if
            # this was caused by a bad initialization. Note that this will
            # also run an early_exaggeration step.
            try_name += ":rerun"
            tsne.init = Y
            Y = tsne.fit_transform(X_2d_grid)
            assert_uniform_grid(Y, try_name)
def assert_uniform_grid(Y, try_name=None):
    # The embedding should be approximately uniform: every point's
    # nearest-neighbor distance is non-zero and close to the mean spacing.
    nn = NearestNeighbors(n_neighbors=1).fit(Y)
    dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
    assert dist_to_nn.min() > 0.1
    mean_spacing = np.mean(dist_to_nn)
    assert_greater(dist_to_nn.min() / mean_spacing, .5, msg=try_name)
    assert_less(dist_to_nn.max() / mean_spacing, 2, msg=try_name)
def test_uniform_grid():
    # nose-style generator: run the uniform-grid check for both solvers.
    yield check_uniform_grid, 'barnes_hut'
    yield check_uniform_grid, 'exact'
def test_bh_match_exact():
    # With angle=0 and perplexity > n_samples / 3, the barnes_hut solver
    # must match the exact one.
    rng = check_random_state(0)
    X = rng.randn(30, 10).astype(np.float32)
    embeddings = {}
    iterations = {}
    for method in ('exact', 'barnes_hut'):
        model = TSNE(n_components=2, method=method, learning_rate=1.0,
                     init="random", random_state=0, n_iter=251,
                     perplexity=30.0, angle=0)
        # Disable the early-exaggeration phase entirely.
        model._EXPLORATION_N_ITER = 0
        embeddings[method] = model.fit_transform(X)
        iterations[method] = model.n_iter_
    assert iterations['exact'] == iterations['barnes_hut']
    assert_array_almost_equal(embeddings['exact'], embeddings['barnes_hut'],
                              decimal=3)
def test_tsne_with_different_distance_metrics():
    """A named metric must give the same embedding as metric='precomputed'
    applied to the corresponding distance matrix."""
    rng = check_random_state(0)
    X = rng.randn(50, 3).astype(np.float32)
    for metric, dist_func in (('manhattan', manhattan_distances),
                              ('cosine', cosine_distances)):
        direct = TSNE(metric=metric, n_components=2,
                      random_state=0).fit_transform(X)
        precomputed = TSNE(metric='precomputed', n_components=2,
                           random_state=0).fit_transform(dist_func(X))
        assert_array_equal(direct, precomputed)
|
mit
|
imbasimba/astroquery
|
astroquery/vo_conesearch/conesearch.py
|
2
|
17045
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Support VO Simple Cone Search capabilities."""
# STDLIB
import warnings
# THIRD-PARTY
import numpy as np
# ASTROPY
from astropy.io.votable.exceptions import vo_warn, W25
from astropy.utils.console import color_print
from astropy.utils.exceptions import AstropyUserWarning
# LOCAL
from . import vos_catalog
from .vo_async import AsyncBase
from .core import ConeSearch, _validate_sr
from .exceptions import ConeSearchError
from ..exceptions import NoResultsWarning
from ..utils.timer import timefunc, RunTimePredictor
# Import configurable items declared in __init__.py
from . import conf
__all__ = ['AsyncConeSearch', 'conesearch', 'AsyncSearchAll', 'search_all',
'list_catalogs', 'predict_search', 'conesearch_timer']
# Skip these doctests
__doctest_skip__ = ['AsyncConeSearch', 'AsyncSearchAll']
class AsyncConeSearch(AsyncBase):
    """
    Perform a Cone Search asynchronously and returns the result of the
    first successful query.
    .. note::
        See :class:`~astroquery.vo_conesearch.vo_async.AsyncBase`
        for more details.
    Parameters
    ----------
    args, kwargs
        See :func:`conesearch`.
    Examples
    --------
    >>> from astropy import coordinates as coord
    >>> from astropy import units as u
    >>> from astroquery.vo_conesearch import conesearch
    >>> c = coord.ICRS(6.0223 * u.degree, -72.0814 * u.degree)
    >>> async_search = conesearch.AsyncConeSearch(
    ...     c, 0.5 * u.degree,
    ...     catalog_db='Guide Star Catalog 2.3 Cone Search 1')
    Check search status:
    >>> async_search.running()
    True
    >>> async_search.done()
    False
    Get search results after a 30-second wait (not to be
    confused with ``astroquery.vo_conesearch.conf.timeout`` that
    governs individual Cone Search queries). If search is still not
    done after 30 seconds, `TimeoutError` is raised. Otherwise,
    Cone Search result is returned and can be manipulated as in
    :ref:`Simple Cone Search Examples <vo-sec-scs-examples>`.
    If no ``timeout`` keyword given, it waits until completion:
    >>> async_result = async_search.get(timeout=30)
    >>> len(async_result)
    74271
    """
    def __init__(self, *args, **kwargs):
        # Delegate to AsyncBase, which runs :func:`conesearch` in the
        # background; results are retrieved later with ``get()``.
        # (Modernized from the Python-2-style ``super(Class, self)`` call.)
        super().__init__(conesearch, *args, **kwargs)
def conesearch(center, radius, *, verb=1, catalog_db=None,
               verbose=True, cache=True, query_all=False,
               return_astropy_table=True, use_names_over_ids=False):
    """
    Perform Cone Search and returns the result of the
    first successful query.
    .. note::
        Use ``astroquery.vo_conesearch.conf.pedantic`` to control
        pedantry. When `True`, will raise an error when the result
        violates the spec, otherwise issue a warning. Warnings may
        be controlled using :py:mod:`warnings` module.
    .. note::
        Use ``astroquery.vo_conesearch.conf.timeout`` to control
        timeout limit in seconds for each service being queried.
    Parameters
    ----------
    center : str, `astropy.coordinates` object, list, or tuple
        Position of the center of the cone to search.
        It may be specified as an object from the
        :ref:`astropy:astropy-coordinates` package,
        string as accepted by
        :func:`~astroquery.utils.parse_coordinates`, or tuple/list.
        If given as tuple or list, it is assumed to be ``(RA, DEC)``
        in the ICRS coordinate frame, given in decimal degrees.
    radius : float or `~astropy.units.quantity.Quantity`
        Radius of the cone to search:
            - If float is given, it is assumed to be in decimal degrees.
            - If astropy quantity is given, it is internally converted
              to degrees.
    verb : {1, 2, 3}
        Verbosity indicating how many columns are to be returned
        in the resulting table. Support for this parameter by
        a Cone Search service implementation is optional. If the
        service supports the parameter:
            1. Return the bare minimum number of columns that
               the provider considers useful in describing the
               returned objects.
            2. Return a medium number of columns between the
               minimum and maximum (inclusive) that are
               considered by the provider to most typically
               useful to the user.
            3. Return all of the columns that are available for
               describing the objects.
        If not supported, the service should ignore the parameter
        and always return the same columns for every request.
    catalog_db
        May be one of the following, in order from easiest to
        use to most control:
            - `None`: A database of
              ``astroquery.vo_conesearch.conf.conesearch_dbname`` catalogs is
              downloaded from ``astroquery.vo_conesearch.conf.vos_baseurl``.
              The first catalog in the database to successfully return a
              result is used.
            - *catalog name*: A name in the database of
              ``astroquery.vo_conesearch.conf.conesearch_dbname`` catalogs at
              ``astroquery.vo_conesearch.conf.vos_baseurl`` is used.
              For a list of acceptable names, use
              :func:`astroquery.vo_conesearch.vos_catalog.list_catalogs`.
            - *url*: The prefix of a URL to a IVOA Service for
              ``astroquery.vo_conesearch.conf.conesearch_dbname``.
              Must end in either '?' or '&'.
            - `~astroquery.vo_conesearch.vos_catalog.VOSCatalog` object: A
              specific catalog manually downloaded and selected from the
              database (see :ref:`vo-sec-client-vos`).
            - Any of the above 3 options combined in a list, in which case
              they are tried in order.
    verbose : bool
        Verbose output.
    cache : bool
        Use caching for VO Service database. Access to actual VO
        websites referenced by the database still needs internet
        connection.
    query_all : bool
        This is used by :func:`search_all`.
    return_astropy_table : bool
        Returned ``obj`` will be `astropy.table.Table` rather
        than `astropy.io.votable.tree.Table`.
    use_names_over_ids : bool
        When `True` use the ``name`` attributes of columns as the names
        of columns in the `~astropy.table.Table` instance. Since names
        are not guaranteed to be unique, this may cause some columns
        to be renamed by appending numbers to the end. Otherwise
        (default), use the ID attributes as the column names.
    Returns
    -------
    obj : `astropy.table.Table` or `astropy.io.votable.tree.Table`
        First table from first successful VO service request.
        See ``return_astropy_table`` parameter for the kind of table returned.
    Raises
    ------
    ConeSearchError
        When invalid inputs are passed into Cone Search.
    """
    n_timed_out = 0
    service_type = conf.conesearch_dbname
    catalogs = vos_catalog._get_catalogs(
        service_type, catalog_db, cache=cache, verbose=verbose)
    # ``search_all`` collects every successful result keyed by service URL;
    # a plain cone search keeps only the first success.
    if query_all:
        result = {}
    else:
        result = None
    for name, catalog in catalogs:
        # Resolve each catalog entry to a service URL.  A string is either
        # a URL itself or a catalog name to look up in the remote database.
        if isinstance(catalog, str):
            if catalog.startswith('http'):
                url = catalog
            else:
                remote_db = vos_catalog.get_remote_catalog_db(
                    service_type, cache=cache, verbose=verbose)
                catalog = remote_db.get_catalog(catalog)
                url = catalog['url']
        else:
            url = catalog['url']
        if verbose:  # pragma: no cover
            color_print('Trying {0}'.format(url), 'green')
        try:
            r = ConeSearch.query_region(
                center, radius, verb=verb, cache=cache, verbose=verbose,
                service_url=url, return_astropy_table=return_astropy_table,
                use_names_over_ids=use_names_over_ids)
        except Exception as e:
            # A failed service is only warned about; the next catalog in
            # the list is tried.  Timeouts are counted separately so a
            # more specific warning can be issued below.
            err_msg = str(e)
            vo_warn(W25, (url, err_msg))
            if not query_all and 'ConnectTimeoutError' in err_msg:
                n_timed_out += 1
        else:
            if r is not None:
                if query_all:
                    result[r.url] = r
                else:
                    # First success wins; stop querying further catalogs.
                    result = r
                    break
    if result is None and n_timed_out > 0:
        err_msg = ('None of the available catalogs returned valid results.'
                   ' ({0} URL(s) timed out.)'.format(n_timed_out))
        warnings.warn(err_msg, NoResultsWarning)
    return result
class AsyncSearchAll(AsyncBase):
    """
    Perform a Cone Search asynchronously, storing all results
    instead of just the result from first successful query.
    .. note::
        See :class:`~astroquery.vo_conesearch.vo_async.AsyncBase`
        for more details.
    Parameters
    ----------
    args, kwargs
        See :func:`search_all`.
    Examples
    --------
    >>> from astropy import coordinates as coord
    >>> from astropy import units as u
    >>> from astroquery.vo_conesearch import conesearch
    >>> c = coord.ICRS(6.0223 * u.degree, -72.0814 * u.degree)
    >>> async_search = conesearch.AsyncSearchAll(c, 0.5 * u.degree)
    Check search status:
    >>> async_search.running()
    True
    >>> async_search.done()
    False
    Get a dictionary of all search results after a 30-second wait (not
    to be confused with ``astroquery.vo_conesearch.conf.timeout`` that
    governs individual Cone Search queries). If search is still not
    done after 30 seconds, `TimeoutError` is raised. Otherwise, a
    dictionary is returned and can be manipulated as in
    :ref:`Simple Cone Search Examples <vo-sec-scs-examples>`.
    If no ``timeout`` keyword given, it waits until completion:
    >>> async_allresults = async_search.get(timeout=60)
    >>> all_catalogs = list(async_allresults)
    >>> first_cone_arr = async_allresults[all_catalogs[0]]
    >>> len(first_cone_arr)
    74271
    """
    def __init__(self, *args, **kwargs):
        # Delegate to AsyncBase, which runs :func:`search_all` in the
        # background.  Uses ``super()`` for consistency with
        # AsyncConeSearch instead of calling AsyncBase.__init__ directly.
        super().__init__(search_all, *args, **kwargs)
def search_all(*args, **kwargs):
    """
    Run a Cone Search against every catalog and keep all successful
    results, not just the first.
    .. warning::
        Could potentially take up significant run time and
        computing resources.
    Parameters
    ----------
    args, kwargs
        Arguments and keywords accepted by :func:`conesearch`.
    Returns
    -------
    result : dict of `astropy.io.votable.tree.Table` objects
        A dictionary of tables from successful VO service requests,
        with keys being the access URLs. If none is successful,
        an empty dictionary is returned.
    Raises
    ------
    ConeSearchError
        When invalid inputs are passed into Cone Search.
    """
    # Force the query_all mode of :func:`conesearch`; any caller-supplied
    # value is overridden, matching the original behavior.
    all_kwargs = dict(kwargs, query_all=True)
    return conesearch(*args, **all_kwargs)
def list_catalogs(**kwargs):
    """
    List the names of the available Cone Search catalogs.
    Any returned name can be passed as the ``catalog_db`` argument to
    :func:`conesearch`.
    Parameters
    ----------
    cache : bool
        Use caching for VO Service database. Access to actual VO
        websites referenced by the database still needs internet
        connection.
    verbose : bool
        Show download progress bars.
    pattern : str or `None`
        If given string is anywhere in a catalog name, it is
        considered a matching catalog. It accepts patterns as
        in :py:mod:`fnmatch` and is case-insensitive.
        By default, all catalogs are returned.
    sort : bool
        Sort output in alphabetical order. If not sorted, the
        order depends on dictionary hashing. Default is `True`.
    Returns
    -------
    arr : list of str
        List of catalog names.
    """
    # Thin wrapper: the actual lookup lives in vos_catalog.
    db_name = conf.conesearch_dbname
    return vos_catalog.list_catalogs(db_name, **kwargs)
def predict_search(url, *args, **kwargs):
    """
    Predict the run time needed and the number of objects
    for a Cone Search for the given access URL, position, and
    radius.
    Run time prediction uses `astroquery.utils.timer.RunTimePredictor`.
    Baseline searches are done with starting and ending radii at
    0.05 and 0.5 of the given radius, respectively.
    Extrapolation on good data uses least-square straight line fitting,
    assuming linear increase of search time and number of objects
    with radius, which might not be accurate for some cases. If
    there are less than 3 data points in the fit, it fails.
    Warnings (controlled by :py:mod:`warnings`) are given when:
        #. Fitted slope is negative.
        #. Any of the estimated results is negative.
        #. Estimated run time exceeds
           ``astroquery.vo_conesearch.conf.timeout``.
    .. note::
        If ``verbose=True``, extra log info will be provided.
        But unlike :func:`conesearch_timer`, timer info is suppressed.
        The predicted results are just *rough* estimates.
        Prediction is done using
        :class:`astroquery.vo_conesearch.core.ConeSearchClass`.
        Prediction for :class:`AsyncConeSearch` is not supported.
    Parameters
    ----------
    url : str
        Cone Search access URL to use.
    plot : bool
        If `True`, plot will be displayed.
        Plotting uses `matplotlib <http://matplotlib.sourceforge.net/>`_.
    args, kwargs
        See :meth:`astroquery.vo_conesearch.core.ConeSearchClass.query_region`.
    Returns
    -------
    t_est : float
        Estimated time in seconds needed for the search.
    n_est : int
        Estimated number of objects the search will yield.
    Raises
    ------
    AssertionError
        If prediction fails.
    ConeSearchError
        If input parameters are invalid.
    VOSError
        If VO service request fails.
    """
    if len(args) != 2:  # pragma: no cover
        raise ConeSearchError('conesearch must have exactly 2 arguments')
    kwargs['service_url'] = url
    kwargs['return_astropy_table'] = False
    plot = kwargs.pop('plot', False)
    center, radius = args
    # Search radius must validate and be strictly positive.
    sr = _validate_sr(radius)
    if sr <= 0:
        raise ConeSearchError('Search radius must be > 0 degrees')
    # Not using default ConeSearch instance because the attributes are
    # tweaked to match user inputs to this function.
    cs_pred = RunTimePredictor(ConeSearch.query_region, center, **kwargs)
    # Search properties for timer extrapolation
    num_datapoints = 10  # Number of desired data points for extrapolation
    sr_min = 0.05 * sr  # Min radius to start the timer
    sr_max = 0.5 * sr  # Max radius to stop the timer
    sr_step = (1.0 / num_datapoints) * (sr_max - sr_min)  # Radius step
    # Slowly increase radius to get data points for extrapolation
    sr_arr = np.arange(sr_min, sr_max + sr_step, sr_step)
    cs_pred.time_func(sr_arr)
    # Predict run time via a straight-line fit of time vs. radius;
    # t_coeffs[1] is the fitted slope.
    t_coeffs = cs_pred.do_fit()
    t_est = cs_pred.predict_time(sr)
    if t_est < 0 or t_coeffs[1] < 0:  # pragma: no cover
        warnings.warn(
            'Estimated runtime ({0} s) is non-physical with slope of '
            '{1}'.format(t_est, t_coeffs[1]), AstropyUserWarning)
    elif t_est > conf.timeout:  # pragma: no cover
        warnings.warn(
            'Estimated runtime is longer than timeout of '
            '{0} s'.format(conf.timeout), AstropyUserWarning)
    # Predict number of objects with a separate linear fit of row count
    # vs. radius, using the timing runs' cached results.
    sr_arr = sorted(cs_pred.results)  # Orig with floating point error
    n_arr = [cs_pred.results[key].array.size for key in sr_arr]
    n_coeffs = np.polyfit(sr_arr, n_arr, 1)
    n_fitfunc = np.poly1d(n_coeffs)
    n_est = int(round(n_fitfunc(sr)))
    if n_est < 0 or n_coeffs[0] < 0:  # pragma: no cover
        warnings.warn('Estimated #objects ({0}) is non-physical with slope of '
                      '{1}'.format(n_est, n_coeffs[0]), AstropyUserWarning)
    if plot:  # pragma: no cover
        import matplotlib.pyplot as plt
        xlabeltext = 'radius (deg)'
        sr_fit = np.append(sr_arr, sr)
        n_fit = n_fitfunc(sr_fit)
        # Time plot is handled by the predictor; object count is plotted
        # separately below.
        cs_pred.plot(xlabeltext=xlabeltext)
        fig, ax = plt.subplots()
        ax.plot(sr_arr, n_arr, 'kx-', label='Actual')
        ax.plot(sr_fit, n_fit, 'b--', label='Fit')
        ax.scatter([sr], [n_est], marker='o', c='r', label='Predicted')
        ax.set_xlabel(xlabeltext)
        ax.set_ylabel('#objects')
        ax.legend(loc='best', numpoints=1)
        plt.draw()
    return t_est, n_est
@timefunc(1)
def conesearch_timer(*args, **kwargs):
    """
    Time a single Cone Search using `astroquery.utils.timer.timefunc`
    with a single try and a verbose timer.

    Parameters
    ----------
    args, kwargs
        See :func:`conesearch`.

    Returns
    -------
    t : float
        Run time in seconds.

    obj : `astropy.io.votable.tree.Table`
        First table from first successful VO service request.
    """
    # timefunc(1) executes the wrapped call exactly once and times it;
    # all arguments are forwarded untouched to conesearch().
    return conesearch(*args, **kwargs)
|
bsd-3-clause
|
sgrieve/iverson_2000
|
Iverson_funcs.py
|
1
|
24546
|
from __future__ import print_function
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.patches as patches
# Font sizes (points) used for tick/legend labels and axis labels.
label_size = 8
axis_size = 12
# Set up fonts for plots: applied globally to every figure in this module.
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = label_size
rcParams['xtick.major.size'] = 4
rcParams['ytick.major.size'] = 4
rcParams['legend.fontsize'] = label_size
# Tighten legend spacing so multi-entry legends stay compact.
rcParams['legend.handletextpad'] = 0.05
rcParams['legend.labelspacing'] = 0.1
rcParams['legend.columnspacing'] = 0.1
#==============================================================================
def minutes_to_secs(minutes):
    '''
    Convert a number of minutes to a number of seconds.

    Accepts a scalar or array-like; returns a numpy scalar/array.
    '''
    # The original branched on size, but the scalar and array cases are
    # numerically identical under numpy broadcasting, so one expression
    # covers both. (Docstring previously said "days" — fixed.)
    return np.asarray(minutes) * 60.
#==============================================================================
#==============================================================================
def days_to_secs(days):
    '''
    Convert a number of days to a number of seconds (86400 s per day).
    '''
    day_count = np.asarray(days)
    if day_count.size == 1:
        # Single value: spell out 24 h * 60 min * 60 s.
        return day_count * 24. * 60. * 60.
    # Array input: elementwise multiply by seconds-per-day.
    return np.multiply(day_count, 86400.)
#==============================================================================
#==============================================================================
def weeks_to_secs(weeks):
    '''
    Convert a number of weeks to a number of seconds.
    '''
    week_count = np.asarray(weeks)
    if week_count.size == 1:
        # Single value: one week is seven days.
        return days_to_secs(week_count * 7.)
    # Array input: convert day-equivalents, then scale by 7.
    return np.multiply(days_to_secs(week_count), 7.)
#==============================================================================
#==============================================================================
def days_to_weeks(days):
    '''
    Convert a number of days to a number of weeks (7 days per week).
    '''
    day_count = np.asarray(days)
    if day_count.size == 1:
        return day_count / 7.
    # Array input: elementwise division.
    return np.divide(day_count, 7.)
#==============================================================================
#==============================================================================
def array_erfc(XX):
    '''
    Apply math.erfc elementwise to a scalar, list, or array.

    Returns a plain float for a single value, otherwise a numpy array.
    Not currently used everywhere, but kept as the erfc helper for R_fn.
    '''
    X = np.asarray(XX)
    if X.size == 1:
        # math.erfc accepts a 0-d numpy array via __float__.
        return math.erfc(X)
    # Comprehension replaces the original manual append loop;
    # dead debug prints removed.
    return np.array([math.erfc(x) for x in X])
#==============================================================================
#==============================================================================
def Z_fn(x, z, alpha):
    '''
    Calculate Z, the elevation head, from the equations given in Figure 3's
    caption. Alpha is the slope gradient; x and z are coordinates.

    If Z and z share an origin this simplifies to z / cos(alpha).
    '''
    along_slope = np.multiply(x, np.sin(alpha))
    vertical = np.multiply(z, np.cos(alpha))
    return np.add(along_slope, vertical)
#==============================================================================
#==============================================================================
def D_hat_fn(Do, alpha):
    '''
    Equation 26c - effective hydraulic diffusivity.

    Do is the reference diffusivity; alpha is the slope gradient.
    '''
    cos_sq = np.cos(alpha) ** 2.
    return cos_sq * (4. * Do)
#==============================================================================
#==============================================================================
def Iz_over_Kz_steady(alpha, Iz_over_Kz):
    '''
    Steady-state vertical water influx ratio used in beta.

    Alpha is the gradient. Iverson sets the result to 0.1 in Figures 7
    and 8, but also defines it in terms of Iz_over_Kz (p. 1902, col. 1).
    '''
    return Iz_over_Kz * np.cos(alpha)
#==============================================================================
#==============================================================================
def Beta_fn(alpha, Iz_over_Kz_steady):
    '''
    Beta constant used in equations 27a and 27b.

    Alpha is the gradient; Iz_over_Kz_steady is the steady-state vertical
    water influx rate (0.1 in Iverson's Figures 7 and 8).
    '''
    cos_sq = np.cos(alpha) ** 2.
    return cos_sq - Iz_over_Kz_steady
#==============================================================================
#==============================================================================
def Beta_line(Z, beta):
    '''
    Return the "beta line": the maximum (saturated) pore pressure head
    as a function of depth Z.
    '''
    saturated_head = beta * Z
    return saturated_head
#==============================================================================
def Correct_psi(Z, Psi, beta):
    '''
    Clamp each pressure head in Psi to the beta line, the theoretical
    maximum (saturated) pore pressure at that depth.

    Mutates Psi in place and returns it.
    '''
    for idx, head in enumerate(Psi):
        cap = Z[idx] * beta
        if head > cap:
            Psi[idx] = cap
    return Psi
#==============================================================================
def t_T_star_fn(t, D_hat, Z):
    '''
    Nondimensionalise a time parameter (equations 27c and 27d).

    Works for both t (time, eq. 27c) and T (rainfall duration, eq. 27d).

    NOTE(review): this returns t / (Z**2 * D_hat), whereas
    psi_dimensional_t computes t * D_hat / Z**2 inline -- confirm which
    form matches Iverson's eq. 27c/d.
    '''
    depth_sq = np.multiply(Z, Z)
    scale = np.multiply(depth_sq, D_hat)
    return np.divide(t, scale)
#==============================================================================
#==============================================================================
def R_fn(t_star):
    '''
    Response function R(t*) of equation 27e.
    '''
    # sqrt(t*/pi) * exp(-1/t*) rises from zero; the complementary error
    # function of 1/sqrt(t*) is subtracted from it.
    growth_term = np.multiply(np.sqrt(t_star / np.pi), np.exp(-1. / t_star))
    inv_root = 1. / np.sqrt(t_star)
    return np.subtract(growth_term, array_erfc(inv_root))
#==============================================================================
#==============================================================================
def psi(Z, beta, d, Iz_over_Kz, t_star, T_star):
    '''
    Pressure head from equations 27a and 27b.

    Note the dimensionless times t_star/T_star are depth-dependent
    (eq. 27c/d), so one t_star value corresponds to different
    dimensional times at different depths.
    '''
    # Steady-state (initial-condition) component: linear in depth,
    # referenced to the steady water table at depth d. Iverson appears
    # to take this as given -- possibly from measurements.
    steady_component = beta * (Z - d)
    # Transient component via the response function of eq. 27e: rising
    # limb during rainfall, recession superposed afterwards.
    if t_star < T_star:
        transient_component = Z * Iz_over_Kz * R_fn(t_star)
    else:
        transient_component = Z * Iz_over_Kz * (R_fn(t_star) - R_fn(t_star - T_star))
    return steady_component + transient_component
#==============================================================================
#==============================================================================
def psi_transient(Z, beta, d, Iz_over_Kz, t_star, T_star):
    '''
    Transient-only part of the pressure head (equations 27a/27b),
    i.e. without the steady beta*(Z - d) component.
    '''
    # Rising limb during rainfall; recession superposed afterwards
    # (response function of eq. 27e).
    if t_star < T_star:
        transient_component = Z * Iz_over_Kz * R_fn(t_star)
    else:
        transient_component = Z * Iz_over_Kz * (R_fn(t_star) - R_fn(t_star - T_star))
    return transient_component
#==============================================================================
#==============================================================================
def psi_dimensional_t(Z, beta, d, Iz_over_Kz, D_hat, t, T):
    '''
    Pressure head profile (equations 27a/27b) at a dimensional time.

    Times t and T are in seconds. Not vectorised, so each depth is
    computed in turn.
    '''
    profile = []
    for depth in Z:
        # Per eq. 27c/d the dimensionless times depend on depth, so
        # each point below the surface gets its own t_star and T_star.
        t_star = t * D_hat / (depth * depth)
        T_star = T * D_hat / (depth * depth)
        profile.append(psi(depth, beta, d, Iz_over_Kz, t_star, T_star))
    return profile
#==============================================================================
#==============================================================================
def psi_dimensional_t_transient(Z, beta, d, Iz_over_Kz, D_hat, t, T):
    '''
    Transient-only pressure head profile (equations 27a/27b) at a
    dimensional time, for superposing rainfall time series.

    Times are in seconds. Not vectorised, so it is slow for long Z.
    '''
    profile = []
    for depth in Z:
        # Dimensionless times are depth-dependent (eq. 27c/d).
        t_star = t * D_hat / (depth * depth)
        T_star = T * D_hat / (depth * depth)
        profile.append(psi_transient(depth, beta, d, Iz_over_Kz, t_star, T_star))
        print("z: "+str(depth)+" t_star: "+str(t_star)+" T_star: "+ str(T_star))
    return profile
#==============================================================================
#==============================================================================
# This loads a time series that has rain two columns, a rainfall duration
# and a rainfall rate.
# It then superposes these to get the Psi value at a given time
#==============================================================================
def psi_dimensional_from_time_series(durations,intensities,Z, beta, d, D_hat, t):
    """Pressure head profile at time t driven by a rainfall time series.

    Superposes one transient solution per rainfall pulse (a
    duration/intensity pair) on top of the steady-state component.

    Parameters
    ----------
    durations : sequence of float
        Duration of each rainfall pulse (seconds, consecutive).
    intensities : sequence of float
        Iz_over_Kz for each pulse (same length as durations).
    Z : sequence of float
        Depths (m) at which to evaluate the head.
    beta, d, D_hat : float
        Beta constant, steady water table depth, effective diffusivity.
    t : float
        Time (seconds) since the start of the record.

    Returns
    -------
    numpy array of pressure heads, one per depth in Z.
    """
    # this is the steady component of psi. Basically transient pressure builds
    # on top of this. Not clear where Iverson comes up with these numbers.
    steady_psi = beta * (Z - d)
    cumulative_psi = np.asarray(steady_psi)
    print("steady psi is: ")
    print(steady_psi)
    print("t is: "+str(t))
    # Now we try to construct the transient pressure.
    # loop through the record getting cumulative times
    starting_times = []
    starting_times.append(0)
    cumulative_time = 0
    count = 0
    end_count_found = False
    end_count = 0
    for this_duration in durations:
        cumulative_time = cumulative_time+this_duration
        # the cumulative time is the time at the end of this timestep.
        # if the cumulative time is less than the time of simulation,
        # then we need to acount for this pulse of rainfall
        if t < cumulative_time:
            # end_count records the index of the first pulse that ends
            # after the evaluation time t; later pulses are ignored.
            if end_count_found == False:
                end_count_found = True
                end_count = count
        count = count+1
        starting_times.append(cumulative_time)
    # we don't need the last element
    del starting_times[-1]
    print("HEYHEYHEY")
    print(starting_times)
    # If we didn't find the end count it means the rainfall records have ended and we need
    # all of the data
    if end_count_found == False:
        end_count = count-1 # The minus one is needed since we have counted past the end of the index
    print("end count is: "+str(end_count))
    print("\n\n\n\nStarting times")
    print(starting_times)
    print("durations")
    print(durations)
    print("intensities")
    print(intensities)
    print("\n\n\n\n")
    # okay, now get the transients from superposition
    # First we need to figure out how many of these we will need
    #print("end count is: " + str(end_count))
    for i,time in enumerate(starting_times):
        #print("time is: "+str(time))
        if i <= end_count:
            # Time elapsed since this pulse began drives its transient.
            eff_t = t-time
            this_intensity = intensities[i]
            this_duration = durations[i]
            print("\n\n\n\n")
            print("Eff T: "+str(eff_t)+" and intensity is: "+str(this_intensity)+" and duration is: " +str(this_duration))
            # now get the psi values.
            this_psi = psi_dimensional_t_transient(Z, beta, d, this_intensity, D_hat, eff_t, this_duration)
            print("this psi is:")
            print(this_psi)
            print("\n\n\n\n")
            # Linear superposition of each pulse's transient response.
            cumulative_psi = np.add(cumulative_psi,this_psi)
    #print("this psi is:")
    #print(this_psi)
    #print ("cumulative psi is: ")
    #print(cumulative_psi)
    return cumulative_psi
#==============================================================================
def F_f(alpha, friction_angle):
    '''
    Frictional term F_f of equation 28: tan(phi) / tan(alpha).
    '''
    slope_tan = np.tan(alpha)
    friction_tan = np.tan(friction_angle)
    return np.divide(friction_tan, slope_tan)
#==============================================================================
#==============================================================================
def F_c(cohesion, weight_of_soil, Z, alpha):
    '''
    Cohesion term F_c of equation 28:
    c / (gamma_s * Z * sin(alpha) * cos(alpha)).
    '''
    overburden = np.multiply(weight_of_soil, Z)
    slope_factor = np.multiply(np.sin(alpha), np.cos(alpha))
    denominator = np.multiply(overburden, slope_factor)
    return np.divide(cohesion, denominator)
#==============================================================================
#==============================================================================
def F_w(psi_val, weight_of_water, weight_of_soil, alpha, friction_angle, Z):
    '''
    Pore-pressure term F_w of equation 28 (negative: positive pressure
    head destabilises the slope).

    psi_val is the dimensional pressure head at a dimensional time.
    '''
    pressure_force = np.multiply(psi_val, weight_of_water)
    resisting = np.multiply(pressure_force, np.tan(friction_angle))
    numerator = np.multiply(-1., resisting)
    overburden = np.multiply(weight_of_soil, Z)
    slope_factor = np.multiply(np.sin(alpha), np.cos(alpha))
    denominator = np.multiply(overburden, slope_factor)
    return np.divide(numerator, denominator)
#==============================================================================
#==============================================================================
def FS(psi_val, weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, Z):
    '''
    Factor of safety from equation 28a (equivalent to eq. 29, split into
    the friction, cohesion and pore-pressure components).

    psi_val is the dimensional pressure head at a dimensional time.
    '''
    # FS0 is the time-invariant part; FSprime is the pressure-driven part.
    base = np.add(F_f(alpha, friction_angle),
                  F_c(cohesion, weight_of_soil, Z, alpha))
    pore = F_w(psi_val, weight_of_water, weight_of_soil, alpha, friction_angle, Z)
    return np.add(base, pore)
#==============================================================================
#==============================================================================
def FS_fxn_t_T_Z(Zs, t_sec, T_sec, weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, beta, d, Iz_over_Kz, D_0):
    '''
    Factor of safety (eq. 28a) as a function of depth at dimensional
    time t_sec, for a rainfall pulse of duration T_sec.
    '''
    # Effective diffusivity (eq. 26c).
    diffusivity = D_hat_fn(D_0, alpha)
    # Pressure head profile at this time.
    pressure = psi_dimensional_t(Zs, beta, d, Iz_over_Kz, diffusivity, t_sec, T_sec)
    # Cap psi at the beta line (the saturated maximum).
    # NOTE: Iverson does not apply this correction in his Figures 10/11
    # even though the paper says it should be applied.
    capped = Correct_psi(Zs, pressure, beta)
    return FS(capped, weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, Zs)
#==============================================================================
#==============================================================================
def Iverson_Fig_5(T_star):
    '''
    Reproduces Figure 5. Pass in T* values of 0.1, 1.0 and 10 to generate
    subplots A, B and C.

    Saves the plot to 'Fig_5.png' in the working directory.
    '''
    # Dense sweep of normalised times t* at which to evaluate R.
    t_stars = np.linspace(0.1, 1000, 10000)
    vals = []
    for t in t_stars:
        # Rising limb while it is raining (t* <= T*); afterwards the
        # recession R(t*) - R(t* - T*) is superposed.
        if t <= T_star:
            vals.append(R_fn(t))
        else:
            vals.append(R_fn(t) - R_fn(t - T_star))
    plt.plot(t_stars, vals)
    # Dashed vertical marker at the end of the rainfall pulse.
    plt.vlines(T_star, 0., max(vals), linestyle='--',
               label='$T^*$ = {0}'.format(T_star))
    ax = plt.gca()
    # NOTE(review): `nonposx` was removed in matplotlib >= 3.3; newer
    # versions use `nonpositive='clip'` -- confirm the pinned version.
    ax.set_xscale("log", nonposx='clip')
    plt.xlabel('Normalised Time, $t^*$')
    plt.ylabel('R')
    legend = plt.legend()
    legend.get_frame().set_linewidth(0.)
    plt.tight_layout()
    plt.savefig('Fig_5.png')
#==============================================================================
#==============================================================================
def Iverson_Fig_6():
    '''
    Reproduces Figure 6.
    Calls the response function for a range of T* values and identifies the
    peak R value and plots the peak t* and peak R values.

    Saves the plot to 'Fig_6.png' in the working directory.
    '''
    # Sweeps of rainfall duration T* and evaluation time t*.
    T_Stars = np.linspace(0.001, 1000, 1000)
    t_stars = np.linspace(0.01, 1000, 1000)
    t_peak = []
    R_peak = []
    for T in T_Stars:
        vals = []
        for t in t_stars:
            # Rising limb during rain; recession superposed afterwards.
            if t <= T:
                vals.append(R_fn(t))
            else:
                vals.append(R_fn(t) - R_fn(t - T))
        # Record the time and magnitude of the peak response for this T*.
        index = np.argmax(vals)
        t_peak.append(t_stars[index])
        R_peak.append(vals[index])
    plt.plot(T_Stars, t_peak, 'k--', label='$t^*$ peak')
    plt.plot(T_Stars, R_peak, 'r-', label='R peak')
    ax = plt.gca()
    # NOTE(review): `nonposx`/`nonposy` were removed in matplotlib >= 3.3;
    # newer versions use `nonpositive='clip'` -- confirm pinned version.
    ax.set_xscale("log", nonposx='clip')
    ax.set_yscale("log", nonposy='clip')
    plt.xlabel('Normalised Rainfall Duration, $T^*$')
    plt.ylabel('Normalised Peak Time and Peak Response')
    plt.ylim(0.001, 1000.)
    legend = plt.legend(loc=2)
    legend.get_frame().set_linewidth(0.)
    plt.tight_layout()
    plt.savefig('Fig_6.png')
#==============================================================================
#==============================================================================
def Iverson_Fig_7(t, T, d, Do, alpha, Iz_over_Kz, Iz_over_Kz_steady):
    '''
    Reproduces Figure 7. Currently does not work.
    Supply t as a vector in weeks and T in weeks.

    Saves the plot as "IversonFig7_<T>.png" and calls plt.show().
    '''
    # Get parameters for psi curves
    D_hat = D_hat_fn(Do, alpha)
    Zs = np.linspace(0.01, 6., 10)
    beta = Beta_fn(alpha, Iz_over_Kz_steady)
    beta_line = Beta_line(Zs, beta)
    # NOTE(review): `initial_line` and the first `ts` assignment below are
    # computed but never used -- candidates for cleanup once the figure
    # is fixed.
    initial_line = psi_dimensional_t(Zs, beta, d, Iz_over_Kz, D_hat, 0, T)
    # times in weeks
    ts_in_weeks = [0, 4, 8, 12, 24]
    ts = weeks_to_secs(ts_in_weeks)
    Fig1 = plt.figure(1, facecolor='white', figsize=(10, 8))
    # Depth increases downward on the plot.
    Fig1.gca().invert_yaxis()
    plt.plot(beta_line, Zs, 'k--', label='$\\beta$ Line')
    Ts = weeks_to_secs(T)
    for t_week in t:
        # One pressure-head profile per requested time.
        ts = weeks_to_secs(t_week)
        this_label = 't = ' + str(t_week) + ' weeks'
        psi = psi_dimensional_t(Zs, beta, d, Iz_over_Kz, D_hat, ts, Ts)
        plt.plot(psi, Zs, label=this_label)
    # plt.xlim(-2, 5)
    legend = plt.legend()
    legend.get_frame().set_linewidth(0.)
    plt.xlabel('Pressure head (m)')
    plt.ylabel('Depth (m)')
    plt.title('T = ' + str(T) + ' weeks')
    plt.tight_layout()
    plt.savefig("IversonFig7_" + str(int(T)) + ".png", format="png")
    plt.show()
#==============================================================================
#==============================================================================
def Iverson_FoS_Fig10(weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, d, Iz_over_Kz, Iz_over_Kz_steady, D_0, t_sec, T_sec,name_string):
    """Reproduce Iverson (2000) Figure 10: factor of safety vs depth.

    Plots one FS(depth) curve per time in t_sec (iterable of seconds),
    shades the FS < 1 band as unstable, saves the figure to name_string
    (PNG) and shows it interactively.
    """
    # Get parameters for psi curves
    Zs = np.linspace(0.001, 6., 200)
    beta = Beta_fn(alpha, Iz_over_Kz_steady)
    Fig1 = plt.figure(1, facecolor='white', figsize=(10, 8))
    # Depth increases downward on the plot.
    Fig1.gca().invert_yaxis()
    for this_time in t_sec:
        this_FS = FS_fxn_t_T_Z(Zs, this_time, T_sec, weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, beta, d, Iz_over_Kz, D_0)
        this_label = 't = ' + str(this_time) + ' seconds'
        plt.plot(this_FS, Zs, label=this_label,linewidth=3)
    ax1 = plt.gca()
    # Shade the unstable band (FS just below 1). NOTE: the `alpha`
    # keyword here is patch transparency, not the slope-angle parameter
    # of the same name.
    ax1.add_patch(patches.Rectangle(
        (0.9, 0),  # (x,y)
        0.1,  # width
        6,  # height
        alpha=0.2,
        facecolor="red",
        linewidth=2,
    ))
    plt.text(0.92, 2, 'Unstable', rotation=90, fontsize=24)
    #ax = plt.gca()
    plt.xlim(0.9, 1.4)
    legend = plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                        ncol=2, mode="expand", borderaxespad=2.,fontsize = 20)
    legend.get_frame().set_linewidth(1.)
    plt.xlabel('Factor of Safety (dimensionless)')
    plt.ylabel('Depth (m)')
    #plt.title('T = ' + str(T) + ' weeks')
    plt.tight_layout()
    plt.savefig(name_string, format="png")
    plt.show()
#==============================================================================
#==============================================================================
def Iverson_FoS_Fig11(weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, d, Iz_over_Kz, Iz_over_Kz_steady, D_0, t_sec, T_sec,name_string):
    """Reproduce Iverson (2000) Figure 11: factor of safety vs depth
    for a shallow (0-0.7 m) profile.

    Plots one FS(depth) curve per time in t_sec (iterable of seconds),
    shades FS < 1 as unstable, saves the figure to name_string (PNG)
    and shows it interactively.
    """
    # Get parameters for psi curves
    Zs = np.linspace(0.001, 0.7, 200)
    beta = Beta_fn(alpha, Iz_over_Kz_steady)
    Fig1 = plt.figure(1, facecolor='white', figsize=(10, 8))
    # Depth increases downward on the plot.
    Fig1.gca().invert_yaxis()
    for this_time in t_sec:
        this_FS = FS_fxn_t_T_Z(Zs, this_time, T_sec, weight_of_water, weight_of_soil, alpha, cohesion, friction_angle, beta, d, Iz_over_Kz, D_0)
        this_label = 't = ' + str(this_time) + ' seconds'
        plt.plot(this_FS, Zs, label=this_label,linewidth=3)
    ax1 = plt.gca()
    # Shade the unstable band. NOTE: `alpha` here is patch transparency,
    # not the slope-angle parameter of the same name.
    ax1.add_patch(patches.Rectangle(
        (0.5, 0),  # (x,y)
        0.5,  # width
        0.7,  # height
        alpha=0.2,
        facecolor="red",
        linewidth=2,
    ))
    # NOTE(review): this text is placed at y=2 while the depth axis only
    # spans 0-0.7 m (copied from the Fig 10 version) -- it likely falls
    # outside the axes; confirm against the intended figure.
    plt.text(0.92, 2, 'Unstable', rotation=90, fontsize=24)
    #ax = plt.gca()
    plt.xlim(0.5, 2.0)
    legend = plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                        ncol=2, mode="expand", borderaxespad=2.,fontsize = 20)
    legend.get_frame().set_linewidth(1.)
    plt.xlabel('Factor of Safety (dimensionless)')
    plt.ylabel('Depth (m)')
    #plt.title('T = ' + str(T) + ' weeks')
    plt.tight_layout()
    plt.savefig(name_string, format="png")
    plt.show()
#==============================================================================
|
mit
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/IPython/terminal/ipapp.py
|
7
|
13910
|
#!/usr/bin/env python
# encoding: utf-8
"""
The :class:`~IPython.core.application.Application` object for the command
line :command:`ipython` program.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import warnings
from traitlets.config.loader import Config
from traitlets.config.application import boolean_flag, catch_config_error, Application
from IPython.core import release
from IPython.core import usage
from IPython.core.completer import IPCompleter
from IPython.core.crashhandler import CrashHandler
from IPython.core.formatters import PlainTextFormatter
from IPython.core.history import HistoryManager
from IPython.core.application import (
ProfileDir, BaseIPythonApplication, base_flags, base_aliases
)
from IPython.core.magics import ScriptMagics
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.extensions.storemagic import StoreMagics
from .interactiveshell import TerminalInteractiveShell
from IPython.paths import get_ipython_dir
from traitlets import (
Bool, List, Dict, default, observe,
)
#-----------------------------------------------------------------------------
# Globals, utilities and helpers
#-----------------------------------------------------------------------------
_examples = """
ipython --matplotlib # enable matplotlib integration
ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
ipython --log-level=DEBUG # set logging to DEBUG
ipython --profile=foo # start with profile foo
ipython profile create foo # create profile foo w/ default config files
ipython help profile # show the help for the profile subcmd
ipython locate # print the path to the IPython directory
ipython locate profile foo # print the path to the directory for profile `foo`
"""
#-----------------------------------------------------------------------------
# Crash handler for this application
#-----------------------------------------------------------------------------
class IPAppCrashHandler(CrashHandler):
    """sys.excepthook for IPython itself, leaves a detailed report on disk."""
    def __init__(self, app):
        # Crash reports direct users to the IPython issue tracker with
        # the release's author as the contact.
        contact_name = release.author
        contact_email = release.author_email
        bug_tracker = 'https://github.com/ipython/ipython/issues'
        super(IPAppCrashHandler,self).__init__(
            app, contact_name, contact_email, bug_tracker
        )
    def make_report(self,traceback):
        """Return a string containing a crash report."""
        sec_sep = self.section_sep
        # Start with parent report
        report = [super(IPAppCrashHandler, self).make_report(traceback)]
        # Add interactive-specific info we may have
        rpt_add = report.append
        try:
            rpt_add(sec_sep+"History of session input:")
            for line in self.app.shell.user_ns['_ih']:
                rpt_add(line)
            rpt_add('\n*** Last line of input (may not be in above history):\n')
            rpt_add(self.app.shell._last_input_line+'\n')
        except:
            # Deliberately swallow everything: the crash handler must
            # never raise while producing a report of another crash.
            pass
        return ''.join(report)
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags.update(shell_flags)
frontend_flags = {}
addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
'Turn on auto editing of files with syntax errors.',
'Turn off auto editing of files with syntax errors.'
)
addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
"Force simple minimal prompt using `raw_input`",
"Use a rich interactive prompt with prompt_toolkit",
)
addflag('banner', 'TerminalIPythonApp.display_banner',
"Display a banner upon starting IPython.",
"Don't display a banner upon starting IPython."
)
addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
"""Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
"Don't prompt the user when exiting."
)
addflag('term-title', 'TerminalInteractiveShell.term_title',
"Enable auto setting the terminal title.",
"Disable auto setting the terminal title."
)
classic_config = Config()
classic_config.InteractiveShell.cache_size = 0
classic_config.PlainTextFormatter.pprint = False
classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
classic_config.InteractiveShell.separate_in = ''
classic_config.InteractiveShell.separate_out = ''
classic_config.InteractiveShell.separate_out2 = ''
classic_config.InteractiveShell.colors = 'NoColor'
classic_config.InteractiveShell.xmode = 'Plain'
frontend_flags['classic']=(
classic_config,
"Gives IPython a similar feel to the classic Python prompt."
)
# # log doesn't make so much sense this way anymore
# paa('--log','-l',
# action='store_true', dest='InteractiveShell.logstart',
# help="Start logging to the default log file (./ipython_log.py).")
#
# # quick is harder to implement
frontend_flags['quick']=(
{'TerminalIPythonApp' : {'quick' : True}},
"Enable quick startup with no config files."
)
frontend_flags['i'] = (
{'TerminalIPythonApp' : {'force_interact' : True}},
"""If running code from the command line, become interactive afterwards.
It is often useful to follow this with `--` to treat remaining flags as
script arguments.
"""
)
flags.update(frontend_flags)
aliases = dict(base_aliases)
aliases.update(shell_aliases)
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class LocateIPythonApp(BaseIPythonApplication):
    """Implements `ipython locate` (and `ipython locate profile ...`)."""
    description = """print the path to the IPython dir"""
    subcommands = Dict(dict(
        profile=('IPython.core.profileapp.ProfileLocate',
            "print the path to an IPython profile directory",
        ),
    ))
    def start(self):
        # Delegate to the parsed subcommand if one was given; otherwise
        # print the top-level IPython directory.
        if self.subapp is not None:
            return self.subapp.start()
        else:
            print(self.ipython_dir)
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
name = u'ipython'
description = usage.cl_usage
crash_handler_class = IPAppCrashHandler
examples = _examples
flags = Dict(flags)
aliases = Dict(aliases)
classes = List()
@default('classes')
def _classes_default(self):
"""This has to be in a method, for TerminalIPythonApp to be available."""
return [
InteractiveShellApp, # ShellApp comes before TerminalApp, because
self.__class__, # it will also affect subclasses (e.g. QtConsole)
TerminalInteractiveShell,
HistoryManager,
ProfileDir,
PlainTextFormatter,
IPCompleter,
ScriptMagics,
StoreMagics,
]
deprecated_subcommands = dict(
qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
),
notebook=('notebook.notebookapp.NotebookApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
),
console=('jupyter_console.app.ZMQTerminalIPythonApp',
"""DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
),
nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
"DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
),
trust=('nbformat.sign.TrustNotebookApp',
"DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
),
kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
"DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
),
)
subcommands = dict(
profile = ("IPython.core.profileapp.ProfileApp",
"Create and manage IPython profiles."
),
kernel = ("ipykernel.kernelapp.IPKernelApp",
"Start a kernel without an attached frontend."
),
locate=('IPython.terminal.ipapp.LocateIPythonApp',
LocateIPythonApp.description
),
history=('IPython.core.historyapp.HistoryApp',
"Manage the IPython history database."
),
)
deprecated_subcommands['install-nbextension'] = (
"notebook.nbextensions.InstallNBExtensionApp",
"DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
)
subcommands.update(deprecated_subcommands)
# *do* autocreate requested profile, but don't create the config file.
auto_create=Bool(True)
# configurables
quick = Bool(False,
help="""Start IPython quickly by skipping the loading of config files."""
).tag(config=True)
@observe('quick')
def _quick_changed(self, change):
if change['new']:
self.load_config_file = lambda *a, **kw: None
display_banner = Bool(True,
help="Whether to display a banner upon starting IPython."
).tag(config=True)
# if there is code of files to run from the cmd line, don't interact
# unless the --i flag (App.force_interact) is true.
force_interact = Bool(False,
help="""If a command or file is given via the command-line,
e.g. 'ipython foo.py', start an interactive shell after executing the
file or command."""
).tag(config=True)
@observe('force_interact')
def _force_interact_changed(self, change):
if change['new']:
self.interact = True
@observe('file_to_run', 'code_to_run', 'module_to_run')
def _file_to_run_changed(self, change):
new = change['new']
if new:
self.something_to_run = True
if new and not self.force_interact:
self.interact = False
# internal, not-configurable
something_to_run=Bool(False)
def parse_command_line(self, argv=None):
"""override to allow old '-pylab' flag with deprecation warning"""
argv = sys.argv[1:] if argv is None else argv
if '-pylab' in argv:
# deprecated `-pylab` given,
# warn and transform into current syntax
argv = argv[:] # copy, don't clobber
idx = argv.index('-pylab')
warnings.warn("`-pylab` flag has been deprecated.\n"
" Use `--matplotlib <backend>` and import pylab manually.")
argv[idx] = '--pylab'
return super(TerminalIPythonApp, self).parse_command_line(argv)
@catch_config_error
def initialize(self, argv=None):
    """Do actions after construct, but before starting the app.

    Parses the command line (via the parent class), then sets up the
    path, the interactive shell, the banner, GUI/pylab integration,
    extensions and startup code — unless a subapp was selected, in
    which case the subapp takes over and no further setup is needed.
    """
    super(TerminalIPythonApp, self).initialize(argv)
    if self.subapp is not None:
        # don't bother initializing further, starting subapp
        return
    # print self.extra_args
    # A leftover positional argument is treated as a file to run.
    if self.extra_args and not self.something_to_run:
        self.file_to_run = self.extra_args[0]
    self.init_path()
    # create the shell
    self.init_shell()
    # and draw the banner
    self.init_banner()
    # Now a variety of things that happen after the banner is printed.
    self.init_gui_pylab()
    self.init_extensions()
    self.init_code()
def init_shell(self):
    """initialize the InteractiveShell instance"""
    # Create an InteractiveShell instance.
    # shell.display_banner should always be False for the terminal
    # based app, because we call shell.show_banner() by hand below
    # so the banner shows *before* all extension loading stuff.
    self.shell = TerminalInteractiveShell.instance(parent=self,
                    profile_dir=self.profile_dir,
                    ipython_dir=self.ipython_dir, user_ns=self.user_ns)
    # Register the app with the shell so config changes propagate to it.
    self.shell.configurables.append(self)
def init_banner(self):
    """optionally display the banner"""
    # Only greet when a banner is wanted AND the session is interactive.
    if self.display_banner and self.interact:
        self.shell.show_banner()
    # Make sure there is a space below the banner.
    # NOTE: this runs whether or not the banner was shown above.
    if self.log_level <= logging.INFO: print()
def _pylab_changed(self, name, old, new):
    """Replace --pylab='inline' with --pylab='auto'"""
    if new != 'inline':
        return
    # 'inline' only makes sense in a notebook frontend; fall back to
    # automatic backend selection in the terminal.
    warnings.warn("'inline' not available as pylab backend, "
                  "using 'auto' instead.")
    self.pylab = 'auto'
def start(self):
    """Run the selected subapp, or enter the interactive mainloop."""
    if self.subapp is not None:
        # Delegate entirely to the chosen subcommand application.
        return self.subapp.start()
    # perform any prexec steps:
    if self.interact:
        self.log.debug("Starting IPython's mainloop...")
        self.shell.mainloop()
    else:
        # Non-interactive run (e.g. 'ipython script.py' without --i):
        # the code was already executed during initialize().
        self.log.debug("IPython not interactive...")
def load_default_config(ipython_dir=None):
    """Load the default config file from the default ipython_dir.
    This is useful for embedded shells.
    """
    if ipython_dir is None:
        ipython_dir = get_ipython_dir()
    profile_dir = os.path.join(ipython_dir, 'profile_default')
    merged = Config()
    # Later files override earlier ones; fold them all into one Config.
    for partial in Application._load_config_files("ipython_config", path=profile_dir):
        merged.update(partial)
    return merged
# Console-script entry point alias used by setup tools and `python -m IPython`.
launch_new_instance = TerminalIPythonApp.launch_instance
if __name__ == '__main__':
    launch_new_instance()
|
gpl-3.0
|
mvfcopetti/pySSN
|
pyssn/core/spectrum.py
|
1
|
124124
|
"""
pySSN is available under the GNU licence providing you cite the developpers names:
Ch. Morisset (Instituto de Astronomia, Universidad Nacional Autonoma de Mexico)
D. Pequignot (Meudon Observatory, France)
"""
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from scipy import interpolate
from collections import OrderedDict
from pyssn import log_, config
if config.INSTALLED['PyNeb']:
import pyneb as pn
from ..utils.physics import CST, Planck, make_cont_Ercolano, gff
from ..utils.misc import execution_path, change_size, convol, rebin, is_absorb, no_red_corr, gauss, carre, lorentz, convolgauss
from ..utils.misc import vactoair, airtovac, clean_label, get_parser, read_data, my_execfile as execfile
from ..core.profiles import profil_instr
"""
ToDo:
1) Define the special lines in a table or dictionnary where ref, color, style are set up.
"""
# mvfc: 'save_data' removed because it is never used (and it is similar to 'print_data' in misc.py).
# mvfc: changed to capture the error message
def read_data(filename, NF=True):
    """Read a fixed-width line-list file (phyat/model/cosmetik) into a recarray.

    Parameters
    ----------
    filename : str
        Path to the fixed-width data file.
    NF : bool
        If True (default), use the new-format column widths (14-char line
        numbers); otherwise the old 9-char format.

    Returns
    -------
    (numpy.recarray, str)
        The parsed records and an error message. The message is '' when
        the file is valid; when any field is invalid the message lists the
        offending rows and an EMPTY recarray is returned.
    """
    def rows(mask):
        # Short human-readable list of 1-based row numbers where *mask*
        # is True, truncated after nMax entries.
        nMax = 5
        rowList = np.array(range(1, len(dd)+1))[mask]
        txt = ', '.join(str(i) for i in rowList[:nMax])
        if len(rowList) > nMax:
            txt += ', ...'
        return txt
    dtype = 'i8, a1, a9, float64, float64, float64, float64, a1, i8, i4, f, a100'
    if NF:
        delimiter = [14, 1, 9, 11, 6, 10, 7, 1, 14, 4, 7, 100]
    else:
        delimiter = [ 9, 1, 9, 11, 6, 10, 7, 1, 9, 4, 7, 100]
    names = ['num', 'foo', 'id', 'lambda', 'l_shift', 'i_rel', 'i_cor', 'foo2',
             'ref', 'profile', 'vitesse', 'comment']
    usecols = (0, 2, 3, 4, 5, 6, 8, 9, 10, 11)
    dd = np.genfromtxt(filename, dtype=dtype, delimiter=delimiter, names=names, usecols=usecols)
    # A file with a single line yields a 0-d array; promote it to 1-d so
    # the field checks below work uniformly.
    if dd.size == 1:
        dd = np.atleast_1d(dd)
    msg = ''
    # genfromtxt fills an unparsable integer with -1, so -1 flags a bad
    # 'num' field. BUG FIX: the mask was wrapped in a list ([mask]),
    # which modern numpy rejects as a boolean index; pass it directly.
    # (The former np.isnan(dd['num']) check was dead code for the same
    # reason — invalid integers become -1, never NaN — and was removed.)
    if (dd['num'] == -1).sum() > 0:
        msg = msg + '\nInvalid line number at row: {}'.format(rows(dd['num'] == -1))
    if np.isnan(dd['lambda']).sum() > 0:
        msg = msg + '\nInvalid wavelength at row: {}'.format(rows(np.isnan(dd['lambda'])))
    if np.isnan(dd['l_shift']).sum() > 0:
        msg = msg + '\nInvalid wavelength shift at row: {}'.format(rows(np.isnan(dd['l_shift'])))
    if np.isnan(dd['i_cor']).sum() > 0:
        msg = msg + '\nInvalid intensity correction at row: {}'.format(rows(np.isnan(dd['i_cor'])))
    if np.isnan(dd['i_rel']).sum() > 0:
        msg = msg + '\nInvalid relative intensity at row: {}'.format(rows(np.isnan(dd['i_rel'])))
    if len(msg) > 0:
        # Any invalid field invalidates the whole file.
        dd = dd[:0]
    return dd.view(np.recarray), msg
class spectrum(object):
    def __init__(self, config_file=None, phyat_file=None, profil_instr=profil_instr,
                 do_synth = None, do_read_liste = None, do_cosmetik = None, do_run = True, limit_sp = None,
                 spectr_obs=None, sp_norm=None, obj_velo=None, post_proc_file=None):
        """
        Main pySSN object.
        It reads the configuration file given by the config_file parameter.

        It reads the atomic data, model and cosmetik files. It reads the observation. It computes the reddening
        correction, the continuum and, unless do_run is False, the full synthetic spectrum.

        Parameters
        ----------
        config_file : str
            Path to the pySSN configuration file.
        phyat_file : str, optional
            Atomic-physics line database; defaults to the 'phyat_file'
            configuration entry ('liste_phyat.dat' when undefined).
        profil_instr : callable
            Instrumental-profile function.
        do_synth, do_read_liste, do_cosmetik : bool or None
            Overrides for the corresponding configuration switches
            (None means: use the configuration value).
        do_run : bool
            If True (default), run the full synthesis at construction time.
        limit_sp : 2-sequence, optional
            Wavelength limits of the synthesis.
        spectr_obs, sp_norm, obj_velo : optional
            Overrides for the observation file, flux normalization and
            object radial velocity.
        post_proc_file : str, optional
            Post-processing file name, stored for later use.
        """
        self.cursor = None
        self.errorMsg = ''
        self.selected_ions_data = None
        # Mapping from the process digit embedded in a line number to a
        # human-readable process name (and its printed abbreviation below).
        self.process = { '0' : 'recombination',
                         '1' : 'recombination',
                         '2' : 'dielectronic',
                         '3' : 'collisional',
                         '4' : 'Bowen',
                         '5' : 'recombination',
                         '6' : 'recombination',
                         '7' : 'fluorecence',
                         '8' : 'charge exchange',
                         '9' : 'recombination' }
        self.process_abbr = {
                '0' : 'rec.',
                '1' : 'rec.',
                '2' : 'die.',
                '3' : 'col.',
                '4' : 'fl.',
                '5' : 'rec.',
                '6' : 'rec.',
                '7' : 'fl.',
                '8' : 'ch.ex.',
                '9' : 'rec.' }
        # Printable fields of a line record, with their widths, alignment,
        # column positions, formats, tooltips and abbreviated headers.
        self.fields = [ 'num', 'id', 'lambda', 'proc', 'l_shift', 'i_rel', 'i_cor', 'ref', 'profile', 'vitesse', 'comment' ]
        self.field_width = { 'num' : 14,
                             'id' : 9,
                             'lambda' : 11,
                             'proc' : 1,
                             'l_shift' : 6,
                             'l_tot' : 11,
                             'i_rel' : 10,
                             'i_cor' : 7,
                             'i_tot' : 10,
                             'ref' : 14,
                             'profile' : 4,
                             'vitesse' : 7,
                             'comment' : 100 }
        self.field_align = { 'num' : '>',
                             'id' : '<',
                             'lambda' : '>',
                             'proc' : '<',
                             'l_shift' : '>',
                             'l_tot' : '>',
                             'i_rel' : '>',
                             'i_cor' : '>',
                             'i_tot' : '>',
                             'ref' : '>',
                             'profile' : '>',
                             'vitesse' : '>',
                             'comment' : '<' }
        self.field_pos = { 'num' : 0,
                           'id' : 15,
                           'lambda' : 24,
                           'proc' : 5,
                           'l_shift' : 35,
                           'i_rel' : 41,
                           'i_cor' : 51,
                           'ref' : 59,
                           'profile' : 73,
                           'vitesse' : 77,
                           'comment' : 85 }
        self.field_format = { 'num' : '{:>14d}',
                              'id' : '{:9s}',
                              'lambda' : '{:11.3f}',
                              'proc' : '{:1s}',
                              'l_shift' : '{:6.3f}',
                              'l_tot' : '{:11.3f}',
                              'i_rel' : '{:10.3e}',
                              'i_cor' : '{:7.3f}',
                              'i_tot' : '{:10.3e}',
                              'ref' : '{:>14d}',
                              'profile' : '{:>4d}',
                              'vitesse' : '{:7.2f}',
                              'comment' : '{:>s}' }
        self.field_tip = { 'num' : 'line code number',
                           'id' : 'ion',
                           'lambda' : 'wavelength in air',
                           'proc' : 'line process',
                           'l_shift' : 'wavelength additive correction',
                           'l_tot' : 'corrected wavelength',
                           'i_rel' : 'relative intensity',
                           'i_cor' : 'intensity correction factor',
                           'i_tot' : 'corrected intensity',
                           'ref' : 'reference line code number',
                           'profile' : 'line profile code number',
                           'vitesse' : 'natural line width',
                           'comment' : 'comment' }
        self.field_abbr = { 'num' : 'line number',
                            'id' : 'ion',
                            'lambda' : 'wavelength',
                            'proc' : 'process',
                            'l_shift' : 'w shift',
                            'l_tot' : 'corr wave',
                            'i_rel' : 'intensity',
                            'i_cor' : 'i factor',
                            'i_tot' : 'corr int',
                            'ref' : 'ref line',
                            'profile' : 'profile',
                            'vitesse' : 'v factor',
                            'comment' : 'comment' }
        self.calling = 'spectrum'
        # Split the configuration path into directory + file name.
        self.full_config_file = config_file
        if '/' in self.full_config_file:
            file_name = self.full_config_file.split('/')[-1]
            dir_ = self.full_config_file.split(file_name)[0]
            if dir_ == '':
                dir_ = './'
            self.directory = dir_
            self.config_file = file_name
        else:
            self.directory = './'
            self.config_file = self.full_config_file
        config.addDataFilePath(self.directory, inpySSN=False)
        self.init_vars()
        self.read_conf(self.config_file)
        log_.level = self.get_conf('log_level', 2)
        # Without synthesis, point the file settings at placeholders and
        # disable residual plotting.
        if not self.get_conf('do_synth'):
            self.set_conf('plot_residuals', False)
            self.set_conf('fic_cosmetik', 'NO_cosmetik.dat')
            self.set_conf('fic_modele', 'NO_modele.dat')
            self.set_conf('phyat_file', 'NO_phyat.dat')
        if self.get_conf('spectr_obs') is None:
            self.set_conf('plot_residuals', False)
        # Constructor arguments override the configuration when not None.
        if do_synth is None:
            self.do_synth = self.get_conf('do_synth')
        else:
            self.do_synth = do_synth
        if do_read_liste is None:
            do_read_liste = self.get_conf('do_read_liste')
        if do_cosmetik is None:
            do_cosmetik = self.get_conf('do_cosmetik')
        self.profil_instr = profil_instr
        self.do_cosmetik = do_cosmetik
        self.post_proc_file = post_proc_file
        self.init_obs(spectr_obs=spectr_obs, sp_norm=sp_norm, obj_velo=obj_velo, limit_sp=limit_sp)
        self.init_red_corr()
        self.make_continuum()
        if phyat_file is not None:
            self.phyat_file = phyat_file
        else:
            self.phyat_file = self.get_conf('phyat_file', 'liste_phyat.dat')
        if do_run:
            self.run(do_synth = self.do_synth, do_read_liste = do_read_liste)
            self.show_uncor_spec = False
    def init_vars(self):
        """Initialize plotting/cursor state and assorted defaults.

        Called once from __init__ before the configuration is read.
        """
        # Matplotlib figure/axis handles (created lazily by the plot code).
        self.fig1 = None
        self.fig2 = None
        self.fig3 = None
        self.ax1 = None
        self.ax2 = None
        self.ax3 = None
        # Interactive-cursor state.
        self.cursor_width = 0.02
        self.cursor_w0 = None
        self.cursor_w1 = None
        self.cursor_w2 = None
        self.firstClick = True
        self.aire_ref = 1.0
        self.zoom_fact = 0.1
        self._cid = None
        # Obsolete qt-era overlay plots (see read_conf).
        self.plot_magenta = None
        self.plot_cyan = None
        self.label_magenta = None
        self.label_cyan = None
        self.hr = False
        self.split = True
        self.do_ax2 = True
        self.do_buttons = True
        self.do_ax3 = True
        self.ax2_fontsize = 12
        self.legend_loc = 1
        self.legend_fontsize = 'medium'
        # Plot limits (None = autoscale).
        self.x_plot_lims = None
        self.y1_plot_lims = None
        self.y2_plot_lims = None
        self.y3_plot_lims = None
        self.read_obs_error = ''
        # NOTE(review): attribute name is a typo for 'interpolate_velocity',
        # but it is referenced under this spelling elsewhere (read_obs), so
        # it must be kept as-is.
        self.iterpolate_velocity = True
def init_obs(self, spectr_obs=None, sp_norm=None, obj_velo=None, limit_sp=None):
if spectr_obs is not None:
self.set_conf('spectr_obs', spectr_obs)
if sp_norm is not None:
self.set_conf('sp_norm', sp_norm)
if obj_velo is not None:
self.set_conf('obj_velo', obj_velo)
if limit_sp is None:
self.limit_sp = self.get_conf('limit_sp')
else:
self.limit_sp = limit_sp
self.read_obs()
    def run(self, do_synth = True, do_read_liste = True, do_profiles=True):
        """Run the synthesis pipeline.

        Optionally (re)reads the profile definitions and the phyat/model/
        cosmetik lists, builds the theoretical spectra, then convolves with
        the instrumental filter and rebins onto the observation.

        Parameters
        ----------
        do_synth : bool
            Compute the synthetic spectrum (otherwise only the continuum
            pipeline at the end is run).
        do_read_liste : bool
            Re-read the phyat and model files before synthesizing.
        do_profiles : bool
            Re-read the emission-line profile definitions.
        """
        ErrorMsg = ''
        if do_profiles:
            self.do_profile_dict()
        if do_synth:
            if do_read_liste:
                self.fic_model = self.get_conf('fic_modele', message='error')
                self.phyat_arr, ErrorMsg = self.read_phyat(self.phyat_file)
                # Accumulate error messages, separated by blank lines.
                self.errorMsg = ('{}\n\n{}'.format(self.errorMsg,ErrorMsg)).strip()
                self.model_arr, ErrorMsg = self.read_model(self.fic_model)
                self.errorMsg = ('{}\n\n{}'.format(self.errorMsg,ErrorMsg)).strip()
                self.n_models = len(self.model_arr)
                self.n_data = len(self.phyat_arr)
            if self.n_models > 0 and self.n_data > 0:
                self.cosmetik_arr, errorMsg = self.read_cosmetik()
                self.n_cosmetik = len(self.cosmetik_arr)
                self.sp_theo, self.liste_totale, self.liste_raies = \
                    self.append_lists(self.phyat_arr, self.model_arr, self.cosmetik_arr)
                self.sp_theo, self.sp_synth = self.make_synth(self.liste_raies, self.sp_theo)
                self.n_sp_theo = len(self.sp_theo['spectr'])
            else:
                # Nothing to synthesize: disable synthesis for this session.
                self.sp_theo = None
                self.sp_synth = None
                self.n_sp_theo = 0
                self.set_conf('do_synth', False)
        else:
            self.sp_theo = None
            self.sp_synth = None
            self.n_sp_theo = 0
        self.f *= self.aire_ref
        self.sp_abs = self.make_sp_abs(self.sp_theo)
        self.make_filter_instr()
        self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
        self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
def get_key_indexes(self, key, prof):
return sorted([indexed_key.replace(key,'') for indexed_key in prof.keys() if key in indexed_key])
    def format_instr_prof(self):
        """Return the configured instrumental profile as a printable string.

        Validates that the 'instr_prof' dict has a 'width' entry and that
        the indexed Bb/Br/alpha/beta parameter families share the same set
        of index suffixes; on failure a descriptive error string is
        returned instead of the formatted profile.
        """
        def get_indexes(key):
            l = self.get_key_indexes(key, self.conf['instr_prof'])
            return l
        prof = self.conf['instr_prof']
        if prof is None:
            return 'instrumental profile not defined'
        keys = prof.keys()
        if not 'width' in keys:
            return 'invalid instrumental profile: width parameter is missing'
        indexes = get_indexes('Bb')
        # All four parameter families must be indexed identically.
        if not indexes == get_indexes('Br') == get_indexes('alpha') == get_indexes('beta'):
            return 'invalid instrumental profile: error in indexes'
        # Column widths so the printed values line up per family.
        w1 = max([len(str(prof[key])) for key in keys if 'Bb' in key])
        w2 = max([len(str(prof[key])) for key in keys if 'Br' in key])
        w3 = max([len(str(prof[key])) for key in keys if 'beta' in key])
        w4 = max([len(str(prof[key])) for key in keys if 'alpha' in key])
        s = '\'width\': {}'.format(prof['width'])
        for i in indexes:
            s += ',\n \'Bb{0}\':{1:{w1}}, \'Br{0}\':{2:{w2}}, \'beta{0}\':{3:{w3}}, \'alpha{0}\':{4:{w4}}'.format(i,
                prof['Bb'+i], prof['Br'+i], prof['beta'+i], prof['alpha'+i], w1 = w1, w2 = w2, w3 = w3, w4 = w4)
        if 'comment' in keys:
            s += ',\n \'comment\': \'{}\''.format(prof['comment'].strip())
        s = '{{{}}}'.format(s)
        return s
def do_profile_dict(self, return_res=False):
self.fic_profs = self.get_conf('fic_profile', None)
if self.fic_profs is None:
self.fic_profs = execution_path('./')+'../data/default_profiles.dat'
else:
self.fic_profs = self.directory + self.fic_profs
if not os.path.isfile(self.fic_profs):
log_.error('File not found {}'.format(self.fic_profs), calling=self.calling)
emis_profiles = {}
emis_profiles['1'] = {'T4': 1.0, 'vel':0.0, 'params': [['G', 1.0, 0.0, 1.0]]}
prof_params = None
with open(self.fic_profs) as f:
for l in f:
if l[0] not in ('#', 'c', 'C', ';'):
if '#' in l:
l = l.split('#')[0]
if ';' in l:
l = l.split(';')[0]
if ':' in l:
if prof_params is not None:
emis_profiles[key] = {'T4': T4,
'vel': vel,
'params' : prof_params}
key, T4_vel = l.split(':')
T4_vel = T4_vel.split()
T4 = np.float(T4_vel[0].strip())
if len(T4_vel) == 2:
vel = np.float(T4_vel[1].strip())
else:
vel = 0.0
prof_params = []
else:
if l.split() != []:
params = l.split()
params[1::] = [np.float(p.strip()) for p in params[1::]]
prof_params.append(params)
emis_profiles[key] = {'T4': T4,
'vel': vel,
'params' : prof_params}
log_.message('line profiles read from {0}'.format(self.fic_profs),
calling = self.calling)
if return_res:
return emis_profiles
self.emis_profiles = emis_profiles
def compare_profiles(self):
ref_diff = []
new_profile = self.do_profile_dict(return_res=True)
for k in new_profile.keys():
if k not in self.emis_profiles.keys():
ref_diff.append(k)
if new_profile[k]['T4'] != self.emis_profiles[k]['T4']:
ref_diff.append(k)
if new_profile[k]['vel'] != self.emis_profiles[k]['vel']:
ref_diff.append(k)
for lo, ln in zip(self.emis_profiles[k]['params'], new_profile[k]['params']):
for llo,lln in zip(lo, ln):
if llo != lln:
ref_diff.append(k)
return np.unique(ref_diff)
    def get_profile(self, raie):
        """Compute the emission profile of one line on the wavelength grid.

        The line's profile code selects an entry of self.emis_profiles
        (falling back to '1'); its components (Gauss/Carre/Lorentz) are
        summed, then convolved with a thermal Gaussian when T4 > 0.

        Parameters
        ----------
        raie : record
            One line record with 'profile', 'lambda', 'l_shift',
            'vitesse' and 'num' fields.

        Returns
        -------
        numpy.ndarray
            Profile sampled on self.w (non-finite values zeroed).
        """
        # Each basic profile type takes exactly 3 parameters.
        basic_profiles_dic = {'G': (3, gauss),
                              'C': (3, carre),
                              'L': (3, lorentz)}
        profile_key = str(raie['profile'])
        if profile_key not in self.emis_profiles:
            profile_key = '1'
        T4 = self.emis_profiles[profile_key]['T4']
        vel = self.emis_profiles[profile_key]['vel']
        params_str = self.emis_profiles[profile_key]['params']
        lambda_0 = raie['lambda'] + raie['l_shift'] + self.get_conf('lambda_shift', 0.0)
        # Wavelength offsets from the (velocity-shifted) line center.
        w_norm = self.w - lambda_0 - vel * lambda_0 / CST.CLIGHT * 1e5
        profile = np.zeros_like(self.w)
        # Natural width converted from velocity (km/s) to wavelength units.
        largeur = raie['vitesse'] * lambda_0 / CST.CLIGHT * 1e5
        # Atomic mass decoded from the leading digits of the line number;
        # the special case presumably maps H I back to mass 1 —
        # TODO confirm against the line-number encoding convention.
        masse = 2 * (raie['num'] - raie['num'] % 100000000000)/100000000000
        if (masse == 2 and (raie['num'] - raie['num'] % 101000000000)/100000000 == 1010) :
            masse = 1
        for param in params_str:
            profile_type = param[0]
            if profile_type not in basic_profiles_dic:
                log_.error('Wrong number profile reference{}'.format(profile_type), calling=self.calling)
            params = param[1::]
            if len(params) != basic_profiles_dic[profile_type][0]:
                log_.error('Wrong number of parameters {} for profile {}'.format(len(params), profile_type), calling=self.calling)
            profile += basic_profiles_dic[profile_type][1](w_norm, params[0], params[1]*largeur, params[2]*largeur)
        if T4 > 0.0:
            # Thermal broadening: Doppler FWHM for temperature T4 (1e4 K units).
            fwhm_therm = 21.4721 * np.sqrt(T4 / masse) * lambda_0 / CST.CLIGHT * 1e5 #km/s
            profile = convolgauss(profile, self.w, lambda_0, fwhm_therm)
        profile[~np.isfinite(profile)] = 0.0
        return profile
def read_conf(self, config_file=None):
if config_file is None:
config_file = self.config_file
else:
self.config_file = config_file
self.conf = {}
init_conf = {}
execfile(execution_path('./')+'init_defaults.py', self.conf)
self.default_keys = self.conf.keys()
if self.config_file is not None:
if not os.path.exists(self.directory + self.config_file):
log_.error('File {} not found'.format(self.directory + self.config_file))
try:
execfile(self.directory + self.config_file, init_conf)
log_.message('configuration read from {0}'.format(self.config_file),
calling = self.calling)
except:
log_.warn('configuration NOT read from {0}'.format(self.config_file),
calling = self.calling)
obsolete_keys = list(set(init_conf.keys())-set(self.default_keys))
obsolete_keys.sort()
if len(obsolete_keys) > 0:
log_.message('list of variables read from {} that changed name or are obsolete:\n{}'.format(self.config_file, obsolete_keys),
calling = self.calling)
# to change keys automatically
old_keys = ['allow_editing_lines', 'gcont_pl_alpha', 'index_of_current_ion', 'prof', 'line_field_print', 'line_saved_filename', 'line_saved_header', 'line_saved_ordered_by', 'qt_fig_adjust', 'qt_fig_bottom', 'qt_fig_hspace', 'qt_fig_left', 'qt_fig_right', 'qt_fig_top', 'show_dialogs', 'update_after_editing_lines']
new_keys = ['qt_allow_editing_lines', 'cont_pl_alpha', 'index_of_selected_ions', 'instr_prof', 'save_lines_fields', 'save_lines_filename', 'save_lines_header', 'save_lines_sort', 'fig_adjust', 'fig_bottom', 'fig_hspace', 'fig_left', 'fig_right', 'fig_top', 'qt_show_dialogs', 'qt_update_after_editing_lines']
new_name = dict(zip(old_keys, new_keys))
for key in old_keys:
if key in init_conf.keys():
if new_name[key] not in init_conf.keys():
init_conf[new_name[key]] = init_conf[key]
log_.message('variable \'{}\' get from old name \'{}\' from init file {}'.format(new_name[key], key, self.config_file),
calling = self.calling)
del init_conf[key]
# to get 'cont_user_table' from old {'cont_in_lambda', 'cont_intens', 'cont_lambda'}
if {'cont_in_lambda', 'cont_intens', 'cont_lambda'}.issubset(set(init_conf.keys())) and 'cont_user_table' not in init_conf.keys():
x = init_conf['cont_lambda']
y = init_conf['cont_intens']
if isinstance(x, (list,)) and isinstance(y, (list,)) and len(x) == len(y):
s = ''
for i in range(len(x)):
s += '({}, {}), '.format(x[i], y[i])
s = s.strip(' ,')
if s != '':
path = 'cont_user_table = [{}]'.format(s)
try:
user_module = {}
exec(path) in user_module
value = user_module['cont_user_table']
self.set_conf('cont_user_table', value)
log_.message('\'cont_user_table\' get from \'cont_lambda\' and \'cont_intens\'', calling = self.calling)
except:
log_.warn('Can not get \'cont_user_table\' from \'cont_lambda\' and \'cont_intens\'', calling = self.calling)
self.conf.update(init_conf)
# Obsolete for qt
self.plot_magenta = self.get_conf('plot_magenta', None)
self.label_magenta = self.get_conf('label_magenta', None)
self.plot_cyan = self.get_conf('plot_cyan', None)
self.label_cyan = self.get_conf('label_cyan', None)
# If you DON'T want an i_cor on a main line to affect the satellites,
# set the following variable to False
self.set_conf('recursive_i_cor', True)
# Is i_cor applied in the atomic physic database AND model database?
# If this is the case, i_cor on phyat_database will be directly
# applied on i_rel and will not appear as i_cor in the printed liste
# of lines or with the cursor.
self.set_conf('do_icor_outside_cosmetik', True)
# If you want to perform cosmetik on reference lines (which have ref = 0):
self.set_conf('do_icor_on_ref', True)
# Here follow caracteristics of the reference line.
# This line will be assumed to have a flux
# at center of 1.00/A. NO!!!
# Obsolete
# self.set_conf('do_calcul_aire_ref', False)
# self.set_conf('raie_ref ', {"vitesse" : 25.0, "lambda" : 4861.0, "profile" : 1}) # depuis 25/10/01
def get_conf(self, key=None, undefined=None, message=None):
"""
Return the value of the key parameter in the configuration.
If key is not defined, return the value of the undefined keyword, default being None.
"""
if key is None:
for k in self.conf.keys():
self.get_conf(k)
return None
if key not in self.conf:
if message == 'warn':
log_.warn('{0} not defined in configuration file'.format(key), calling=self.calling)
elif message == 'message':
log_.message('{0} not defined in configuration file'.format(key), calling=self.calling)
elif message == 'error':
log_.error('{0} not defined in configuration file'.format(key), calling=self.calling)
else:
pass
return undefined
else:
return self.conf[key]
if 'fic_modele' not in self.conf:
log_.warn('fic_model not defined in configuration file', calling=self.calling)
return None
    def set_conf(self, key, value):
        """
        Set the value of the configuration "key" parameter to "value".

        Parameters
        ----------
        key : str
            Configuration key (new keys are silently created).
        value :
            Value to store; no validation is performed here.
        """
        self.conf[key] = value
def read_phyat(self, phyat_file):
self.phyat_file = phyat_file
phyat_arr = []
for dir_ in config.DataPaths:
try:
phyat_arr, ErrorMsg = read_data('{0}/{1}'.format(dir_, self.phyat_file))
if ErrorMsg:
ErrorMsg = 'Error in line database file \'{}\':'.format(self.phyat_file) + ErrorMsg
log_.message('phyat data read from {0}/{1}'.format(dir_, self.phyat_file),
calling = self.calling)
break
except:
ErrorMsg = 'Line database file \'{}\' not found.'.format(self.phyat_file)
if len(phyat_arr) == 0:
log_.warn( ErrorMsg, calling = self.calling)
return phyat_arr, ErrorMsg
def read_model(self, model_file):
ErrorMsg = ''
model_arr = []
path = self.directory + model_file
if not os.path.isfile(path):
ErrorMsg = 'Model file \'{}\' not found.'.format(os.path.basename(path))
log_.warn(ErrorMsg, calling=self.calling)
return model_arr, ErrorMsg
if model_file == 'from phyat':
mask = self.phyat_arr['ref'] == 999
model_arr = self.phyat_arr.copy()[mask]
model_arr['num'] -= 90000000000000
model_arr['vitesse'] = 10
log_.message('data initialized from phyat',
calling = self.calling)
else:
try:
model_arr, ErrorMsg = read_data(path)
if ErrorMsg == '':
log_.message('cosmetik read from {0}'.format(os.path.basename(path)), calling = self.calling)
else:
ErrorMsg = 'Error in model file \'{0}\':'.format(os.path.basename(path)) + ErrorMsg
log_.warn(ErrorMsg, calling = self.calling)
log_.message('data read from {0}'.format(path),
calling = self.calling)
except:
ErrorMsg = 'Unable to read from file \'{0}\''.format(os.path.basename(path))
log_.warn(ErrorMsg, calling = self.calling)
model_arr['ref'] = 0
return model_arr, ErrorMsg
def read_cosmetik_old(self):
self.fic_cosmetik = self.get_conf('fic_cosmetik', message='warn')
self.do_cosmetik = self.get_conf('do_cosmetik')
cosmetik_arr = []
if self.do_cosmetik and self.fic_cosmetik is not None:
try:
cosmetik_arr, msg = read_data(self.fic_cosmetik)
fic_cosmetik_ok = True
log_.message('cosmetik read from {0}'.format(self.directory + self.fic_cosmetik),
calling = self.calling)
except:
fic_cosmetik_ok = False
log_.warn('unable to read from {0}'.format(self.directory + self.fic_cosmetik),
calling = self.calling)
return cosmetik_arr, fic_cosmetik_ok
def read_cosmetik(self):
self.fic_cosmetik = self.get_conf('fic_cosmetik', message='warn')
self.do_cosmetik = self.get_conf('do_cosmetik')
cosmetik_arr = []
ErrorMsg = ''
if self.do_cosmetik and self.fic_cosmetik is not None:
if os.path.isabs(self.fic_cosmetik):
path = self.fic_cosmetik
else:
path = self.directory + self.fic_cosmetik
if os.path.isfile(path):
if os.path.getsize(path) > 0:
cosmetik_arr, ErrorMsg = read_data(path)
if ErrorMsg == '':
log_.message('cosmetik read from {0}'.format(path), calling = self.calling)
else:
log_.warn('unable to read from {0}'.format(path), calling = self.calling)
else:
log_.warn('empty cosmetic file {0}'.format(path), calling = self.calling)
else:
log_.warn('new cosmetic file {0}'.format(path), calling = self.calling)
return cosmetik_arr, ErrorMsg
def read_obs(self, k_spline = 1):
self.read_obs_error = ''
if self.get_conf('spectr_obs') is not None:
s = self.conf['spectr_obs'].split('.')
if len(s) == 1:
comm = '(with extention .fits, .spr, and .spr.gz) '
obs_file = self.directory + s[0] + '.spr'
if not os.path.isfile(obs_file):
obs_file = self.directory + s[0] + '.spr.gz'
if not os.path.isfile(obs_file):
obs_file = self.directory + s[0] + '.fits'
else:
comm = ''
obs_file = self.directory + self.conf['spectr_obs']
if not os.path.isfile(obs_file):
self.read_obs_error = 'Observed spectrum file \'{}\' {}not found'.format(self.conf['spectr_obs'], comm)
log_.warn(self.read_obs_error, calling = self.calling)
else:
if obs_file.split('.')[-1] == 'fits':
from astropy.io import fits
try:
self.f, header = fits.getdata(obs_file, header=True)
dispersion_start = header['CRVAL1'] - (header['CRPIX1'] - 1) * header['CDELT1']
self.w = dispersion_start + np.arange(len(self.f)) * header['CDELT1']
except:
self.read_obs_error = 'Observations NOT read from {0}'.format(obs_file)
log_.warn(self.read_obs_error, calling = self.calling)
else:
try:
self.obs = np.loadtxt(obs_file)
log_.message('Observations read from {0}'.format(obs_file),
calling = self.calling)
if bool(self.get_conf('data_incl_w', undefined = False)):
self.w = self.obs[:,0]
self.f = self.obs[:,1]
else:
self.f = self.obs
self.w = None
if bool(self.get_conf('reverse_spectra', undefined=False)):
self.f = self.f[::-1]
except:
self.read_obs_error = 'Observations NOT read from {0}'.format(obs_file)
log_.warn(self.read_obs_error, calling = self.calling)
if self.get_conf('spectr_obs') is None or len(self.read_obs_error) > 0:
n_pix = (self.limit_sp[1] - self.limit_sp[0]) / self.conf['lambda_pix']
self.w = np.linspace(self.limit_sp[0], self.limit_sp[1], n_pix)
self.f = np.ones_like(self.w)
self.set_conf('plot_residuals', False)
if self.get_conf('wave_unit') == 'mu':
self.w *= 10000.
self.n_lambda = len(self.f)
self.tab_pix = np.arange(self.n_lambda)
self.f *= self.get_conf('sp_norm', undefined = 1.)
if ("cal_lambda" in self.conf) and ("cal_pix" in self.conf) and (self.w is None):
cal_lambda = np.array(self.conf["cal_lambda"])
cal_pix = np.array(self.conf["cal_pix"])
arg_sort = cal_lambda.argsort()
cal_lambda = cal_lambda[arg_sort]
cal_pix = cal_pix[arg_sort]
interp_lam = interpolate.UnivariateSpline(cal_pix, cal_lambda, k=k_spline)
self.w = interp_lam(self.tab_pix)
log_.message('Wavelength table generated using spline of order {0}'.format(k_spline),
calling = self.calling)
if bool(self.get_conf('reverse_spectra', undefined=False)) :
self.f = self.f[::-1]
if self.limit_sp[0] < 0.01:
self.limit_sp[0] = np.min(self.w) * (1. + self.get_conf("delta_limit_sp")/100.)
if self.limit_sp[1] > 0.9e10:
self.limit_sp[1] = np.max(self.w) * (1. - self.get_conf("delta_limit_sp")/100.)
# mvfc: this is a new feature; obj_velo can be set by interpolation
# obj_velo_table = [(4000,85), (4200,90), (4300,90), (6000,75), (7000,85) ]
if self.get_conf('obj_velo_table') is not None and self.iterpolate_velocity:
try:
x = np.array([i[0] for i in list(self.get_conf('obj_velo_table'))])
y = np.array([i[1] for i in list(self.get_conf('obj_velo_table'))])
f = interpolate.interp1d(x, y)
v = f((float(self.limit_sp[0])+float(self.limit_sp[1]))/2)
v = int(v*100)/100.
self.set_conf('obj_velo', v)
except:
#self.set_conf('obj_velo', 0.0)
log_.warn('Error interpolating radial velocity', calling = self.calling)
self.obj_velo = self.get_conf("obj_velo", undefined=0.)
self.w *= 1 - self.obj_velo/(CST.CLIGHT/1e5)
log_.message('Wavelenghts shifted by Vel = {} km/s'.format(self.conf["obj_velo"]),
calling = self.calling)
lims = ((self.w >= self.limit_sp[0]) & (self.w <= self.limit_sp[1]))
log_.message('Observations resized from {0} to {1}'.format(len(self.w), lims.sum()), calling=self.calling)
self.w_min = self.w[0]
self.w_max = self.w[-1]
self.w = self.w[lims]
self.f = self.f[lims]
do_shift = False
if self.get_conf('lambda_shift_table') is not None:
try:
x = np.array([i[0] for i in list(self.get_conf('lambda_shift_table'))])
y = np.array([i[1] for i in list(self.get_conf('lambda_shift_table'))])
f = interpolate.interp1d(x, y, fill_value=0, bounds_error=False)
w_shift = f(self.w)
do_shift = True
except:
self.read_obs_error = 'Error interpolating wavelengh correction table'
log_.warn(self.read_obs_error, calling = self.calling)
self.w_obs = self.w.copy()
self.f_ori = self.f.copy()
if do_shift:
correction_is_valid = True
for i in range(1,len(self.w)):
if ((self.w[i]+w_shift[i])-(self.w[i-1]+w_shift[i-1]))*(self.w[i]-self.w[i-1]) <= 0:
correction_is_valid = False
if correction_is_valid:
self.w += w_shift
log_.message('Wavelengths shifted', calling = self.calling)
else:
self.read_obs_error = 'Invalid wavelengh correction table.\nThe order of pixels must be preserved.'
log_.warn(self.read_obs_error, calling = self.calling)
self.w_ori = self.w.copy()
resol = self.get_conf('resol', undefined = 1, message=None)
log_.message('Observations resized from {0} by a factor of {1}'.format(len(self.w), resol),
calling=self.calling)
self.w = change_size(self.w, resol)
self.f = change_size(self.f, resol)
self.n_lambda = len(self.f)
self.tab_pix = change_size(self.tab_pix, resol)
self.lambda_pix = (np.max(self.w) - np.min(self.w)) / self.n_lambda
#log_.debug('n_lambda = {}, tab_pix = {}, lambda_pix = {}'.format(self.n_lambda, self.tab_pix, self.lambda_pix),
# calling = self.calling)
def renorm(self, new_norm):
self.f /= self.get_conf('sp_norm', undefined = 1.)
self.f_ori /= self.get_conf('sp_norm', undefined = 1.)
self.set_conf('sp_norm', new_norm)
self.f *= self.get_conf('sp_norm', undefined = 1.)
self.f_ori *= self.get_conf('sp_norm', undefined = 1.)
def init_red_corr(self):
self.E_BV = self.get_conf('e_bv', 0.)
self.R_V = self.get_conf('r_v', 3.1)
if self.E_BV > 0:
RC = pn.RedCorr(E_BV = self.E_BV, law=self.get_conf('red_corr_law', message='error'), R_V=self.R_V)
self.red_corr = RC.getCorr(self.w, self.get_conf('lambda_ref_rougi', message='error'))
log_.message('Reddening correction set to {0}'.format(self.E_BV), calling=self.calling)
else:
self.red_corr = np.ones_like(self.w)
    def update_user_cont(self):
        """Recompute the user-defined continuum component and refresh the total.

        Re-interpolates 'cont_user_table' on the wavelength grid, swaps
        the new user component into self.cont, and recomputes the
        convolved/rebinned synthetic totals.
        """
        user_cont = np.zeros_like(self.w)
        if self.get_conf('cont_user_table') is not None:
            try:
                x = np.array([i[0] for i in list(self.get_conf('cont_user_table'))])
                y = np.array([i[1] for i in list(self.get_conf('cont_user_table'))])
                kind = self.get_conf('cont_user_func')
                # Degrade the interpolation order when there are too few points.
                if kind == 'cubic' and len(x) < 4:
                    kind = 'quadratic'
                if kind == 'quadratic' and len(x) < 3:
                    kind = 'linear'
                if kind == 'linear' and len(x) < 2:
                    kind = 'zero'
                user_cont_int = interpolate.interp1d(x, y, kind=kind, fill_value=0, bounds_error=False)
                user_cont = user_cont_int(self.w)
            except:
                self.errorMsg = 'Problem in user-defined continuum interpolation.'
                kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
                if kind not in kinds:
                    self.errorMsg += '\nInvalid function'
                log_.message(self.errorMsg, calling = self.calling)
        # Move the continuum to the frame where the user component is
        # defined (undo aire_ref scaling, redo reddening), swap the old
        # user component for the new one, then transform back.
        # NOTE(review): the order of these in-place operations matters;
        # presumably it mirrors the construction in make_continuum — confirm
        # before changing.
        self.cont /= self.aire_ref
        self.cont *= self.red_corr
        self.cont = self.cont - self.conts['user'] + user_cont
        self.cont *= self.aire_ref
        self.cont /= self.red_corr
        self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
        self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
        self.conts['user'] = user_cont
def cont_at(self, wave, side = '-'):
i_list = [i for i in range(len(self.w)-1) if self.w[i] <= wave <= self.w[i+1] or self.w[i+1] <= wave <= self.w[i]]
if len(i_list) == 1:
i = i_list[0]
if side == '+' and i+1 in range(len(self.w)):
return self.cont[i+1]
else:
return self.cont[i]
else:
return None
    def make_continuum(self):
        """Build every continuum component into self.conts and sum them into self.cont.

        Components: 'user' (interpolated table or pixel list), 'bb'
        (blackbodies), 'pl' (power laws), 'H'/'He1'/'He2' (bound-free
        recombination continua), 'FF' (free-free), '2photons' (H two-photon
        emission).  The total is scaled by the reference-line area and
        divided by the reddening correction.
        """
        self.conts = {}
        # --- user-defined continuum, interpolated from a (wavelength, flux) table
        user_cont = np.zeros_like(self.w)
        if self.get_conf('cont_user_table') is not None:
            try:
                x = np.array([i[0] for i in list(self.get_conf('cont_user_table'))])
                y = np.array([i[1] for i in list(self.get_conf('cont_user_table'))])
                kind = self.get_conf('cont_user_func')
                # Degrade the interpolation order when too few points are given.
                if kind == 'cubic' and len(x) < 4:
                    kind = 'quadratic'
                if kind == 'quadratic' and len(x) < 3:
                    kind = 'linear'
                if kind == 'linear' and len(x) < 2:
                    kind = 'zero'
                user_cont_int = interpolate.interp1d(x, y, kind=kind, fill_value=0, bounds_error=False)
                user_cont = user_cont_int(self.w)
            except:
                # NOTE(review): 'kind' may be unbound here if the failure
                # happened before its assignment above — confirm.
                self.errorMsg = 'Problem in user-defined continuum interpolation.'
                kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
                if kind not in kinds:
                    self.errorMsg += '\nInvalid function'
                log_.message(self.errorMsg, calling = self.calling)
        # Alternative: continuum specified at pixel positions (overrides the table).
        cont_pix = self.get_conf("cont_pix", 0.)
        if cont_pix != 0:
            arg_sort = np.array(cont_pix).argsort()
            user_cont_int = interpolate.interp1d(np.array(cont_pix)[arg_sort],
                                                 np.array(self.get_conf("cont_intens", message='error'))[arg_sort])
            user_cont = user_cont_int(self.tab_pix)
        self.conts['user'] = user_cont
        # --- blackbody component(s): scalars are promoted to 1-element arrays
        bb_cont = np.zeros_like(self.w)
        if "cont_bb_t" in self.conf:
            if np.ndim(self.conf["cont_bb_t"]) == 0:
                tab_T = np.array([self.conf["cont_bb_t"]])
                tab_I = np.array([self.conf["cont_bb_i"]])
            else:
                tab_T = np.array(self.conf["cont_bb_t"])
                tab_I = np.array(self.conf["cont_bb_i"])
            for I, T in zip(tab_I, tab_T):
                bb_cont += I * Planck(self.w, T) / T**4
        self.conts['bb'] = bb_cont
        pl_cont = np.zeros_like(self.w) # Power law
        if "cont_pl_alpha" in self.conf:
            if np.ndim(self.conf["cont_pl_alpha"]) == 0:
                tab_alpha = np.array([self.conf["cont_pl_alpha"]])
                tab_I = np.array([self.conf["cont_pl_i"]])
            else:
                tab_alpha = np.array(self.conf["cont_pl_alpha"])
                tab_I = np.array(self.conf["cont_pl_i"])
            for I, alpha in zip(tab_I, tab_alpha):
                # Power law normalized at 5000 Angstrom.
                pl_cont += I * (self.w / 5000.)**alpha
        self.conts['pl'] = pl_cont
        # --- H I bound-free continuum, normalized by the H-beta emissivity
        if self.conf["cont_hi_i"] != 0.:
            alfa = 1e-13 * 0.668 * (self.conf["cont_hi_t"]/1e4)**(-0.507) / \
                (1. + 1.221*(self.conf["cont_hi_t"]/1e4)**(0.653)) * 1.000
            emis_Hi = alfa * CST.HPLANCK * CST.CLIGHT * 1e8 / 4861.3 # erg/s.cm3
            H_cont = self.conf["cont_hi_i"] * make_cont_Ercolano(self.conf["cont_hi_t"],'H',airtovac(self.w)) / emis_Hi
            H_cont[~np.isfinite(H_cont)] = 0.
            self.conts['H'] = H_cont
        else:
            self.conts['H'] = np.zeros_like(self.w)
        # --- He I bound-free continuum, normalized at 4471.5
        if self.conf["cont_hei_i"] != 0.0:
            alfa = 1e-13 * 0.331 * (self.conf["cont_hei_t"]/1e4)**(-0.615) / \
                (1. + 0.910*(self.conf["cont_hei_t"]/1e4)**(0.780)) * 0.7986
            emis_Hei = alfa * CST.HPLANCK * CST.CLIGHT * 1e8 / 4471.5
            He1_cont = self.conf["cont_hei_i"] * make_cont_Ercolano(self.conf["cont_hei_t"],'He1',airtovac(self.w)) / emis_Hei
            He1_cont[~np.isfinite(He1_cont)] = 0.
            self.conts['He1'] = He1_cont
        else:
            self.conts['He1'] = np.zeros_like(self.w)
        # --- He II bound-free continuum, normalized at 4685.8
        if self.conf["cont_heii_i"] != 0.0:
            alfa = 2. * 1e-13 * 1.549 * (self.conf["cont_heii_t"]/1e4/4.)**(-0.693) / \
                (1. + 2.884*(self.conf["cont_heii_t"]/1e4/4.)**(0.609))*1.000
            emis_Heii = alfa * CST.HPLANCK * CST.CLIGHT * 1e8 / 4685.8
            He2_cont = self.conf["cont_heii_i"] * make_cont_Ercolano(self.conf["cont_heii_t"],'He2',airtovac(self.w)) / emis_Heii
            He2_cont[~np.isfinite(He2_cont)] = 0.
            self.conts['He2'] = He2_cont
        else:
            self.conts['He2'] = np.zeros_like(self.w)
        # --- free-free: Gaunt factors for H+, He+, He++
        gff_HI = gff(1., self.conf["cont_hi_t"], self.w)
        gff_HeI = gff(1., self.conf["cont_hei_t"], self.w)
        gff_HeII = gff(4., self.conf["cont_heii_t"], self.w)
        #32.d0*!phy.e^4.*!phy.h/3./!phy.m_e^2./!phy.c^3.*sqrt(!dpi*13.6*!phy.erg_s_ev/3./!phy.k)= 6.8391014e-38
        # The guard below also ensures emis_Hi/emis_Hei/emis_Heii were all
        # defined in the branches above.
        if self.conf["cont_hi_i"] != 0 and self.conf["cont_hei_i"] != 0 and self.conf["cont_heii_i"] != 0 :
            FF_cont = (6.8391014e-38 * CST.CLIGHT * 1e8 / self.w**2. * (
                self.conf["cont_hi_i"] * 1.0**2. / np.sqrt(self.conf["cont_hi_t"]) * np.exp(-CST.HPLANCK*CST.CLIGHT*1e8/self.w/CST.BOLTZMANN/self.conf["cont_hi_t"]) * gff_HI/emis_Hi +
                self.conf["cont_hei_i"] * 1.0**2./ np.sqrt(self.conf["cont_hei_t"]) * np.exp(-CST.HPLANCK*CST.CLIGHT*1e8/self.w/CST.BOLTZMANN/self.conf["cont_hei_t"]) * gff_HeI/emis_Hei +
                self.conf["cont_heii_i"] * 2.0**2. / np.sqrt(self.conf["cont_heii_t"]) * np.exp(-CST.HPLANCK*CST.CLIGHT*1e8/self.w/CST.BOLTZMANN/self.conf["cont_heii_t"]) * gff_HeII / emis_Heii))
            FF_cont[~np.isfinite(FF_cont)] = 0.
            self.conts['FF'] = FF_cont
        else:
            self.conts['FF'] = np.zeros_like(self.w)
        # 2-photons
        #http://adsabs.harvard.edu/abs/1984A%26A...138..495N
        if self.conf["cont_hi_i"] != 0:
            y = 1215.7 / self.w
            A = 202.0 * (y * (1. - y) * (1. -(4. * y * (1 - y))**0.8) + 0.88 * ( y * (1 - y))**1.53 * (4. * y * (1 - y))**0.8)
            alfa_eff = 0.838e-13 * (self.conf["cont_hi_t"] / 1e4)**(-0.728) # fit DP de Osterbrock
            q = 5.31e-4 * (self.conf["cont_hi_t"] / 1e4)**(-0.17) # fit DP de Osterbrock
            n_crit = 8.226 / q
            twophot_cont = self.conf["cont_hi_i"] * CST.HPLANCK * CST.CLIGHT * 1e8 / self.w**3. * 1215.7 * A / 8.226 * alfa_eff / (1. + self.conf["cont_edens"]/n_crit) / emis_Hi
            twophot_cont[~np.isfinite(twophot_cont)] = 0.
            self.conts['2photons'] = twophot_cont
        else:
            self.conts['2photons'] = np.zeros_like(self.w)
        # --- total continuum: sum of components, area-scaled, de-reddened
        self.cont = np.zeros_like(self.w)
        for key in self.conts:
            self.cont += self.conts[key]
        self.cont *= self.aire_ref
        self.cont /= self.red_corr
def plot_conts(self, ax, keys = None):
if self.sp_synth_lr is None:
return
colors = {'bb': 'cyan', 'pl': 'green', '2photons': 'blue', 'FF': 'red',
'H': 'red', 'He1': 'green', 'He2': 'blue', 'user': 'black'}
labels = {'bb': 'bb', 'pl': 'pl', '2photons': '2q', 'FF': 'ff',
'H': 'H I', 'He1': 'He I', 'He2': 'He II', 'user': 'user cont'}
if keys == None:
keys = self.conts.keys()
for key in keys:
if key[0] == 'H':
style=':'
else:
style = '-'
ax.plot(self.w, self.conts[key], linestyle=style, label = labels[key], color = colors[key])
if 'user' in keys:
if self.get_conf('cont_user_table') is not None:
x = np.array([i[0] for i in self.get_conf('cont_user_table')])
y = np.array([i[1] for i in self.get_conf('cont_user_table')])
ax.plot(x, y, marker='o', ms=6, color = colors['user'], ls = '')
y = [self.cont_at(w) for w in x]
y[0] = self.cont_at(x[0], '+')
ax.plot(x, y, marker='o', ms=8, color = 'green', ls = '')
ax.plot(self.w, self.cont, label = 'total cont', linestyle='--', linewidth = 1.5, color = 'green')
ax.legend()
    def append_lists(self, phyat_arr, model_arr, cosmetik_arr):
        """Concatenate the atomic (phyat) and model line lists, apply the
        cosmetic corrections, and build the sp_theo bookkeeping structure.

        Returns (sp_theo, liste_totale, liste_raies): sp_theo holds, per
        reference line of model_arr, a cumulated spectrum and a correction
        factor; liste_raies is liste_totale restricted to the observed range.
        """
        n_models = len(model_arr)
        # liste_totale = phyat_arr followed by model_arr.
        liste_totale = phyat_arr.copy()
        liste_totale.resize(len(phyat_arr) + len(model_arr))
        for i,j in enumerate(np.arange(len(phyat_arr), len(phyat_arr) + len(model_arr))):
            liste_totale[j] = model_arr[i]
        sp_theo = {}
        sp_theo['raie_ref'] = model_arr
        sp_theo['correc'] = np.zeros(n_models)
        sp_theo['spectr'] = np.zeros((n_models, len(self.w)))
        if "do_icor_outside_cosmetik" in self.conf:
            # Fold the i_cor factor into i_rel once and for all.
            liste_totale.i_rel *= liste_totale.i_cor
            liste_totale.i_cor = 1.
            sp_theo['raie_ref'].i_rel *= sp_theo['raie_ref'].i_cor
            sp_theo['raie_ref'].i_cor = 1.
        # First cosmetic pass: wavelength shifts and intensity corrections.
        if self.do_cosmetik:
            for line_cosmetik in cosmetik_arr:
                if (line_cosmetik['ref'] == 0) and not bool(self.conf['do_icor_on_ref']):
                    log_.warn('No cosmetik on {0}, reference line'.format(line_cosmetik['num']),
                              calling = self.calling)
                else:
                    to_change = (liste_totale.num == line_cosmetik['num'])
                    if to_change.sum() == 1:
                        line_to_change = liste_totale[to_change][0]
                        # Only accept shift/correction when consistent with the
                        # current line parameters (or when it is the identity).
                        if (line_to_change['lambda'] == line_cosmetik['lambda']) or (line_cosmetik['l_shift'] == 0.):
                            line_to_change['l_shift'] = line_cosmetik['l_shift']
                        if (line_to_change['i_rel'] == line_cosmetik['i_rel']) or (line_cosmetik['i_cor'] == 1.):
                            line_to_change['i_cor'] = line_cosmetik['i_cor']
                        liste_totale[to_change] = line_to_change
                        log_.debug('Cosmetik on {0}'.format([line_cosmetik]), calling=self.calling)
                    elif to_change.sum() == 0:
                        if self.get_conf('warn_on_no_cosmetik'):
                            log_.warn('No cosmetik on {0}, undefined line'.format(line_cosmetik['num']),
                                      calling = self.calling)
                    else:
                        log_.warn('No cosmetik on {0}, multiple defined line'.format(line_cosmetik['num']),
                                  calling = self.calling)
        liste_raies = self.restric_liste(liste_totale)
        log_.message('Size of the line list: {0}, size of the restricted line list: {1}'.format(len(liste_totale),
                                                                                               len(liste_raies)), calling=self.calling)
        # Second cosmetic pass (on the restricted list): velocity and profile.
        if self.do_cosmetik:
            for line_cosmetik in cosmetik_arr:
                if bool(self.conf['do_icor_on_ref']):
                    to_change = (liste_raies.num == line_cosmetik['num'])
                    if to_change.sum() == 1:
                        line_to_change = liste_raies[to_change][0]
                        line_to_change['vitesse'] *= line_cosmetik['vitesse']
                        if line_cosmetik['profile'] != -1:
                            line_to_change['profile'] = line_cosmetik['profile']
                        liste_raies[to_change] = line_to_change
        return sp_theo, liste_totale, liste_raies
    def restric_liste(self, liste_in):
        """
        Restrict liste_in to displayable lines within the observed wavelength
        range and propagate parameters from reference lines to satellites.
        N.B. This function changes liste_in (the ref=999 propagation below
        is done in place).
        """
        """
        We set ref=999 for all the lines depending on a 999 one.
        """
        # Propagate the 'hidden' flag (ref == 999) down dependency chains
        # until a full pass makes no change.
        while True:
            the_end = True
            non_affich = np.where(liste_in['ref'] == 999)[0]
            for i_999 in non_affich:
                this_num = liste_in['num'][i_999]
                dep_non_affich = ((liste_in['ref'] == this_num) & (liste_in['ref'] != 999))
                if dep_non_affich.sum() != 0:
                    before = liste_in['ref'][dep_non_affich]
                    liste_in['ref'][dep_non_affich] = 999
                    log_.message('Before = {0}, After = {1}'.format(before, liste_in['ref'][dep_non_affich]), calling=self.calling)
                    the_end = False
            if the_end:
                break
        # Keep only visible lines whose shifted wavelength falls in range.
        where_restr = (((liste_in['lambda'] + liste_in['l_shift']) < np.max(self.w)) &
                       ((liste_in['lambda'] + liste_in['l_shift']) > np.min(self.w)) &
                       (liste_in['ref'] != 999))
        liste_out = liste_in.copy()[where_restr]
        log_.message('Old size = {0}, new_size = {1}'.format(len(liste_in), len(liste_out)), calling=self.calling)
        # Resolve satellite lines against their reference lines.  Chains of
        # references are collapsed by re-pointing raie['ref'] to the parent's
        # own reference (last_loop = 2 forces another pass); last_loop == 1
        # marks the final pass in which the reference parameters are applied.
        last_loop = 0
        the_end = False
        while True:
            if last_loop == 1:
                the_end = True
            satellites = np.where(liste_out['ref'] != 0)[0]
            for i_satellite in satellites[::-1]:
                if liste_out['ref'][i_satellite] != -1:
                    raie_synth = liste_out[i_satellite].copy()
                    i_main_line = np.where(liste_in['num'] == raie_synth['ref'])[0]
                    if len(i_main_line) != 1:
                        # Orphan satellite: silence it and tag the comment.
                        if self.get_conf('warn_on_no_reference'):
                            log_.warn('Satellite sans raie de reference:{0} looking for {1}'.format(raie_synth['num'], raie_synth['ref']),
                                      calling=self.calling)
                        raie_synth['i_rel'] = 0.0
                        raie_synth['comment'] = '!pas de ref' + raie_synth['comment']
                    else:
                        main_line = liste_in[i_main_line]
                        if main_line['ref'] != 0:
                            # The reference is itself a satellite: climb one level.
                            raie_synth['i_rel'] *= main_line['i_rel']
                            raie_synth['ref'] = main_line['ref']
                            if bool(self.conf['recursive_i_cor']):
                                raie_synth['i_cor'] *= main_line['i_cor']
                            raie_synth['profile'] = main_line['profile']
                            last_loop = 2
                            log_.debug('filling {0} with {1}'.format(liste_out[i_satellite]['num'],
                                                                     main_line['num']),
                                       calling = self.calling)
                        if last_loop == 1:
                            # Final pass: apply the reference-line parameters.
                            raie_synth['i_rel'] *= main_line['i_rel']
                            raie_synth['vitesse'] *= main_line['vitesse']
                            raie_synth['l_shift'] += main_line['l_shift']
                            if bool(self.conf['recursive_i_cor']):
                                raie_synth['i_cor'] *= main_line['i_cor']
                            raie_synth['profile'] = main_line['profile']
                            log_.debug('filling {0} with {1}, last loop'.format(liste_out[i_satellite]['num'],
                                                                               main_line['num']),
                                       calling = self.calling)
                    liste_out[i_satellite] = raie_synth
            if last_loop == 0:
                last_loop = 1
            else:
                last_loop = 0
            if the_end:
                break
        # Drop numerically zero lines.
        tt = (np.abs(liste_out['i_rel']) > 1e-50)
        log_.message('number of lines with i_rel > 1e-50: {0}'.format(tt.sum()), calling=self.calling)
        return liste_out[tt]
    def make_synth_test(self, liste_raies):
        """Variant of make_synth() that works on a copy of self.sp_theo.

        NOTE(review): dict.copy() is shallow, so the in-place zeroing of
        sp_theo['spectr'] and sp_theo['correc'] below also clears the arrays
        held by self.sp_theo — confirm this is intended.
        """
        sp_theo = self.sp_theo.copy()
        sp_synth = np.zeros_like(self.w)
        sp_theo['spectr'] *= 0.0
        sp_theo['correc'] *= 0.0
        for raie in liste_raies:
            #sp_tmp = self.profil_emis(self.w, raie, self.conf['lambda_shift'])
            sp_tmp = self.get_profile(raie)
            aire = np.trapz(sp_tmp, self.w)
            if np.isfinite(aire) and (aire != 0.):
                max_sp = np.max(sp_tmp)
                # Warn when the profile does not vanish at the window edges.
                if (np.abs(sp_tmp[0]/max_sp) > 1e-3) or (np.abs(sp_tmp[-1]/max_sp) > 1e-3):
                    log_.message('Area of {0} {1} could be wrong'.format(raie['id'], raie['lambda']),
                                 calling = self.calling)
                # Peak value normalizing the profile area to i_rel * i_cor.
                intens_pic = raie['i_rel'] * raie['i_cor'] * self.aire_ref / aire
                # Accumulate on the line's own entry (reference line) or on
                # its parent's entry (satellite).
                if raie['ref'] == 0:
                    tab_tmp = (sp_theo['raie_ref'].num == raie['num'])
                else:
                    tab_tmp = (sp_theo['raie_ref'].num == raie['ref'])
                this_line = intens_pic * sp_tmp
                if not no_red_corr(raie):
                    this_line /= self.red_corr
                if not is_absorb(raie):
                    sp_synth += this_line
                sp_theo['spectr'][tab_tmp] += this_line
                sp_theo['correc'][tab_tmp] = 1.0
        # Keep only the reference lines that actually received flux.
        tt = (sp_theo['correc'] != 0.)
        for key in ('correc', 'raie_ref', 'spectr'):
            sp_theo[key] = sp_theo[key][tt]
        log_.message('Number of theoretical spectra: {0}'.format(len(sp_theo['correc'])), calling=self.calling)
        return sp_theo, sp_synth
    def make_synth(self, liste_raies, sp_theo):
        """Compute the synthetic emission spectrum of *liste_raies*.

        Each line profile is normalized so its area equals
        i_rel * i_cor * aire_ref, de-reddened unless flagged, and accumulated
        both into the per-reference-line spectra of *sp_theo* and (for
        emission lines) into the returned total sp_synth.
        Returns (sp_theo, sp_synth); sp_theo is filtered to the reference
        lines that received flux.
        """
        sp_synth = np.zeros_like(self.w)
        sp_theo['spectr'] *= 0.0
        sp_theo['correc'] *= 0.0
        #TODO parallelize this loop
        for raie in liste_raies:
            #sp_tmp = self.profil_emis(self.w, raie, self.conf['lambda_shift'])
            sp_tmp = self.get_profile(raie)
            aire = np.trapz(sp_tmp, self.w)
            if np.isfinite(aire) and (aire != 0.):
                max_sp = np.max(sp_tmp)
                # Warn when the profile does not vanish at the window edges.
                if (np.abs(sp_tmp[0]/max_sp) > 1e-3) or (np.abs(sp_tmp[-1]/max_sp) > 1e-3):
                    log_.message('Area of {0} {1} could be wrong'.format(raie['id'], raie['lambda']),
                                 calling = self.calling)
                # Peak value normalizing the profile area to i_rel * i_cor.
                intens_pic = raie['i_rel'] * raie['i_cor'] * self.aire_ref / aire
                log_.debug('{} aire = {}'.format(raie['num'], aire), calling=self.calling)
                # Accumulate on the line's own entry (reference line) or on
                # its parent's entry (satellite).
                if raie['ref'] == 0:
                    tab_tmp = (sp_theo['raie_ref'].num == raie['num'])
                else:
                    tab_tmp = (sp_theo['raie_ref'].num == raie['ref'])
                this_line = intens_pic * sp_tmp
                if not no_red_corr(raie):
                    this_line /= self.red_corr
                if not is_absorb(raie):
                    sp_synth += this_line
                sp_theo['spectr'][tab_tmp] += this_line
                sp_theo['correc'][tab_tmp] = 1.0
                log_.debug('doing line {}'.format(raie['num']), calling=self.calling)
        # Keep only the reference lines that actually received flux.
        tt = (sp_theo['correc'] != 0.)
        for key in ('correc', 'raie_ref', 'spectr'):
            sp_theo[key] = sp_theo[key][tt]
        log_.message('Number of theoretical spectra: {0}'.format(len(sp_theo['correc'])), calling=self.calling)
        return sp_theo, sp_synth
    def make_sp_abs_original(self, sp_theo):
        """Build the absorption spectrum exp(tau) from the absorbing lines,
        optionally multiplied by atmospheric transmission templates.

        NOTE(review): the *sp_theo* argument is only used for the None guard;
        the optical depth is accumulated from self.sp_theo — confirm intended.
        """
        if sp_theo is None:
            return None
        sp_tau = np.zeros_like(self.w)
        """
        WARNING check also misc.is_absorb(raie)
        """
        # Sum the (corrected) spectra of all absorbing reference lines.
        index_abs = is_absorb(self.sp_theo['raie_ref'])
        for i_abs in index_abs:
            sp_tau += self.sp_theo['spectr'][i_abs] * self.sp_theo['correc'][i_abs]
        sp_abs = np.exp(sp_tau)
        if self.get_conf('fic_atm') is not None:
            # Normalize scalar config entries to 1-element tuples.
            if type(self.get_conf('fic_atm')) not in (list, tuple):
                self.conf['fic_atm'] = (self.conf['fic_atm'],)
                self.conf['coeff_atm'] = (self.conf['coeff_atm'],)
                self.conf['shift_atm'] = (self.conf['shift_atm'],)
            if len(self.get_conf('fic_atm')) != len(self.get_conf('coeff_atm')):
                log_.error('fic_atm number {} != coeff_atm number {}'.format(len(self.get_conf('fic_atm')), len(self.get_conf('coeff_atm'))),
                           calling = self.calling)
            for fic_atm, coeff_atm, shift_atm in zip(self.get_conf('fic_atm'), self.get_conf('coeff_atm'), self.get_conf('shift_atm')):
                try:
                    d = np.genfromtxt(fic_atm, dtype=None, names=('wl', 'abs'))
                    d['wl'] = vactoair(d['wl'], self.conf['vactoair_inf'], self.conf['vactoair_sup'])
                    if type(coeff_atm) not in (list, tuple):
                        coeff_atm = (coeff_atm, )
                    if type(shift_atm) not in (list, tuple):
                        shift_atm = (shift_atm, )
                    for c_atm, s_atm in zip(coeff_atm, shift_atm):
                        # Doppler-shift the template and scale its opacity by c_atm.
                        abs_interp = interpolate.interp1d(d['wl']*(1+s_atm/CST.CLIGHT*1e5), d['abs'])
                        sp_abs *= np.exp(np.log(abs_interp(self.w)) * c_atm)
                except:
                    log_.warn('Problem in using data from {}'.format(fic_atm),
                              calling = self.calling)
        # sp_abs /= self.red_corr
        return sp_abs
    def make_sp_abs(self, sp_theo, index_abs=None):
        """Build the absorption spectrum exp(tau), like make_sp_abs_original()
        but allowing the caller to supply *index_abs* (indices of absorbing
        reference lines) directly.

        NOTE(review): as in make_sp_abs_original, the optical depth is
        accumulated from self.sp_theo, not from the *sp_theo* argument.
        """
        if sp_theo is None:
            return None
        sp_tau = np.zeros_like(self.w)
        """
        WARNING check also misc.is_absorb(raie)
        """
        if index_abs is None:
            index_abs = is_absorb(self.sp_theo['raie_ref'])
        for i_abs in index_abs:
            sp_tau += self.sp_theo['spectr'][i_abs] * self.sp_theo['correc'][i_abs]
        sp_abs = np.exp(sp_tau)
        if self.get_conf('fic_atm') is not None:
            # Normalize scalar config entries to 1-element tuples.
            if type(self.get_conf('fic_atm')) not in (list, tuple):
                self.conf['fic_atm'] = (self.conf['fic_atm'],)
                self.conf['coeff_atm'] = (self.conf['coeff_atm'],)
                self.conf['shift_atm'] = (self.conf['shift_atm'],)
            if len(self.get_conf('fic_atm')) != len(self.get_conf('coeff_atm')):
                log_.error('fic_atm number {} != coeff_atm number {}'.format(len(self.get_conf('fic_atm')), len(self.get_conf('coeff_atm'))),
                           calling = self.calling)
            for fic_atm, coeff_atm, shift_atm in zip(self.get_conf('fic_atm'), self.get_conf('coeff_atm'), self.get_conf('shift_atm')):
                try:
                    d = np.genfromtxt(fic_atm, dtype=None, names=('wl', 'abs'))
                    d['wl'] = vactoair(d['wl'], self.conf['vactoair_inf'], self.conf['vactoair_sup'])
                    if type(coeff_atm) not in (list, tuple):
                        coeff_atm = (coeff_atm, )
                    if type(shift_atm) not in (list, tuple):
                        shift_atm = (shift_atm, )
                    for c_atm, s_atm in zip(coeff_atm, shift_atm):
                        # Doppler-shift the template and scale its opacity by c_atm.
                        abs_interp = interpolate.interp1d(d['wl']*(1+s_atm/CST.CLIGHT*1e5), d['abs'])
                        sp_abs *= np.exp(np.log(abs_interp(self.w)) * c_atm)
                except:
                    log_.warn('Problem in using data from {}'.format(fic_atm),
                              calling = self.calling)
        # sp_abs /= self.red_corr
        return sp_abs
def make_filter_instr(self):
if self.sp_synth is None:
self.filter_ = None
return None
filter_size = 11
increm = 1.1
detect_limit = 1e-3 / np.max(self.sp_synth)
while True:
filter_size = int(filter_size * increm)
if filter_size/2*2 == filter_size:
filter_size += 1
if filter_size > self.n_lambda:
break
self.filter_ = self.profil_instr(filter_size, self.conf['instr_prof'], self.lambda_pix)
if (abs(self.filter_[0]) < detect_limit) and (abs(self.filter_[-1]) < detect_limit):
break
self.filter_ /= self.filter_.sum()
def convol_synth(self, cont, sp_synth):
if sp_synth is None:
return None
input_arr = (cont + sp_synth) * self.sp_abs
kernel = self.filter_
sp_synth_tot = convol(input_arr, kernel)
return sp_synth_tot
def rebin_on_obs(self):
if self.sp_synth_tot is None:
return None, None
resol = self.get_conf('resol', undefined = 1, message=None)
cont_lr = rebin(self.cont, resol)
sp_synth_lr = rebin(self.sp_synth_tot, resol)
return cont_lr, sp_synth_lr
def adjust(self):
spectr0 = self.sp_theo['spectr'].copy()
new_model_arr, errorMsg = self.read_model(self.fic_model)
if len(errorMsg) > 0:
return -1, errorMsg
new_cosmetik_arr, errorMsg = self.read_cosmetik()
if len(errorMsg) > 0:
return -1, errorMsg
new_sp_theo, new_liste_totale, new_liste_raies = self.append_lists(self.phyat_arr, new_model_arr, new_cosmetik_arr)
mask_diff = np.zeros(len(new_liste_raies), dtype=bool)
for key in ('lambda', 'l_shift', 'i_rel', 'i_cor', 'vitesse', 'profile'):
mask_diff = mask_diff | (new_liste_raies[key] != self.liste_raies[key])
log_.debug('{} differences in lines from files'.format(mask_diff.sum()),
calling=self.calling + ' adjust')
ref_diff = self.compare_profiles()
log_.debug('{} differences in profile'.format(len(ref_diff)),
calling=self.calling + ' adjust')
for im, l in enumerate(new_liste_raies.profile):
if np.str(l) in ref_diff:
mask_diff[im] = True
if mask_diff.sum() > 0 and len(mask_diff) == len(self.liste_raies):
old_sp_theo = self.sp_theo.copy()
if len(new_sp_theo) != len(old_sp_theo):
log_.error('The new list has different number of elements',
calling = self.calling+'.adjust')
liste_old_diff = self.liste_raies[mask_diff]
old_sp_theo, old_sp_synth = self.make_synth(liste_old_diff, old_sp_theo)
if len(ref_diff) > 0:
self.do_profile_dict()
liste_new_diff = new_liste_raies[mask_diff]
new_sp_theo, new_sp_synth = self.make_synth(liste_new_diff, new_sp_theo)
if log_.level >= 3:
print('Old values:')
self.print_line(liste_old_diff)
print('New values:')
self.print_line(liste_new_diff)
do_abs = False
self.sp_theo['spectr'] = spectr0
old_sp_abs = self.make_sp_abs(old_sp_theo)
for i_change in np.arange(len(new_sp_theo['raie_ref'])):
to_change = (self.sp_theo['raie_ref']['num'] == new_sp_theo['raie_ref'][i_change]['num'])
new_sp_theo['correc'][i_change] = self.sp_theo['correc'][to_change].copy()
old_sp_theo['correc'][i_change] = self.sp_theo['correc'][to_change].copy()
if (new_sp_theo['raie_ref'][i_change]['i_rel'] != old_sp_theo['raie_ref'][i_change]['i_rel']):
new_sp_theo['correc'][i_change] = 1.0
self.sp_theo['spectr'][to_change] += new_sp_theo['spectr'][i_change] - old_sp_theo['spectr'][i_change]
self.sp_theo['raie_ref'][to_change] = new_sp_theo['raie_ref'][i_change]
self.sp_theo['correc'][to_change] = new_sp_theo['correc'][i_change]
if is_absorb(new_sp_theo['raie_ref'][i_change]):
do_abs = True
else:
self.sp_synth += (new_sp_theo['correc'][i_change] * new_sp_theo['spectr'][i_change] -
old_sp_theo['correc'][i_change] * old_sp_theo['spectr'][i_change])
log_.message('change line {0}'.format(new_sp_theo['raie_ref'][i_change]['num']),
calling=self.calling + ' adjust')
if do_abs:
self.sp_abs = self.sp_abs/old_sp_abs*self.make_sp_abs(self.sp_theo)
self.liste_raies = new_liste_raies
self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
log_.message('{} differences'.format(mask_diff.sum()), calling=self.calling + ' adjust')
return mask_diff.sum(), errorMsg
#self.update_plot2()
# def modif_intens(self, raie_num, fact):
#
# if fact <= 0.:
# log_.error('fact must be >0. {0}'.format(fact))
# return None
# a_changer = (self.sp_theo['raie_ref']['num'] == raie_num)
# if a_changer.sum() == 1:
# old_correc = self.sp_theo['correc'][a_changer][0]
# if is_absorb(self.sp_theo['raie_ref'][a_changer]):
# sp_abs_old = self.make_sp_abs(self.sp_theo[a_changer])
# self.sp_theo['correc'][a_changer] = fact
# sp_abs_new = self.make_sp_abs(self.sp_theo[a_changer])
# self.sp_abs = self.sp_abs - sp_abs_old + sp_abs_new
# else:
# self.sp_synth += (fact-old_correc) * self.sp_theo['spectr'][a_changer][0]
# self.sp_theo['correc'][a_changer] = fact
# #self.sp_theo['spectr'][a_changer] *= fact/old_correc # adding this break the tool
# self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
# self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
# self.update_plot2()
#
def print_line(self, line, sort='lambda', reverse=False):
if type(line) == np.core.records.recarray:
sorts = np.argsort(line[sort])
if reverse:
sorts = sorts[::-1]
for isort in sorts:
self.print_line(line[isort])
return
print('{0[num]:>14d} {0[id]:9s}{0[lambda]:11.3f}{0[l_shift]:6.3f}{0[i_rel]:10.3e}{0[i_cor]:7.3f}'\
' {0[ref]:>14d}{0[profile]:5d}{0[vitesse]:7.2f}{1:1s}'.format(line, line['comment'].strip()))
def get_line_info(self, line_num, sort='lambda', reverse=False):
line = None
refline = None
satellites = None
refline_num = -1
to_select = (self.liste_raies['num'] == line_num)
if to_select.sum() > 0:
line = self.liste_raies[to_select][0]
refline_num = line['ref']
if line is None:
refline_num = line_num
to_select = (self.sp_theo['raie_ref']['num'] == refline_num)
if to_select.sum() > 0:
refline = self.sp_theo['raie_ref'][to_select][0]
to_select = (self.liste_raies['ref'] == refline_num)
satellites = self.liste_raies[to_select]
order = np.argsort(satellites[sort])
satellites = np.array(satellites)[order]
return line, refline, satellites
def read_satellites(self, filename, refline_num):
with open(filename, 'r') as f:
satellites = []
for eachline in f:
if int(self.fieldStrFromLine(eachline,'ref')) == refline_num:
satellites.append(eachline)
return satellites
def cosmetic_line_unchanged_old(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.read_line(self.phyat_file, line_num)
if line == None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
line = line.rstrip()
keys = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse']
v0 = {i: np.float(self.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.fieldStrFromLine(line_c, i)) for i in keys}
if v0 == v1:
return True
else:
return False
def cosmetic_line_unchanged(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.get_line(self.phyat_arr, line_num)
if line == None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
keys = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse']
v0 = {i: np.float(line[i]) for i in keys}
v1 = {i: np.float(self.fieldStrFromLine(line_c, i)) for i in keys}
if v0 == v1:
return True
else:
return False
def cosmetic_line_ok_old(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.read_line(self.phyat_file, line_num)
if line == None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
line = line.rstrip()
keys = [ 'lambda', 'i_rel' ]
v0 = {i: np.float(self.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.fieldStrFromLine(line_c, i)) for i in keys}
if v0['i_rel'] != v1['i_rel'] or v0['lambda'] != v1['lambda']:
log_.warn('Error in cosmetic file for line {}\n'.format(str(line_num)), calling=self.calling)
log_.warn('(cosmetic) ' + line_c, calling=self.calling)
log_.warn('(database) ' + line, calling=self.calling)
return False
else:
return True
def cosmetic_line_ok(self, line_c):
if line_c == None:
return None
line_num = int(self.fieldStrFromLine(line_c,'num'))
line = self.get_line(self.phyat_arr, line_num)
if line == None:
log_.warn('Error in cosmetic file: line {0:} does not exist in the atomic database\n'.format(str(line_num)), calling=self.calling)
return None
else:
line_c_i_rel = np.float(self.fieldStrFromLine(line_c, 'i_rel'))
line_c_lambda = np.float(self.fieldStrFromLine(line_c, 'lambda'))
if line['i_rel'] != line_c_i_rel or line['lambda'] != line_c_lambda:
log_.warn('Error in cosmetic file for line {}\n'.format(str(line_num)), calling=self.calling)
log_.warn('(cosmetic) {}'.format(line_c), calling=self.calling)
log_.warn('(database) {}'.format(line), calling=self.calling)
log_.warn('lambda rel error {}'.format((line['lambda'] / line_c_lambda)/line_c_lambda), calling=self.calling)
return False
else:
return True
def read_line(self, filename, line_num):
line = None
line_num_str = str(line_num)
k = len(line_num_str)
if not os.path.isfile(filename):
return None
else:
with open(filename, 'r') as f:
line = None
for eachline in f:
s = self.fieldStrFromLine(eachline,'num')
s = str(int(s))
if (int(s) == line_num) or (s[:k] == line_num_str and s[k:].strip('0') == ''):
line = eachline
break
log_.debug('Reading line {} from {}'.format(line_num, filename), calling=self.calling+'.read_line')
return line
def get_line(self, arr, line_num):
mask = arr['num'] == int(line_num)
if mask.sum() == 1:
line = arr[mask][0]
else:
line = None
return line
def fmt(self, field, value):
fmt = self.field_format[field]
return fmt.format(value)
def replace_field(self, line, field, value):
w = self.field_width[field]
if len(value) > w:
return None
elif len(value) < w:
a = self.field_align[field]
value = '{:{a}{w}}'.format(value)
j = self.field_pos[field]
k = j + w
line = line[:j] + value + line[k:]
return line
def remove_line(self, filename, line_num):
line = self.read_line(filename, line_num)
if line == None:
return False
if not os.path.isfile(filename):
return False
else:
f = open(filename, 'r')
lines = f.readlines()
f.close()
i = lines.index(line)
if i >= 0:
del lines[i]
with open(filename, 'w') as f:
f.writelines(lines)
return True
else:
return False
def replace_line(self, filename, line):
line_num = int(self.fieldStrFromLine(line,'num'))
if os.path.isfile(filename):
lineNotFound = True
with open(filename, 'r') as f:
lines = f.read().splitlines()
for i in range(0, len(lines)):
curr_line = lines[i]
if int(self.fieldStrFromLine(curr_line,'num')) == line_num:
lines[i] = line + '\n'
lineNotFound = False
else:
lines[i] = lines[i] + '\n'
if lineNotFound:
lines.append(line)
else:
lines = [line]
with open(filename, 'w') as f:
f.writelines(lines)
def fieldStrFromLine(self, lineOfFile, field):
if lineOfFile == None:
return None
fieldStr = None
if field in self.fields:
i = self.field_pos[field]
j = i+self.field_width[field]
fieldStr = lineOfFile[i:j]
return fieldStr
def line_info(self, line_num, sat_info=True, print_header=True, sort='lambda', reverse=False):
if print_header:
print('\n{0:-^45}'.format(' INFO LINES '))
if type(line_num) == type(()) or type(line_num) == type([]):
for line in line_num:
self.line_info(line, sat_info=sat_info, print_header=False)
return
to_print = (self.liste_raies['num'] == line_num)
if to_print.sum() == 1:
raie = self.liste_raies[to_print][0]
self.print_line(raie)
if raie['ref'] != 0 and sat_info:
print('\nSatellite line of:')
self.line_info(raie['ref'], print_header=False)
to_print = (self.sp_theo['raie_ref']['num'] == line_num)
if to_print.sum() > 0 and sat_info:
raie = self.sp_theo['raie_ref'][to_print][0]
self.print_line(raie)
print('')
satellites_tab = (self.liste_raies['ref'] == raie['num'])
Nsat = satellites_tab.sum()
if Nsat > 0:
print('{0} satellites'.format(Nsat))
self.print_line(self.liste_raies[satellites_tab], sort=sort, reverse=reverse)
if self.sp_theo['correc'][to_print][0] != 1.0:
print('Intensity corrected by {0}'.format(self.sp_theo['correc'][to_print][0]))
if print_header:
print('-'*45)
def get_ref_list(self, ions):
ref_list = []
for ion in ions:
i_ion = np.where(self.sp_theo['raie_ref']['id'] == ion.ljust(9))[0]
if len(i_ion) == 0:
ref_list.append(-1)
for i in i_ion:
ref_list.append(self.sp_theo['raie_ref'][i][0])
return ref_list
def set_ion_list(self):
l = list(set(self.sp_theo['raie_ref']['id']))
self.ion_list = list(set(self.liste_raies['id']))
self.true_ion_list = list(set([self.true_ion(ion) for ion in self.ion_list]))
def get_element_and_int_ion(self, ion):
ion = self.true_ion(ion)
k = ion.find('_')
if k > -1 and self.isRoman(ion[k+1:]):
element = ion[:k]
int_ion = self.roman_to_int(ion[k+1:])
else:
element = ion
int_ion = 999
return element, int_ion
def get_ion_int_from_ion_str(self, ion_str):
k_list = np.where(self.sp_theo['raie_ref']['id'] == self.fmt('id', ion_str))[0]
if len(k_list) > 0:
return self.sp_theo['raie_ref'][k_list][0]['num']/1000000000
else:
return -1
    def set_selected_ions_data(self):
        """Build self.selected_ions_data: one entry per selected ion (or ion
        group) of the form [label, ions, ref lines, ref indices, processes,
        color, linestyle], honoring the 'diff_lines_by' grouping mode, the
        optional sorting, and the configured color cycle.
        """
        color = 'dummy'
        linestyle = 'dummy'
        # Positions of the fields inside each label_list entry.
        pos_label = 0
        pos_ion = 1
        pos_ref = 2
        pos_i_ion = 3
        pos_proc = 4
        pos_color = 5
        pos_linestyle = 6
        # De-duplicate the user's selection while keeping its order.
        ions = []
        selected_ions = self.get_conf('selected_ions')
        for ion in selected_ions:
            if ion not in ions:
                ions.append(ion)
        selected_ions = ions
        colors = self.get_conf('color_selected_ions')
        linestyles = [ 'solid', 'dashed', 'dashdot', 'dotted' ]
        label_list = []
        ref_list = []
        proc_type = self.get_conf('process_code_format')
        label_list = []
        if self.get_conf('diff_lines_by') == 1:
            # Mode 1: split each (true, non-pseudo) ion by physical process code.
            for ion in selected_ions:
                if ion == self.true_ion(ion) and not self.isPseudoIon(ion):
                    ion_int = self.get_ion_int_from_ion_str(ion)
                    for i in range(len(proc_type)):
                        ref_set = set()
                        for j in proc_type[i][0]:
                            proc = ion_int*10+j
                            # NOTE(review): this comparison relies on Python 2
                            # integer division; under Python 3 '/' yields
                            # floats here (use '//' when porting).
                            i_list = np.where(self.liste_raies['num']/100000000 == proc)
                            if len(i_list) > 0:
                                ref_set = ref_set.union(self.liste_raies['ref'][[i_list][0]])
                        if len(ref_set) > 0:
                            ref_list = list(ref_set)
                            i_ion = self.get_ref_index_from_ref_list(ref_list)
                            label = proc_type[i][1].format(ion)
                            label_list.append([label, [ion], ref_list, i_ion, proc_type[i][0], color, linestyle])
                else:
                    i_list = np.where(self.liste_raies['id'] == self.fmt('id', ion))
                    if len(i_list) > 0:
                        ref_list = list(set(self.liste_raies['ref'][[i_list][0]]))
                        proc_set = set()
                        """
                        for line in ref_list:
                            proc = int(str(line)[-9])
                            proc_set.add(proc)
                        """
                        proc_set = {ion}
                        i_ion = self.get_ref_index_from_ref_list(ref_list)
                        label_list.append([ion, [self.true_ion(ion)], ref_list, i_ion, list(proc_set), color, linestyle])
        else:
            for ion in selected_ions:
                ion = self.true_ion(ion)
                all_ions = self.get_all_ions_from_ion(ion)
                i_ion = set()
                for subion in all_ions:
                    i_ion = i_ion.union(np.where(self.sp_theo['raie_ref']['id'] == subion.ljust(9))[0])
                i_ion = list(i_ion)
                ref_list = []
                for i in range(0, len(i_ion)):
                    ref_line = self.sp_theo['raie_ref'][i_ion[i]]['num']
                    ref_list.append(ref_line)
                if self.get_conf('diff_lines_by') == 0:
                    # Mode 0: one entry per individual reference line.
                    for i in range(0, len(i_ion)):
                        ref_line = ref_list[i]
                        refline_str = str(ref_line).strip('0')
                        label = ion + ' (' + refline_str + ')'
                        label_list.append([label, [ion], [ref_line], list(i_ion[i:i+1]), [], color, linestyle])
                else:
                    label_list.append([ion, [ion], ref_list, list(i_ion), [], color, linestyle])
        # sorting
        if self.get_conf('selected_ions_sort'):
            # Selection sort on (element, ionization stage, label).
            for i in range(0,len(label_list)-1):
                label1 = label_list[i][pos_label]
                true_ion1 = self.true_ion(label1)
                element1, int_ion1 = self.get_element_and_int_ion(true_ion1)
                for j in range(i+1, len(label_list)):
                    label2 = label_list[j][pos_label]
                    true_ion2 = self.true_ion(label2)
                    element2, int_ion2 = self.get_element_and_int_ion(true_ion2)
                    if (element2 < element1) or ((element2 == element1) and (int_ion2 < int_ion1)) or ((true_ion2 == true_ion1) and (label2 < label1)):
                        prov = label_list[i]
                        label_list[i] = label_list[j]
                        label_list[j] = prov
                        label1 = label2
                        true_ion1 = true_ion2
                        element1 = element2
                        int_ion1 = int_ion2
        if self.get_conf('diff_lines_by') == 3:
            # Mode 3: merge all ions of the same element into a single entry.
            i = 0
            while i < len(label_list)-1:
                ion = label_list[i][pos_ion][0]
                ion_set = set()
                ion_set.add(ion)
                element = self.element(ion)
                j = i+1
                while j < len(label_list):
                    ion2 = label_list[j][pos_ion][0]
                    element2 = self.element(ion2)
                    if element2 == element:
                        ion_set.add(str(ion2))
                        ref_list = list(set(label_list[i][pos_ref] + label_list[j][pos_ref]))
                        i_ion = list(set(label_list[i][pos_i_ion] + label_list[j][pos_i_ion]))
                        label_list.pop(j)
                        label_list[i][pos_ion] = list(ion_set)
                        label_list[i][pos_ref] = ref_list
                        label_list[i][pos_i_ion] = i_ion
                    else:
                        j += 1
                i += 1
            # Build a compound label like 'O_II+III' from the merged ion names.
            for i in range(len(label_list)):
                ion_list = label_list[i][pos_ion]
                ion_label = label_list[i][pos_label]
                ion_list.sort()
                ion = ion_list[0]
                for j in range(1, len(ion_list)):
                    s = ion_list[j]
                    k = s.index('_')
                    if k > -1:
                        s = s[k+1:]
                    ion = ion + '+' + s
                label_list[i][pos_label] = ion
        # Assign colors cyclically; the linestyle changes each color cycle.
        for k in range(0, len(label_list)):
            color = colors[k%len(colors)]
            # NOTE(review): under Python 3 'k/len(colors)' is a float and this
            # indexing raises TypeError; Python 2 integer division was assumed
            # (use '//' when porting).
            linestyle = linestyles[(k/len(colors))%len(linestyles)]
            label_list[k][pos_color] = color
            label_list[k][pos_linestyle] = linestyle
        self.selected_ions_data = label_list
        return
    def get_refline_lists(self, ions):
        """Return (ref codes, ref indices, legend labels) for the given ions.

        Each ion is normalized with true_ion() and looked up in the
        reference-line table sp_theo['raie_ref'] (ids are stored
        left-justified to 9 characters).  Ions with no reference line get
        sentinel entries (-1, -1, '<ion> (no lines)') so the three lists
        stay aligned with the input.
        """
        ref_code_list = []
        ref_index_list = []
        ref_label_list = []
        for ion in ions:
            ion = self.true_ion(ion)
            # indices of all reference lines belonging to this ion
            i_ion = np.where(self.sp_theo['raie_ref']['id'] == ion.ljust(9))[0]
            if len(i_ion) == 0:
                ref_code_list.append(-1)
                ref_index_list.append(-1)
                ref_label_list.append(ion.replace('_',' ') + ' (no lines)')
            for i in i_ion:
                ref_code_list.append(self.sp_theo['raie_ref'][i][0])
                ref_index_list.append(i)
                if len(i_ion) == 1:
                    ref_label_list.append(ion.replace('_',' '))
                else:
                    # several reference lines: disambiguate labels with a counter
                    ref_label_list.append(ion.replace('_',' ')+ ' - ' + str(np.where(i_ion==i)[0][0]))
        return ref_code_list, ref_index_list, ref_label_list
def get_ref_index_list(self, ions):
ref_index_list = []
for ion in ions:
ion = self.true_ion(ion)
i_ion = np.where(self.sp_theo['raie_ref']['id'] == ion.ljust(9))[0]
for i in i_ion:
ref_index_list.append(i)
return ref_index_list
def get_ref_index_from_ref_list(self, ref_list):
ref_index_list = []
for ref_num in ref_list:
i_ion = np.where(self.sp_theo['raie_ref']['num'] == ref_num)[0]
for i in i_ion:
ref_index_list.append(i)
return ref_index_list
def get_line_from_reduce_code(self, code_str):
if not code_str.isdigit():
line = None
else:
line = self.read_line(self.phyat_file, int(code_str))
if line is None:
line = self.read_line(self.fic_model, int(code_str))
return line
def get_refline_from_code(self, code_str):
s = ''
for line in self.liste_raies:
if code_str == str(line[0]):
s = line['ref']
return s
def get_ion_from_code(self,code_str):
s = ''
for line in self.liste_raies:
if code_str == str(line[0]):
s = line['id']
return s
def isRoman(self, s):
isRom = True
if len(s.strip()) == 0:
isRom = False
else:
for ch in s:
if ch not in ['I', 'V', 'X', 'L']:
isRom = False
return isRom
def roman_to_int(self, s):
x = {'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
s = s.strip()
if len(s) == 0:
n = -1
elif len(s) == 1:
if s == 'I':
n = 1
elif s == 'V':
n = 5
elif s == 'X':
n = 10
else:
n = sum([x[i] if x[i] >= x[j] else -x[i] for i, j in zip(s, s[1:])]) + x[j]
return n
def isPseudoIon(self, ion):
ion = self.true_ion(ion)
k = ion.rfind('_')
if k > 0 and self.isRoman(ion[k+1:]):
return False
else:
return True
def true_ion(self, ion):
k = ion.find('_')
if k > 0:
s = ion[k+1:]
while len(s) > 0 and s[-1] not in [ 'I', 'V', 'X' ]:
s = s[:-1]
if self.isRoman(s):
ion = ion[:k+1] + s
return ion.strip()
def get_all_ions_from_ion(self, ion):
ion = self.true_ion(ion)
ion_list = [ion]
k = len(ion)
for s in self.ion_list:
if len(s) > k and s[:k] == ion and s[k] not in [ 'I', 'V', 'X' ]:
ion_list.append(s.strip())
return list(set(ion_list))
def element(self, ion_str):
k = ion_str.find('_')
if k > -1:
return ion_str[:k]
else:
return ion_str.strip()
    def get_ions_from_element(self, elem):
        """Return the list of ions of element *elem* present in liste_raies,
        sorted by ionization stage."""
        def charge(ion):
            # sort key: the stage as an int for Roman numerals, else the raw
            # string (pseudo-ions).
            # NOTE(review): mixing int and str keys raises TypeError when
            # sorting under Python 3 if both kinds are present -- confirm
            # inputs are homogeneous
            s = ion[ion.index('_')+1:]
            if self.isRoman(s):
                return self.roman_to_int(s)
            else:
                return s
        ion_list = []
        for line in self.liste_raies:
            ion = str(line['id'])
            if elem == self.element(ion):
                ion_list.append(self.true_ion(ion))
        # deduplicate before sorting
        ion_list = list(set(ion_list))
        ion_list.sort(key=charge)
        return ion_list
def save_lines(self):
if self.get_conf('show_selected_intensities_only'):
cut = self.get_conf('cut_plot2')
else:
cut = 0.0
ref_list = self.get_ref_list(self.get_conf('selected_ions'))
sort_list = [ 'lambda', 'i_rel', 'id' ]
k = self.get_conf('save_lines_sort')
sort = sort_list[k/2]
filename = self.get_conf('save_lines_filename')
extension = os.path.splitext(filename)[1][1:].lower()
sep = ' '
end = '\n'
if extension == 'tex':
sep = ' & '
end = ' {0}{0}{1}'.format('\\', '\n')
elif extension == 'csv':
sep = ' ; '
end = '\n'
sorts = np.argsort(self.liste_raies[sort])
if k%2 == 1:
sorts = sorts[::-1]
with open(filename, 'w') as f:
field_print = self.get_conf('save_lines_fields')
n = len(field_print)
if self.get_conf('save_lines_header'):
s = ''
for item in field_print:
f.write('{0:9s} : {1:>}\n'.format(item, self.field_tip[item]))
for item in field_print:
width = self.field_width[item]
align = '<'
if ( item == field_print[n-1] ):
add_s = end
else:
add_s = sep
s = s + str('{:{a}{w}s}{}'.format(item, add_s, a=align, w=width))
f.write('\n'+s+'\n')
for i_sort in sorts:
line = self.liste_raies[i_sort]
wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
i_rel = line['i_rel']
i_tot = line['i_rel'] * line['i_cor']
#if (abs(i_rel) > cut) and ( not self.get_conf('show_selected_ions_only') or line['ref'] in ref_list):
if (abs(i_tot) > cut) and ( not self.get_conf('show_selected_ions_only') or line['ref'] in ref_list):
s = ''
n = len(field_print)
for item in field_print:
thisformat = self.field_format[item]
if item == 'l_tot':
r = wl
elif item == 'i_tot':
r = i_tot
else:
r = line[item]
if item == 'l_shift':
s = s + ' '
s = s + str(thisformat.format(r))
if ( item == field_print[n-1] ):
s = s + end
else:
s = s + sep
f.write(s)
def plot1(self):
f, ax = plt.subplots()
ax.step(self.w_ori, self.f_ori, where='mid', label='Obs')
ax.step(self.w_ori, self.sp_synth_lr, where='mid', label='Synth')
ax.legend()
return f, ax
    def plot2(self, hr=False, cut=None, split=False, do_ax2 = True, do_ax3 = True,
              do_buttons=True, xlims=None, fontsize=12, legend_loc=1, fig=None,
              magenta_ref=None, magenta_lab=None,
              cyan_ref = None, cyan_lab=None, call_init_axes=True):
        """Build the main figure: spectrum (ax1), line ticks (ax2), residuals (ax3).

        Parameters
        ----------
        hr : also draw the high-resolution synthetic spectrum on ax1
        cut : if given, stored as conf 'cut_plot2' (tick intensity threshold)
        split : put ax2/ax3 in separate figures instead of stacked subplots
        do_ax2, do_ax3 : whether to create the tick / residual panels
        do_buttons : add the interactive buttons below the plots
        fig : reuse an existing matplotlib figure (it is cleared first)
        magenta_ref/magenta_lab, cyan_ref/cyan_lab : reference line number and
            legend label of two extra individual-ion spectra; fall back to conf
        call_init_axes : initialize axis limits after drawing
        """
        # NOTE(review): the format string below has no placeholder, so the
        # id() is never shown -- confirm intent
        log_.message('entering plots, ID(ax1)'.format(id(self.fig1)), calling=self.calling)
        self.hr = hr
        self.split = split
        self.do_ax2 = do_ax2
        self.do_buttons = do_buttons
        self.do_ax3 = do_ax3
        if cut is not None:
            self.set_conf('cut_plot2', cut)
        self.ax2_fontsize = fontsize
        self.legend_loc = legend_loc
        # magenta/cyan overlays: explicit arguments win over configuration
        if magenta_ref is not None:
            self.plot_magenta = magenta_ref
            self.label_magenta = magenta_lab
        else:
            self.plot_magenta = self.get_conf('plot_magenta')
            self.label_magenta = self.get_conf('label_magenta')
        if cyan_ref is not None:
            self.plot_cyan = cyan_ref
            self.label_cyan = cyan_lab
        else:
            self.plot_cyan = self.get_conf('plot_cyan')
            self.label_cyan = self.get_conf('label_cyan')
        if fig is None:
            self.fig1 = plt.figure()
            log_.message('creating new figure ID {}'.format(id(self.fig1)), calling=self.calling)
        else:
            self.fig1 = fig
            self.fig1.clf()
            log_.message('using argument figure ID: {} {}'.format(id(self.fig1), id(fig)), calling=self.calling)
        if split:
            # each panel in its own figure, x-axes shared with ax1
            if do_ax2:
                self.fig2 = plt.figure()
            if do_ax3:
                self.fig3 = plt.figure()
            self.ax1 = self.fig1.add_subplot(111)
            if do_ax2:
                self.ax2 = self.fig2.add_subplot(111, sharex=self.ax1)
            if do_ax3:
                self.ax3 = self.fig3.add_subplot(111, sharex=self.ax1)
        else:
            # stacked subplots in fig1; count how many panels are needed
            n_subplots = 1
            i_ax2 = 2
            i_ax3 = 2
            if do_ax2:
                n_subplots += 1
                i_ax3 += 1
            if do_ax3:
                n_subplots += 1
            self.ax1 = self.fig1.add_subplot(n_subplots, 1, 1)
            if do_ax2:
                self.ax2 = self.fig1.add_subplot(n_subplots, 1, i_ax2, sharex=self.ax1)
            if do_ax3:
                self.ax3 = self.fig1.add_subplot(n_subplots, 1, i_ax3, sharex=self.ax1)
        self.plot_ax1(self.ax1, xlims=xlims)
        if do_ax2:
            self.plot_ax2(self.ax2)
        if do_ax3:
            self.plot_ax3(self.ax3)
        if do_buttons:
            self._make_buttons(split=split)
        if call_init_axes:
            self.init_axes()
        self.restore_axes()
        plt.subplots_adjust(hspace=0.0)
    def plot_ax1(self, ax, xlims=None, show_legend=True):
        """Draw observed, synthetic and selected-ion spectra on *ax*."""
        if self.show_uncor_spec:
            # observed spectrum before the wavelength correction
            ax.step(self.w_obs, self.f_ori, where='mid', label='Uncorr', c='yellow', linewidth=1.5)
        ax.step(self.w_ori, self.f_ori, where='mid', label='Obs', c='red', linewidth=1.5)
        if self.sp_synth_lr is None:
            return
        self.ax1_line_synth = ax.step(self.w_ori, self.sp_synth_lr, where='mid', label='Synth', c='blue', linewidth=1.5)[0]
        if self.hr:
            # high-resolution synthetic spectrum
            ax.step(self.w, self.sp_synth, where='mid', c='green')
        selected_ions = self.get_conf('selected_ions')
        self.set_selected_ions_data()
        label_list = self.selected_ions_data
        # field positions inside each selected_ions_data record
        pos_label = 0
        pos_ion = 1
        pos_ref = 2
        pos_i_ion = 3
        pos_proc = 4
        pos_color = 5
        pos_linestyle = 6
        if selected_ions != [] and self.get_conf('plot_lines_of_selected_ions'):
            # a valid 'index_of_current_ion' restricts the plot to one ion
            j = self.get_conf('index_of_current_ion')
            if j in range(0, len(selected_ions)):
                ions = []
                ions.append( selected_ions[j] )
            else:
                ions = selected_ions
            # NOTE(review): 'ions' is computed but never used below -- confirm
            if j in range(0, len(label_list)):
                label_list = label_list[j:j+1]
            for item in label_list:
                label = item[pos_label].replace('_',' ')
                i_ion = item[pos_i_ion]
                color = item[pos_color]
                linestyle = item[pos_linestyle]
                # sum the individual spectra of all lines of this ion
                y = 0
                for i in range(0,len(i_ion)):
                    y = y + self.sp_theo['spectr'][i_ion][i]
                ax.step(self.w, self.cont+y, where='mid', c=color, label=label, linestyle=linestyle )[0]
        if show_legend:
            ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
        else:
            ax.legend().set_visible(False)
        log_.debug('ax1 drawn on ax ID {}'.format(id(ax)), calling=self.calling)
# mvfc: old routine, still needed to run without Qt4 or Qt5
def plot_ax2(self, ax):
if self.sp_synth_lr is None:
return
for line in self.liste_raies:
wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
i_rel = line['i_rel']
if (abs(i_rel) > self.get_conf('cut_plot2')):
ax.axvline( wl, ymin=0.2, ymax=0.8, color = 'blue', linestyle = 'solid', linewidth = 1.5 )
# ax.plot([wl, wl], [0, 1], color='blue')
# ax.text(wl, -0.2, '{0} {1:7.4f}'.format(line['id'], i_rel),
# rotation='vertical', fontsize=self.ax2_fontsize).set_clip_on(True)
log_.debug('ax2 drawn on ax ID {}'.format(id(ax)), calling=self.calling)
    def plot_line_ticks(self, ax, y1, y2, wmin=0., wmax=20000., show_legend=True):
        """Draw vertical line ticks between axis fractions *y1* and *y2*.

        Every visible line above the intensity cut gets a plain tick; lines
        belonging to the selected ions get a colored tick with the ion's
        color/linestyle.
        """
        # field positions inside each selected_ions_data record
        pos_label = 0
        pos_ion = 1
        pos_ref = 2
        pos_i_ion = 3
        pos_proc = 4
        pos_color = 5
        pos_linestyle = 6
        # plain ticks are slightly shorter than the colored ones
        dy = (y2-y1)*0.15
        if self.sp_synth_lr is None:
            return
        lcolor = self.get_conf('line_tick_color')
        label_list = self.selected_ions_data
        j = self.get_conf('index_of_current_ion')
        if j in range(0, len(label_list)):
            # only the current ion is highlighted
            label_list = label_list[j:j+1]
        for line in self.liste_raies:
            wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
            i_rel = line['i_rel']
            i_tot = line['i_rel'] * line['i_cor']
            if (wmin < wl) and (wl < wmax) and ((abs(i_tot) > self.get_conf('cut_plot2')) or ( not self.get_conf('show_selected_intensities_only'))):
                if not self.get_conf('show_selected_ions_only'):
                    ax.axvline( wl, ymin=y1+dy, ymax=y2-dy, color = lcolor, linestyle = 'solid' )
                refline = line['ref']
                ion = line['id'].strip()
                # process code is the 9th digit from the end of 'num'
                proc = int(str(line['num'])[-9])
                for item in label_list:
                    label = item[pos_label]
                    ion_list = item[pos_ion]
                    ref_list = item[pos_ref]
                    color = item[pos_color]
                    linestyle = item[pos_linestyle]
                    proc_list = item[pos_proc]
                    if refline in ref_list and self.true_ion(ion) in ion_list and ( self.get_conf('diff_lines_by') != 1 or proc in proc_list or ion in proc_list ):
                        ax.axvline( wl, ymin=y1, ymax=y2, color = color, linestyle = linestyle, linewidth = 1.5 )
        # To add ticks to the legend of the figure when the spectrum of the selected ions are not plotted
        if show_legend:
            if not self.get_conf('plot_lines_of_selected_ions') or self.get_conf('line_tick_ax') == 1:
                for item in label_list:
                    label = item[pos_label].replace('_',' ')
                    color = item[pos_color]
                    linestyle = item[pos_linestyle]
                    # off-screen step, used only to create the legend entry
                    ax.step( [0,0], [0,100], color = color, linestyle = linestyle, label = label )
                ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
            else:
                ax.legend().set_visible(False)
        log_.debug('Line ticks drawn on ax ID {}'.format(id(ax)), calling=self.calling)
def plot_line_ticks_for(self, satellites, ion, line_num, refline, ax, y1, y2, wmin=0., wmax=20000., addGreenTickToLegend=True):
if self.sp_synth_lr is None:
return
l_shift_refline = np.float(self.fieldStrFromLine(refline,'l_shift'))
ion = ion.replace('_',' ').strip()
line_num = line_num.strip().strip('0')
label = ion + ' (' + line_num + ')'
color = 'green'
for line in satellites:
wl = np.float(self.fieldStrFromLine(line,'lambda')) + \
np.float(self.fieldStrFromLine(line,'l_shift')) + \
self.conf['lambda_shift'] + l_shift_refline
if (wmin < wl) and (wl < wmax):
ax.axvline( wl, ymin=y1, ymax=y2, color = color, linestyle = 'solid', linewidth = 2.5 )
if addGreenTickToLegend:
ax.step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
log_.debug('Line ticks drawn on ax ID {} for line {}'.format(id(ax), line_num), calling=self.calling)
def plot_ax3(self, ax, show_legend=True):
if self.sp_synth_lr is not None:
ax.plot((0, 1e10), (0.0, 0.0), c='green')
#ax.step(self.w, self.f - self.cont, where='mid', c = 'red', linestyle='--')
#ax.step(self.w_ori, self.f_ori - self.cont_lr, where='mid', label='Obs-Cont', c='red', linewidth=2.0, alpha=0.5)
#ax.step(self.w, self.sp_abs*5, where='mid', label='Abs', c='magenta')
ax.step(self.w_ori, self.f_ori - self.cont_lr, where='mid', label='Obs-Cont', c=(1.0, 0.0, 0.0, 0.5), linewidth=1.0)
ax.step(self.w_ori, self.f_ori - self.sp_synth_lr, where='mid', label='Obs-Synth', c='blue', linewidth=1.5)[0]
if show_legend:
ax.legend(loc=self.legend_loc, fontsize=self.legend_fontsize)
else:
ax.legend().set_visible(False)
log_.debug('ax3 drawn on ax ID {}'.format(id(ax)), calling=self.calling)
    def update_plot2(self):
        """Refresh the synthetic spectrum, the magenta/cyan overlays, the
        line ticks (ax2) and the residuals (ax3) after a model change."""
        if self.ax1 is None:
            return
        if self.sp_synth_lr is None:
            return
        # replace the synthetic-spectrum line on ax1
        self.ax1_line_synth.remove()
        self.ax1_line_synth = self.ax1.step(self.w_ori, self.sp_synth_lr, where='mid', label='Synth', c='blue', linewidth=1.5)[0]
        self.ax1.legend(loc=self.legend_loc)
        if self.plot_magenta is not None:
            # remove the previous overlay if it exists
            try:
                self.ax1_line_magenta.remove()
            except:
                pass
            i_magenta = np.where(self.sp_theo['raie_ref']['num'] == self.plot_magenta)[0]
            if self.label_magenta is None:
                self.label_magenta = self.sp_theo['raie_ref'][i_magenta]['id']
            if len(i_magenta) == 1:
                self.ax1_line_magenta = self.ax1.step(self.w, self.cont+self.sp_theo['spectr'][i_magenta][0], where='mid', c='magenta',
                                                      label=self.label_magenta, linestyle='--')[0]
        if self.plot_cyan is not None:
            try:
                self.ax1_line_cyan.remove()
            except:
                pass
            i_cyan = np.where(self.sp_theo['raie_ref']['num'] == self.plot_cyan)[0]
            if self.label_cyan is None:
                self.label_cyan = self.sp_theo['raie_ref'][i_cyan]['id']
            if len(i_cyan) == 1:
                self.ax1_line_cyan = self.ax1.step(self.w, self.cont+self.sp_theo['spectr'][i_cyan][0], where='mid', c='cyan',
                                                   label=self.label_cyan, linestyle='-')[0]
        # clear the previous ticks/labels on ax2.
        # NOTE(review): mutating ax.texts/ax.lines with pop() is deprecated
        # in recent matplotlib -- confirm the targeted matplotlib version
        for i in np.arange(len(self.ax2.texts)):
            self.ax2.texts.pop()
        for i in np.arange(len(self.ax2.lines)):
            self.ax2.lines.pop()
        i_max = np.max(self.liste_raies['i_rel'])
        for line in self.liste_raies:
            wl = line['lambda'] + line['l_shift'] + self.conf['lambda_shift']
            i_rel = line['i_rel']
            if (abs(i_rel) > self.get_conf('cut_plot2')) & (wl > self.ax2.get_xlim()[0]) & (wl < self.ax2.get_xlim()[1]):
                self.ax2.axvline( wl, ymin=0.2, ymax=0.8, color = 'blue', linestyle = 'solid', linewidth = 1.5 )
        if self.do_ax3:
            self.ax3_line_diff = self.ax3.step(self.w_ori, self.f_ori - self.sp_synth_lr, where='mid', c='blue')[0]
        self.fig1.canvas.draw()
def init_axes(self):
self.x_plot_lims = self.get_conf('x_plot_lims')
if self.x_plot_lims is None:
self.x_plot_lims = (np.min(self.w), np.max(self.w))
self.y1_plot_lims = self.get_conf('y1_plot_lims')
if self.y1_plot_lims is None:
if self.sp_synth_lr is None:
self.y1_plot_lims = (np.min(self.f), np.max(self.f))
else:
mask = (self.w_ori > self.x_plot_lims[0]) & (self.w_ori < self.x_plot_lims[1])
self.y1_plot_lims = (np.min(self.sp_synth_lr[mask]), np.max(self.sp_synth_lr[mask]))
self.y2_plot_lims = self.get_conf('y2_plot_lims')
if self.y2_plot_lims is None:
self.y2_plot_lims = (-0.5, 1,5)
self.y3_plot_lims = self.get_conf('y3_plot_lims')
if self.y3_plot_lims is None:
mask = (self.w_ori > self.x_plot_lims[0]) & (self.w_ori < self.x_plot_lims[1])
self.y3_plot_lims = (np.min((self.f - self.cont)[mask]), np.max((self.f - self.cont)[mask]))
log_.message('Axes initialized', calling=self.calling)
self.print_axes()
def save_axes(self):
if self.ax1 is not None:
self.x_plot_lims = self.ax1.get_xlim()
self.y1_plot_lims = self.ax1.get_ylim()
else:
self.x_plot_lims = None
self.y1_plot_lims = None
if self.ax2 is not None:
self.y2_plot_lims = self.ax2.get_ylim()
else:
self.y2_plot_lims = None
if self.ax3 is not None:
self.y3_plot_lims = self.ax3.get_ylim()
else:
self.y3_plot_lims = None
#log_.message('Axes saved', calling=self.calling)
self.print_axes()
def restore_axes(self):
if self.x_plot_lims is not None:
if self.ax1 is not None:
self.ax1.set_xlim(self.x_plot_lims)
log_.message('X-axes restored to {}'.format(self.ax1.get_xlim()), calling=self.calling)
else:
log_.message('ax1 is None', calling=self.calling)
else:
log_.message('x_plot_lims is None', calling=self.calling)
if self.y1_plot_lims is not None:
if self.ax1 is not None:
self.ax1.set_ylim(self.y1_plot_lims)
if self.y2_plot_lims is not None:
if self.ax2 is not None:
self.ax2.set_ylim(self.y2_plot_lims)
if self.y3_plot_lims is not None:
if self.ax3 is not None:
self.ax3.set_ylim(self.y3_plot_lims)
log_.message('Axes restored', calling=self.calling)
self.print_axes()
def print_axes(self):
log_.debug('{} {} {} {}'.format(self.x_plot_lims, self.y1_plot_lims, self.y2_plot_lims, self.y3_plot_lims), calling=self.calling)
def apply_post_proc(self):
if self.post_proc_file is not None and self.post_proc_file is not "":
try:
user_module = {}
execfile(os.path.abspath(self.directory)+'/'+self.post_proc_file, user_module)
self.post_proc = user_module['post_proc']
log_.message('function post_proc read from {}'.format(self.post_proc_file), calling=self.calling)
except:
self.post_proc = None
log_.warn('function post_proc NOT read from {}'.format(self.post_proc_file), calling=self.calling)
if self.post_proc is not None:
self.post_proc(self.fig1)
def rerun(self):
self.run(do_synth = True, do_read_liste = True, do_profiles=True)
def replot2(self):
self.save_axes()
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.plot2(hr=self.hr, cut=self.get_conf('cut_plot2'), split=self.split,
do_ax2=self.do_ax2, do_ax3=self.do_ax3, do_buttons=self.do_buttons,
fontsize=self.ax2_fontsize, legend_loc=self.legend_loc, fig=self.fig1, call_init_axes=False)
self.fig1.canvas.draw()
def _make_buttons(self, split):
if split:
self.fig1.subplots_adjust(bottom=0.3)
else:
self.fig1.subplots_adjust(bottom=0.2)
self.buttons = {}
b_w = 0.06
b_h = 0.06
b_x0 = 0.05
b_y0 = 0.02
ax_zxm = self.fig1.add_axes([b_x0, b_y0, b_w, b_h])
self.buttons['ZX-'] = Button(ax_zxm, 'ZX-')
self.buttons['ZX-'].on_clicked(self._ZoomZXm)
ax_zxp = self.fig1.add_axes([b_x0, b_y0 + b_h, b_w, b_h])
self.buttons['ZX+'] = Button(ax_zxp, 'ZX+')
self.buttons['ZX+'].on_clicked(self._ZoomZXp)
ax_zym = self.fig1.add_axes([b_x0 + b_w, b_y0, b_w, b_h])
self.buttons['Zy-'] = Button(ax_zym, 'ZY-')
self.buttons['Zy-'].on_clicked(self._ZoomZYm)
ax_zyp = self.fig1.add_axes([b_x0 + b_w, b_y0 + b_h, b_w, b_h])
self.buttons['Zy+'] = Button(ax_zyp, 'ZY+')
self.buttons['Zy+'].on_clicked(self._ZoomZYp)
ax_sxm = self.fig1.add_axes([b_x0 + 2*b_w, b_y0, b_w, b_h])
self.buttons['SX-'] = Button(ax_sxm, 'SX-')
self.buttons['SX-'].on_clicked(self._ZoomSXm)
ax_sxp = self.fig1.add_axes([b_x0 + 2*b_w, b_y0 + b_h, b_w, b_h])
self.buttons['SX+'] = Button(ax_sxp, 'SX+')
self.buttons['SX+'].on_clicked(self._ZoomSXp)
ax_sym = self.fig1.add_axes([b_x0 + 3*b_w, b_y0, b_w, b_h])
self.buttons['Sy-'] = Button(ax_sym, 'SY-')
self.buttons['Sy-'].on_clicked(self._ZoomSYm)
ax_syp = self.fig1.add_axes([b_x0 + 3*b_w, b_y0 + b_h, b_w, b_h])
self.buttons['Sy+'] = Button(ax_syp, 'SY+')
self.buttons['Sy+'].on_clicked(self._ZoomSYp)
ax_curson = self.fig1.add_axes([b_x0 + 5*b_w, b_y0, 2*b_w, b_h])
self.buttons['CursOn'] = Button(ax_curson, 'CursOn')
self.buttons['CursOn'].on_clicked(self._cursOn)
ax_curson = self.fig1.add_axes([b_x0 + 5*b_w, b_y0 + b_h, 2*b_w, b_h])
self.buttons['CursOff'] = Button(ax_curson, 'CursOff')
self.buttons['CursOff'].on_clicked(self._cursOff)
ax_rerun = self.fig1.add_axes([b_x0 + 7*b_w, b_y0 + b_h, 2*b_w, b_h])
self.buttons['Rerun'] = Button(ax_rerun, 'Rerun')
self.buttons['Rerun'].on_clicked(self._call_rerun)
ax_adjust = self.fig1.add_axes([b_x0 + 7*b_w, b_y0, 2*b_w, b_h])
self.buttons['Adjust'] = Button(ax_adjust, 'Adjust')
self.buttons['Adjust'].on_clicked(self._call_adjust)
ax_readobs = self.fig1.add_axes([b_x0 + 9*b_w, b_y0 + b_h, 2*b_w, b_h])
self.buttons['ReadObs'] = Button(ax_readobs, 'ReadObs')
self.buttons['ReadObs'].on_clicked(self._call_readobs)
ax_replot = self.fig1.add_axes([b_x0 + 9*b_w, b_y0, 2*b_w, b_h])
self.buttons['RePlot'] = Button(ax_replot, 'RePlot')
self.buttons['RePlot'].on_clicked(self._call_replot)
    # Button callbacks: thin wrappers delegating to _Zoom with a direction
    # code ('Z'oom / 'S'hift, 'X'/'Y' axis, '+'/'-' sense).
    def _ZoomZXm(self, event=None):
        self._Zoom('ZX-')
    def _ZoomZXp(self, event=None):
        self._Zoom('ZX+')
    def _ZoomZYm(self, event=None):
        self._Zoom('ZY-')
    def _ZoomZYp(self, event=None):
        self._Zoom('ZY+')
    def _ZoomSXm(self, event=None):
        self._Zoom('SX-')
    def _ZoomSXp(self, event=None):
        self._Zoom('SX+')
    def _ZoomSYm(self, event=None):
        self._Zoom('SY-')
    def _ZoomSYp(self, event=None):
        self._Zoom('SY+')
def _Zoom(self, zoom_direction):
"""
zoom_direction = 'ABC', with A in ['S', 'Z'], B in ['X', 'Y'], and C in ['+', '-']
"""
xmin, xmax = self.ax1.get_xlim()
dx = xmax - xmin
ymin, ymax = self.ax1.get_ylim()
dy = ymax - ymin
if zoom_direction[0] == 'S':
if zoom_direction[2] == '+':
coeff = self.zoom_fact
elif zoom_direction[2] == '-':
coeff = -self.zoom_fact
if zoom_direction[1] == 'X':
xmin += coeff * dx
xmax += coeff * dx
elif zoom_direction[1] == 'Y':
ymin += coeff * dy
ymax += coeff * dy
elif zoom_direction[0] == 'Z':
if zoom_direction[2] == '+':
coeff = self.zoom_fact
elif zoom_direction[2] == '-':
coeff = -self.zoom_fact
if zoom_direction[1] == 'X':
xmin += coeff * dx
xmax -= coeff * dx
elif zoom_direction[1] == 'Y':
ymin += coeff * dy
ymax -= coeff * dy
self.ax1.set_xlim((xmin, xmax))
self.ax1.set_ylim((ymin, ymax))
self.fig1.canvas.draw()
    def _cursOn(self, event=None):
        """Enable the cursor: start listening for mouse clicks on fig1."""
        self._cid = self.fig1.canvas.mpl_connect('button_press_event', self._curs_onclick)
        log_.message('Cursor ON', calling=self.calling)
    def _cursOff(self, event=None):
        """Disable the cursor: disconnect the click handler, if connected."""
        if self._cid is not None:
            self.fig1.canvas.mpl_disconnect(self._cid)
            log_.message('Cursor OFF', calling=self.calling)
def get_nearby_lines(self, w1, w2, do_print=True, sort='i_tot', reverse=True):
if w1 == None or w2 == None:
return None
w = (w1 + w2)/2
w_lim = abs(w2 - w1)/2
tt = (np.abs(self.liste_raies['lambda'] + self.liste_raies['l_shift'] + self.conf['lambda_shift'] - w) < w_lim)
nearby_lines = self.liste_raies[tt]
i_tot = nearby_lines['i_rel']*nearby_lines['i_cor']
if sort == 'i_tot':
sorts = np.argsort(i_tot)
else:
sorts = np.argsort(nearby_lines[sort])
if reverse:
sorts = sorts[::-1]
nearby_lines = np.array(nearby_lines)[sorts]
if tt.sum() > 0:
if do_print:
print('\n{0:-^45}'.format(' CURSOR on {0:.3f} '.format(w)))
self.print_line(self.liste_raies[tt])
print('-'*45)
return nearby_lines
    def nearby_lines(self, event, do_print=True, sort='i_tot', reverse=True):
        """Mouse-click handler: two successive clicks define a wavelength
        window and the lines inside it are returned (optionally printed).

        A middle-button click, or a click outside ax1's x-range, turns
        the cursor off.
        """
        nearby_lines = None
        w = event.xdata
        try:
            if (w > self.ax1.get_xlim()[1]) or (w < self.ax1.get_xlim()[0]) or (event.button == 2):
                self._cursOff()
                return None
        except AttributeError:
            log_.warn('ax1 not defined', calling=self.calling)
            return None
        try:
            if event.button in (1,3):
                if self.firstClick:
                    # first click: remember the position, wait for the second
                    self.cursor_w0 = w
                    self.firstClick = False
                else:
                    # second click: the window is [cursor_w1, cursor_w2]
                    self.cursor_w1 = self.cursor_w0
                    self.cursor_w2 = w
                    self.firstClick = True
                    w = (self.cursor_w1+self.cursor_w2)/2
                    w_lim = self.cursor_width * (self.ax1.get_xlim()[1] - self.ax1.get_xlim()[0])
                    if abs(self.cursor_w2-w) < w_lim/10:
                        # both clicks (almost) at the same place: use the
                        # whole spectral range instead
                        self.cursor_w1 = self.limit_sp[0]
                        self.cursor_w2 = self.limit_sp[1]
                    else:
                        if abs(self.cursor_w2-w) > w_lim:
                            w_lim = abs(self.cursor_w2-w)
                        # NOTE(review): the next line makes the preceding
                        # 'if' a no-op (w_lim is always overwritten) --
                        # confirm which behavior is intended
                        w_lim = abs(self.cursor_w2-w)
                        self.cursor_w1 = w - w_lim
                        self.cursor_w2 = w + w_lim
                    nearby_lines = self.get_nearby_lines(self.cursor_w1, self.cursor_w2, do_print=do_print)
            return nearby_lines
        except AttributeError:
            log_.warn('ax1 not defined', calling=self.calling)
            return None
    def _curs_onclick(self, event):
        """Simple cursor handler: print every line within cursor_width of
        the clicked wavelength."""
        wl = event.xdata
        try:
            # clicking outside the x-range or with the middle button
            # turns the cursor off
            if (wl > self.ax1.get_xlim()[1]) or (wl < self.ax1.get_xlim()[0]) or (event.button == 2):
                self._cursOff()
                return None
        except AttributeError:
            log_.warn('ax1 not defined', calling=self.calling)
            return None
        try:
            if event.button in (1,3):
                # window is a fraction (cursor_width) of the visible x-range
                wl_lim = self.cursor_width * (self.ax1.get_xlim()[1] - self.ax1.get_xlim()[0])
                tt = (np.abs(self.liste_raies['lambda'] + self.liste_raies['l_shift'] + self.conf['lambda_shift'] - wl) < wl_lim)
                if tt.sum() > 0:
                    print('\n{0:-^45}'.format(' CURSOR on {0:.3f} '.format(wl)))
                    self.print_line(self.liste_raies[tt])
                    print('-'*45)
        except AttributeError:
            log_.warn('ax1 not defined', calling=self.calling)
            return None
    def _call_adjust(self, event=None):
        # Button callback: re-adjust the synthesis and refresh the figure.
        self.adjust()
        self.update_plot2()
    def _call_rerun(self, event=None):
        # Button callback: recompute everything and redraw from scratch.
        self.rerun()
        self.replot2()
    def _call_readobs(self, event=None):
        # Button callback: re-read the observations, rebuild the reddening
        # correction, continuum and synthetic spectrum, then redraw.
        self.init_obs(spectr_obs=None, sp_norm=None, obj_velo=None, limit_sp=self.limit_sp)
        self.init_red_corr()
        self.make_continuum()
        self.sp_synth_tot = self.convol_synth(self.cont, self.sp_synth)
        self.cont_lr, self.sp_synth_lr = self.rebin_on_obs()
        self.replot2()
    def _call_replot(self, event=None):
        # Button callback: redraw with the current state.
        self.replot2()
    def plot_indiv_sp(self, y_shift_coeff=None, legend_zoom=.115):
        """Plot each ion's individual spectrum, vertically offset for readability.

        Seems buggy, loosing ax1.xlim when plotted.
        """
        if y_shift_coeff is None:
            # default offset: 1% of the strongest individual spectrum
            y_shift_coeff = np.max(self.sp_theo['spectr'])/100.
        fig_indiv_spectra = plt.figure()
        if self.ax1 is not None:
            # share the wavelength axis with the main plot
            ax_is = fig_indiv_spectra.add_subplot(111, sharex=self.ax1)
        else:
            ax_is = fig_indiv_spectra.add_subplot(111)
        for i in np.arange(self.n_sp_theo):
            label = self.sp_theo['raie_ref']['id'][i]
            ax_is.plot(self.w, self.sp_theo['spectr'][i] + y_shift_coeff*(self.n_sp_theo - i), label=label)
        # NOTE(review): 'i' here is the final loop index, so the y-limit and
        # the legend font size scale with n_sp_theo -- confirm intent
        ax_is.set_ylim((0, y_shift_coeff*(i+2)))
        ax_is.legend(fontsize= i * legend_zoom )
        self.fig_indiv_spectra = fig_indiv_spectra
        self.ax_indiv_spectra = ax_is
    def plot_profile(self):
        """Plot the line profile (self.filter_) on a logarithmic y-scale.

        NOTE(review): plt.semilogy returns a list of Line2D objects, not an
        Axes, so ax_prof may not be what its name suggests -- confirm.
        """
        self.fig_prof = plt.figure()
        self.ax_prof = plt.semilogy(self.filter_)
def main_loc(config_file):
    """Run the interactive plot without Qt.

    Usage:
        from pyssn.core.spectrum import main_loc
        sp = main_loc('./s6302_n_c_init.py')
    """
    sp = spectrum(config_file=config_file)
    figure = plt.figure(figsize=(20, 7))
    sp.plot2(fig=figure)
    sp.save_axes()
    plt.show()
    return sp
def main():
    """Command-line entry point: parse arguments, build the spectrum, show the plot."""
    parser = get_parser()
    args = parser.parse_args()
    if args.file is None:
        # NOTE(review): execution continues after this call -- confirm that
        # log_.error aborts, otherwise spectrum() runs with file=None
        log_.error('A file name is needed, use option -f')
    log_.level = args.verbosity
    sp = spectrum(config_file=args.file, post_proc_file=args.post_proc)
    fig = plt.figure(figsize=(20, 7))
    sp.plot2(fig=fig)
    sp.apply_post_proc()
    plt.show()
|
gpl-3.0
|
datapythonista/pandas
|
pandas/tests/io/parser/test_dialect.py
|
5
|
4104
|
"""
Tests that dialects are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import csv
from io import StringIO
import pytest
from pandas.errors import ParserWarning
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture
def custom_dialect():
    """Non-default CSV dialect used to exercise dialect-conflict handling."""
    name = "weird"
    kwargs = {
        "doublequote": False,
        "escapechar": "~",
        "quotechar": "~",
        "delimiter": ":",
        "skipinitialspace": False,
        "quoting": 3,
    }
    return name, kwargs
def test_dialect(all_parsers):
    """read_csv with a csv.Dialect object: QUOTE_NONE keeps literal quotes."""
    parser = all_parsers
    data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
    dia = csv.excel()
    dia.quoting = csv.QUOTE_NONE
    df = parser.read_csv(StringIO(data), dialect=dia)
    data = """\
label1,label2,label3
index1,a,c,e
index2,b,d,f
"""
    exp = parser.read_csv(StringIO(data))
    # with QUOTE_NONE the '"' stays in the data, so patch the expected frame
    exp.replace("a", '"a', inplace=True)
    tm.assert_frame_equal(df, exp)
def test_dialect_str(all_parsers):
    """read_csv accepts the name of a dialect registered via csv.register_dialect."""
    dialect_name = "mydialect"
    parser = all_parsers
    data = """\
fruit:vegetable
apple:broccoli
pear:tomato
"""
    exp = DataFrame({"fruit": ["apple", "pear"], "vegetable": ["broccoli", "tomato"]})
    # register the ':'-delimited dialect only for the duration of the test
    with tm.with_csv_dialect(dialect_name, delimiter=":"):
        df = parser.read_csv(StringIO(data), dialect=dialect_name)
        tm.assert_frame_equal(df, exp)
def test_invalid_dialect(all_parsers):
    """An object that is not a csv.Dialect must raise ValueError."""
    class InvalidDialect:
        pass

    parser = all_parsers
    data = "a\n1"
    with pytest.raises(ValueError, match="Invalid dialect"):
        parser.read_csv(StringIO(data), dialect=InvalidDialect)
@pytest.mark.parametrize(
    "arg",
    [None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"],
)
@pytest.mark.parametrize("value", ["dialect", "default", "other"])
def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, value):
    """Non-delimiter kwargs conflicting with the dialect should warn.

    see gh-23761.

    Bug fix: the branch conditions compared the *literal* "value" against
    "dialect"/"default" instead of the parametrized variable ``value``, so
    the warning branch ran for every parametrization.
    """
    dialect_name, dialect_kwargs = custom_dialect
    parser = all_parsers

    expected = DataFrame({"a": [1], "b": [2]})
    data = "a:b\n1:2"

    warning_klass = None
    kwds = {}

    # arg=None tests when we pass in the dialect without any other arguments.
    if arg is not None:
        if value == "dialect":  # No conflict --> no warning.
            kwds[arg] = dialect_kwargs[arg]
        elif value == "default":  # Default --> no warning.
            from pandas.io.parsers.base_parser import parser_defaults

            kwds[arg] = parser_defaults[arg]
        else:  # Non-default + conflict with dialect --> warning.
            warning_klass = ParserWarning
            kwds[arg] = "blah"

    with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
        with tm.assert_produces_warning(warning_klass):
            result = parser.read_csv(StringIO(data), dialect=dialect_name, **kwds)
            tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "kwargs,warning_klass",
    [
        ({"sep": ","}, None),  # sep is default --> sep_override=True
        ({"sep": "."}, ParserWarning),  # sep isn't default --> sep_override=False
        ({"delimiter": ":"}, None),  # No conflict
        ({"delimiter": None}, None),  # Default arguments --> sep_override=True
        ({"delimiter": ","}, ParserWarning),  # Conflict
        ({"delimiter": "."}, ParserWarning),  # Conflict
    ],
    ids=[
        "sep-override-true",
        "sep-override-false",
        "delimiter-no-conflict",
        "delimiter-default-arg",
        "delimiter-conflict",
        "delimiter-conflict2",
    ],
)
def test_dialect_conflict_delimiter(all_parsers, custom_dialect, kwargs, warning_klass):
    """sep/delimiter kwargs conflicting with the dialect delimiter should warn.

    see gh-23761.
    """
    dialect_name, dialect_kwargs = custom_dialect
    parser = all_parsers
    expected = DataFrame({"a": [1], "b": [2]})
    data = "a:b\n1:2"
    with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
        with tm.assert_produces_warning(warning_klass):
            result = parser.read_csv(StringIO(data), dialect=dialect_name, **kwargs)
            tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
sanketloke/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets: 16 hand-placed 2-D points, two classes of 8
X = np.c_[(.4, -.7),
          (-1.5, -1),
          (-1.4, -.9),
          (-1.3, -1.2),
          (-1.1, -.2),
          (-1.2, -.4),
          (-.5, 1.2),
          (-1.5, 2.1),
          (1, 1),
          # --
          (1.3, .8),
          (1.2, .5),
          (.2, -2),
          (.5, -2.4),
          (.2, -2.3),
          (0, -2.7),
          (1.3, 2.1)].T
Y = [0] * 8 + [1] * 8

# figure number
fignum = 1

# fit the model: one figure per kernel
for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()

    # circle the support vectors
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -3
    x_max = 3
    y_min = -3
    y_max = 3

    # evaluate the decision function on a 200x200 grid
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    # dashed lines mark the +-0.5 margins, solid the decision boundary
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
                levels=[-.5, 0, .5])

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
glennq/scikit-learn
|
examples/decomposition/plot_ica_blind_source_separation.py
|
349
|
2228
|
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
|
bsd-3-clause
|
raysinensis/tcgaAPP
|
static/scripts/methyl-sig-pull2.py
|
1
|
1225
|
##download data w/ cmd: firehose_get -tasks Clinical_vs_Methylation analyses latest
# NOTE(review): this script uses Python 2 syntax ("except IOError,e") and will
# not run under Python 3 -- confirm the intended interpreter before changing.
import os
import tarfile
import csv
import pandas

path=os.getcwd()
# each subdirectory of the working directory is assumed to be a cancer cohort
cancerlist=os.listdir(".")

#get genes list
# seed the output frame's "gene" column from the LAML (-TB) supplementary table
folderpath2=path+"/"+"LAML"+"/"
csvpath=folderpath2+"gdac.broadinstitute.org_"+"LAML"+"-TB.Correlate_Clinical_vs_Methylation.Level_4.2016012800.0.0/supp.table1.txt"
csv1 = pandas.read_csv(csvpath,sep='\t')
df=pandas.DataFrame()
df["gene"]=csv1.iloc[:,0]

#pull 3rd column from each
# for every cohort, try the sample-type suffixes in order: TP, then TB, then TM
for cancer in cancerlist:
    folderpath=path+"/"+cancer+"/20160128/"
    folderpath2=path+"/"+cancer+"/"
    csvpath=folderpath2+"gdac.broadinstitute.org_"+cancer+"-TP.Correlate_Clinical_vs_Methylation.Level_4.2016012800.0.0/supp.table1.txt"
    try:
        csv1 = pandas.read_csv(csvpath,sep='\t')
    except IOError,e:
        csvpath=folderpath2+"gdac.broadinstitute.org_"+cancer+"-TB.Correlate_Clinical_vs_Methylation.Level_4.2016012800.0.0/supp.table1.txt"
        try:
            csv1 = pandas.read_csv(csvpath,sep='\t')
        except IOError,e2:
            csvpath=folderpath2+"gdac.broadinstitute.org_"+cancer+"-TM.Correlate_Clinical_vs_Methylation.Level_4.2016012800.0.0/supp.table1.txt"
            csv1 = pandas.read_csv(csvpath,sep='\t')
    df[cancer]=csv1.iloc[:,1]
df.to_csv("methylation.csv")
|
mit
|
rabipanda/tensorflow
|
tensorflow/examples/learn/text_classification.py
|
30
|
6589
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None  # populated by argparse in the __main__ block

MAX_DOCUMENT_LENGTH = 10  # number of word ids kept per document
EMBEDDING_SIZE = 50  # dimensionality of the word embeddings
n_words = 0  # vocabulary size; set in main() after fitting the processor
MAX_LABEL = 15  # number of output classes in the DBpedia dataset
WORDS_FEATURE = 'words'  # Name of the input words feature.
def estimator_spec_for_softmax_classification(logits, labels, mode):
  """Returns EstimatorSpec instance for softmax classification.

  Builds the appropriate spec for PREDICT, TRAIN, or EVAL mode from the
  given logits and integer labels.
  """
  predicted = tf.argmax(logits, 1)

  # Inference only: return class ids and softmax probabilities, no loss.
  if mode == tf.estimator.ModeKeys.PREDICT:
    outputs = {'class': predicted, 'prob': tf.nn.softmax(logits)}
    return tf.estimator.EstimatorSpec(mode=mode, predictions=outputs)

  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  # Training: minimize the loss with Adam, advancing the global step.
  if mode == tf.estimator.ModeKeys.TRAIN:
    step = tf.train.get_global_step()
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    return tf.estimator.EstimatorSpec(
        mode, loss=loss, train_op=optimizer.minimize(loss, global_step=step))

  # Evaluation: report accuracy against the true labels.
  metrics = {
      'accuracy':
          tf.metrics.accuracy(labels=labels, predictions=predicted)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=metrics)
def bag_of_words_model(features, labels, mode):
  """A bag-of-words model. Note it disregards the word order in the text."""
  # Embed each word id and average the embeddings over the document;
  # word order is intentionally discarded.
  embedded_column = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_identity(
          WORDS_FEATURE, num_buckets=n_words),
      dimension=EMBEDDING_SIZE)
  bow = tf.feature_column.input_layer(
      features, feature_columns=[embedded_column])
  logits = tf.layers.dense(bow, MAX_LABEL, activation=None)

  return estimator_spec_for_softmax_classification(
      logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
  """RNN model to predict from sequence of words to a class.

  Embeds the word-id sequence, runs it through a GRU, and classifies from
  the final hidden state via the shared softmax EstimatorSpec helper.
  """
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  word_vectors = tf.contrib.layers.embed_sequence(
      features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)

  # Split into list of embedding per word, while removing doc length dim.
  # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
  word_list = tf.unstack(word_vectors, axis=1)

  # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
  cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)

  # Create an unrolled Recurrent Neural Networks to length of
  # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
  _, encoding = tf.nn.static_rnn(cell, word_list, dtype=tf.float32)

  # Given encoding of RNN, take encoding of last step (e.g hidden size of the
  # neural network of last step) and pass it as features for softmax
  # classification over output classes.
  logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
  return estimator_spec_for_softmax_classification(
      logits=logits, labels=labels, mode=mode)
def main(unused_argv):
  """Load DBpedia, build vocabulary, train the chosen model, report accuracy."""
  global n_words
  tf.logging.set_verbosity(tf.logging.INFO)

  # Prepare training and testing data
  dbpedia = tf.contrib.learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  x_train = pandas.Series(dbpedia.train.data[:, 1])
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.Series(dbpedia.test.data[:, 1])
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary: map each document to MAX_DOCUMENT_LENGTH word ids
  vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
      MAX_DOCUMENT_LENGTH)

  x_transform_train = vocab_processor.fit_transform(x_train)
  x_transform_test = vocab_processor.transform(x_test)

  x_train = np.array(list(x_transform_train))
  x_test = np.array(list(x_transform_test))

  # publish the vocabulary size for the model functions (module global)
  n_words = len(vocab_processor.vocabulary_)
  print('Total words: %d' % n_words)

  # Build model
  # Switch between rnn_model and bag_of_words_model to test different models.
  model_fn = rnn_model
  if FLAGS.bow_model:
    # Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
    # ids start from 1 and 0 means 'no word'. But
    # categorical_column_with_identity assumes 0-based count and uses -1 for
    # missing word.
    x_train -= 1
    x_test -= 1
    model_fn = bag_of_words_model
  classifier = tf.estimator.Estimator(model_fn=model_fn)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: x_train},
      y=y_train,
      batch_size=len(x_train),
      num_epochs=None,
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
  # Parse the two boolean flags and hand remaining argv through to tf.app.run.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true')
  parser.add_argument(
      '--bow_model',
      default=False,
      help='Run with BOW model instead of RNN.',
      action='store_true')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
cainiaocome/scikit-learn
|
sklearn/linear_model/setup.py
|
169
|
1567
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration for sklearn.linear_model."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('linear_model', parent_package, top_path)

    cblas_libs, blas_info = get_blas_info()

    # libm is needed when linking CBLAS-backed extensions on POSIX systems
    if os.name == 'posix':
        cblas_libs.append('m')

    config.add_extension('cd_fast', sources=['cd_fast.c'],
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []), **blas_info)

    # NOTE(review): the add_extension call above pops 'include_dirs' and
    # 'extra_compile_args' OUT of blas_info, so the pops below return the
    # defaults ([]) for sgd_fast -- confirm this asymmetry is intentional.
    config.add_extension('sgd_fast',
                         sources=['sgd_fast.c'],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    # add other directories
    config.add_subpackage('tests')

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: python setup.py build_ext ...
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/io/formats/test_format.py
|
2
|
118315
|
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import IS64, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
# Parametrized over the three kinds of write target the to_string-style
# APIs accept: a plain string path, a path-like object, and a buffer.
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
    """
    A fixture yielding test ids for filepath_or_buffer testing.
    """
    return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
    """
    Yield a write target matching ``filepath_or_buffer_id``: an in-memory
    StringIO buffer, a path-like object, or a plain string path. For the
    buffer case, also verify after the test that the buffer was not closed.
    """
    if filepath_or_buffer_id == "buffer":
        buf = StringIO()
        yield buf
        # the writer must leave user-supplied buffers open
        assert not buf.closed
        return

    assert isinstance(tmp_path, Path)
    target = tmp_path / "foo"
    yield target if filepath_or_buffer_id == "pathlike" else str(target)
@pytest.fixture
def assert_filepath_or_buffer_equals(
    filepath_or_buffer, filepath_or_buffer_id, encoding
):
    """
    Assertion helper for checking filepath_or_buffer.
    """

    def _assert_filepath_or_buffer_equals(expected):
        # read back whatever was written to the target, by target kind
        if filepath_or_buffer_id == "buffer":
            result = filepath_or_buffer.getvalue()
        elif filepath_or_buffer_id == "pathlike":
            result = filepath_or_buffer.read_text(encoding=encoding)
        else:  # "string": a plain filesystem path
            with open(filepath_or_buffer, encoding=encoding) as f:
                result = f.read()
        assert result == expected

    return _assert_filepath_or_buffer_equals
def curpath():
    """Return the directory containing this test module."""
    return os.path.dirname(os.path.abspath(__file__))
def has_info_repr(df):
    """Return True if df renders as the summary ("info") view, not a table.

    The info view's first line is the class name, e.g.
    ``<class 'pandas.core.frame.DataFrame'>``; the HTML repr emits the
    escaped form ``&lt;class`` instead.
    """
    r = repr(df)
    c1 = r.split("\n")[0].startswith("<class")
    # Bug fix: this previously tested r"<class" (identical to the plain
    # string above, so the check was dead); per the original comment it is
    # meant to match the HTML-escaped form produced by _repr_html_.
    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
    return c1 or c2
def has_non_verbose_info_repr(df):
    """True when df uses the info repr in its short (non-verbose) form."""
    # The non-verbose info view is exactly six lines:
    # 1. <class>  2. Index  3. Columns  4. dtype  5. memory usage
    # 6. trailing newline
    return has_info_repr(df) and len(repr(df).split("\n")) == 6
def has_horizontally_truncated_repr(df):
    """Return True if repr(df) contains a horizontal "..." truncation column."""
    try:  # Check header row
        fst_line = np.array(repr(df).splitlines()[0].split())
        cand_col = np.where(fst_line == "...")[0][0]
    except IndexError:
        return False
    # Make sure each row has this ... in the same place
    r = repr(df)
    for ix, l in enumerate(r.splitlines()):
        # NOTE(review): this checks r.split() (tokens of the WHOLE repr)
        # rather than l.split() (the current line), so the loop re-tests
        # the same header token every iteration and the per-line check is
        # effectively a no-op. Looks like a bug, but per-row token
        # positions differ from the header's (data rows start with an
        # index label), so naively switching to l.split() would change
        # results -- confirm intent before fixing.
        if not r.split()[cand_col] == "...":
            return False
    return True
def has_vertically_truncated_repr(df):
    """True if repr(df) contains a row made up only of dots and spaces."""
    # a vertically truncated frame renders an ellipsis row like ".. ..."
    return any(
        re.match(r"^[\.\ ]+$", row) is not None for row in repr(df).splitlines()
    )
def has_truncated_repr(df):
    """True if repr(df) is truncated in either direction."""
    if has_horizontally_truncated_repr(df):
        return True
    return has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
    """True if repr(df) is truncated both horizontally and vertically."""
    if not has_horizontally_truncated_repr(df):
        return False
    return has_vertically_truncated_repr(df)
def has_expanded_repr(df):
    """True if repr(df) wraps onto continuation lines (trailing backslash)."""
    return any(line.endswith("\\") for line in repr(df).split("\n"))
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
    """Smoke test: engineering float-format options must not break repr()."""
    df = float_frame
    df.loc[5] = 0

    fmt.set_eng_float_format()
    repr(df)

    fmt.set_eng_float_format(use_eng_prefix=True)
    repr(df)

    fmt.set_eng_float_format(accuracy=0)
    repr(df)
    # restore the global display state mutated above
    tm.reset_display_options()
def test_show_null_counts(self):
    """DataFrame.info: show_counts behaviour and null_counts deprecation."""
    df = DataFrame(1, columns=range(10), index=range(10))
    df.iloc[1, 1] = np.nan

    def check(show_counts, result):
        # info() output contains "non-null" exactly when counts are shown
        buf = StringIO()
        df.info(buf=buf, show_counts=show_counts)
        assert ("non-null" in buf.getvalue()) is result

    # frame within the info limits: counts shown by default (None) and True
    with option_context(
        "display.max_info_rows", 20, "display.max_info_columns", 20
    ):
        check(None, True)
        check(True, True)
        check(False, False)

    # frame exceeds the limits: default (None) suppresses the counts
    with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
        check(None, False)
        check(True, False)
        check(False, False)

    # GH37999
    with tm.assert_produces_warning(
        FutureWarning, match="null_counts is deprecated.+"
    ):
        buf = StringIO()
        df.info(buf=buf, null_counts=True)
        assert "non-null" in buf.getvalue()

    # GH37999
    with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
        df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
    """Cell values longer than display.max_colwidth are ellipsis-truncated."""
    max_len = 20
    with option_context("display.max_colwidth", max_len):
        df = DataFrame(
            {
                "A": np.random.randn(10),
                # random strings straddling the max_len boundary
                "B": [
                    tm.rands(np.random.randint(max_len - 1, max_len + 1))
                    for i in range(10)
                ],
            }
        )
        r = repr(df)
        # drop the header line; compare each data row to its source value
        r = r[r.find("\n") + 1 :]

        adj = fmt.get_adjustment()

        for line, value in zip(r.split("\n"), df["B"]):
            if adj.len(value) + 1 > max_len:
                assert "..." in line
            else:
                assert "..." not in line

    # large enough limits must not truncate anything
    with option_context("display.max_colwidth", 999999):
        assert "..." not in repr(df)

    with option_context("display.max_colwidth", max_len + 2):
        assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
    """Setting display.max_colwidth to a negative int warns FutureWarning."""
    # FIXME: remove in future version after deprecation cycle
    # Non-regression test for:
    # https://github.com/pandas-dev/pandas/issues/31532
    width = get_option("display.max_colwidth")
    with tm.assert_produces_warning(FutureWarning):
        set_option("display.max_colwidth", -1)
    # restore the original value so later tests are unaffected
    set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
    """pprint_thing output length tracks the display.max_seq_items option."""
    with option_context("display.max_seq_items", 2000):
        assert len(printing.pprint_thing(list(range(1000)))) > 1000

    with option_context("display.max_seq_items", 5):
        assert len(printing.pprint_thing(list(range(1000)))) < 100

    with option_context("display.max_seq_items", 1):
        assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
    """pprint_thing renders a one-element set with brace syntax."""
    assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
    """eval()-ing an Index repr reconstructs an equivalent Index."""
    # for the case of Index, where the repr is traditional rather than
    # stylized
    idx = Index(["a", "b"])
    res = eval("pd." + repr(idx))
    tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
    """__repr__ must return str even with non-ASCII labels and index."""
    # https://docs.python.org/3/reference/datamodel.html#object.__repr__
    # "...The return value must be a string object."

    # (str on py2.x, str (unicode) on py3)

    data = [8, 5, 3, 5]
    index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
    cols = ["\u03c8"]
    df = DataFrame(data, columns=cols, index=index1)
    assert type(df.__repr__()) == str  # both py2 / 3
def test_repr_no_backslash(self):
    """A frame that fits the display width must not wrap with backslashes."""
    with option_context("mode.sim_interactive", True):
        df = DataFrame(np.random.randn(10, 4))
        assert "\\" not in repr(df)
def test_expand_frame_repr(self):
    """display.expand_frame_repr controls wrapping vs. horizontal truncation."""
    df_small = DataFrame("hello", index=[0], columns=[0])
    df_wide = DataFrame("hello", index=[0], columns=range(10))
    df_tall = DataFrame("hello", index=range(30), columns=range(5))

    with option_context("mode.sim_interactive", True):
        with option_context(
            "display.max_columns",
            10,
            "display.width",
            20,
            "display.max_rows",
            20,
            "display.show_dimensions",
            True,
        ):
            # expand on: wide frames wrap across lines instead of truncating
            with option_context("display.expand_frame_repr", True):
                assert not has_truncated_repr(df_small)
                assert not has_expanded_repr(df_small)
                assert not has_truncated_repr(df_wide)
                assert has_expanded_repr(df_wide)
                assert has_vertically_truncated_repr(df_tall)
                assert has_expanded_repr(df_tall)

            # expand off: nothing wraps; only vertical truncation remains
            with option_context("display.expand_frame_repr", False):
                assert not has_truncated_repr(df_small)
                assert not has_expanded_repr(df_small)
                assert not has_horizontally_truncated_repr(df_wide)
                assert not has_expanded_repr(df_wide)
                assert has_vertically_truncated_repr(df_tall)
                assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
    """Non-interactive repr must not depend on terminal size detection."""
    # in non interactive mode, there can be no dependency on the
    # result of terminal auto size detection
    df = DataFrame("hello", index=range(1000), columns=range(5))

    with option_context(
        "mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
    ):
        assert not has_truncated_repr(df)
        assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
    """Long headers must survive truncation to the (patched) terminal width."""
    # see gh-21180
    terminal_size = (118, 96)
    monkeypatch.setattr(
        "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
    )

    index = range(5)
    columns = pd.MultiIndex.from_tuples(
        [
            ("This is a long title with > 37 chars.", "cat"),
            ("This is a loooooonger title with > 43 chars.", "dog"),
        ]
    )
    df = DataFrame(1, index=index, columns=columns)

    result = repr(df)

    # both MultiIndex header levels must still show identifying text
    h1, h2 = result.split("\n")[:2]
    assert "long" in h1
    assert "loooooonger" in h1
    assert "cat" in h2
    assert "dog" in h2

    # regular columns
    df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
    result = repr(df2)

    assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
    """A frame that fits the patched terminal width is not truncated."""
    # GH 22984 ensure entire window is filled
    terminal_size = (80, 24)
    df = DataFrame(np.random.rand(1, 7))

    monkeypatch.setattr(
        "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
    )
    assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
    """Interplay of max_rows/max_columns/width on truncation and expansion."""
    term_width, term_height = get_terminal_size()
    if term_width < 10 or term_height < 10:
        pytest.skip(f"terminal size too small, {term_width} x {term_height}")

    def mkframe(n):
        # square frame of zeros with zero-padded string labels on both axes
        index = [f"{i:05d}" for i in range(n)]
        return DataFrame(0, index, index)

    df6 = mkframe(6)
    df10 = mkframe(10)
    with option_context("mode.sim_interactive", True):
        with option_context("display.width", term_width * 2):
            with option_context("display.max_rows", 5, "display.max_columns", 5):
                assert not has_expanded_repr(mkframe(4))
                assert not has_expanded_repr(mkframe(5))
                assert not has_expanded_repr(df6)
                assert has_doubly_truncated_repr(df6)

            with option_context("display.max_rows", 20, "display.max_columns", 10):
                # Out off max_columns boundary, but no extending
                # since not exceeding width
                assert not has_expanded_repr(df6)
                assert not has_truncated_repr(df6)

            with option_context("display.max_rows", 9, "display.max_columns", 10):
                # out vertical bounds can not result in expanded repr
                assert not has_expanded_repr(df10)
                assert has_vertically_truncated_repr(df10)

        # width=None in terminal, auto detection
        with option_context(
            "display.max_columns",
            100,
            "display.max_rows",
            term_width * 20,
            "display.width",
            None,
        ):
            df = mkframe((term_width // 7) - 2)
            assert not has_expanded_repr(df)
            df = mkframe((term_width // 7) + 2)
            printing.pprint_thing(df._repr_fits_horizontal_())
            assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
    """Auto terminal-size detection (max_rows/max_columns = 0) vs. None."""
    term_width, term_height = get_terminal_size()
    fac = 1.05  # Arbitrary large factor to exceed term width
    cols = range(int(term_width * fac))
    index = range(10)
    df = DataFrame(index=index, columns=cols)
    with option_context("mode.sim_interactive", True):
        with option_context("max_rows", None):
            with option_context("max_columns", None):
                # Wrap around with None
                assert has_expanded_repr(df)
        with option_context("max_rows", 0):
            with option_context("max_columns", 0):
                # Truncate with auto detection.
                assert has_horizontally_truncated_repr(df)

        # taller than the terminal: vertical truncation kicks in too
        index = range(int(term_height * fac))
        df = DataFrame(index=index, columns=cols)
        with option_context("max_rows", 0):
            with option_context("max_columns", None):
                # Wrap around with None
                assert has_expanded_repr(df)
                # Truncate vertically
                assert has_vertically_truncated_repr(df)

        with option_context("max_rows", None):
            with option_context("max_columns", 0):
                assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
    """to_string/repr must handle non-ASCII values and a None sys.stdin."""
    buf = StringIO()

    unicode_values = ["\u03c3"] * 10
    unicode_values = np.array(unicode_values, dtype=object)
    df = DataFrame({"unicode": unicode_values})
    df.to_string(col_space=10, buf=buf)

    # it works!
    repr(df)

    # all rendered lines (except the dtype footer) must share one width
    idx = Index(["abc", "\u03c3a", "aegdvg"])
    ser = Series(np.random.randn(len(idx)), idx)
    rs = repr(ser).split("\n")
    line_len = len(rs[0])
    for line in rs[1:]:
        try:
            line = line.decode(get_option("display.encoding"))
        except AttributeError:
            pass
        if not line.startswith("dtype:"):
            assert len(line) == line_len

    # it works even if sys.stdin in None
    _stdin = sys.stdin
    try:
        sys.stdin = None
        repr(df)
    finally:
        sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
    """Printing empty and non-empty frames with unicode column names works."""
    buf = StringIO()

    empty = DataFrame({"c/\u03c3": Series(dtype=object)})
    nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})

    print(empty, file=buf)
    print(nonempty, file=buf)

    # this should work
    buf.getvalue()
def test_to_string_with_col_space(self):
    """Larger col_space widens output; header=False keeps the same width."""
    df = DataFrame(np.random.random(size=(1, 3)))
    c10 = len(df.to_string(col_space=10).split("\n")[1])
    c20 = len(df.to_string(col_space=20).split("\n")[1])
    c30 = len(df.to_string(col_space=30).split("\n")[1])
    assert c10 < c20 < c30

    # GH 8230
    # col_space wasn't being applied with header=False
    with_header = df.to_string(col_space=20)
    with_header_row1 = with_header.splitlines()[1]
    no_header = df.to_string(col_space=20, header=False)
    assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
    """Per-column col_space must line up with the frame's columns.

    A list of widths with the wrong length, or a dict naming a column that
    does not exist, should raise ValueError.
    """
    df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
    msg = (
        "Col_space length\\(\\d+\\) should match "
        "DataFrame number of columns\\(\\d+\\)"
    )
    # too few widths for 3 columns
    with pytest.raises(ValueError, match=msg):
        df.to_string(col_space=[30, 40])
    # too many widths for 3 columns
    with pytest.raises(ValueError, match=msg):
        df.to_string(col_space=[30, 40, 50, 60])
    msg = "unknown column"
    # dict keyed by a column ("d") that is not in the frame
    with pytest.raises(ValueError, match=msg):
        df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
    """Truncation markers appear exactly when a dimension exceeds the display cap.

    Sweeps 10/20-row by 10/20-column frames over several index types against
    max_rows/max_columns of 15, checking vertical, horizontal, and doubly
    truncated reprs via the module-level has_*_truncated_repr helpers.
    """
    for index in [
        tm.makeStringIndex,
        tm.makeUnicodeIndex,
        tm.makeIntIndex,
        tm.makeDateIndex,
        tm.makePeriodIndex,
    ]:
        for column in [tm.makeStringIndex]:
            for h in [10, 20]:
                for w in [10, 20]:
                    with option_context("display.expand_frame_repr", False):
                        df = DataFrame(index=index(h), columns=column(w))
                        # 20 rows > max_rows=15 -> vertically truncated
                        with option_context("display.max_rows", 15):
                            if h == 20:
                                assert has_vertically_truncated_repr(df)
                            else:
                                assert not has_vertically_truncated_repr(df)
                        # 20 cols > max_columns=15 -> horizontally truncated
                        with option_context("display.max_columns", 15):
                            if w == 20:
                                assert has_horizontally_truncated_repr(df)
                            else:
                                assert not (has_horizontally_truncated_repr(df))
                        # both caps exceeded -> truncated in both directions
                        with option_context(
                            "display.max_rows", 15, "display.max_columns", 15
                        ):
                            if h == 20 and w == 20:
                                assert has_doubly_truncated_repr(df)
                            else:
                                assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
    """A frame with a MultiIndex on both axes truncates in both directions."""
    arrays = [
        ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
        ["one", "two", "one", "two", "one", "two", "one", "two"],
    ]
    # 8x8 frame vs caps of 7 in each direction
    df = DataFrame(index=arrays, columns=arrays)
    with option_context("display.max_rows", 7, "display.max_columns", 7):
        assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
    """Truncated reprs stay correct when the head/tail splits differ in dtype.

    GH 11594: a Series whose truncated halves would infer different dtypes
    must still report 'object'.  GH 12045: None cells must still render as
    'None', not 'NaN', after truncation.
    """
    # 11594, 12045
    # when truncated the dtypes of the splits can differ
    # 11594
    import datetime

    # the out-of-range year 1012 forces object dtype for the whole Series
    s = Series(
        [datetime.datetime(2012, 1, 1)] * 10
        + [datetime.datetime(1012, 1, 2)]
        + [datetime.datetime(2012, 1, 3)] * 10
    )
    with pd.option_context("display.max_rows", 8):
        result = str(s)
        assert "object" in result
    # 12045
    df = DataFrame({"text": ["some words"] + [None] * 9})
    with pd.option_context("display.max_rows", 8, "display.max_columns", 3):
        result = str(df)
        assert "None" in result
        assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
    """Truncated reprs of tz-aware datetime columns, including NaT runs.

    GH 12211: covers NaT at the end, NaT at the start, and mixed timezones,
    each under display.max_rows=5 so the middle rows are elided.
    """
    # GH 12211
    df = DataFrame(
        {"date": [Timestamp("20130101").tz_localize("UTC")] + [pd.NaT] * 5}
    )
    with option_context("display.max_rows", 5):
        result = str(df)
        assert "2013-01-01 00:00:00+00:00" in result
        assert "NaT" in result
        assert "..." in result
        assert "[6 rows x 1 columns]" in result
    # tz-aware values first, NaT tail
    dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [pd.NaT] * 5
    df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
    with option_context("display.max_rows", 5):
        expected = (
            " dt x\n"
            "0 2011-01-01 00:00:00-05:00 1\n"
            "1 2011-01-01 00:00:00-05:00 2\n"
            ".. ... ..\n"
            "8 NaT 9\n"
            "9 NaT 10\n\n"
            "[10 rows x 2 columns]"
        )
        assert repr(df) == expected
    # NaT head, tz-aware tail
    dts = [pd.NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
    df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
    with option_context("display.max_rows", 5):
        expected = (
            " dt x\n"
            "0 NaT 1\n"
            "1 NaT 2\n"
            ".. ... ..\n"
            "8 2011-01-01 00:00:00-05:00 9\n"
            "9 2011-01-01 00:00:00-05:00 10\n\n"
            "[10 rows x 2 columns]"
        )
        assert repr(df) == expected
    # two different timezones in one column
    dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
        Timestamp("2011-01-01", tz="US/Eastern")
    ] * 5
    df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
    with option_context("display.max_rows", 5):
        expected = (
            " dt x\n"
            "0 2011-01-01 00:00:00+09:00 1\n"
            "1 2011-01-01 00:00:00+09:00 2\n"
            ".. ... ..\n"
            "8 2011-01-01 00:00:00-05:00 9\n"
            "9 2011-01-01 00:00:00-05:00 10\n\n"
            "[10 rows x 2 columns]"
        )
        assert repr(df) == expected
@pytest.mark.parametrize(
    "start_date",
    [
        "2017-01-01 23:59:59.999999999",
        "2017-01-01 23:59:59.99999999",
        "2017-01-01 23:59:59.9999999",
        "2017-01-01 23:59:59.999999",
        "2017-01-01 23:59:59.99999",
        "2017-01-01 23:59:59.9999",
    ],
)
def test_datetimeindex_highprecision(self, start_date):
    # GH19030
    # Check that high-precision time values for the end of day are
    # included in repr for DatetimeIndex
    # ... both as a column:
    df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
    result = str(df)
    assert start_date in result
    # ... and as the index itself:
    dti = date_range(start=start_date, freq="D", periods=5)
    df = DataFrame({"A": range(5)}, index=dti)
    result = str(df.index)
    assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
    """repr of a frame read from a latin-1 encoded CSV must not raise."""
    # datapath is the pytest fixture resolving files under pandas/tests
    filepath = datapath("io", "parser", "data", "unicode_series.csv")
    df = pd.read_csv(filepath, header=None, encoding="latin1")
    repr(df)
    repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
    """repr under heavy truncation handles unusual index labels (both axes)."""
    index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
    fmt.set_option("display.max_rows", 1)
    df = DataFrame(columns=["a", "b", "c"], index=index)
    repr(df)
    repr(df.T)
    # put max_rows back to a large value so later tests are unaffected
    fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
    """Wide frames: the wrapped (expanded) repr differs from the single-block
    one, and a wider display shrinks the wrapped repr."""
    with option_context(
        "mode.sim_interactive",
        True,
        "display.show_dimensions",
        True,
        "display.max_columns",
        20,
    ):
        max_cols = get_option("display.max_columns")
        df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
        # single-block repr with dimensions footer
        set_option("display.expand_frame_repr", False)
        rep_str = repr(df)
        assert f"10 rows x {max_cols - 1} columns" in rep_str
        # expanded repr wraps columns across blocks
        set_option("display.expand_frame_repr", True)
        wide_repr = repr(df)
        assert rep_str != wide_repr
        # a wider terminal needs fewer wraps, so the repr shrinks
        with option_context("display.width", 120):
            wider_repr = repr(df)
            assert len(wider_repr) < len(wide_repr)
    reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
    """Very wide (90-char) column names wrap to a fixed number of repr lines."""
    with option_context("mode.sim_interactive", True, "display.max_columns", 20):
        df = DataFrame(
            np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
        )
        rep_str = repr(df)
        # exact wrapped line count for this shape under these options
        assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
    """Wrapped wide repr repeats a named index's name on each column block."""
    with option_context("mode.sim_interactive", True, "display.max_columns", 20):
        max_cols = get_option("display.max_columns")
        df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
        df.index.name = "DataFrame Index"
        set_option("display.expand_frame_repr", False)
        rep_str = repr(df)
        set_option("display.expand_frame_repr", True)
        wide_repr = repr(df)
        assert rep_str != wide_repr
        with option_context("display.width", 150):
            wider_repr = repr(df)
            assert len(wider_repr) < len(wide_repr)
        # every wrapped block's header line carries the index name
        for line in wide_repr.splitlines()[1::13]:
            assert "DataFrame Index" in line
    reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
    """Wrapped wide repr repeats the MultiIndex level names on each block."""
    with option_context("mode.sim_interactive", True, "display.max_columns", 20):
        midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
        max_cols = get_option("display.max_columns")
        df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
        df.index.names = ["Level 0", "Level 1"]
        set_option("display.expand_frame_repr", False)
        rep_str = repr(df)
        set_option("display.expand_frame_repr", True)
        wide_repr = repr(df)
        assert rep_str != wide_repr
        with option_context("display.width", 150):
            wider_repr = repr(df)
            assert len(wider_repr) < len(wide_repr)
        # both level names appear in each wrapped block's header
        for line in wide_repr.splitlines()[1::13]:
            assert "Level 0 Level 1" in line
    reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
    """Wide repr also wraps when the COLUMNS are a MultiIndex."""
    with option_context("mode.sim_interactive", True, "display.max_columns", 20):
        max_cols = get_option("display.max_columns")
        midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
        mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
        df = DataFrame(
            tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
        )
        df.index.names = ["Level 0", "Level 1"]
        set_option("display.expand_frame_repr", False)
        rep_str = repr(df)
        set_option("display.expand_frame_repr", True)
        wide_repr = repr(df)
        assert rep_str != wide_repr
    # wider display -> fewer column-block wraps -> shorter repr
    with option_context("display.width", 150, "display.max_columns", 20):
        wider_repr = repr(df)
        assert len(wider_repr) < len(wide_repr)
    reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
    """Wide-repr wrapping behaves the same for frames of random string data."""
    with option_context("mode.sim_interactive", True, "display.max_columns", 20):
        max_cols = 20
        df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
        set_option("display.expand_frame_repr", False)
        rep_str = repr(df)
        set_option("display.expand_frame_repr", True)
        wide_repr = repr(df)
        assert rep_str != wide_repr
        with option_context("display.width", 150):
            # a wider display shrinks the wrapped repr
            wider_repr = repr(df)
            assert len(wider_repr) < len(wide_repr)
    reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
    """GH 2850: to_string renders NaN labels in (Multi)Index levels.

    Covers a NaN in one MultiIndex level, a NaN flat index, appended index
    levels, an all-NaN level, a partially-NaN level (where the repeated NaN
    is blanked by the sparse MultiIndex display), and an all-NaN first row.
    """
    # GH 2850
    df = DataFrame(
        {
            "id1": {0: "1a3", 1: "9h4"},
            "id2": {0: np.nan, 1: "d67"},
            "id3": {0: "78d", 1: "79d"},
            "value": {0: 123, 1: 64},
        }
    )
    # multi-index
    y = df.set_index(["id1", "id2", "id3"])
    result = y.to_string()
    expected = (
        " value\nid1 id2 id3 \n"
        "1a3 NaN 78d 123\n9h4 d67 79d 64"
    )
    assert result == expected
    # index
    y = df.set_index("id2")
    result = y.to_string()
    expected = (
        " id1 id3 value\nid2 \n"
        "NaN 1a3 78d 123\nd67 9h4 79d 64"
    )
    assert result == expected
    # with append (this failed in 0.12)
    y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
    result = y.to_string()
    expected = (
        " value\nid1 id2 id3 \n"
        "1a3 NaN 78d 123\n9h4 d67 79d 64"
    )
    assert result == expected
    # all-nan in mi
    df2 = df.copy()
    df2.loc[:, "id2"] = np.nan
    y = df2.set_index("id2")
    result = y.to_string()
    expected = (
        " id1 id3 value\nid2 \n"
        "NaN 1a3 78d 123\nNaN 9h4 79d 64"
    )
    assert result == expected
    # partial nan in mi
    df2 = df.copy()
    df2.loc[:, "id2"] = np.nan
    y = df2.set_index(["id2", "id3"])
    result = y.to_string()
    expected = (
        " id1 value\nid2 id3 \n"
        "NaN 78d 1a3 123\n 79d 9h4 64"
    )
    assert result == expected
    # all-NaN first row across all three index levels
    df = DataFrame(
        {
            "id1": {0: np.nan, 1: "9h4"},
            "id2": {0: np.nan, 1: "d67"},
            "id3": {0: np.nan, 1: "79d"},
            "value": {0: 123, 1: 64},
        }
    )
    y = df.set_index(["id1", "id2", "id3"])
    result = y.to_string()
    expected = (
        " value\nid1 id2 id3 \n"
        "NaN NaN NaN 123\n9h4 d67 79d 64"
    )
    assert result == expected
def test_to_string(self):
    """to_string smoke test on a big mixed frame: buffer output, column
    selection/ordering, col_space, float_format, and formatters."""
    # big mixed
    biggie = DataFrame(
        {"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
        index=np.arange(200),
    )
    biggie.loc[:20, "A"] = np.nan
    biggie.loc[:20, "B"] = np.nan
    s = biggie.to_string()
    # writing to a buffer returns None and produces identical text
    buf = StringIO()
    retval = biggie.to_string(buf=buf)
    assert retval is None
    assert buf.getvalue() == s
    assert isinstance(s, str)
    # print in right order
    result = biggie.to_string(
        columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
    )
    lines = result.split("\n")
    header = lines[0].strip().split()
    # collapse whitespace so the text round-trips through read_csv
    joined = "\n".join(re.sub(r"\s+", " ", x).strip() for x in lines[1:])
    recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
    tm.assert_series_equal(recons["B"], biggie["B"])
    assert recons["A"].count() == biggie["A"].count()
    # float round-trip is only approximate through %.5f formatting
    assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
    # expected = ['B', 'A']
    # assert header == expected
    result = biggie.to_string(columns=["A"], col_space=17)
    header = result.split("\n")[0].strip().split()
    expected = ["A"]
    assert header == expected
    # these just need to not raise
    biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
    biggie.to_string(columns=["B", "A"], float_format=str)
    biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
    # empty-columns frame renders without error
    frame = DataFrame(index=np.arange(200))
    frame.to_string()
def test_to_string_no_header(self):
    """header=False omits the column-label row."""
    df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
    df_s = df.to_string(header=False)
    expected = "0 1 4\n1 2 5\n2 3 6"
    assert df_s == expected
def test_to_string_specified_header(self):
    """header may be a list of aliases; its length must match the columns."""
    df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
    df_s = df.to_string(header=["X", "Y"])
    expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
    assert df_s == expected
    # one alias for two columns -> error
    msg = "Writing 2 cols but got 1 aliases"
    with pytest.raises(ValueError, match=msg):
        df.to_string(header=["X"])
def test_to_string_no_index(self):
    """index=False drops the row labels (GH 16839, GH 13032)."""
    # GH 16839, GH 13032
    df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
    df_s = df.to_string(index=False)
    # Leading space is expected for positive numbers.
    expected = " x y z\n11 33 AAA\n22 -44 "
    assert df_s == expected
    # column ordering is honored
    df_s = df[["y", "x", "z"]].to_string(index=False)
    expected = " y x z\n 33 11 AAA\n-44 22 "
    assert df_s == expected
def test_to_string_line_width_no_index(self):
    """line_width wrapping combined with index=False (GH 13998, GH 22505)."""
    # GH 13998, GH 22505
    df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
    df_s = df.to_string(line_width=1, index=False)
    expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
    assert df_s == expected
    # wider values
    df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
    df_s = df.to_string(line_width=1, index=False)
    expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
    assert df_s == expected
    # negative values keep their sign in each wrapped block
    df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
    df_s = df.to_string(line_width=1, index=False)
    expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
    assert df_s == expected
def test_to_string_float_formatting(self):
    """Float rendering: scientific fallback for extreme magnitudes, fixed
    notation otherwise, under display.precision=5 and then the default."""
    tm.reset_display_options()
    fmt.set_option(
        "display.precision",
        5,
        "display.column_space",
        12,
        "display.notebook_repr_html",
        False,
    )
    df = DataFrame(
        {"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
    )
    df_s = df.to_string()
    # _three_digit_exp distinguishes platforms printing e+000 vs e+00
    if _three_digit_exp():
        expected = (
            " x\n0 0.00000e+000\n1 2.50000e-001\n"
            "2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
            "5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
            "8 -1.00000e+006"
        )
    else:
        expected = (
            " x\n0 0.00000e+00\n1 2.50000e-01\n"
            "2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
            "5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
            "8 -1.00000e+06"
        )
    assert df_s == expected
    # moderate magnitudes use fixed notation
    df = DataFrame({"x": [3234, 0.253]})
    df_s = df.to_string()
    expected = " x\n0 3234.000\n1 0.253"
    assert df_s == expected
    tm.reset_display_options()
    assert get_option("display.precision") == 6
    # default precision, large magnitude -> scientific again
    df = DataFrame({"x": [1e9, 0.2512]})
    df_s = df.to_string()
    if _three_digit_exp():
        expected = " x\n0 1.000000e+009\n1 2.512000e-001"
    else:
        expected = " x\n0 1.000000e+09\n1 2.512000e-01"
    assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
    """float_format output is not padded to a fixed width (GH 21625, GH 22270)."""
    # GH 21625
    df = DataFrame({"x": [0.19999]})
    expected = " x\n0 0.200"
    assert df.to_string(float_format="%.3f") == expected
    # GH 22270: zero decimals -> no trailing point
    df = DataFrame({"x": [100.0]})
    expected = " x\n0 100"
    assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
    """Small/large magnitude floats fall back to scientific notation.

    Also checks the all-zero product frame: previously its expected value
    was computed but never asserted (the final ``assert`` was missing), and
    the dead expected string had gone stale.
    """
    df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
    result = df.to_string()
    # sadness per above: some platforms print three-digit exponents
    if _three_digit_exp():
        expected = (
            "               a\n"
            "0  1.500000e+000\n"
            "1  1.000000e-017\n"
            "2 -5.500000e-007"
        )
    else:
        expected = (
            "              a\n"
            "0  1.500000e+00\n"
            "1  1.000000e-17\n"
            "2 -5.500000e-07"
        )
    assert result == expected

    # but not all exactly zero
    df = df * 0
    result = df.to_string()
    # fixed: this comparison was previously computed but never asserted;
    # negative zero keeps its sign in the output
    expected = "     a\n0  0.0\n1  0.0\n2 -0.0"
    assert result == expected
def test_to_string_float_index(self):
    """A float index renders each label with a decimal point."""
    index = Index([1.5, 2, 3, 4, 5])
    df = DataFrame(np.arange(5), index=index)
    result = df.to_string()
    expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
    assert result == expected
def test_to_string_complex_float_formatting(self):
    """Complex values respect display.precision on both real and imaginary
    parts, including a signed zero real part (GH #25514, 25745)."""
    # GH #25514, 25745
    with pd.option_context("display.precision", 5):
        df = DataFrame(
            {
                "x": [
                    (0.4467846931321966 + 0.0715185102060818j),
                    (0.2739442392974528 + 0.23515228785438969j),
                    (0.26974928742135185 + 0.3250604054898979j),
                    (-1j),
                ]
            }
        )
        result = df.to_string()
        expected = (
            " x\n0 0.44678+0.07152j\n"
            "1 0.27394+0.23515j\n"
            "2 0.26975+0.32506j\n"
            "3 -0.00000-1.00000j"
        )
        assert result == expected
def test_to_string_ascii_error(self):
    """repr of cells containing raw UTF-8 escape bytes must not raise."""
    data = [
        (
            "0 ",
            " .gitignore ",
            " 5 ",
            " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
        )
    ]
    df = DataFrame(data)
    # it works!
    repr(df)
def test_to_string_int_formatting(self):
    """Integer columns render right-justified with no decimal point."""
    df = DataFrame({"x": [-15, 20, 25, -35]})
    assert issubclass(df["x"].dtype.type, np.integer)
    output = df.to_string()
    expected = " x\n0 -15\n1 20\n2 25\n3 -35"
    assert output == expected
def test_to_string_index_formatter(self):
    """The special '__index__' formatter key maps row labels through a callable."""
    df = DataFrame([range(5), range(5, 10), range(10, 15)])
    # labels 0,1,2 -> 'a','b','c'
    rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
    xp = """\
 0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
    assert rs == xp
def test_to_string_left_justify_cols(self):
    """justify='left' left-aligns the column header."""
    tm.reset_display_options()
    df = DataFrame({"x": [3234, 0.253]})
    df_s = df.to_string(justify="left")
    expected = " x \n0 3234.000\n1 0.253"
    assert df_s == expected
def test_to_string_format_na(self):
    """NaN cells render as 'NaN' and do not disturb float column alignment."""
    tm.reset_display_options()
    # mixed decimal widths
    df = DataFrame(
        {
            "A": [np.nan, -1, -2.1234, 3, 4],
            "B": [np.nan, "foo", "foooo", "fooooo", "bar"],
        }
    )
    result = df.to_string()
    expected = (
        " A B\n"
        "0 NaN NaN\n"
        "1 -1.0000 foo\n"
        "2 -2.1234 foooo\n"
        "3 3.0000 fooooo\n"
        "4 4.0000 bar"
    )
    assert result == expected
    # whole-number floats keep a single decimal place
    df = DataFrame(
        {
            "A": [np.nan, -1.0, -2.0, 3.0, 4.0],
            "B": [np.nan, "foo", "foooo", "fooooo", "bar"],
        }
    )
    result = df.to_string()
    expected = (
        " A B\n"
        "0 NaN NaN\n"
        "1 -1.0 foo\n"
        "2 -2.0 foooo\n"
        "3 3.0 fooooo\n"
        "4 4.0 bar"
    )
    assert result == expected
def test_to_string_format_inf(self):
    """+/-inf cells render as 'inf'/'-inf' without breaking alignment (GH 24861)."""
    # Issue #24861
    tm.reset_display_options()
    df = DataFrame(
        {
            "A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
            "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
        }
    )
    result = df.to_string()
    expected = (
        " A B\n"
        "0 -inf -inf\n"
        "1 inf inf\n"
        "2 -1.0000 foo\n"
        "3 -2.1234 foooo\n"
        "4 3.0000 fooooo\n"
        "5 4.0000 bar"
    )
    assert result == expected
    # whole-number floats keep a single decimal place
    df = DataFrame(
        {
            "A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
            "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
        }
    )
    result = df.to_string()
    expected = (
        " A B\n"
        "0 -inf -inf\n"
        "1 inf inf\n"
        "2 -1.0 foo\n"
        "3 -2.0 foooo\n"
        "4 3.0 fooooo\n"
        "5 4.0 bar"
    )
    assert result == expected
def test_to_string_decimal(self):
    """The decimal argument swaps the decimal separator (GH 23614)."""
    # Issue #23614
    df = DataFrame({"A": [6.0, 3.1, 2.2]})
    expected = " A\n0 6,0\n1 3,1\n2 2,2"
    assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
    """display.show_dimensions: True always shows the footer, False never
    does, and 'truncate' shows it only when the repr is actually truncated."""
    df = DataFrame(123, index=range(10, 15), columns=range(30))
    # True: footer always present
    with option_context(
        "display.max_rows",
        10,
        "display.max_columns",
        40,
        "display.width",
        500,
        "display.expand_frame_repr",
        "info",
        "display.show_dimensions",
        True,
    ):
        assert "5 rows" in str(df)
        assert "5 rows" in df._repr_html_()
    # False: footer never present
    with option_context(
        "display.max_rows",
        10,
        "display.max_columns",
        40,
        "display.width",
        500,
        "display.expand_frame_repr",
        "info",
        "display.show_dimensions",
        False,
    ):
        assert "5 rows" not in str(df)
        assert "5 rows" not in df._repr_html_()
    # 'truncate' with caps exceeded: footer present
    with option_context(
        "display.max_rows",
        2,
        "display.max_columns",
        2,
        "display.width",
        500,
        "display.expand_frame_repr",
        "info",
        "display.show_dimensions",
        "truncate",
    ):
        assert "5 rows" in str(df)
        assert "5 rows" in df._repr_html_()
    # 'truncate' with nothing truncated: footer absent
    with option_context(
        "display.max_rows",
        10,
        "display.max_columns",
        40,
        "display.width",
        500,
        "display.expand_frame_repr",
        "info",
        "display.show_dimensions",
        "truncate",
    ):
        assert "5 rows" not in str(df)
        assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
    """_repr_html_ works under truncation, can be disabled, and honors
    display.show_dimensions."""
    df = float_frame
    df._repr_html_()
    # heavily truncated HTML repr still renders
    fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
    df._repr_html_()
    # notebook_repr_html=False still must not raise
    fmt.set_option("display.notebook_repr_html", False)
    df._repr_html_()
    tm.reset_display_options()
    df = DataFrame([[1, 2], [3, 4]])
    fmt.set_option("display.show_dimensions", True)
    assert "2 rows" in df._repr_html_()
    fmt.set_option("display.show_dimensions", False)
    assert "2 rows" not in df._repr_html_()
    tm.reset_display_options()
def test_repr_html_mathjax(self):
    """display.html.use_mathjax=False adds the tex2jax_ignore CSS class."""
    df = DataFrame([[1, 2], [3, 4]])
    assert "tex2jax_ignore" not in df._repr_html_()
    with pd.option_context("display.html.use_mathjax", False):
        assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
    """'...' appears in the HTML repr only once max_columns is exceeded."""
    max_cols = 20
    # one column under the cap: no truncation marker
    df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
    with option_context("display.max_rows", 60, "display.max_columns", 20):
        assert "..." not in df._repr_html_()
    # one column over the cap: marker present
    wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
    with option_context("display.max_rows", 60, "display.max_columns", 20):
        assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
    """Column-count truncation also applies when columns are a MultiIndex."""
    max_cols = 20
    # exactly max_cols columns: no truncation marker
    mcols = MultiIndex.from_product(
        [np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
    )
    df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
    reg_repr = df._repr_html_()
    assert "..." not in reg_repr
    # two columns over the cap: marker present
    mcols = MultiIndex.from_product(
        (np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
    )
    df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
    with option_context("display.max_rows", 60, "display.max_columns", 20):
        assert "..." in df._repr_html_()
def test_repr_html_long(self):
    """Row truncation in the HTML repr: marker, elided middle value, footer."""
    with option_context("display.max_rows", 60):
        max_rows = get_option("display.max_rows")
        # one row under the cap: full table, middle value visible
        h = max_rows - 1
        df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
        reg_repr = df._repr_html_()
        assert ".." not in reg_repr
        assert str(41 + max_rows // 2) in reg_repr
        # one row over the cap: truncated, middle value elided, footer added
        h = max_rows + 1
        df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
        long_repr = df._repr_html_()
        assert ".." in long_repr
        assert str(41 + max_rows // 2) not in long_repr
        assert f"{h} rows " in long_repr
        assert "2 columns" in long_repr
def test_repr_html_float(self):
    """Row truncation in the HTML repr with a float index."""
    with option_context("display.max_rows", 60):
        max_rows = get_option("display.max_rows")
        # one row under the cap: full table, last value visible
        h = max_rows - 1
        df = DataFrame(
            {
                "idx": np.linspace(-10, 10, h),
                "A": np.arange(1, 1 + h),
                "B": np.arange(41, 41 + h),
            }
        ).set_index("idx")
        reg_repr = df._repr_html_()
        assert ".." not in reg_repr
        assert f"<td>{40 + h}</td>" in reg_repr
        # one row over the cap: truncated with footer
        h = max_rows + 1
        df = DataFrame(
            {
                "idx": np.linspace(-10, 10, h),
                "A": np.arange(1, 1 + h),
                "B": np.arange(41, 41 + h),
            }
        ).set_index("idx")
        long_repr = df._repr_html_()
        assert ".." in long_repr
        assert "<td>31</td>" not in long_repr
        assert f"{h} rows " in long_repr
        assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
    """Row truncation in the HTML repr also applies with a MultiIndex."""
    max_rows = 60
    max_L1 = max_rows // 2
    # exactly max_rows rows: no truncation marker
    tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
    idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
    df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
    with option_context("display.max_rows", 60, "display.max_columns", 20):
        reg_repr = df._repr_html_()
    assert "..." not in reg_repr
    # two rows over the cap: marker present
    tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
    idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
    df = DataFrame(
        np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
    )
    long_repr = df._repr_html_()
    assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
    """display.large_repr='info' swaps the truncated repr for the info view."""
    # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
    # the terminal size to ensure that we try to print something "too big"
    term_width, term_height = get_terminal_size()
    max_rows = 60
    max_cols = 20 + (max(term_width, 80) - 80) // 4
    # Long
    h, w = max_rows + 1, max_cols - 1
    df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
    assert has_vertically_truncated_repr(df)
    with option_context("display.large_repr", "info"):
        assert has_info_repr(df)
    # Wide
    h, w = max_rows - 1, max_cols + 1
    df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
    assert has_horizontally_truncated_repr(df)
    with option_context(
        "display.large_repr", "info", "display.max_columns", max_cols
    ):
        assert has_info_repr(df)
def test_info_repr_max_cols(self):
    """display.max_info_columns decides between the verbose and the
    non-verbose info repr (GH #6939)."""
    # GH #6939
    df = DataFrame(np.random.randn(10, 5))
    # 5 columns > max_info_columns=4 -> non-verbose summary
    with option_context(
        "display.large_repr",
        "info",
        "display.max_columns",
        1,
        "display.max_info_columns",
        4,
    ):
        assert has_non_verbose_info_repr(df)
    # 5 columns <= max_info_columns=5 -> verbose per-column listing
    with option_context(
        "display.large_repr",
        "info",
        "display.max_columns",
        1,
        "display.max_info_columns",
        5,
    ):
        assert not has_non_verbose_info_repr(df)
    # test verbose overrides
    # fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
    """HTML repr still renders when a qtconsole-like get_ipython is present."""
    df = float_frame

    # stand-in for IPython's get_ipython under qtconsole; defined (not
    # called directly here) to simulate that environment
    def get_ipython():
        return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}

    repstr = df._repr_html_()
    assert repstr is not None
    fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
    repstr = df._repr_html_()
    assert "class" in repstr  # info fallback
    tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
    """Large floats render in scientific notation on every data line."""
    vals = [
        2.08430917305e10,
        3.52205017305e10,
        2.30674817305e10,
        2.03954217305e10,
        5.59897817305e10,
    ]
    # skip starts True so the header line passes unconditionally, then every
    # subsequent line must contain the exponent suffix
    skip = True
    for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
        if line.startswith("dtype:"):
            continue
        if _three_digit_exp():
            assert ("+010" in line) or skip
        else:
            assert ("+10" in line) or skip
        skip = False
@pytest.mark.parametrize(
    "data, expected",
    [
        (["3.50"], "0 3.50\ndtype: object"),
        ([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
        ([np.nan], "0 NaN\ndtype: float64"),
        ([None], "0 None\ndtype: object"),
        (["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
        ([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
        ([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
        ([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
    ],
)
def test_repr_str_float_truncation(self, data, expected):
    """GH#38708: float-like strings keep their literal text while real
    floats are trimmed; mixed columns fall back to object dtype."""
    # GH#38708
    series = Series(data)
    result = repr(series)
    assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
    """Period columns render each value at its own frequency's resolution (GH 12615)."""
    # GH 12615
    df = DataFrame(
        {
            "A": pd.period_range("2013-01", periods=4, freq="M"),
            # mixed frequencies in one column: M, D, and H
            "B": [
                pd.Period("2011-01", freq="M"),
                pd.Period("2011-02-01", freq="D"),
                pd.Period("2011-03-01 09:00", freq="H"),
                pd.Period("2011-04", freq="M"),
            ],
            "C": list("abcd"),
        }
    )
    exp = (
        " A B C\n"
        "0 2013-01 2011-01 a\n"
        "1 2013-02 2011-02-01 b\n"
        "2 2013-03 2011-03-01 09:00 c\n"
        "3 2013-04 2011-04 d"
    )
    assert str(df) == exp
@pytest.mark.parametrize(
    "length, max_rows, min_rows, expected",
    [
        (10, 10, 10, 10),
        (10, 10, None, 10),
        (10, 8, None, 8),
        (20, 30, 10, 30),  # max_rows > len(frame), hence max_rows
        (50, 30, 10, 10),  # max_rows < len(frame), hence min_rows
        (100, 60, 10, 10),  # same
        (60, 60, 10, 60),  # edge case
        (61, 60, 10, 10),  # edge case
    ],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
    """Check that display logic is correct.
    GH #37359
    See description here:
    https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
    """
    # note: pytest binds parametrize arguments by name, so the differing
    # argument order in this signature is harmless
    formatter = fmt.DataFrameFormatter(
        DataFrame(np.random.rand(length, 3)),
        max_rows=max_rows,
        min_rows=min_rows,
    )
    result = formatter.max_rows_fitted
    assert result == expected
def gen_series_formatting():
    """Build the fixture Series used by the Series-formatting tests.

    Returns a dict with single- and double-letter constant Series plus an
    ascending-width Series and its reverse.
    """
    ascending = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
    return {
        "onel": Series(["a"] * 100),
        "twol": Series(["ab"] * 100),
        "asc": ascending,
        "desc": ascending[::-1],
    }
class TestSeriesFormatting:
def setup_method(self, method):
    # shared fixture: a business-day float time series used by the tests below
    self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
    """Series.to_string: buffer output, float_format, empty series, and the
    name/length/dtype footer."""
    buf = StringIO()
    s = self.ts.to_string()
    # writing to a buffer returns None and produces identical text
    retval = self.ts.to_string(buf=buf)
    assert retval is None
    assert buf.getvalue().strip() == s
    # pass float_format
    format = "%.4f".__mod__
    result = self.ts.to_string(float_format=format)
    result = [x.split()[1] for x in result.split("\n")[:-1]]
    expected = [format(x) for x in self.ts]
    assert result == expected
    # empty string
    result = self.ts[:0].to_string()
    assert result == "Series([], Freq: B)"
    result = self.ts[:0].to_string(length=0)
    assert result == "Series([], Freq: B)"
    # name and length
    cp = self.ts.copy()
    cp.name = "foo"
    result = cp.to_string(length=True, name=True, dtype=True)
    last_line = result.split("\n")[-1].strip()
    assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
    """Mixed-type Series: NaN renders as NaN, but strings/ints keep their form."""
    s = Series(["foo", np.nan, -1.23, 4.56])
    result = s.to_string()
    expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
    assert result == expected
    # but don't count NAs as floats
    s = Series(["foo", np.nan, "bar", "baz"])
    result = s.to_string()
    expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
    assert result == expected
    # an int among strings stays an int
    s = Series(["foo", 5, "bar", "baz"])
    result = s.to_string()
    expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
    assert result == expected
def test_to_string_float_na_spacing(self):
    """NaN entries align with the float column's decimal formatting."""
    s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
    # blank out every other value
    s[::2] = np.nan
    result = s.to_string()
    expected = (
        "0 NaN\n"
        + "1 1.5678\n"
        + "2 NaN\n"
        + "3 -3.0000\n"
        + "4 NaN"
    )
    assert result == expected
def test_to_string_without_index(self):
    """index=False drops the row labels (GH 11729)."""
    # GH 11729 Test index=False option
    s = Series([1, 2, 3, 4])
    result = s.to_string(index=False)
    expected = "1\n" + "2\n" + "3\n" + "4"
    assert result == expected
def test_unicode_name_in_footer(self):
    """Building the footer with a Hebrew Series name must not raise."""
    s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
    sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
    sf._get_footer()  # should not raise exception
def test_east_asian_unicode_series(self):
    """Repr alignment for Series containing East Asian wide characters,
    both with and without ``display.unicode.east_asian_width``.

    NOTE(review): the expected literals throughout this test look
    whitespace-mangled (runs of alignment spaces collapsed to single
    spaces), so the exact comparisons cannot match real pandas output --
    restore the padded literals from upstream pandas before relying on
    this test.
    """
    # not aligned properly because of east asian width

    # unicode index
    s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
    expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
    assert repr(s) == expected

    # unicode values
    s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
    expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
    assert repr(s) == expected

    # both
    s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
    expected = (
        "ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
    )
    assert repr(s) == expected

    # unicode footer
    s = Series(
        ["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
    )
    expected = (
        "ああ あ\nいいいい いい\nう ううう\n"
        "えええ ええええ\nName: おおおおおおお, dtype: object"
    )
    assert repr(s) == expected

    # MultiIndex
    idx = pd.MultiIndex.from_tuples(
        [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
    )
    s = Series([1, 22, 3333, 44444], index=idx)
    expected = (
        "あ いい 1\n"
        "う え 22\n"
        "おおお かかかか 3333\n"
        "き くく 44444\ndtype: int64"
    )
    assert repr(s) == expected

    # object dtype, shorter than unicode repr
    s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
    expected = (
        "1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
    )
    assert repr(s) == expected

    # object dtype, longer than unicode repr
    s = Series(
        [1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
    )
    expected = (
        "1 1\n"
        "AB 22\n"
        "2011-01-01 00:00:00 3333\n"
        "あああ 44444\ndtype: int64"
    )
    assert repr(s) == expected

    # truncate
    with option_context("display.max_rows", 3):
        s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
        expected = (
            "0 あ\n ... \n"
            "3 ええええ\n"
            "Name: おおおおおおお, Length: 4, dtype: object"
        )
        assert repr(s) == expected

        s.index = ["ああ", "いいいい", "う", "えええ"]
        expected = (
            "ああ あ\n ... \n"
            "えええ ええええ\n"
            "Name: おおおおおおお, Length: 4, dtype: object"
        )
        assert repr(s) == expected

    # Enable Unicode option -----------------------------------------
    with option_context("display.unicode.east_asian_width", True):

        # unicode index
        s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
        expected = (
            "あ a\nいい bb\nううう CCC\n"
            "ええええ D\ndtype: object"
        )
        assert repr(s) == expected

        # unicode values
        s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
        expected = (
            "a あ\nbb いい\nc ううう\n"
            "ddd ええええ\ndtype: object"
        )
        assert repr(s) == expected

        # both
        s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
        expected = (
            "ああ あ\n"
            "いいいい いい\n"
            "う ううう\n"
            "えええ ええええ\ndtype: object"
        )
        assert repr(s) == expected

        # unicode footer
        s = Series(
            ["あ", "いい", "ううう", "ええええ"],
            index=["ああ", "いいいい", "う", "えええ"],
            name="おおおおおおお",
        )
        expected = (
            "ああ あ\n"
            "いいいい いい\n"
            "う ううう\n"
            "えええ ええええ\n"
            "Name: おおおおおおお, dtype: object"
        )
        assert repr(s) == expected

        # MultiIndex
        idx = pd.MultiIndex.from_tuples(
            [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
        )
        s = Series([1, 22, 3333, 44444], index=idx)
        expected = (
            "あ いい 1\n"
            "う え 22\n"
            "おおお かかかか 3333\n"
            "き くく 44444\n"
            "dtype: int64"
        )
        assert repr(s) == expected

        # object dtype, shorter than unicode repr
        s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
        expected = (
            "1 1\nAB 22\nNaN 3333\n"
            "あああ 44444\ndtype: int64"
        )
        assert repr(s) == expected

        # object dtype, longer than unicode repr
        s = Series(
            [1, 22, 3333, 44444],
            index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
        )
        expected = (
            "1 1\n"
            "AB 22\n"
            "2011-01-01 00:00:00 3333\n"
            "あああ 44444\ndtype: int64"
        )
        assert repr(s) == expected

        # truncate
        with option_context("display.max_rows", 3):
            s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
            expected = (
                "0 あ\n ... \n"
                "3 ええええ\n"
                "Name: おおおおおおお, Length: 4, dtype: object"
            )
            assert repr(s) == expected

            s.index = ["ああ", "いいいい", "う", "えええ"]
            expected = (
                "ああ あ\n"
                " ... \n"
                "えええ ええええ\n"
                "Name: おおおおおおお, Length: 4, dtype: object"
            )
            assert repr(s) == expected

        # ambiguous unicode
        s = Series(
            ["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
        )
        expected = (
            "ああ ¡¡\n"
            "¡¡¡¡いい い¡¡\n"
            "¡¡ ううう\n"
            "えええ ええええ\ndtype: object"
        )
        assert repr(s) == expected
def test_float_trim_zeros(self):
    # Large floats rendered in scientific notation keep a uniform
    # exponent width across the whole column.
    vals = [
        2.08430917305e10,
        3.52205017305e10,
        2.30674817305e10,
        2.03954217305e10,
        5.59897817305e10,
    ]
    exponent = "+010" if _three_digit_exp() else "+10"
    for line in repr(Series(vals)).split("\n"):
        if not line.startswith("dtype:"):
            assert exponent in line
def test_datetimeindex(self):
    # to_string/str of a datetime-indexed Series shows dates and NaT.
    index = date_range("20130102", periods=6)
    s = Series(1, index=index)
    result = s.to_string()
    assert "2013-01-02" in result

    # nat in index
    s2 = Series(2, index=[Timestamp("20130111"), NaT])
    # NOTE(review): Series.append was deprecated in pandas 1.4 and
    # removed in 2.0 -- migrate to pd.concat([s2, s]) when upgrading.
    # TODO confirm the target pandas version.
    s = s2.append(s)
    result = s.to_string()
    assert "NaT" in result

    # nat in summary
    result = str(s2.index)
    assert "NaT" in result
@pytest.mark.parametrize(
    "start_date",
    [
        "2017-01-01 23:59:59.999999999",
        "2017-01-01 23:59:59.99999999",
        "2017-01-01 23:59:59.9999999",
        "2017-01-01 23:59:59.999999",
        "2017-01-01 23:59:59.99999",
        "2017-01-01 23:59:59.9999",
    ],
)
def test_datetimeindex_highprecision(self, start_date):
    # GH19030
    # Check that high-precision time values for the end of day are
    # included in repr for DatetimeIndex
    # Checked both for a Series holding the datetimes as values...
    s1 = Series(date_range(start=start_date, freq="D", periods=5))
    result = str(s1)
    assert start_date in result

    # ...and for the repr of the DatetimeIndex itself.
    dti = date_range(start=start_date, freq="D", periods=5)
    s2 = Series(3, index=dti)
    result = str(s2.index)
    assert start_date in result
def test_timedelta64(self):
    """Smoke-test timedelta64 rendering via Series.to_string.

    NOTE(review): the final exact-equality literal looks
    whitespace-mangled (alignment padding collapsed); restore the padded
    string from upstream pandas before relying on it.
    """
    from datetime import datetime, timedelta

    Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()

    s = Series(date_range("2012-1-1", periods=3, freq="D"))

    # GH2146
    # adding NaTs
    y = s - s.shift(1)
    result = y.to_string()
    assert "1 days" in result
    assert "00:00:00" not in result
    assert "NaT" in result

    # with frac seconds
    o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
    y = s - o
    result = y.to_string()
    assert "-1 days +23:59:59.999850" in result

    # rounding?
    o = Series([datetime(2012, 1, 1, 1)] * 3)
    y = s - o
    result = y.to_string()
    assert "-1 days +23:00:00" in result
    assert "1 days 23:00:00" in result

    o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
    y = s - o
    result = y.to_string()
    assert "-1 days +22:59:00" in result
    assert "1 days 22:59:00" in result

    o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
    y = s - o
    result = y.to_string()
    assert "-1 days +22:58:59.999850" in result
    assert "0 days 22:58:59.999850" in result

    # neg time
    td = timedelta(minutes=5, seconds=3)
    s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
    y = s - s2
    result = y.to_string()
    assert "-1 days +23:54:57" in result

    td = timedelta(microseconds=550)
    s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
    y = s - td
    result = y.to_string()
    assert "2012-01-01 23:59:59.999450" in result

    # no boxing of the actual elements
    td = Series(pd.timedelta_range("1 days", periods=3))
    result = td.to_string()
    assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
    # GH 12615
    # Rendering of PeriodIndex-backed and Period-valued Series.
    # NOTE(review): the expected literals below look whitespace-mangled
    # (alignment padding collapsed to single spaces); restore the padded
    # strings from upstream pandas before relying on the equalities.
    index = pd.period_range("2013-01", periods=6, freq="M")
    s = Series(np.arange(6, dtype="int64"), index=index)
    exp = (
        "2013-01 0\n"
        "2013-02 1\n"
        "2013-03 2\n"
        "2013-04 3\n"
        "2013-05 4\n"
        "2013-06 5\n"
        "Freq: M, dtype: int64"
    )
    assert str(s) == exp

    s = Series(index)
    exp = (
        "0 2013-01\n"
        "1 2013-02\n"
        "2 2013-03\n"
        "3 2013-04\n"
        "4 2013-05\n"
        "5 2013-06\n"
        "dtype: period[M]"
    )
    assert str(s) == exp

    # periods with mixed freq
    s = Series(
        [
            pd.Period("2011-01", freq="M"),
            pd.Period("2011-02-01", freq="D"),
            pd.Period("2011-03-01 09:00", freq="H"),
        ]
    )
    exp = (
        "0 2011-01\n1 2011-02-01\n"
        "2 2011-03-01 09:00\ndtype: object"
    )
    assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
    # All visible floats must share one format even when rows are elided.
    # NOTE(review): the expected literal looks whitespace-mangled
    # (alignment padding collapsed); restore the padded string from
    # upstream pandas before relying on the equality.
    s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
    with option_context("display.max_rows", 10, "display.show_dimensions", False):
        res = repr(s)
        exp = (
            "0 1.0000\n1 1.0000\n2 1.0000\n3 "
            "1.0000\n4 1.0000\n ... \n125 "
            "1.0000\n126 1.0000\n127 0.9999\n128 "
            "1.0000\n129 1.0000\ndtype: float64"
        )
        assert res == exp
def chck_ncols(self, s):
    """Assert that every data line of the truncated repr of *s* has the
    same stripped width, i.e. the columns line up.

    Bug fix: the original assigned ``res = repr(s)`` and
    ``lines = res.split("\\n")`` and then immediately discarded both,
    calling ``repr(s)`` a second time inside the comprehension.  The
    repr is now rendered exactly once.
    """
    with option_context("display.max_rows", 10):
        lines = [
            line
            for line in repr(s).split("\n")
            # drop the "..." ellipsis row inserted by truncation
            if not re.match(r"[^\.]*\.+", line)
        ][:-1]  # drop the dtype/footer line
    ncolsizes = len({len(line.strip()) for line in lines})
    assert ncolsizes == 1
def test_format_explicit(self):
    # Exact truncated reprs for the canned formatting fixtures.
    # NOTE(review): the expected literals look whitespace-mangled
    # (alignment padding collapsed); restore the padded strings from
    # upstream pandas before relying on the equalities.
    test_sers = gen_series_formatting()
    with option_context("display.max_rows", 4, "display.show_dimensions", False):
        res = repr(test_sers["onel"])
        exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
        assert exp == res
        res = repr(test_sers["twol"])
        exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
        assert exp == res
        res = repr(test_sers["asc"])
        exp = (
            "0 a\n1 ab\n ... \n4 abcde\n5 "
            "abcdef\ndtype: object"
        )
        assert exp == res
        res = repr(test_sers["desc"])
        exp = (
            "5 abcdef\n4 abcde\n ... \n1 ab\n0 "
            "a\ndtype: object"
        )
        assert exp == res
def test_ncols(self):
    # Column alignment must hold for every canned formatting fixture.
    for fixture in gen_series_formatting().values():
        self.chck_ncols(fixture)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
    # display.min_rows bounds how many rows survive truncation.
    # NOTE(review): the "2 " and "5 5" literals below look
    # whitespace-mangled (upstream asserts padded strings); restore them
    # before relying on the substring checks.
    s = Series(range(20))

    # default setting no truncation even if above min_rows
    assert ".." not in repr(s)

    s = Series(range(61))

    # default of max_rows 60 triggers truncation if above
    assert ".." in repr(s)

    with option_context("display.max_rows", 10, "display.min_rows", 4):
        # truncated after first two rows
        assert ".." in repr(s)
        assert "2 " not in repr(s)

    with option_context("display.max_rows", 12, "display.min_rows", None):
        # when set to None, follow value of max_rows
        assert "5 5" in repr(s)

    with option_context("display.max_rows", 10, "display.min_rows", 12):
        # when set value higher as max_rows, use the minimum
        assert "5 5" not in repr(s)

    with option_context("display.max_rows", None, "display.min_rows", 12):
        # max_rows of None -> never truncate
        assert ".." not in repr(s)
def test_to_string_name(self):
    # name=True appends a "Name: ..." footer; name=False omits it.
    # NOTE(review): expected literals look whitespace-mangled (alignment
    # padding collapsed); restore the padded strings from upstream pandas.
    s = Series(range(100), dtype="int64")
    s.name = "myser"
    res = s.to_string(max_rows=2, name=True)
    exp = "0 0\n ..\n99 99\nName: myser"
    assert res == exp
    res = s.to_string(max_rows=2, name=False)
    exp = "0 0\n ..\n99 99"
    assert res == exp
def test_to_string_dtype(self):
    # dtype=True appends a "dtype: ..." footer; dtype=False omits it.
    # NOTE(review): expected literals look whitespace-mangled (alignment
    # padding collapsed); restore the padded strings from upstream pandas.
    s = Series(range(100), dtype="int64")
    res = s.to_string(max_rows=2, dtype=True)
    exp = "0 0\n ..\n99 99\ndtype: int64"
    assert res == exp
    res = s.to_string(max_rows=2, dtype=False)
    exp = "0 0\n ..\n99 99"
    assert res == exp
def test_to_string_length(self):
    # length=True appends a "Length: ..." footer.
    # NOTE(review): expected literal looks whitespace-mangled (alignment
    # padding collapsed); restore the padded string from upstream pandas.
    s = Series(range(100), dtype="int64")
    res = s.to_string(max_rows=2, length=True)
    exp = "0 0\n ..\n99 99\nLength: 100"
    assert res == exp
def test_to_string_na_rep(self):
    # na_rep replaces NaN with the given token in the rendered output.
    # NOTE(review): expected literal looks whitespace-mangled (alignment
    # padding collapsed); restore the padded string from upstream pandas.
    s = Series(index=range(100), dtype=np.float64)
    res = s.to_string(na_rep="foo", max_rows=2)
    exp = "0 foo\n ..\n99 foo"
    assert res == exp
def test_to_string_float_format(self):
    # A custom float_format callable controls the value rendering.
    # NOTE(review): expected literal looks whitespace-mangled (alignment
    # padding collapsed); restore the padded string from upstream pandas.
    s = Series(range(10), dtype="float64")
    res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
    exp = "0 0.0\n ..\n9 9.0"
    assert res == exp
def test_to_string_header(self):
    # header=True prints the index name above the data; header=False
    # suppresses it.
    # NOTE(review): expected literals look whitespace-mangled (alignment
    # padding collapsed); restore the padded strings from upstream pandas.
    s = Series(range(10), dtype="int64")
    s.index.name = "foo"
    res = s.to_string(header=True, max_rows=2)
    exp = "foo\n0 0\n ..\n9 9"
    assert res == exp
    res = s.to_string(header=False, max_rows=2)
    exp = "0 0\n ..\n9 9"
    assert res == exp
def test_to_string_multindex_header(self):
    # GH 16718
    # A custom header list replaces the column labels for a MultiIndex
    # frame.
    # NOTE(review): expected literal looks whitespace-mangled (alignment
    # padding collapsed); restore the padded string from upstream pandas.
    df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
    res = df.to_string(header=["r1", "r2"])
    exp = " r1 r2\na b \n0 1 2 3"
    assert res == exp
def test_to_string_empty_col(self):
    # GH 13653
    # Empty strings still occupy their padded cell when index=False.
    # NOTE(review): the expected regex pattern looks whitespace-mangled
    # (padding collapsed to single spaces); restore the padded pattern
    # from upstream pandas before relying on the match.
    s = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
    res = s.to_string(index=False)
    exp = " \n Hello\n World\n \n \nMooooo\n \n "
    assert re.match(exp, res)
class TestGenericArrayFormatter:
    """Fallback formatter used for dtypes without a dedicated formatter.

    NOTE(review): several expected literals below look whitespace-mangled
    (leading alignment spaces collapsed to a single space); restore the
    padded strings from upstream pandas before relying on the equality
    checks.
    """

    def test_1d_array(self):
        # GenericArrayFormatter is used on types for which there isn't a dedicated
        # formatter. np.bool_ is one of those types.
        obj = fmt.GenericArrayFormatter(np.array([True, False]))
        res = obj.get_result()
        assert len(res) == 2
        # Results should be right-justified.
        assert res[0] == " True"
        assert res[1] == " False"

    def test_2d_array(self):
        # each 2-D row renders as one bracketed cell
        obj = fmt.GenericArrayFormatter(np.array([[True, False], [False, True]]))
        res = obj.get_result()
        assert len(res) == 2
        assert res[0] == " [True, False]"
        assert res[1] == " [False, True]"

    def test_3d_array(self):
        obj = fmt.GenericArrayFormatter(
            np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
        )
        res = obj.get_result()
        assert len(res) == 2
        assert res[0] == " [[True, True], [False, False]]"
        assert res[1] == " [[False, True], [True, False]]"

    def test_2d_extension_type(self):
        # GH 33770

        # Define a stub extension type with just enough code to run Series.__repr__()
        class DtypeStub(pd.api.extensions.ExtensionDtype):
            @property
            def type(self):
                return np.ndarray

            @property
            def name(self):
                return "DtypeStub"

        class ExtTypeStub(pd.api.extensions.ExtensionArray):
            def __len__(self):
                return 2

            def __getitem__(self, ix):
                return [ix == 1, ix == 0]

            @property
            def dtype(self):
                return DtypeStub()

        series = Series(ExtTypeStub())
        res = repr(series)  # This line crashed before #33770 was fixed.
        expected = "0 [False True]\n" + "1 [ True False]\n" + "dtype: DtypeStub"
        assert res == expected
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
    """Float column rendering: precision, zero-trimming, and the switch
    to scientific notation.

    NOTE(review): the multi-line expected literals below look
    whitespace-mangled (column padding collapsed to single spaces);
    restore the padded strings from upstream pandas before relying on
    the equality checks.
    """

    def test_misc(self):
        # an empty array yields an empty result list
        obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
        result = obj.get_result()
        assert len(result) == 0

    def test_format(self):
        obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
        result = obj.get_result()
        assert result[0] == " 12.0"
        assert result[1] == " 0.0"

    def test_output_display_precision_trailing_zeroes(self):
        # Issue #20359: trimming zeros while there is no decimal point
        # Happens when display precision is set to zero
        with pd.option_context("display.precision", 0):
            s = Series([840.0, 4200.0])
            expected_output = "0 840\n1 4200\ndtype: float64"
            assert str(s) == expected_output

    def test_output_significant_digits(self):
        # Issue #9764
        # In case default display precision changes:
        with pd.option_context("display.precision", 6):
            # DataFrame example from issue #9764
            d = DataFrame(
                {
                    "col1": [
                        9.999e-8,
                        1e-7,
                        1.0001e-7,
                        2e-7,
                        4.999e-7,
                        5e-7,
                        5.0001e-7,
                        6e-7,
                        9.999e-7,
                        1e-6,
                        1.0001e-6,
                        2e-6,
                        4.999e-6,
                        5e-6,
                        5.0001e-6,
                        6e-6,
                    ]
                }
            )

            # keyed by row slice (start, stop) -> expected frame repr
            expected_output = {
                (0, 6): " col1\n"
                "0 9.999000e-08\n"
                "1 1.000000e-07\n"
                "2 1.000100e-07\n"
                "3 2.000000e-07\n"
                "4 4.999000e-07\n"
                "5 5.000000e-07",
                (1, 6): " col1\n"
                "1 1.000000e-07\n"
                "2 1.000100e-07\n"
                "3 2.000000e-07\n"
                "4 4.999000e-07\n"
                "5 5.000000e-07",
                (1, 8): " col1\n"
                "1 1.000000e-07\n"
                "2 1.000100e-07\n"
                "3 2.000000e-07\n"
                "4 4.999000e-07\n"
                "5 5.000000e-07\n"
                "6 5.000100e-07\n"
                "7 6.000000e-07",
                (8, 16): " col1\n"
                "8 9.999000e-07\n"
                "9 1.000000e-06\n"
                "10 1.000100e-06\n"
                "11 2.000000e-06\n"
                "12 4.999000e-06\n"
                "13 5.000000e-06\n"
                "14 5.000100e-06\n"
                "15 6.000000e-06",
                (9, 16): " col1\n"
                "9 0.000001\n"
                "10 0.000001\n"
                "11 0.000002\n"
                "12 0.000005\n"
                "13 0.000005\n"
                "14 0.000005\n"
                "15 0.000006",
            }

            for (start, stop), v in expected_output.items():
                assert str(d[start:stop]) == v

    def test_too_long(self):
        # GH 10451
        with pd.option_context("display.precision", 4):
            # need both a number > 1e6 and something that normally formats to
            # having length > display.precision + 6
            df = DataFrame({"x": [12345.6789]})
            assert str(df) == " x\n0 12345.6789"
            df = DataFrame({"x": [2e6]})
            assert str(df) == " x\n0 2000000.0"
            df = DataFrame({"x": [12345.6789, 2e6]})
            assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
class TestRepr_timedelta64:
    """Timedelta._repr_base output in each of its four format modes."""

    @staticmethod
    def _deltas():
        # shared fixtures: one day, zero, one second, half a second
        return (
            pd.to_timedelta(1, unit="D"),
            pd.to_timedelta(0, unit="D"),
            pd.to_timedelta(1, unit="s"),
            pd.to_timedelta(500, unit="ms"),
        )

    def test_none(self):
        one_day, zero, one_sec, half_sec = self._deltas()
        rendered = lambda x: x._repr_base()
        assert rendered(one_day) == "1 days"
        assert rendered(-one_day) == "-1 days"
        assert rendered(zero) == "0 days"
        assert rendered(one_sec) == "0 days 00:00:01"
        assert rendered(half_sec) == "0 days 00:00:00.500000"
        assert rendered(one_day + one_sec) == "1 days 00:00:01"
        assert rendered(-one_day + one_sec) == "-1 days +00:00:01"
        assert rendered(one_day + half_sec) == "1 days 00:00:00.500000"
        assert rendered(-one_day + half_sec) == "-1 days +00:00:00.500000"

    def test_sub_day(self):
        # "sub_day": the day component is dropped when it is zero
        one_day, zero, one_sec, half_sec = self._deltas()
        rendered = lambda x: x._repr_base(format="sub_day")
        assert rendered(one_day) == "1 days"
        assert rendered(-one_day) == "-1 days"
        assert rendered(zero) == "00:00:00"
        assert rendered(one_sec) == "00:00:01"
        assert rendered(half_sec) == "00:00:00.500000"
        assert rendered(one_day + one_sec) == "1 days 00:00:01"
        assert rendered(-one_day + one_sec) == "-1 days +00:00:01"
        assert rendered(one_day + half_sec) == "1 days 00:00:00.500000"
        assert rendered(-one_day + half_sec) == "-1 days +00:00:00.500000"

    def test_long(self):
        # "long": the time-of-day component is always spelled out
        one_day, zero, one_sec, half_sec = self._deltas()
        rendered = lambda x: x._repr_base(format="long")
        assert rendered(one_day) == "1 days 00:00:00"
        assert rendered(-one_day) == "-1 days +00:00:00"
        assert rendered(zero) == "0 days 00:00:00"
        assert rendered(one_sec) == "0 days 00:00:01"
        assert rendered(half_sec) == "0 days 00:00:00.500000"
        assert rendered(one_day + one_sec) == "1 days 00:00:01"
        assert rendered(-one_day + one_sec) == "-1 days +00:00:01"
        assert rendered(one_day + half_sec) == "1 days 00:00:00.500000"
        assert rendered(-one_day + half_sec) == "-1 days +00:00:00.500000"

    def test_all(self):
        # "all": full nanosecond precision
        one_day = pd.to_timedelta(1, unit="D")
        zero = pd.to_timedelta(0, unit="D")
        one_ns = pd.to_timedelta(1, unit="ns")
        rendered = lambda x: x._repr_base(format="all")
        assert rendered(one_day) == "1 days 00:00:00.000000000"
        assert rendered(-one_day) == "-1 days +00:00:00.000000000"
        assert rendered(zero) == "0 days 00:00:00.000000000"
        assert rendered(one_ns) == "0 days 00:00:00.000000001"
        assert rendered(-one_day + one_ns) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter:
    """fmt.Timedelta64Formatter output, boxed (quoted) and unboxed."""

    def test_days(self):
        tdi = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="D")

        boxed = fmt.Timedelta64Formatter(tdi, box=True).get_result()
        assert boxed[0].strip() == "'0 days'"
        assert boxed[1].strip() == "'1 days'"

        boxed = fmt.Timedelta64Formatter(tdi[1:2], box=True).get_result()
        assert boxed[0].strip() == "'1 days'"

        unboxed = fmt.Timedelta64Formatter(tdi, box=False).get_result()
        assert unboxed[0].strip() == "0 days"
        assert unboxed[1].strip() == "1 days"

        unboxed = fmt.Timedelta64Formatter(tdi[1:2], box=False).get_result()
        assert unboxed[0].strip() == "1 days"

    def test_days_neg(self):
        tdi = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="D")
        rendered = fmt.Timedelta64Formatter(-tdi, box=True).get_result()
        assert rendered[0].strip() == "'0 days'"
        assert rendered[1].strip() == "'-1 days'"

    def test_subdays(self):
        seconds = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s")
        rendered = fmt.Timedelta64Formatter(seconds, box=True).get_result()
        assert rendered[0].strip() == "'0 days 00:00:00'"
        assert rendered[1].strip() == "'0 days 00:00:01'"

    def test_subdays_neg(self):
        seconds = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s")
        rendered = fmt.Timedelta64Formatter(-seconds, box=True).get_result()
        assert rendered[0].strip() == "'0 days 00:00:00'"
        assert rendered[1].strip() == "'-1 days +23:59:59'"

    def test_zero(self):
        # a lone zero renders as '0 days' with or without a trailing NaT
        for values in (list(range(1)) + [pd.NaT], list(range(1))):
            tdi = pd.to_timedelta(values, unit="D")
            rendered = fmt.Timedelta64Formatter(tdi, box=True).get_result()
            assert rendered[0].strip() == "'0 days'"
class TestDatetime64Formatter:
    """fmt.Datetime64Formatter resolution handling and custom formatters."""

    def test_mixed(self):
        # any intraday value forces full timestamps for all entries
        ser = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
        rendered = fmt.Datetime64Formatter(ser).get_result()
        assert rendered[0].strip() == "2013-01-01 00:00:00"
        assert rendered[1].strip() == "2013-01-01 12:00:00"

    def test_dates(self):
        # midnight-only values collapse to plain dates
        ser = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
        rendered = fmt.Datetime64Formatter(ser).get_result()
        assert rendered[0].strip() == "2013-01-01"
        assert rendered[1].strip() == "2013-01-02"

    def test_date_nanos(self):
        rendered = fmt.Datetime64Formatter(Series([Timestamp(200)])).get_result()
        assert rendered[0].strip() == "1970-01-01 00:00:00.000000200"

    def test_dates_display(self):
        # 10170
        # make sure that we are consistently display date formatting
        # at every resolution; index 1 is always NaT.
        cases = [
            ("D", "2013-01-01 09:00:00", "2013-01-05 09:00:00"),
            ("s", "2013-01-01 09:00:00", "2013-01-01 09:00:04"),
            ("ms", "2013-01-01 09:00:00.000", "2013-01-01 09:00:00.004"),
            ("us", "2013-01-01 09:00:00.000000", "2013-01-01 09:00:00.000004"),
            ("N", "2013-01-01 09:00:00.000000000", "2013-01-01 09:00:00.000000004"),
        ]
        for freq, first, last in cases:
            ser = Series(date_range("20130101 09:00:00", periods=5, freq=freq))
            ser.iloc[1] = np.nan
            rendered = fmt.Datetime64Formatter(ser).get_result()
            assert rendered[0].strip() == first
            assert rendered[1].strip() == "NaT"
            assert rendered[4].strip() == last

    def test_datetime64formatter_yearmonth(self):
        ser = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
        formatter = fmt.Datetime64Formatter(
            ser, formatter=lambda ts: ts.strftime("%Y-%m")
        )
        assert formatter.get_result() == ["2016-01", "2016-02"]

    def test_datetime64formatter_hoursecond(self):
        ser = Series(
            pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
        )
        formatter = fmt.Datetime64Formatter(
            ser, formatter=lambda ts: ts.strftime("%H:%M")
        )
        assert formatter.get_result() == ["10:10", "12:12"]
class TestNaTFormatting:
    """Both text renderings of the NaT singleton are the literal "NaT"."""

    def test_repr(self):
        rendered = repr(pd.NaT)
        assert rendered == "NaT"

    def test_str(self):
        rendered = str(pd.NaT)
        assert rendered == "NaT"
class TestDatetimeIndexFormat:
    """DatetimeIndex.format(): NaT handling, timezones, explicit formats.

    NOTE(review): ``DatetimeIndex.format`` was deprecated in pandas 2.2
    and removed in 3.0 -- TODO confirm the target pandas version and
    migrate to ``strftime``/``map`` when upgrading.
    """

    def test_datetime(self):
        formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
        assert formatted[0] == "2003-01-01 12:00:00"
        assert formatted[1] == "NaT"

    def test_date(self):
        # midnight-only values collapse to plain dates
        formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
        assert formatted[0] == "2003-01-01"
        assert formatted[1] == "NaT"

    def test_date_tz(self):
        # tz-aware values always include the UTC offset
        formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
        assert formatted[0] == "2013-01-01 00:00:00+00:00"

        formatted = pd.to_datetime([datetime(2013, 1, 1), pd.NaT], utc=True).format()
        assert formatted[0] == "2013-01-01 00:00:00+00:00"

    def test_date_explicit_date_format(self):
        formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
            date_format="%m-%d-%Y", na_rep="UT"
        )
        assert formatted[0] == "02-01-2003"
        assert formatted[1] == "UT"
class TestDatetimeIndexUnicode:
    """str() of a DatetimeIndex quotes entries and uses the shortest form."""

    def test_dates(self):
        rendered = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)]))
        assert "['2013-01-01'," in rendered
        assert ", '2014-01-01']" in rendered

    def test_mixed(self):
        # one intraday value forces full timestamps for every entry
        values = [datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)]
        rendered = str(pd.to_datetime(values))
        assert "'2013-01-01 00:00:00'," in rendered
        assert "'2014-01-01 00:00:00']" in rendered
class TestStringRepTimestamp:
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_tz_dateutil(self):
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_nat_representations(self):
for f in (str, repr, methodcaller("isoformat")):
assert f(pd.NaT) == "NaT"
def test_format_percentiles():
    # Rounding keeps percentile labels unique while dropping useless digits.
    assert fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) == [
        "1.999%",
        "2.001%",
        "50%",
        "66.667%",
        "99.99%",
    ]
    assert fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) == [
        "0%",
        "50%",
        "2.0%",
        "50%",
        "66.67%",
        "99.99%",
    ]

    # anything outside [0, 1], NaN, or non-numeric is rejected
    msg = r"percentiles should all be in the interval \[0,1\]"
    bad_inputs = (
        [0.1, np.nan, 0.5],
        [-0.001, 0.1, 0.5],
        [2, 0.1, 0.5],
        [0.1, 0.5, "a"],
    )
    for bad in bad_inputs:
        with pytest.raises(ValueError, match=msg):
            fmt.format_percentiles(bad)
def test_format_percentiles_integer_idx():
    # Issue #26660: evenly spaced deciles keep clean integer labels.
    deciles = np.linspace(0, 1, 10 + 1)
    result = fmt.format_percentiles(deciles)
    assert result == [f"{pct}%" for pct in range(0, 101, 10)]
@td.check_file_leaks
def test_repr_html_ipython_config(ip):
    # Regression test: calling _repr_html_ both before and after touching
    # the IPython config must not raise. ``ip`` is the IPython fixture.
    code = textwrap.dedent(
        """\
from pandas import DataFrame
df = DataFrame({"A": [1, 2]})
df._repr_html_()
cfg = get_ipython().config
cfg['IPKernelApp']['parent_appname']
df._repr_html_()
"""
    )
    result = ip.run_cell(code)
    assert not result.error_in_exec
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
@pytest.mark.parametrize(
    "encoding, data",
    [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
)
def test_filepath_or_buffer_arg(
    method,
    filepath_or_buffer,
    assert_filepath_or_buffer_equals,
    encoding,
    data,
    filepath_or_buffer_id,
):
    # Exercise to_string/to_html/to_latex against every buffer flavour:
    # encoding is only legal for path-like targets.
    frame = DataFrame([data])
    writer = getattr(frame, method)

    is_buffer_target = filepath_or_buffer_id not in ["string", "pathlike"]
    if is_buffer_target and encoding is not None:
        err = "buf is not a file name and encoding is specified."
        with pytest.raises(ValueError, match=err):
            writer(buf=filepath_or_buffer, encoding=encoding)
    elif encoding == "foo":
        # unknown codec: raises without emitting any warning first
        with tm.assert_produces_warning(None):
            with pytest.raises(LookupError, match="unknown encoding"):
                writer(buf=filepath_or_buffer, encoding=encoding)
    else:
        expected = writer()
        writer(buf=filepath_or_buffer, encoding=encoding)
        assert_filepath_or_buffer_equals(expected)
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
    # An object that is neither a path nor writable must be rejected.
    expected_msg = "buf is not a file name and it has no write method"
    with pytest.raises(TypeError, match=expected_msg):
        getattr(float_frame, method)(buf=object())
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/mpl_toolkits/tests/test_axes_grid.py
|
5
|
1605
|
from matplotlib.testing.decorators import image_comparison
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import matplotlib.pyplot as plt
@image_comparison(baseline_images=['imagegrid_cbar_mode'],
                  extensions=['png'],
                  remove_text=True)
def test_imagegrid_cbar_mode_edge():
    """Exercise ImageGrid edge colorbars for every direction/location pair."""
    X, Y = np.meshgrid(np.linspace(0, 6, 30), np.linspace(0, 6, 30))
    arr = np.sin(X) * np.cos(Y) + 1j*(np.sin(3*Y) * np.cos(Y/2.))

    fig = plt.figure(figsize=(18, 9))
    positions = (241, 242, 243, 244, 245, 246, 247, 248)
    directions = ['row'] * 4 + ['column'] * 4
    cbar_locations = ['left', 'right', 'top', 'bottom'] * 2

    for position, direction, location in zip(positions, directions,
                                             cbar_locations):
        grid = ImageGrid(fig, position,
                         nrows_ncols=(2, 2),
                         direction=direction,
                         cbar_location=location,
                         cbar_size='20%',
                         cbar_mode='edge')
        axes = list(grid)
        images = [
            axes[0].imshow(arr.real, cmap='nipy_spectral'),
            axes[1].imshow(arr.imag, cmap='hot'),
            axes[2].imshow(np.abs(arr), cmap='jet'),
            axes[3].imshow(np.arctan2(arr.imag, arr.real), cmap='hsv'),
        ]
        # Some of these colorbars will be overridden by later ones,
        # depending on the direction and cbar_location
        for ax, im in zip(axes, images):
            ax.cax.colorbar(im)
|
mit
|
rucka/coursera-introduction-to-data-science
|
KaggleCompetitionPeerReview/Tutorial/python/train.py
|
1
|
2761
|
#import csv as csv
import pandas as pd
import numpy as np
import pylab as P
# Exploratory step: load the Titanic training data and show an age
# histogram. The path is relative to this script's location.
# NOTE(review): Python-2-era code using the pylab interface -- the
# commented-out `print` below uses Python 2 syntax; confirm the intended
# interpreter before reuse.
df = pd.read_csv('../data/train.csv', header=0)
#print df[df.Age > 60][['Sex', 'Pclass', 'Age', 'Survived']]
# 16 bins across ages 0-80; alpha chosen for overlay plotting
df['Age'].dropna().hist(bins=16, range=(0,80),alpha = .5)
P.show()
'''
#work on train data
csv_file_object = csv.reader(open('../data/train.csv', 'rb'))
header = csv_file_object.next()
data = []
for row in csv_file_object:
data.append(row)
data = np.array(data)
fare_ceiling = 40
data[data[0::,9].astype(np.float) >= fare_ceiling, 9] = fare_ceiling - 1.0
fare_bracket_size = 10
number_of_price_brackets = fare_ceiling / fare_bracket_size
number_of_classes = len(np.unique(data[0::,2]))
survival_table = np.zeros([2, number_of_classes, number_of_price_brackets], float)
for i in xrange(number_of_classes):
for j in xrange(number_of_price_brackets):
women_only_stats = data[ \
(data[0::,4] == "female") \
&(data[0::,2].astype(np.float) == i+1) \
&(data[0:,9].astype(np.float) >= j*fare_bracket_size) \
&(data[0:,9].astype(np.float) < (j+1)*fare_bracket_size), 1]
men_only_stats = data[ \
(data[0::,4] != "female") \
&(data[0::,2].astype(np.float) == i+1) \
&(data[0:,9].astype(np.float) >= j*fare_bracket_size) \
&(data[0:,9].astype(np.float) < (j+1)*fare_bracket_size), 1]
survival_table[0,i,j] = np.mean(women_only_stats.astype(np.float))
survival_table[1,i,j] = np.mean(men_only_stats.astype(np.float))
survival_table[survival_table != survival_table] = 0
survival_table[survival_table < 0.5] = 0
survival_table[survival_table >= 0.5] = 1
test_file = open('../data/test.csv', 'rb')
test_file_object = csv.reader(test_file)
header = test_file_object.next()
predictions_file = open("genderclassmodel.csv", "wb")
predictions_file_object = csv.writer(predictions_file)
predictions_file_object.writerow(["PassengerId", "Survived"])
for row in test_file_object:
for j in xrange(number_of_price_brackets):
try:
row[8] = float(row[8])
except:
bin_fare = 3 - float(row[1])
break
if row[8] > fare_ceiling:
bin_fare = number_of_price_brackets-1
break
if row[8] >= j*fare_bracket_size and row[8] < (j+1)*fare_bracket_size:
bin_fare = j
break
if row[3] == 'female':
predictions_file_object.writerow([row[0], "%d" % int(survival_table[0, float(row[1]) - 1, bin_fare])])
else:
predictions_file_object.writerow([row[0], "%d" % int(survival_table[1, float(row[1]) - 1, bin_fare])])
test_file.close()
predictions_file.close()
'''
|
apache-2.0
|
russel1237/scikit-learn
|
benchmarks/bench_isotonic.py
|
268
|
3046
|
"""
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This alows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    """Return a noisy, roughly increasing dataset of ``size`` points.

    The signal is ``50 * log(1 + i)`` plus uniform integer noise in
    ``[-50, 50)``.
    """
    # Bug fix: the original body referenced the global ``n`` instead of the
    # ``size`` parameter, so the function only worked by accident when the
    # __main__ block happened to have defined a global ``n``.
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
    """Draw ``size`` sorted normal covariates and boolean Bernoulli responses."""
    covariates = np.sort(np.random.normal(size=size))
    probabilities = 1.0 / (1.0 + np.exp(-covariates))
    return np.random.random(size=size) < probabilities
# Maps the --dataset CLI choice to the corresponding synthetic data generator.
DATASET_GENERATORS = {
    'perturbed_logarithm': generate_perturbed_logarithm_dataset,
    'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
    """
    Runs a single iteration of isotonic regression on the input data,
    and reports the total time taken (in seconds).
    """
    # Collect garbage up front so allocator work left over from previous
    # iterations does not pollute the timing.
    gc.collect()
    tstart = datetime.now()
    isotonic_regression(Y)
    # stdlib timedelta.total_seconds() (Python >= 2.7) replaces the
    # sklearn.utils.bench.total_seconds compatibility helper.
    return (datetime.now() - tstart).total_seconds()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Isotonic Regression benchmark tool")
    parser.add_argument('--iterations', type=int, required=True,
                        help="Number of iterations to average timings over "
                             "for each problem size")
    parser.add_argument('--log_min_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the minimum problem size")
    parser.add_argument('--log_max_problem_size', type=int, required=True,
                        help="Base 10 logarithm of the maximum problem size")
    parser.add_argument('--show_plot', action='store_true',
                        help="Plot timing output with matplotlib")
    parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
                        required=True)
    args = parser.parse_args()
    timings = []
    # NOTE: range() excludes the stop value, so the largest problem size
    # benchmarked is 10 ** (log_max_problem_size - 1).
    for exponent in range(args.log_min_problem_size,
                          args.log_max_problem_size):
        n = 10 ** exponent
        # The same Y is timed repeatedly; generation cost is excluded.
        Y = DATASET_GENERATORS[args.dataset](n)
        time_per_iteration = \
            [bench_isotonic_regression(Y) for i in range(args.iterations)]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)
        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))
    if args.show_plot:
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel('Number of observations')
        plt.ylabel('Time (s)')
        plt.axis('tight')
        plt.loglog()
        plt.show()
|
bsd-3-clause
|
mvdbeek/tools-iuc
|
tools/repmatch_gff3/repmatch_gff3_util.py
|
22
|
17958
|
import bisect
import csv
import os
import shutil
import sys
import tempfile
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot # noqa: I202,E402
# Graph settings
Y_LABEL = 'Counts'
X_LABEL = 'Number of matched replicates'
TICK_WIDTH = 3
# Amount to shift the graph to make labels fit, [left, right, top, bottom]
ADJUST = [0.180, 0.9, 0.9, 0.1]
# Length of tick marks, use TICK_WIDTH for width
pyplot.rc('xtick.major', size=10.00)
pyplot.rc('ytick.major', size=10.00)
pyplot.rc('lines', linewidth=4.00)
pyplot.rc('axes', linewidth=3.00)
pyplot.rc('font', family='Bitstream Vera Sans', size=32.0)
COLORS = 'krb'
ISPY2 = sys.version_info[0] == 2
class Replicate(object):
    """One replicate: peaks parsed from a tab-delimited GFF file, grouped by chromosome."""
    def __init__(self, id, dataset_path):
        """Parse the GFF file at ``dataset_path``; ``id`` is the Galaxy hid."""
        self.id = id
        self.dataset_path = dataset_path
        # Open in whatever mode the csv module expects on this interpreter.
        if ISPY2:
            fh = open(dataset_path, 'rb')
        else:
            fh = open(dataset_path, 'r', newline='')
        # Close the handle once parsing finishes; the original leaked it.
        try:
            self.parse(csv.reader(fh, delimiter='\t'))
        finally:
            fh.close()
    def parse(self, reader):
        """Build ``self.chromosomes`` (name -> Chromosome) from GFF rows."""
        self.chromosomes = {}
        for line in reader:
            # Skip comment lines and quoted header-ish lines.
            if line[0].startswith("#") or line[0].startswith('"'):
                continue
            cname, junk, junk, mid, midplus, value, strand, junk, attrs = line
            attrs = parse_gff_attrs(attrs)
            distance = int(attrs['cw_distance'])
            mid = int(mid)
            midplus = int(midplus)
            value = float(value)
            if cname not in self.chromosomes:
                self.chromosomes[cname] = Chromosome(cname)
            chrom = self.chromosomes[cname]
            chrom.add_peak(Peak(cname, mid, value, distance, self))
        # Sort every chromosome so bisect-based window lookups work.
        for chrom in self.chromosomes.values():
            chrom.sort_by_index()
    def filter(self, up_limit, low_limit):
        """Drop peaks whose c-w distance is outside [low_limit, up_limit]."""
        for chrom in self.chromosomes.values():
            chrom.filter(up_limit, low_limit)
    def size(self):
        """Total number of peaks across all chromosomes."""
        return sum([len(c.peaks) for c in self.chromosomes.values()])
class Chromosome(object):
    """Holds the peaks of one chromosome, sortable and searchable by midpoint."""
    def __init__(self, name):
        self.name = name
        self.peaks = []
    def add_peak(self, peak):
        self.peaks.append(peak)
    def sort_by_index(self):
        # Keep a parallel, sorted list of midpoints for bisect lookups.
        self.peaks.sort(key=lambda p: p.midpoint)
        self.keys = make_keys(self.peaks)
    def remove_peak(self, peak):
        idx = bisect.bisect_left(self.keys, peak.midpoint)
        # Only delete when the bisect position actually holds the peak.
        found = idx < len(self.peaks) and self.peaks[idx].midpoint == peak.midpoint
        if found:
            del self.keys[idx]
            del self.peaks[idx]
    def filter(self, up_limit, low_limit):
        # Keep only peaks whose c-w distance lies within [low_limit, up_limit].
        kept = [p for p in self.peaks if low_limit <= p.distance <= up_limit]
        self.peaks = kept
        self.keys = make_keys(kept)
class Peak(object):
    """A single called peak: location, strength, c-w distance and source replicate."""
    def __init__(self, chrom, midpoint, value, distance, replicate):
        self.chrom = chrom
        self.value = value
        self.midpoint = midpoint
        self.distance = distance
        self.replicate = replicate
    def normalized_value(self, med):
        # Scale the raw value so replicates with different sequencing depth
        # (different median read counts) become comparable.
        return self.value * med / self.replicate.median
class PeakGroup(object):
    """A set of peaks, at most one per replicate, matched across replicates."""
    def __init__(self):
        self.peaks = {}
    def add_peak(self, repid, peak):
        self.peaks[repid] = peak
    @property
    def chrom(self):
        # All member peaks share a chromosome; report the first one's.
        return list(self.peaks.values())[0].chrom
    @property
    def midpoint(self):
        return int(median([p.midpoint for p in self.peaks.values()]))
    @property
    def num_replicates(self):
        return len(self.peaks)
    @property
    def median_distance(self):
        return int(median([p.distance for p in self.peaks.values()]))
    @property
    def value_sum(self):
        return sum([p.value for p in self.peaks.values()])
    def normalized_value(self, med):
        # Median of the per-peak depth-normalized values.
        return median([p.normalized_value(med) for p in self.peaks.values()])
    @property
    def peakpeak_distance(self):
        ids = list(self.peaks.keys())
        first, second = self.peaks[ids[0]], self.peaks[ids[1]]
        return abs(first.midpoint - second.midpoint)
class FrequencyDistribution(object):
    """Counts how often each value (e.g. a replicate count) has been seen."""
    def __init__(self, d=None):
        self.dist = d or {}
    def add(self, x):
        self.dist[x] = self.dist.get(x, 0) + 1
    def graph_series(self):
        # Split the mapping into parallel x/y lists for plotting.
        xs, ys = [], []
        for key in self.dist:
            xs.append(key)
            ys.append(self.dist[key])
        return xs, ys
    def mode(self):
        # Value with the highest count.
        return max(self.dist.items(), key=lambda item: item[1])[0]
    def size(self):
        return sum(self.dist.values())
def stop_err(msg):
    """Write ``msg`` to stderr and terminate with exit status 1."""
    sys.stderr.write(msg)
    sys.exit(1)
def median(data):
    """
    Find the integer median of the data set.
    """
    # Empty input is defined to have median 0 rather than raising.
    if not data:
        return 0
    ordered = sorted(data)
    mid = len(ordered) // 2
    if len(ordered) % 2:
        # Odd count: the single middle element.
        return ordered[mid]
    # Even count: mean of the two middle elements.
    return (ordered[mid] + ordered[mid - 1]) / 2
def make_keys(peaks):
    """Return the midpoint of every peak, preserving input order."""
    return [peak.midpoint for peak in peaks]
def get_window(chromosome, target_peaks, distance):
    """
    Returns a window of all peaks from a replicate within a certain distance of
    a peak from another replicate.
    """
    targets = list(target_peaks)
    # Expand [lower, upper] to cover every target midpoint +- distance.
    lower = upper = targets[0].midpoint
    for target in targets:
        lower = min(lower, target.midpoint - distance)
        upper = max(upper, target.midpoint + distance)
    lo = bisect.bisect_left(chromosome.keys, lower)
    hi = bisect.bisect_right(chromosome.keys, upper)
    return (chromosome.peaks[lo:hi], chromosome.name)
def match_largest(window, peak, chrum):
    """Pick the strongest peak in the window, or None when nothing can match."""
    if not window or peak.chrom != chrum:
        return None
    return max(window, key=lambda candidate: candidate.value)
def match_closest(window, peak, chrum):
    """Pick the window peak whose midpoint is nearest to ``peak``'s, or None."""
    if not window or peak.chrom != chrum:
        return None
    def gap(candidate):
        return abs(candidate.midpoint - peak.midpoint)
    return min(window, key=gap)
def frequency_histogram(freqs, dataset_path, labels=None, title=''):
    """Plot grouped bar charts of each FrequencyDistribution in ``freqs``.

    The figure is written to ``dataset_path``.  ``labels`` and ``title`` are
    accepted for interface compatibility but are not read by the body.
    """
    # ``labels`` previously defaulted to a mutable list ([]), which is shared
    # across calls; ``None`` is the safe equivalent since it is never read.
    pyplot.clf()
    pyplot.figure(figsize=(10, 10))
    for i, freq in enumerate(freqs):
        xvals, yvals = freq.graph_series()
        # Go from high to low
        xvals.reverse()
        pyplot.bar([x - 0.4 + 0.8 / len(freqs) * i for x in xvals], yvals, width=0.8 / len(freqs), color=COLORS[i])
    pyplot.xticks(range(min(xvals), max(xvals) + 1), map(str, reversed(range(min(xvals), max(xvals) + 1))))
    pyplot.xlabel(X_LABEL)
    pyplot.ylabel(Y_LABEL)
    pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])
    ax = pyplot.gca()
    for l in ax.get_xticklines() + ax.get_yticklines():
        l.set_markeredgewidth(TICK_WIDTH)
    pyplot.savefig(dataset_path)
# Dispatch table mapping the matching-method option to its strategy function.
METHODS = {'closest': match_closest, 'largest': match_largest}
def gff_attrs(l):
    """Serialize (key, value) pairs into a GFF attribute string; '.' when empty."""
    if not l:
        return '.'
    parts = ['%s=%s' % (item[0], item[1]) for item in l]
    return ';'.join(parts)
def parse_gff_attrs(s):
    """Parse a GFF attribute string into a dict; '.' means no attributes."""
    if s == '.':
        return {}
    attrs = {}
    for pair in s.split(';'):
        name, value = pair.split('=')
        attrs[name] = value
    return attrs
def gff_row(cname, start, end, score, source, stype='.', strand='.', phase='.', attrs=None):
    """Assemble one 9-column GFF row (as a tuple) in canonical column order."""
    attr_text = gff_attrs(attrs or [])
    return (cname, source, stype, start, end, score, strand, phase, attr_text)
def get_temporary_plot_path():
    """
    Return the path to a temporary file with a valid image format
    file extension that can be used with bioformats.
    """
    scratch_dir = tempfile.mkdtemp(prefix='tmp-repmatch-')
    handle, path = tempfile.mkstemp(suffix='.pdf', dir=scratch_dir)
    # Only the file name is needed here; the caller writes the plot later.
    os.close(handle)
    return path
def process_files(dataset_paths, galaxy_hids, method, distance, step, replicates, up_limit, low_limit, output_files,
                  output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram):
    """Run peak matching once per requested method and write the outputs.

    Delegates the actual matching to perform_process(); afterwards,
    optionally renders a statistics histogram from the returned distribution.
    """
    # The extra histogram is only produced when ALL outputs and ALL methods
    # were requested.
    output_statistics_histogram_file = output_files in ["all"] and method in ["all"]
    # Nothing to match against with fewer than two replicates.
    if len(dataset_paths) < 2:
        return
    if method == 'all':
        match_methods = METHODS.keys()
    else:
        match_methods = [method]
    for match_method in match_methods:
        statistics = perform_process(dataset_paths,
                                     galaxy_hids,
                                     match_method,
                                     distance,
                                     step,
                                     replicates,
                                     up_limit,
                                     low_limit,
                                     output_files,
                                     output_matched_peaks,
                                     output_unmatched_peaks,
                                     output_detail,
                                     output_statistics_table,
                                     output_statistics_histogram)
    if output_statistics_histogram_file:
        tmp_statistics_histogram_path = get_temporary_plot_path()
        # NOTE(review): only the statistics of the LAST method survive the
        # loop above, so this "combined" histogram plots a single series —
        # confirm whether per-method statistics were intended to accumulate.
        frequency_histogram([stat['distribution'] for stat in [statistics]],
                            tmp_statistics_histogram_path,
                            METHODS.keys())
        shutil.move(tmp_statistics_histogram_path, output_statistics_histogram)
def perform_process(dataset_paths, galaxy_hids, method, distance, step, num_required, up_limit, low_limit, output_files,
                    output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram):
    """Match peaks across replicate GFF datasets and write the requested outputs.

    Parses each dataset into a Replicate, optionally filters peaks by c-w
    distance, greedily groups peaks across replicates with the chosen match
    method, then writes matched/unmatched peak files plus the optional
    detail, statistics-table and histogram outputs.  Returns a dict holding
    the replicate-count FrequencyDistribution.
    """
    # Flags deciding which optional outputs are produced on this run.
    output_detail_file = output_files in ["all"] and output_detail is not None
    output_statistics_table_file = output_files in ["all"] and output_statistics_table is not None
    output_unmatched_peaks_file = output_files in ["all", "matched_peaks_unmatched_peaks"] and output_unmatched_peaks is not None
    output_statistics_histogram_file = output_files in ["all"] and output_statistics_histogram is not None
    replicates = []
    for i, dataset_path in enumerate(dataset_paths):
        try:
            galaxy_hid = galaxy_hids[i]
            r = Replicate(galaxy_hid, dataset_path)
            replicates.append(r)
        except Exception as e:
            stop_err('Unable to parse file "%s", exception: %s' % (dataset_path, str(e)))
    # Compact text encoding of the run parameters (not referenced below).
    attrs = 'd%sr%s' % (distance, num_required)
    if up_limit != 1000:
        attrs += 'u%d' % up_limit
    if low_limit != -1000:
        attrs += 'l%d' % low_limit
    if step != 0:
        attrs += 's%d' % step
    def td_writer(file_path):
        # Returns a tab-delimited writer for a certain output
        # NOTE(review): the underlying handle is never closed explicitly;
        # flushing relies on interpreter exit — confirm acceptable.
        if ISPY2:
            fh = open(file_path, 'wb')
            return csv.writer(fh, delimiter='\t')
        else:
            fh = open(file_path, 'w', newline='')
            return csv.writer(fh, delimiter='\t', quoting=csv.QUOTE_NONE)
    # Header for the detail output: group columns plus one column set per
    # replicate.
    labels = ('chrom',
              'median midpoint',
              'median midpoint+1',
              'median normalized reads',
              'replicates',
              'median c-w distance',
              'reads sum')
    for replicate in replicates:
        labels += ('chrom',
                   'median midpoint',
                   'median midpoint+1',
                   'c-w sum',
                   'c-w distance',
                   'replicate id')
    matched_peaks_output = td_writer(output_matched_peaks)
    if output_statistics_table_file:
        statistics_table_output = td_writer(output_statistics_table)
        statistics_table_output.writerow(('data', 'median read count'))
    if output_detail_file:
        detail_output = td_writer(output_detail)
        detail_output.writerow(labels)
    if output_unmatched_peaks_file:
        unmatched_peaks_output = td_writer(output_unmatched_peaks)
        unmatched_peaks_output.writerow(('chrom', 'midpoint', 'midpoint+1', 'c-w sum', 'c-w distance', 'replicate id'))
    # Perform filtering
    if up_limit < 1000 or low_limit > -1000:
        for replicate in replicates:
            replicate.filter(up_limit, low_limit)
    # Actually merge the peaks
    peak_groups = []
    unmatched_peaks = []
    freq = FrequencyDistribution()
    def do_match(reps, distance):
        # Copy list because we will mutate it, but keep replicate references.
        reps = reps[:]
        while len(reps) > 1:
            # Iterate over each replicate as "main"
            main = reps[0]
            reps.remove(main)
            for chromosome in list(main.chromosomes.values()):
                peaks_by_value = chromosome.peaks[:]
                # Sort main replicate by value
                peaks_by_value.sort(key=lambda peak: -peak.value)
                def search_for_matches(group):
                    # Here we use multiple passes, expanding the window to be
                    # +- distance from any previously matched peak.
                    while True:
                        new_match = False
                        for replicate in reps:
                            if replicate.id in group.peaks:
                                # Stop if match already found for this replicate
                                continue
                            try:
                                # Lines changed to remove a major bug by Rohit Reja.
                                window, chrum = get_window(replicate.chromosomes[chromosome.name], list(group.peaks.values()), distance)
                                # NOTE(review): ``peak`` is a closure over the
                                # enclosing loop variable, not a parameter; on
                                # a fresh do_match call (step > 0) it can be
                                # unbound and raise NameError — confirm intent.
                                match = METHODS[method](window, peak, chrum)
                            except KeyError:
                                continue
                            if match:
                                group.add_peak(replicate.id, match)
                                new_match = True
                        if not new_match:
                            break
                # Attempt to enlarge existing peak groups
                for group in peak_groups:
                    old_peaks = list(group.peaks.values())
                    search_for_matches(group)
                    for peak in list(group.peaks.values()):
                        if peak not in old_peaks:
                            peak.replicate.chromosomes[chromosome.name].remove_peak(peak)
                # Attempt to find new peaks groups. For each peak in the
                # main replicate, search for matches in the other replicates
                for peak in peaks_by_value:
                    matches = PeakGroup()
                    matches.add_peak(main.id, peak)
                    search_for_matches(matches)
                    # Were enough replicates matched?
                    if matches.num_replicates >= num_required:
                        for peak in list(matches.peaks.values()):
                            peak.replicate.chromosomes[chromosome.name].remove_peak(peak)
                        peak_groups.append(matches)
    # Zero or less = no stepping
    if step <= 0:
        do_match(replicates, distance)
    else:
        for d in range(0, distance, step):
            do_match(replicates, d)
    for group in peak_groups:
        freq.add(group.num_replicates)
    # Collect together the remaining unmatched_peaks
    for replicate in replicates:
        for chromosome in replicate.chromosomes.values():
            for peak in chromosome.peaks:
                freq.add(1)
                unmatched_peaks.append(peak)
    # Average the unmatched_peaks count in the graph by # replicates
    med = median([peak.value for group in peak_groups for peak in group.peaks.values()])
    for replicate in replicates:
        replicate.median = median([peak.value for group in peak_groups for peak in group.peaks.values() if peak.replicate == replicate])
        # NOTE(review): statistics_table_output only exists when
        # output_files == "all"; for other modes this line raises NameError —
        # confirm callers never reach here otherwise.
        statistics_table_output.writerow((replicate.id, replicate.median))
    for group in peak_groups:
        # Output matched_peaks (matched pairs).
        matched_peaks_output.writerow(gff_row(cname=group.chrom,
                                              start=group.midpoint,
                                              end=group.midpoint + 1,
                                              score=group.normalized_value(med),
                                              source='repmatch',
                                              stype='.',
                                              strand='.',
                                              phase='.',
                                              attrs=[('median_distance', group.median_distance),
                                                     ('value_sum', group.value_sum),
                                                     ('replicates', group.num_replicates)]))
        if output_detail_file:
            matched_peaks = (group.chrom,
                             group.midpoint,
                             group.midpoint + 1,
                             group.normalized_value(med),
                             group.num_replicates,
                             group.median_distance,
                             group.value_sum)
            for peak in group.peaks.values():
                matched_peaks += (peak.chrom, peak.midpoint, peak.midpoint + 1, peak.value, peak.distance, peak.replicate.id)
            detail_output.writerow(matched_peaks)
    if output_unmatched_peaks_file:
        for unmatched_peak in unmatched_peaks:
            unmatched_peaks_output.writerow((unmatched_peak.chrom,
                                             unmatched_peak.midpoint,
                                             unmatched_peak.midpoint + 1,
                                             unmatched_peak.value,
                                             unmatched_peak.distance,
                                             unmatched_peak.replicate.id))
    if output_statistics_histogram_file:
        tmp_statistics_histogram_path = get_temporary_plot_path()
        frequency_histogram([freq], tmp_statistics_histogram_path)
        shutil.move(tmp_statistics_histogram_path, output_statistics_histogram)
    return {'distribution': freq}
|
mit
|
calebfoss/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py
|
6
|
5044
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
    """Convert any ndarray values in dict ``a`` to plain lists for comparison."""
    out = {}
    for key, val in a.items():
        if isinstance(val, np.ndarray):
            val = val.tolist()
        out[key] = val
    return out
class _FeedingFunctionsTestCase(test.TestCase):
  """Tests for feeding functions."""
  def testArrayFeedFnBatchOne(self):
    # Batch size 1: each call yields the next single row, wrapping at 16.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 1)
    # cycle around a couple times
    for x in range(0, 100):
      i = x % 16
      expected = {
          "index_placeholder": [i],
          "value_placeholder": [[2 * i, 2 * i + 1]]
      }
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchFive(self):
    # Batch size 5 over 16 rows: after 51 calls the next batch straddles
    # the wrap-around point.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 5)
    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {
        "index_placeholder": [15, 0, 1, 2, 3],
        "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testArrayFeedFnBatchOneHundred(self):
    # Batch larger than the array: rows repeat until 100 are collected.
    array = np.arange(32).reshape([16, 2])
    placeholders = ["index_placeholder", "value_placeholder"]
    aff = ff._ArrayFeedFn(placeholders, array, 100)
    expected = {
        "index_placeholder":
            list(range(0, 16)) * 6 + list(range(0, 4)),
        "value_placeholder":
            np.arange(32).reshape([16, 2]).tolist() * 6 +
            [[0, 1], [2, 3], [4, 5], [6, 7]]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchOne(self):
    # DataFrame variant of batch-one; index is offset by 96.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 1)
    # cycle around a couple times
    for x in range(0, 100):
      i = x % 32
      expected = {
          "index_placeholder": [i + 96],
          "a_placeholder": [32 + i],
          "b_placeholder": [64 + i]
      }
      actual = aff()
      self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchFive(self):
    # DataFrame variant of the wrap-around batch test.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 5)
    # cycle around a couple times
    for _ in range(0, 101, 2):
      aff()
    expected = {
        "index_placeholder": [127, 96, 97, 98, 99],
        "a_placeholder": [63, 32, 33, 34, 35],
        "b_placeholder": [95, 64, 65, 66, 67]
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
  def testPandasFeedFnBatchOneHundred(self):
    # DataFrame variant of the oversized-batch test.
    if not HAS_PANDAS:
      return
    array1 = np.arange(32, 64)
    array2 = np.arange(64, 96)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
    placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
    aff = ff._PandasFeedFn(placeholders, df, 100)
    expected = {
        "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
        "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
        "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
    }
    actual = aff()
    self.assertEqual(expected, vals_to_list(actual))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
evanbiederstedt/RRBSfun
|
trees/chrom_scripts/cll_chr08.py
|
1
|
8246
|
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # print all rows
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# Collect the binary-position RRBS files for the cw154 and trito-pool samples.
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    df = df.drop("Unnamed: 0", axis=1)
    # Keep only rows on chromosome 8: the position strings start "chr8_".
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr8_"]
    df = df.drop("chromosome", axis=1)
    df_list.append(df)
print(len(df_list))
# Align all samples on position (outer join by index) into one wide matrix.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
# No-op expression; presumably a leftover interactive check of column count.
len(total_matrix.columns)
total_matrix.columns = ['RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
# Missing methylation calls become '?', present calls become 0/1 integers.
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse each sample's column into a single character string (per column).
total_matrix = total_matrix.astype(str).apply(''.join)
# PHYLIP-style lines: "<sample name> <character string>".
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("cll_chr08.phy", header=None, index=None)
print(tott.shape)
|
mit
|
navigator8972/vae_hwmotion
|
baxter_writer.py
|
2
|
13089
|
"""
A simulated manipulator based upon Baxter robot
to write given letter trajectory
"""
import os
import sys
import copy
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
from baxter_pykdl_revised import baxter_dynamics
import pylqr.pylqr_trajctrl as plqrtc
import pyrbf_funcapprox as fa
import utils
class BaxterWriter():
def __init__(self):
self.prepare_baxter_robot_manipulator(manip_idx=1)
self.prepare_manipulator_function_approximators()
#the center of block to write the letter... in the world reference frame
self.block_center = np.array([0.844, -0.357, 0.257])
self.scale = 0.09 / (1.5 + 1.5) #boundary of block / dim of trajectories
#this is for the right arm
self.seed_pos = [0.00230, -0.77274, 0.95529, 1.53091, -0.91924, 0.55914, -0.09664]
self.seed_pose = self.robot_dynamics.forward_position_kinematics(self.seed_pos)
return
def prepare_baxter_robot_manipulator(self, manip_idx=1):
path_prefix = os.path.dirname(os.path.abspath(__file__))
#urdf
self.baxter_urdf_path = os.path.join(path_prefix, 'urdf/baxter.urdf')
#default is the right one: 1
if manip_idx == 0:
kin_name = 'left'
else:
kin_name = 'right'
# use revised baxter_pykdl to create inverse kinemtics model
self.robot_dynamics = baxter_dynamics(kin_name, self.baxter_urdf_path)
#print structure
self.robot_dynamics.print_robot_description()
return
def prepare_manipulator_function_approximators(self):
self.func_approxs = [fa.PyRBF_FunctionApproximator(rbf_type='sigmoid', K=20, normalize=True) for dof_idx in range(self.robot_dynamics._num_jnts)]
self.eval_z = np.linspace(0.0, 1.0, 100)
return
def generate_spatial_trajectory(self, char_traj):
"""
Member function to generate spatial trajectory from 2D char trajectory through translation/rotation/scaling
"""
#char_traj is supposed to be a 2D array
spatial_traj = np.zeros((len(char_traj), 3))
spatial_traj[:, 0:2] = char_traj * self.scale
spatial_traj = spatial_traj + self.block_center
return spatial_traj
def derive_jnt_traj_from_fa_parms(self, fa_parms):
jnt_traj = np.array([fa.evaluate(self.eval_z, fa_parm) for fa_parm, fa in zip(fa_parms, self.func_approxs)]).T
return jnt_traj
def derive_cartesian_trajectory_from_fa_parms(self, fa_parms):
jnt_traj = self.derive_jnt_traj_from_fa_parms(fa_parms)
spatial_traj = self.derive_cartesian_trajectory(jnt_traj)
return spatial_traj
def derive_fa_parms_from_jnt_traj(self, q_array):
z = np.linspace(0.0, 1.0, len(q_array))
fa_parms = []
for q_traj, fa in zip(q_array.T, self.func_approxs):
fa_parms.append(fa.fit(z, q_traj, False))
return fa_parms
def derive_ik_trajectory(self, spatial_traj):
# get orientation from the seed pose
q_seed = self.seed_pos
q_array = []
for idx, pnt in enumerate(spatial_traj):
#solve IK
q = self.robot_dynamics.inverse_kinematics(pnt, orientation=self.seed_pose[3:7], seed=q_seed)
if q is None:
print 'Failed to solve IK at step {0} for desired position {1}.'.format(idx, pnt)
else:
q_seed = q
q_array.append(q)
return np.array(q_array)
def derive_cartesian_trajectory(self, q_array):
#derive cartesian trajectory from q_array
spatial_traj = []
for idx, q in enumerate(q_array):
cart_pose = self.robot_dynamics.forward_position_kinematics(q)
spatial_traj.append(cart_pose)
return spatial_traj
def derive_ilqr_trajectory(self, spatial_traj):
    """Synthesize a joint trajectory tracking `spatial_traj` via iLQR.

    The cost trades Cartesian tracking error (the third — z — component is
    weighted 5x more than x/y) against control effort mapped through the
    arm's inertia matrix.  The point-wise IK solution is finite-differenced
    to provide the initial control guess.
    """
    dt = .01
    #derive joint trajectory from ilqr
    lqr_traj_ctrl = plqrtc.PyLQR_TrajCtrl(R=.01, dt=dt)
    #cost function
    def traj_cost(x, u, t, aux):
        #x is the given joint position, evaluate forward kinematics
        # (the first 7 entries of the state are the joint angles)
        cart_pose = self.robot_dynamics.forward_position_kinematics(x[0:7])
        track_err = np.linalg.norm((cart_pose[0:3] - spatial_traj[t])*np.array([10., 10., 50]))**2
        #control effort from inverse dynamics
        jnt_mass = self.robot_dynamics.inertia(x[:7])
        # jnt_coriolis = self.robot_dynamics.coriolis(x[:7], x[7:])
        # jnt_gravity = self.robot_dynamics.gravity(x[:7])
        # tau = jnt_mass.dot(u) + jnt_coriolis + jnt_gravity
        # tau = jnt_coriolis + jnt_gravity
        tau = jnt_mass.dot(u)
        #control effort from control input
        # tau = u
        ctrl_effort = np.linalg.norm(tau) ** 2
        return track_err + lqr_traj_ctrl.R_ * ctrl_effort
    lqr_traj_ctrl.build_ilqr_general_solver(cost_func=traj_cost, n_dims=7, T=100)
    #prepare initial guess of trajectory, from the IK solution
    q_ik = self.derive_ik_trajectory(spatial_traj)
    x0 = q_ik[0] #init velocity is zero, this is handled in the ilqr_traj_ctrl
    # Finite-difference the IK path to get velocity/acceleration guesses.
    q_dot_ik = np.diff(q_ik, axis=0) / dt
    q_dot_ik = np.vstack([np.zeros(7), q_dot_ik]) #initial velocity is zero
    q_ddot_ik = np.diff(q_dot_ik, axis=0) / dt
    u_array_init = q_ddot_ik
    #test this init
    #note there will be some difference as the trajectory ilqr force the initial velocity as zero
    #while the finite difference is not the case
    # x_array = lqr_traj_ctrl.ilqr_.forward_propagation(np.concatenate([x0, np.zeros(7)]), u_array_init)
    # print q_ik - np.array(x_array)[:, 0:7]
    syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0, u_array_init, n_itrs=25)
    # Keep only the joint-position columns of the synthesized state.
    return syn_traj[:, 0:7]
def build_ik_joint_traj_for_chars(data):
    """Convert 2D character strokes into IK joint trajectories.

    `data` maps a character to a list of flattened stroke arrays; the
    result maps the same keys to flattened 7-DOF joint trajectories.
    """
    writer = BaxterWriter()
    res_data = defaultdict(list)
    for char, samples in data.items():
        print('Processing character {0}...'.format(char))
        for sample in samples:
            char_traj = np.reshape(sample[:-1], (2, -1)).T
            #<hyin/May-23rd-2016> also remember to rotate the orientation
            char_traj = np.array([-char_traj[:, 1], -char_traj[:, 0]]).T
            spatial_traj = writer.generate_spatial_trajectory(char_traj)
            q_array = writer.derive_ik_trajectory(spatial_traj)
            # Transposed + flattened to match the Cartesian data layout.
            res_data[char].append(np.array(q_array).T.flatten())
    return res_data
def build_ilqr_joint_traj_for_chars(data):
    """Convert 2D character strokes into iLQR-optimized joint trajectories.

    Same layout as build_ik_joint_traj_for_chars, but the joint path is
    synthesized with iLQR instead of raw point-wise IK.
    """
    writer = BaxterWriter()
    res_data = defaultdict(list)
    for char, samples in data.items():
        print('Processing character {0}...'.format(char))
        for sample in samples:
            char_traj = np.reshape(sample[:-1], (2, -1)).T
            # The x axis points to the frontal side of Baxter, so rotate
            # the character into the writing plane.
            char_traj = np.array([-char_traj[:, 1], -char_traj[:, 0]]).T
            spatial_traj = writer.generate_spatial_trajectory(char_traj)
            q_array = writer.derive_ilqr_trajectory(spatial_traj)
            # Transposed + flattened to match the Cartesian data layout.
            res_data[char].append(np.array(q_array).T.flatten())
    return res_data
def build_fa_for_joint_trajs(data):
    """Fit function approximators to every flattened joint trajectory."""
    writer = BaxterWriter()
    res_data = defaultdict(list)
    for char in data:
        print('Processing character {0}...'.format(char))
        for flat_traj in data[char]:
            # Stored trajectories are transposed and flattened (7, T).
            jnt_traj = np.reshape(flat_traj, (7, -1)).T
            fa_parms = writer.derive_fa_parms_from_jnt_traj(jnt_traj)
            res_data[char].append(np.array(fa_parms).flatten())
    return res_data
def check_joint_data(cart_data, jnt_data, n_chars=5, n_samples=1):
    """Visually compare Cartesian character data with the FK reconstruction
    of the corresponding joint trajectories.

    Randomly picks `n_chars` characters and `n_samples` samples each,
    plotting the original strokes (black stars) against the aligned
    reconstruction (red).
    """
    baxter_writer = BaxterWriter()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hold(True)
    plt.ion()
    check_chars = [np.random.choice(cart_data.keys()) for i in range(n_chars)]
    for c in check_chars:
        check_indices = [np.random.choice(range(len(cart_data[c]))) for i in range(n_samples)]
        #see how it's going for all the samples
        for idx in check_indices:
            tmp_char_traj = np.reshape(cart_data[c][idx][:-1], (2, -1)).T
            tmp_spatial_traj = baxter_writer.generate_spatial_trajectory(tmp_char_traj)
            ax.plot(tmp_spatial_traj[:, 0], -tmp_spatial_traj[:, 1], 'k*', linewidth=3.5)
            #reconstruction from joint trajectories...
            tmp_jnt_traj = np.reshape(jnt_data[c][idx], (7, -1)).T
            tmp_cart_array = baxter_writer.derive_cartesian_trajectory(tmp_jnt_traj)
            recons_char_traj = np.array([cart_pose[0:2] for cart_pose in tmp_cart_array])
            #alignment: rotate into the plot frame and shift so both
            #trajectories share the same starting point
            recons_char_traj = np.array([-recons_char_traj[:, 1], recons_char_traj[:, 0]]).T
            recons_char_traj = recons_char_traj - recons_char_traj[0, 0:2] + np.array([tmp_spatial_traj[0, 0], -tmp_spatial_traj[0, 1]])
            # NOTE(review): index 3 of the FK pose is treated as "Z" here
            # while main() uses index 2 — confirm which component is meant.
            z_array = [cart_pose[3] for cart_pose in tmp_cart_array]
            print 'Z - mean and std:', np.mean(z_array), np.std(z_array)
            #show the reconstructed trajectory
            ax.plot(recons_char_traj[:, 0], recons_char_traj[:, 1], 'r', linewidth=3.5)
    plt.draw()
    return
def check_fa_parms_data(jnt_data, fa_data, n_chars=5, n_samples=1):
    """Visually compare joint-trajectory data with the reconstruction from
    function-approximator parameters.

    Randomly picks `n_chars` characters and `n_samples` samples each,
    plotting the FK of the stored joint data (black stars) against the
    FA-based reconstruction (red).
    """
    baxter_writer = BaxterWriter()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hold(True)
    plt.ion()
    check_chars = [np.random.choice(jnt_data.keys()) for i in range(n_chars)]
    for c in check_chars:
        check_indices = [np.random.choice(range(len(jnt_data[c]))) for i in range(n_samples)]
        #see how it's going for all the samples
        for idx in check_indices:
            tmp_jnt_traj = np.reshape(jnt_data[c][idx], (7, -1)).T
            tmp_spatial_traj = np.array(baxter_writer.derive_cartesian_trajectory(tmp_jnt_traj))
            ax.plot(tmp_spatial_traj[:, 0], tmp_spatial_traj[:, 1], 'k*', linewidth=3.5)
            #reconstruction from joint trajectories...
            tmp_fa_parms = np.reshape(fa_data[c][idx], (7, -1))
            tmp_cart_array = baxter_writer.derive_cartesian_trajectory_from_fa_parms(tmp_fa_parms)
            recons_char_traj = np.array([cart_pose[0:2] for cart_pose in tmp_cart_array])
            #alignment: shift so both trajectories share a starting point
            # recons_char_traj = np.array([-recons_char_traj[:, 1], recons_char_traj[:, 0]]).T
            recons_char_traj = recons_char_traj - recons_char_traj[0, 0:2] + tmp_spatial_traj[0, 0:2]
            # NOTE(review): index 3 of the FK pose is treated as "Z" here
            # while main() uses index 2 — confirm which component is meant.
            z_array = [cart_pose[3] for cart_pose in tmp_cart_array]
            print 'Z - mean and std:', np.mean(z_array), np.std(z_array)
            #show the reconstructed trajectory
            ax.plot(recons_char_traj[:, 0], recons_char_traj[:, 1], 'r', linewidth=3.5)
    plt.draw()
    return
def main():
    """Demo: track an elliptic path with point-wise IK and with iLQR, and
    plot both reconstructions against the desired path."""
    baxter_writer = BaxterWriter()
    #prepare a circular path
    n_pnts = 100
    t = np.linspace(0, 2*np.pi, n_pnts)
    a = 1.35
    b = 1.0
    # Ellipse with semi-axes a and b, sampled at n_pnts points.
    char_traj = np.array([a*np.cos(t), b*np.sin(t)]).T
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hold(True)
    #derive joint trajectory
    spatial_traj = baxter_writer.generate_spatial_trajectory(char_traj)
    ax.plot(spatial_traj[:, 0], spatial_traj[:, 1], 'k*', linewidth=3.5)
    print 'Solving a series of IK problems...'
    q_array = baxter_writer.derive_ik_trajectory(spatial_traj)
    print 'Finished solving IK problems.'
    #restore to cartesian motion
    print 'Reconstructing Cartesian trajectory...'
    cart_array = baxter_writer.derive_cartesian_trajectory(q_array)
    print 'Finished reconstructing Cartesian trajectory.'
    #extract 2D trajectory; index 2 of the FK pose is the z coordinate
    recons_char_traj = np.array([cart_pose[0:2] for cart_pose in cart_array])
    z_array = [cart_pose[2] for cart_pose in cart_array]
    print 'IK - Z - mean and std:', np.mean(z_array), np.std(z_array)
    #show the reconstructed trajectory (IK result in red)
    ax.plot(recons_char_traj[:, 0], recons_char_traj[:, 1], 'r', linewidth=3.5)
    raw_input('ENTER to continue derive iLQR optimal control')
    print 'Solving iLQR optimal trajectories'
    q_array_ilqr = baxter_writer.derive_ilqr_trajectory(spatial_traj)
    print 'Finished solving iLQR optimal control'
    #restore to cartesian motion
    print 'Reconstructing Cartesian trajectory...'
    cart_array_ilqr = baxter_writer.derive_cartesian_trajectory(q_array_ilqr)
    print 'Finished reconstructing Cartesian trajectory.'
    #extract 2D trajectory
    recons_char_traj_ilqr = np.array([cart_pose[0:2] for cart_pose in cart_array_ilqr])
    z_array_ilqr = [cart_pose[2] for cart_pose in cart_array_ilqr]
    print 'iLQR - Z - mean and std:', np.mean(z_array_ilqr), np.std(z_array_ilqr)
    # iLQR result in green for comparison against the IK result.
    ax.plot(recons_char_traj_ilqr[:, 0], recons_char_traj_ilqr[:, 1], 'g', linewidth=3.5)
    plt.show()
    return
# Script entry point.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
mrcslws/htmresearch
|
htmresearch/support/nlp_classification_plotting.py
|
9
|
12290
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file contains plotting tools for NLP experiment results.
"""
import math
import numpy
import os
import pandas as pd
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import (
Data,
ErrorY,
Figure,
Font,
Heatmap,
Layout,
Margin,
Scatter,
XAxis,
YAxis)
class PlotNLP():
"""Class to plot evaluation metrics for NLP experiments."""
def __init__(self,
apiKey=None,
username=None,
experimentName="experiment"):
# Instantiate API credentials.
try:
self.apiKey = apiKey if apiKey else os.environ["PLOTLY_API_KEY"]
except:
print ("Missing PLOTLY_API_KEY environment variable. If you have a "
"key, set it with $ export PLOTLY_API_KEY=api_key\n"
"You can retrieve a key by registering for the Plotly API at "
"http://www.plot.ly")
raise OSError("Missing API key.")
try:
self.username = username if username else os.environ["PLOTLY_USERNAME"]
except:
print ("Missing PLOTLY_USERNAME environment variable. If you have a "
"username, set it with $ export PLOTLY_USERNAME=username\n"
"You can sign up for the Plotly API at http://www.plot.ly")
raise OSError("Missing username.")
py.sign_in(self.username, self.apiKey)
self.experimentName = experimentName
@staticmethod
def getDataFrame(dataPath):
"""Get pandas dataframe of the results CSV."""
try:
return pd.read_csv(dataPath)
except IOError("Invalid data path to file"):
return
@staticmethod
def interpretConfusionMatrixData(dataFrame, normalize):
"""Parse pandas dataframe into confusion matrix format."""
labels = dataFrame.columns.values.tolist()[:-1]
values = map(list, dataFrame.values)
for i, row in enumerate(values):
values[i] = [v/row[-1] for v in row[:-1]] if normalize else row[:-1]
cm = {"x":labels,
"y":labels[:-1],
"z":values[:-1]
}
return cm
def plotCategoryConfusionMatrix(self, data, normalize=True):
"""
Plots the confusion matrix of the input classifications dataframe.
@param data (pandas DF) The confusion matrix.
@param normalize (bool) True will normalize the confusion matrix
values for the total number of actual classifications per label. Thus
the cm values are 0.0 to 1.0.
"""
xyzData = self.interpretConfusionMatrixData(data, normalize)
data = Data([Heatmap(z=xyzData["z"],
x=xyzData["x"],
y=xyzData["y"],
colorscale='YIGnBu')])
layout = Layout(
title='Confusion matrix for ' + self.experimentName,
xaxis=XAxis(
title='Predicted label',
side='top',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='True label',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
),
autorange='reversed'
),
barmode='overlay',
autosize=True,
width=1000,
height=1000,
margin=Margin(
l=200,
r=80,
b=80,
t=450
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Confusion matrix URL: ", plot_url
def plotConfusionMatrix(self, dataFrame, name):
"""
@param data (pandas DF) The confusion matrix.
@param name (str)
"""
labels = dataFrame.columns.values.tolist()
values = map(list, dataFrame.values)
data = Data([Heatmap(z=values,
x=labels,
y=labels,
colorscale='YIGnBu')])
layout = Layout(
title='Confusion Matrix for ' + name,
xaxis=XAxis(
title='',
side='top',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
),
autorange='reversed'
),
barmode='overlay',
autosize=True,
width=1000,
height=1000,
margin=Margin(
l=80,
r=80,
b=120,
t=140
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Confusion matrix URL: ", plot_url
def plotRegression(self, xData, yData, title, axisTitles=("x","y"), zData=None, line=None):
"""
Plot a regression of the input data vectors; these must be the same length
lists, where the items are scalar values.
Note: line defaults to None b/c there are better line fitting tools online
(https://plot.ly/how-to-create-a-line-of-best-fits/)
"""
# TODO: use zData for 3D plots
assert(len(xData) == len(yData))
if zData: assert(len(xData) == len(zData))
if line:
assert line in ("linear", "spline")
trace = Scatter(
x=xData,
y=yData,
mode='lines+markers',
marker=dict(
color='rgb(128, 0, 128)',
size=8,
),
line=dict(
shape=line,
color='rgb(128, 0, 128)',
width=1
)
)
else:
trace = Scatter(
x=xData,
y=yData,
mode='markers',
marker=dict(
color='rgb(128, 0, 128)',
size=8,
)
)
layout = Layout(
title=title,
xaxis=dict(
title=axisTitles[0],
),
yaxis=dict(
title=axisTitles[1],
)
)
data = [trace]
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Regression plot URL: ", plot_url
def plotCategoryAccuracies(self, trialAccuracies, trainSizes):
"""
Shows the accuracy for the categories at a certain training size
@param trialAccuracies (dict) A dictionary of dictionaries. For each
train size, there is a dictionary that maps a category to a list of
accuracies for that category.
@param trainSizes (list) Size of training set for each trial.
"""
sizes = sorted(set(trainSizes))
size_sqrt = math.sqrt(len(sizes))
subplotDimension = int(math.ceil(size_sqrt))
rows = subplotDimension
cols = subplotDimension
if len(sizes) <= subplotDimension * (subplotDimension - 1):
rows -= 1
fig = tls.make_subplots(rows=rows, cols=cols,
shared_xaxes=True, shared_yaxes=True, print_grid=False)
num_categories = 0
for i, s in enumerate(sizes):
# 1-indexed
col = i % cols + 1
row = (i - col + 1) / cols + 1
classificationAccuracies = trialAccuracies[s]
num_categories = max(num_categories,len(classificationAccuracies.keys()))
x = []
y = []
std = []
for label, acc in classificationAccuracies.iteritems():
x.append(label)
y.append(numpy.mean(acc))
std.append(numpy.std(acc))
trace = Scatter(
x=x,
y=y,
name=s,
mode='markers',
error_y=ErrorY(
type='data',
symmetric=False,
array=std,
arrayminus=std,
visible=True
)
)
fig.append_trace(trace, row, col)
fig["layout"]["title"] = "Accuracies for category by training size"
half_way_cols = int(math.ceil(cols / 2.0))
half_way_rows = int(math.ceil(rows / 2.0))
fig["layout"]["xaxis{}".format(half_way_cols)]["title"] = "Category Label"
fig["layout"]["yaxis{}".format(half_way_rows)]["title"] = "Accuracy"
for i in xrange(1, cols + 1):
fig["layout"]["xaxis{}".format(i)]["tickangle"] = -45
fig["layout"]["xaxis{}".format(i)]["nticks"] = num_categories * 2
if i <= rows:
fig["layout"]["yaxis{}".format(i)]["range"] = [-.1, 1.1]
fig["layout"]["margin"] = {"b" : 120}
plot_url = py.plot(fig)
print "Category Accuracies URL: ", plot_url
def plotCumulativeAccuracies(self, classificationAccuracies, trainSizes):
"""
Creates scatter plots that show the accuracy for each category at a
certain training size
@param classificationAccuracies (dict) Maps a category label to a list of
lists of accuracies. Each item in the key is a list of accuracies for
a specific training size, ordered by increasing training size.
@param trainSizes (list) Sizes of training sets for trials.
"""
# Convert list of list of accuracies to list of means
classificationSummaries = [(label, map(numpy.mean, acc))
for label, acc in classificationAccuracies.iteritems()]
data = []
sizes = sorted(set(trainSizes))
for label, summary in classificationSummaries:
data.append(Scatter(x=sizes, y=summary, name=label))
data = Data(data)
layout = Layout(
title='Cumulative Accuracies for ' + self.experimentName,
xaxis=XAxis(
title='Training size',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='Accuracy',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig)
print "Cumulative Accuracies URL: ", plot_url
def plotBucketsMetrics(
self, metricsDict, comboMethod, numIterations, modelName):
"""
@param metricsDicts (dict) Arrays for the min, mean, and max of each
metric.
@param comboMethod (str) Concatenation method from the experiment.
@param numIterations (str) Number of inference steps run.
@param modelName (str) Name of tested model.
"""
xData = range(1, numIterations+1)
for metricName, results in metricsDict.iteritems():
if metricName == "totalRanked": continue
minTrace = Scatter(
x = xData,
y = results[0],
mode = "lines+markers",
name = "min"
)
meanTrace = Scatter(
x = xData,
y = results[1],
mode = "lines+markers",
name = "mean"
)
maxTrace = Scatter(
x = xData,
y = results[2],
mode = "lines+markers",
name = "max"
)
data = [minTrace, meanTrace, maxTrace]
layout = Layout(
title="Buckets Experiment for {} ('{}' concatenation) ".format(
modelName, comboMethod),
xaxis=XAxis(
title="Number of samples queried",
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
),
dtick=1
),
yaxis=YAxis(
title=metricName,
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = Figure(data=data, layout=layout)
plotUrl = py.plot(fig)
print "Plot URL for {}: {}".format(metricName, plotUrl)
|
agpl-3.0
|
grundgruen/zipline
|
tests/pipeline/test_numerical_expression.py
|
1
|
16240
|
from operator import (
add,
and_,
ge,
gt,
le,
lt,
methodcaller,
mul,
ne,
or_,
)
from unittest import TestCase
import numpy
from numpy import (
arange,
eye,
float64,
full,
isnan,
zeros,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from zipline.pipeline import Factor
from zipline.pipeline.expression import (
NumericalExpression,
NUMEXPR_MATH_FUNCS,
)
from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype
from zipline.utils.test_utils import check_arrays
class F(Factor):
    # Minimal float-valued Factor stub used as a test input (no inputs,
    # zero-length window).
    dtype = float64_dtype
    inputs = ()
    window_length = 0
class G(Factor):
    # Second float-valued Factor stub, distinct from F so expressions can
    # reference two different terms.
    dtype = float64_dtype
    inputs = ()
    window_length = 0
class H(Factor):
    # Third float-valued Factor stub for three-term expressions.
    dtype = float64_dtype
    inputs = ()
    window_length = 0
class DateFactor(Factor):
    # datetime64-valued Factor stub, used to verify that arithmetic on
    # non-float factors is rejected.
    dtype = datetime64ns_dtype
    inputs = ()
    window_length = 0
class NumericalExpressionTestCase(TestCase):
    """Tests for zipline's NumericalExpression: validation, arithmetic,
    comparisons, math functions, and boolean filter composition."""

    def setUp(self):
        # Fixed date/asset grid and constant-valued fake inputs so every
        # expression evaluates to a predictable constant array.
        self.dates = date_range('2014-01-01', periods=5, freq='D')
        self.assets = Int64Index(range(5))
        self.f = F()
        self.g = G()
        self.h = H()
        self.d = DateFactor()
        self.fake_raw_data = {
            self.f: full((5, 5), 3),
            self.g: full((5, 5), 2),
            self.h: full((5, 5), 1),
            self.d: full((5, 5), 0, dtype='datetime64[ns]'),
        }
        self.mask = DataFrame(True, index=self.dates, columns=self.assets)

    def check_output(self, expr, expected):
        """Compute `expr` against the fake raw data and compare to `expected`."""
        result = expr._compute(
            [self.fake_raw_data[input_] for input_ in expr.inputs],
            self.mask.index,
            self.mask.columns,
            self.mask.values,
        )
        check_arrays(result, expected)

    def check_constant_output(self, expr, expected):
        """Assert `expr` evaluates to a full 5x5 array of `expected`."""
        self.assertFalse(isnan(expected))
        return self.check_output(expr, full((5, 5), expected))

    def test_validate_good(self):
        """Well-formed formulas with matching input counts are accepted."""
        f = self.f
        g = self.g

        NumericalExpression("x_0", (f,), dtype=float64_dtype)
        NumericalExpression("x_0 ", (f,), dtype=float64_dtype)
        NumericalExpression("x_0 + x_0", (f,), dtype=float64_dtype)
        NumericalExpression("x_0 + 2", (f,), dtype=float64_dtype)
        NumericalExpression("2 * x_0", (f,), dtype=float64_dtype)
        NumericalExpression("x_0 + x_1", (f, g), dtype=float64_dtype)
        NumericalExpression("x_0 + x_1 + x_0", (f, g), dtype=float64_dtype)
        NumericalExpression("x_0 + 1 + x_1", (f, g), dtype=float64_dtype)

    def test_validate_bad(self):
        """Malformed formulas and invalid operand types are rejected."""
        f, g, h = self.f, self.g, self.h

        # Too few inputs.
        with self.assertRaises(ValueError):
            NumericalExpression("x_0", (), dtype=float64_dtype)
        with self.assertRaises(ValueError):
            NumericalExpression("x_0 + x_1", (f,), dtype=float64_dtype)

        # Too many inputs.
        with self.assertRaises(ValueError):
            NumericalExpression("x_0", (f, g), dtype=float64_dtype)
        with self.assertRaises(ValueError):
            NumericalExpression("x_0 + x_1", (f, g, h), dtype=float64_dtype)

        # Invalid variable name.
        with self.assertRaises(ValueError):
            NumericalExpression("x_0x_1", (f,), dtype=float64_dtype)
        with self.assertRaises(ValueError):
            NumericalExpression("x_0x_1", (f, g), dtype=float64_dtype)

        # Variable index must start at 0.
        with self.assertRaises(ValueError):
            NumericalExpression("x_1", (f,), dtype=float64_dtype)

        # Scalar operands must be numeric.
        with self.assertRaises(TypeError):
            "2" + f
        with self.assertRaises(TypeError):
            f + "2"
        with self.assertRaises(TypeError):
            f > "2"

        # Boolean binary operators must be between filters.
        with self.assertRaises(TypeError):
            f + (f > 2)
        with self.assertRaises(TypeError):
            (f > f) > f

    def test_combine_datetimes(self):
        """Arithmetic between two datetime factors raises a helpful error."""
        with self.assertRaises(TypeError) as e:
            self.d + self.d
        message = e.exception.args[0]
        expected = (
            "Don't know how to compute datetime64[ns] + datetime64[ns].\n"
            "Arithmetic operators are only supported on Factors of dtype "
            "'float64'."
        )
        self.assertEqual(message, expected)

        # Confirm that * shows up in the error instead of +.
        with self.assertRaises(TypeError) as e:
            self.d * self.d
        message = e.exception.args[0]
        expected = (
            "Don't know how to compute datetime64[ns] * datetime64[ns].\n"
            "Arithmetic operators are only supported on Factors of dtype "
            "'float64'."
        )
        self.assertEqual(message, expected)

    def test_combine_datetime_with_float(self):
        """Mixing datetime factors with float operands raises a helpful error."""
        # Test with both float-type factors and numeric values.
        for float_value in (self.f, float64(1.0), 1.0):
            for op, sym in ((add, '+'), (mul, '*')):
                # BUGFIX: the loop variable was previously unused
                # (op(self.f, self.d)), so the scalar cases were never
                # exercised.
                with self.assertRaises(TypeError) as e:
                    op(float_value, self.d)
                message = e.exception.args[0]
                expected = (
                    "Don't know how to compute float64 {sym} datetime64[ns].\n"
                    "Arithmetic operators are only supported on Factors of "
                    "dtype 'float64'."
                ).format(sym=sym)
                self.assertEqual(message, expected)

                with self.assertRaises(TypeError) as e:
                    op(self.d, float_value)
                message = e.exception.args[0]
                expected = (
                    "Don't know how to compute datetime64[ns] {sym} float64.\n"
                    "Arithmetic operators are only supported on Factors of "
                    "dtype 'float64'."
                ).format(sym=sym)
                self.assertEqual(message, expected)

    def test_negate_datetime(self):
        """Unary minus on a datetime factor raises a helpful error."""
        with self.assertRaises(TypeError) as e:
            -self.d
        message = e.exception.args[0]
        expected = (
            "Can't apply unary operator '-' to instance of "
            "'DateFactor' with dtype 'datetime64[ns]'.\n"
            "'-' is only supported for Factors of dtype 'float64'."
        )
        self.assertEqual(message, expected)

    def test_negate(self):
        """Unary minus composes correctly, including repeated negation."""
        f, g = self.f, self.g

        self.check_constant_output(-f, -3.0)
        self.check_constant_output(--f, 3.0)
        self.check_constant_output(---f, -3.0)

        self.check_constant_output(-(f + f), -6.0)
        self.check_constant_output(-f + -f, -6.0)
        self.check_constant_output(-(-f + -f), 6.0)

        self.check_constant_output(f + -g, 1.0)
        self.check_constant_output(f - -g, 5.0)

        self.check_constant_output(-(f + g) + (f + g), 0.0)
        self.check_constant_output((f + g) + -(f + g), 0.0)
        self.check_constant_output(-(f + g) + -(f + g), -10.0)

    def test_add(self):
        """Addition in every associativity/operand-order combination."""
        f, g = self.f, self.g

        self.check_constant_output(f + g, 5.0)

        self.check_constant_output((1 + f) + g, 6.0)
        self.check_constant_output(1 + (f + g), 6.0)
        self.check_constant_output((f + 1) + g, 6.0)
        self.check_constant_output(f + (1 + g), 6.0)
        self.check_constant_output((f + g) + 1, 6.0)
        self.check_constant_output(f + (g + 1), 6.0)

        self.check_constant_output((f + f) + f, 9.0)
        self.check_constant_output(f + (f + f), 9.0)

        self.check_constant_output((f + g) + f, 8.0)
        self.check_constant_output(f + (g + f), 8.0)

        self.check_constant_output((f + g) + (f + g), 10.0)
        self.check_constant_output((f + g) + (g + f), 10.0)
        self.check_constant_output((g + f) + (f + g), 10.0)
        self.check_constant_output((g + f) + (g + f), 10.0)

    def test_subtract(self):
        """Subtraction is non-commutative; verify every grouping."""
        f, g = self.f, self.g

        self.check_constant_output(f - g, 1.0)  # 3 - 2

        self.check_constant_output((1 - f) - g, -4.)   # (1 - 3) - 2
        self.check_constant_output(1 - (f - g), 0.0)   # 1 - (3 - 2)
        self.check_constant_output((f - 1) - g, 0.0)   # (3 - 1) - 2
        self.check_constant_output(f - (1 - g), 4.0)   # 3 - (1 - 2)
        self.check_constant_output((f - g) - 1, 0.0)   # (3 - 2) - 1
        self.check_constant_output(f - (g - 1), 2.0)   # 3 - (2 - 1)

        self.check_constant_output((f - f) - f, -3.)   # (3 - 3) - 3
        self.check_constant_output(f - (f - f), 3.0)   # 3 - (3 - 3)

        self.check_constant_output((f - g) - f, -2.)   # (3 - 2) - 3
        self.check_constant_output(f - (g - f), 4.0)   # 3 - (2 - 3)

        self.check_constant_output((f - g) - (f - g), 0.0)  # (3 - 2) - (3 - 2)
        self.check_constant_output((f - g) - (g - f), 2.0)  # (3 - 2) - (2 - 3)
        self.check_constant_output((g - f) - (f - g), -2.)  # (2 - 3) - (3 - 2)
        self.check_constant_output((g - f) - (g - f), 0.0)  # (2 - 3) - (2 - 3)

    def test_multiply(self):
        """Multiplication in every grouping, plus annihilation by zero."""
        f, g = self.f, self.g

        self.check_constant_output(f * g, 6.0)

        self.check_constant_output((2 * f) * g, 12.0)
        self.check_constant_output(2 * (f * g), 12.0)
        self.check_constant_output((f * 2) * g, 12.0)
        self.check_constant_output(f * (2 * g), 12.0)
        self.check_constant_output((f * g) * 2, 12.0)
        self.check_constant_output(f * (g * 2), 12.0)

        self.check_constant_output((f * f) * f, 27.0)
        self.check_constant_output(f * (f * f), 27.0)

        self.check_constant_output((f * g) * f, 18.0)
        self.check_constant_output(f * (g * f), 18.0)

        self.check_constant_output((f * g) * (f * g), 36.0)
        self.check_constant_output((f * g) * (g * f), 36.0)
        self.check_constant_output((g * f) * (f * g), 36.0)
        self.check_constant_output((g * f) * (g * f), 36.0)

        self.check_constant_output(f * f * f * 0 * f * f, 0.0)

    def test_divide(self):
        """Division in every grouping; expectations use true division."""
        f, g = self.f, self.g

        self.check_constant_output(f / g, 3.0 / 2.0)

        self.check_constant_output(
            (2 / f) / g,
            (2 / 3.0) / 2.0
        )
        self.check_constant_output(
            2 / (f / g),
            2 / (3.0 / 2.0),
        )
        self.check_constant_output(
            (f / 2) / g,
            (3.0 / 2) / 2.0,
        )
        self.check_constant_output(
            f / (2 / g),
            3.0 / (2 / 2.0),
        )
        self.check_constant_output(
            (f / g) / 2,
            (3.0 / 2.0) / 2,
        )
        self.check_constant_output(
            f / (g / 2),
            3.0 / (2.0 / 2),
        )
        self.check_constant_output(
            (f / f) / f,
            (3.0 / 3.0) / 3.0
        )
        self.check_constant_output(
            f / (f / f),
            3.0 / (3.0 / 3.0),
        )
        self.check_constant_output(
            (f / g) / f,
            (3.0 / 2.0) / 3.0,
        )
        self.check_constant_output(
            f / (g / f),
            3.0 / (2.0 / 3.0),
        )
        self.check_constant_output(
            (f / g) / (f / g),
            (3.0 / 2.0) / (3.0 / 2.0),
        )
        self.check_constant_output(
            (f / g) / (g / f),
            (3.0 / 2.0) / (2.0 / 3.0),
        )
        self.check_constant_output(
            (g / f) / (f / g),
            (2.0 / 3.0) / (3.0 / 2.0),
        )
        self.check_constant_output(
            (g / f) / (g / f),
            (2.0 / 3.0) / (2.0 / 3.0),
        )

    def test_pow(self):
        """Exponentiation, which is right-associative and non-commutative."""
        f, g = self.f, self.g

        self.check_constant_output(f ** g, 3.0 ** 2)
        self.check_constant_output(2 ** f, 2.0 ** 3)
        self.check_constant_output(f ** 2, 3.0 ** 2)

        self.check_constant_output((f + g) ** 2, (3.0 + 2.0) ** 2)
        self.check_constant_output(2 ** (f + g), 2 ** (3.0 + 2.0))

        self.check_constant_output(f ** (f ** g), 3.0 ** (3.0 ** 2.0))
        self.check_constant_output((f ** f) ** g, (3.0 ** 3.0) ** 2.0)

        self.check_constant_output((f ** g) ** (f ** g), 9.0 ** 9.0)
        self.check_constant_output((f ** g) ** (g ** f), 9.0 ** 8.0)
        self.check_constant_output((g ** f) ** (f ** g), 8.0 ** 9.0)
        self.check_constant_output((g ** f) ** (g ** f), 8.0 ** 8.0)

    def test_mod(self):
        """Modulus with scalars and sub-expressions on both sides."""
        f, g = self.f, self.g

        self.check_constant_output(f % g, 3.0 % 2.0)
        self.check_constant_output(f % 2.0, 3.0 % 2.0)
        self.check_constant_output(g % f, 2.0 % 3.0)

        self.check_constant_output((f + g) % 2, (3.0 + 2.0) % 2)
        self.check_constant_output(2 % (f + g), 2 % (3.0 + 2.0))

        self.check_constant_output(f % (f % g), 3.0 % (3.0 % 2.0))
        self.check_constant_output((f % f) % g, (3.0 % 3.0) % 2.0)

        self.check_constant_output((f + g) % (f * g), 5.0 % 6.0)

    def test_math_functions(self):
        """Each NumExpr math function matches its numpy counterpart."""
        f, g = self.f, self.g

        fake_raw_data = self.fake_raw_data
        alt_fake_raw_data = {
            self.f: full((5, 5), .5),
            self.g: full((5, 5), -.5),
        }

        for funcname in NUMEXPR_MATH_FUNCS:
            method = methodcaller(funcname)
            func = getattr(numpy, funcname)

            # These methods have domains in [0, 1], so we need alternate inputs
            # that are in the domain.
            if funcname in ('arcsin', 'arccos', 'arctanh'):
                self.fake_raw_data = alt_fake_raw_data
            else:
                self.fake_raw_data = fake_raw_data

            f_val = self.fake_raw_data[f][0, 0]
            g_val = self.fake_raw_data[g][0, 0]

            self.check_constant_output(method(f), func(f_val))
            self.check_constant_output(method(g), func(g_val))

            self.check_constant_output(method(f) + 1, func(f_val) + 1)
            self.check_constant_output(1 + method(f), 1 + func(f_val))

            self.check_constant_output(method(f + .25), func(f_val + .25))
            self.check_constant_output(method(.25 + f), func(.25 + f_val))

            self.check_constant_output(
                method(f) + method(g),
                func(f_val) + func(g_val),
            )
            self.check_constant_output(
                method(f + g),
                func(f_val + g_val),
            )

    def test_comparisons(self):
        """Comparison operators against factors, scalars, and expressions."""
        f, g, h = self.f, self.g, self.h
        # Non-constant data so comparisons produce mixed True/False output.
        self.fake_raw_data = {
            f: arange(25).reshape(5, 5),
            g: arange(25).reshape(5, 5) - eye(5),
            h: full((5, 5), 5),
        }
        f_data = self.fake_raw_data[f]
        g_data = self.fake_raw_data[g]

        cases = [
            # Sanity Check with hand-computed values.
            (f, g, eye(5), zeros((5, 5))),
            (f, 10, f_data, 10),
            (10, f, 10, f_data),
            (f, f, f_data, f_data),
            (f + 1, f, f_data + 1, f_data),
            (1 + f, f, 1 + f_data, f_data),
            (f, g, f_data, g_data),
            (f + 1, g, f_data + 1, g_data),
            (f, g + 1, f_data, g_data + 1),
            (f + 1, g + 1, f_data + 1, g_data + 1),
            ((f + g) / 2, f ** 2, (f_data + g_data) / 2, f_data ** 2),
        ]
        for op in (gt, ge, lt, le, ne):
            for expr_lhs, expr_rhs, expected_lhs, expected_rhs in cases:
                self.check_output(
                    op(expr_lhs, expr_rhs),
                    op(expected_lhs, expected_rhs),
                )

    def test_boolean_binops(self):
        """AND/OR composition of comparison-derived filters."""
        f, g, h = self.f, self.g, self.h
        self.fake_raw_data = {
            f: arange(25).reshape(5, 5),
            g: arange(25).reshape(5, 5) - eye(5),
            h: full((5, 5), 5),
        }

        # Should be True on the diagonal.
        eye_filter = f > g
        # Should be True in the first row only.
        first_row_filter = f < h

        eye_mask = eye(5, dtype=bool)
        first_row_mask = zeros((5, 5), dtype=bool)
        first_row_mask[0] = 1

        self.check_output(eye_filter, eye_mask)
        self.check_output(first_row_filter, first_row_mask)

        for op in (and_, or_):  # NumExpr doesn't support xor.
            self.check_output(
                op(eye_filter, first_row_filter),
                op(eye_mask, first_row_mask),
            )
|
apache-2.0
|
emd/random_data
|
random_data/spectra2d.py
|
1
|
14055
|
'''This module defines a class for estimating the 2-dimensional
autospectral density of a field. Temporal spectral estimates are
obtained through Welch's method of ensemble averaging overlapped,
windowed FFTs (i.e. nonparametric spectral estimation), while
spatial spectral estimates can be obtained through either
nonparametric (FFT-based) or parametric (Burg autoregression)
means.
'''
# Standard library imports
import string
import numpy as np
from matplotlib import mlab
# Intra-package imports
from .spectra.parametric import BurgAutoSpectralDensity
from .spectra.nonparametric import _plot_image
from .array import SpatialCrossCorrelation
# Defaults for the nonparametric (Fourier) spatial estimate: the taper
# applied to the correlation function before the spatial FFT.
default_fourier_params = {
    'window': np.hanning
}

# Defaults for the parametric (Burg autoregression) spatial estimate:
# AR order `p` and number of spatial-frequency grid points `Nxi`.
default_burg_params = {
    'p': 5,
    'Nxi': 100
}
class TwoDimensionalAutoSpectralDensity(object):
    '''A class for estimating the 2-dimensional autospectral density
    of a field given the complex-valued spatial correlation function,
    which e.g. can be computed from an array of (potentially nonuniform)
    measurements.

    Attributes:
    -----------
    Sxx - array_like, (`Nxi`, `Nf`)
        An array of the estimated autospectral density as a
        function of:
            - spatial frequency, xi (i.e. 1 / wavelength; `Nxi`), and
            - frequency (2nd index, `Nf`).
        `Sxx` is normalized such that integrating over all of `Sxx`
        yields the total power in the signal `x`.
        [Sxx] = [x^2] / ([self.Fs] * [self.Fs_spatial]), where
            `x` is the process who's autospectral density `self.Sxx`
            has been estimated

    xi - array_like, (`Nxi`,)
        The spatial-frequency grid. Note that xi = (1 / wavelength) such that
        the wavenumber k is related to xi via k = (2 * pi * xi).
        [xi] = [self.Fs_spatial]

    Fs - float
        The temporal sampling rate.
        [Fs] = [corr.Fs]

    Fs_spatial - float
        The "spatial sampling rate", as determined by the spacing between
        adjacent points in the correlation function `corr`, provided
        at input.
        [Fs_spatial] = 1 / [corr.separation]

    dxi - float
        The spacing of the spatial-frequency grid.
        [dxi] = [self.Fs_spatial]

    f - array_like, (`Nf`,)
        The frequency grid.
        [f] = [self.Fs]

    df - float
        The spacing of the frequency grid.
        [df] = [self.Fs]

    spatial_method - string
        The method used to estimate the spatial content of the
        autospectral density, `self.Gxx`.

    p - int
        The order of the Burg AR. Only present if `spatial_method == 'burg'`.

    window_spatial - :py:func, a function of an integer
        The tapering window applied to the spatial dimension of the
        correlation function `corr` prior to taking the spatial Fourier
        transform. Only present (and only applied) if
        `spatial_method == 'fourier'`.

    The additional attributes:

        {`Npts_overlap`, `Npts_per_ens`, `Npts_per_real`,
        `Nreal_per_ens`, `detrend`, `dt`, `t`, `window`}

    are described in the documentation for :py:class:`SpatialCrossCorrelation
    <random_data.array.SpatialCrossCorrelation>`.

    Methods:
    --------
    Type `help(TwoDimensionalAutoSpectralDensity)` in the IPython console
    for a listing.

    '''
    def __init__(
            self, corr, spatial_method='burg',
            burg_params=default_burg_params,
            fourier_params=default_fourier_params):
        '''Create an instance of `TwoDimensionalAutoSpectralDensity` class.

        Input parameters:
        -----------------
        corr - :py:class:`SpatialCrossCorrelation
                <random_data.array.SpatialCrossCorrelation>` instance
            A `SpatialCrossCorrelation` instance that characterizes
            the complex-valued, spatial correlation function of the
            process under study. Typically, the correlation function
            is derived from cross-correlating an array of sensors
            that are all measuring the same field.

        spatial_method - string
            The method to use when estimating the spatial spectral
            density. Valid methods are:

                - 'fourier': use FFT-based estimation, or
                - 'burg': use Burg AR estimation.

            Specification of other values will raise a ValueError.

        burg_params - dict
            A dictionary containing the parameters of relevance
            for Burg spectral estimation. Valid dictionary keys
            are: {'p', 'Nxi'} where

                - p: int, order of Burg AR spectral-density estimate,
                - Nxi: int, number of points in the spatial-frequency grid
                  (note that the spatial spectral estimate is two-sided).

            See documentation for :py:class:`BurgAutoSpectralDensity
            <random_data.spectra.parametric.BurgAutoSpectralDensity>`
            for more information on these parameters.

        fourier_params - dict
            A dictionary containing the parameters of relevance
            for Fourier spectral estimation. Valid dictionary keys
            are: {'window'} where

                - window: tapering function to be applied to spatial
                  dimension of correlation function prior to calculating
                  the FFT; should be a function of the window length,
                  such as `np.hanning`. If `None`, do not apply a window
                  to the spatial dimension of the correlation function.
                  Although the correlation function typically smoothly
                  tapers to zero on its own, the application of a
                  window can still suppress leakage, at the cost of
                  marginally decreased resolution.

        '''
        # Check that user-provided `corr` is correct type
        if not isinstance(corr, SpatialCrossCorrelation):
            raise ValueError(
                '`corr` must be of type %s'
                % SpatialCrossCorrelation)

        # Parse requested spatial method.
        # BUG FIX: `string.lower(...)` is a Python-2-only function that was
        # removed from the `string` module in Python 3; use the equivalent
        # `str.lower()` method, which works on both Python 2 and 3.
        self.spatial_method = spatial_method.lower()

        implemented_spatial_methods = ['fourier', 'burg']
        if self.spatial_method not in set(implemented_spatial_methods):
            raise ValueError(
                '`spatial_method` must be in %s'
                % implemented_spatial_methods)

        # Record important aspects of the computation, copying
        # the relevant grids/parameters over from `corr`.
        self.Fs = corr.Fs
        self.Fs_spatial = 1. / (corr.separation[1] - corr.separation[0])

        self.Npts_per_real = corr.Npts_per_real
        self.Nreal_per_ens = corr.Nreal_per_ens
        self.Npts_overlap = corr.Npts_overlap
        self.Npts_per_ens = corr.Npts_per_ens

        self.detrend = corr.detrend
        self.window = corr.window

        self.f = corr.f.copy()
        self.df = corr.df

        self.t = corr.t.copy()
        self.dt = np.nan

        # Pull the method-specific estimation parameters.
        if self.spatial_method == 'burg':
            self.p = burg_params['p']
            Nxi = burg_params['Nxi']
        elif self.spatial_method == 'fourier':
            self.window_spatial = fourier_params['window']

        # Estimate autospectral density
        if self.spatial_method == 'burg':
            self._getBurgSpectralDensity(corr, Nxi)
        elif self.spatial_method == 'fourier':
            self._getFourierSpectralDensity(corr)

    def _getFourierSpectralDensity(self, corr):
        '''Get 2-d autospectral density estimate by Fourier transforming
        the complex-valued, spatial correlation function `corr`. The
        autospectral density estimate is normalized such that integrating
        over the full spectrum yields the total power in the raw signal.

        '''
        Npts = len(corr.separation[corr._central_block])

        if self.window_spatial is not None:
            w = self.window_spatial(Npts)

            # Normalize the window such that it has
            # equivalent power to a window of ones
            # with the same length.
            power_loss = np.sum(np.abs(w) ** 2) / Npts
            w /= np.sqrt(power_loss)
        else:
            w = np.ones(Npts)

        # The spectral density is the Fourier transform
        # of the correlation function. However, note that
        # the Fourier transform X(f) of signal x(t) is related
        # to the FFT of x(t) via:
        #
        #       X(f) = (1 / Fs) * FFT[x(t)](f),
        #
        # where Fs is the sampling rate of x(t).
        #
        # Further, recall that `corr.Gxy` has already been
        # Fourier analyzed in time, so we only need to
        # Fourier transform in space to obtain the estimated
        # autospectral density.
        self.Sxx = (1. / self.Fs_spatial) * np.fft.fft(
            w[:, np.newaxis] * corr.Gxy[corr._central_block],
            axis=0)
        self.Sxx = np.fft.fftshift(self.Sxx, axes=0)

        # Construct grid for spatial spectral density.
        # Note that xi = (1 / wavelength) is the spatial frequency
        # such that the wavenumber k is k = 2 * pi * xi.
        self.xi = np.fft.fftshift(np.fft.fftfreq(
            Npts, d=(1. / self.Fs_spatial)))
        self.dxi = self.xi[1] - self.xi[0]

        # Normalize spectral density to power in raw signal such that
        # integrating over all `np.abs(self.Sxx)` yields total power.
        self.Sxx *= self._getNormalizationPrefactor(corr)

        return

    def _getBurgSpectralDensity(self, corr, Nxi):
        '''Get 2-d autospectral density estimate by using a Burg
        autoregression of the complex-valued, spatial correlation
        function `corr`.

        '''
        # Initialize a real array to hold autospectral-density estimate,
        # as autospectral density should be real-valued
        self.Sxx = np.zeros((Nxi, len(self.f)))

        # Determine maximum valid separation, `Delta`, of points
        # in the correlation function
        separation = corr.separation[corr._central_block].copy()
        Delta = separation[-1] - separation[0]

        # Loop through frequency, estimating spatial autospectral density
        # at each using Burg autoregression. Note that this is done in a
        # somewhat round-about way. First, we use the Burg AR to estimate
        # the autospectral density of the *correlation function*,
        # S_{corr}(xi), which, by definition, is equal to
        #
        #   S_{corr}(xi) = (1 / Delta) * E[|FDFT(corr, Delta)|^2],
        #
        # in the limit that the maximum separation `Delta` in the correlation
        # function goes to infinity. Here, `E[...]` is the expectation-value
        # operator and `FDFT(x, T)` is the finite duration Fourier transform
        # of signal x(t); explicitly
        #
        #   FDFT(x, T) = \int_{0}^{T} dt [e^{-2j * pi * f * t} * x(t)]
        #
        # Note, however, that our desired autospectral density S_{xx}
        # is simply the Fourier transform of the correlation function, i.e.
        #
        #   S_{xx}(xi) = FT[corr(delta)](xi),
        #
        # where `FT[x(t)](f)` is the Fourier transform of signal `x(t)`.
        # Approximating the Fourier transform by the FDFT, we see that
        #
        #   S_{xx}(xi) = FT[corr(delta)](xi),
        #              \approx FDFT(corr, Delta),
        #              \approx [Delta * S_{corr}(xi)]^{1/2},
        #
        # where we have selected the positive root, as S_{xx}(xi) is
        # positive semi-definite.
        for find in np.arange(len(self.f)):
            # Burg AR spectral-density estimate for correlation function.
            # Don't waste time with normalization, as it will normalize
            # to power in the correlation function, not the raw signal.
            # We will handle normalization externally.
            asd_burg = BurgAutoSpectralDensity(
                self.p,
                corr.Gxy[corr._central_block, find],
                Fs=self.Fs_spatial,
                Nf=Nxi,
                normalize=False)

            # Compute corresponding autospectral density of process
            # underlying the correlation function
            self.Sxx[:, find] = np.sqrt(Delta * asd_burg.Sxx)

        # Note that xi = (1 / wavelength) is the spatial frequency
        # such that the wavenumber k is k = 2 * pi * xi.
        self.xi = asd_burg.f
        self.dxi = self.xi[1] - self.xi[0]

        # Normalize spectral density to power in raw signal such that
        # integrating over all `np.abs(self.Sxx)` yields total power.
        self.Sxx *= self._getNormalizationPrefactor(corr)

        return

    def _getNormalizationPrefactor(self, corr):
        '''Get multiplicative prefactor for spectral density such that
        integrating over all of the resulting spectral density yields
        the total power in the raw signal.

        '''
        # Total signal power: integrate |Gxy| at zero separation over f.
        ind0sep = np.where(corr.separation == 0)[0][0]
        signal_power = np.sum(np.abs(corr.Gxy[ind0sep, :])) * self.df

        # Power currently contained in the (un-normalized) estimate.
        integrated_power = np.sum(np.abs(self.Sxx)) * self.df * self.dxi

        return (signal_power / integrated_power)

    def plotSpectralDensity(self, xilim=None, flim=None, vlim=None,
                            cmap='viridis', interpolation='none', fontsize=16,
                            title=None, xlabel=r'$\xi$', ylabel='$f$',
                            cblabel=r'$|S_{xx}(\xi,f)|$',
                            cborientation='horizontal',
                            ax=None, fig=None, geometry=111):
        'Plot magnitude of spectral density on log scale.'
        # Don't plot f = 0 and f = 0.5 * self.Fs, as these tend to
        # artificially decrease the dynamic range of the plot
        ax = _plot_image(
            self.xi, self.f[1:-1], np.abs(self.Sxx[:, 1:-1]).T,
            xlim=xilim, ylim=flim, vlim=vlim,
            norm='log', cmap=cmap, interpolation=interpolation,
            title=title, xlabel=xlabel, ylabel=ylabel,
            cblabel=cblabel, cborientation=cborientation,
            fontsize=fontsize,
            ax=ax, fig=fig, geometry=geometry)

        return ax
|
gpl-2.0
|
h2educ/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
98
|
20870
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
# Shared test fixture: the diabetes regression dataset (442 samples,
# 10 standardized features).
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
    # Principle of Lars is to keep covariances tied and decreasing
    # also test verbose output
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    old_stdout = sys.stdout
    try:
        # Redirect stdout so the verbose=10 output doesn't pollute logs.
        sys.stdout = StringIO()
        alphas_, active, coef_path_ = linear_model.lars_path(
            diabetes.data, diabetes.target, method="lar", verbose=10)
        sys.stdout = old_stdout
        for (i, coef_) in enumerate(coef_path_.T):
            res = y - np.dot(X, coef_)
            cov = np.dot(X.T, res)
            C = np.max(abs(cov))
            eps = 1e-3
            # Count covariances tied (within eps) to the current maximum.
            ocur = len(cov[C - eps < abs(cov)])
            if i < X.shape[1]:
                assert_true(ocur == i + 1)
            else:
                # no more than max_pred variables can go into the active set
                assert_true(ocur == X.shape[1])
    finally:
        # Always restore stdout, even if an assertion above fails.
        sys.stdout = old_stdout
def test_simple_precomputed():
    # The same, with precomputed Gram matrix
    G = np.dot(diabetes.data.T, diabetes.data)
    alphas_, active, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, Gram=G, method="lar")
    for i, coef_ in enumerate(coef_path_.T):
        res = y - np.dot(X, coef_)
        cov = np.dot(X.T, res)
        C = np.max(abs(cov))
        eps = 1e-3
        # Count covariances tied (within eps) to the current maximum.
        ocur = len(cov[C - eps < abs(cov)])
        if i < X.shape[1]:
            assert_true(ocur == i + 1)
        else:
            # no more than max_pred variables can go into the active set
            assert_true(ocur == X.shape[1])
def test_all_precomputed():
    # Test that lars_path with precomputed Gram and Xy gives the right answer
    X, y = diabetes.data, diabetes.target
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    for method in 'lar', 'lasso':
        # The precomputed path must match the from-scratch path elementwise.
        output = linear_model.lars_path(X, y, method=method)
        output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
        for expected, got in zip(output, output_pre):
            assert_array_almost_equal(expected, got)
def test_lars_lstsq():
    # Test that Lars gives least square solution at the end
    # of the path
    X1 = 3 * diabetes.data  # use un-normalized dataset
    clf = linear_model.LassoLars(alpha=0.)
    clf.fit(X1, y)
    # alpha=0 means no penalty, so the path must end at the OLS solution.
    coef_lstsq = np.linalg.lstsq(X1, y)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
    # Test that Lars Lasso gives least square solution at the end
    # of the path
    alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    # The final point of the regularization path has alpha -> 0, i.e. OLS.
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
    # Check that lars_path is robust to collinearity in input
    X = np.array([[3., 3., 1.],
                  [2., 2., 0.],
                  [1., 1., 0]])
    y = np.array([1., 0., 0])

    f = ignore_warnings
    _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
    assert_true(not np.isnan(coef_path_).any())
    residual = np.dot(X, coef_path_[:, -1]) - y
    assert_less((residual ** 2).sum(), 1.)  # just make sure it's bounded

    # With an all-zero target, the lasso path must be identically zero.
    # Use a seeded RandomState rather than the global np.random so the
    # test is deterministic and reproducible.
    n_samples = 10
    rng = np.random.RandomState(0)
    X = rng.rand(n_samples, 5)
    y = np.zeros(n_samples)
    _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
                                              copy_Gram=False, alpha_min=0.,
                                              method='lasso', verbose=0,
                                              max_iter=500)
    assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
    # Test that the ``return_path=False`` option returns the correct output
    alphas_, active_, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar")
    alpha_, active, coef = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar", return_path=False)
    # Without the path, lars_path must return only the final solution.
    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
    # Test that the ``return_path=False`` option with Gram remains correct
    G = np.dot(diabetes.data.T, diabetes.data)
    alphas_, active_, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar", Gram=G)
    alpha_, active, coef = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar", Gram=G,
        return_path=False)
    # Without the path, lars_path must return only the final solution.
    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
    # Test that the ``return_path=False`` option with Gram and Xy remains
    # correct
    X, y = 3 * diabetes.data, diabetes.target
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    alphas_, active_, coef_path_ = linear_model.lars_path(
        X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
    # NOTE: a leftover debugging print("---") was removed here; tests
    # should not write to stdout.
    alpha_, active, coef = linear_model.lars_path(
        X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
    # Without the path, lars_path must return only the final solution.
    assert_array_almost_equal(coef, coef_path_[:, -1])
    assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
    # Test when input is a singular matrix
    # (duplicate rows/columns: the second feature can never enter).
    X1 = np.array([[1, 1.], [1., 1.]])
    y1 = np.array([1, 1])
    alphas, active, coef_path = linear_model.lars_path(X1, y1)
    assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with n_features < rank) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in ([[5, 0],
               [0, 5],
               [10, 10]],
              [[10, 10, 0],
               [1e-32, 0, 0],
               [0, 0, 1]],
              ):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        # Lasso objective: (1/2n) * ||y - Xw||^2 + alpha * ||w||_1, n = 3.
        obj_lars = (1. / (2. * 3.)
                    * linalg.norm(y - np.dot(X, coef_lars_)) ** 2
                    + .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
                  + .1 * linalg.norm(coef_cd_, 1))
        # LARS should reach an objective at least as good as CD's.
        assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results.
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            # alpha == 0 is unpenalized OLS; skip (CD cannot solve it).
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)

    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert_less(err, 1e-3)

    # same test, with normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]
    # BUG FIX: the original loop shadowed the `alphas_min` list with its
    # loop variable and then ignored it, hard-coding alpha_min=0.9 — so
    # only a single stopping point was ever exercised. Iterate with a
    # distinct name and pass each stopping value through to lars_path.
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
def test_lasso_lars_path_length():
    # Test that the path length of the LassoLars is right
    lasso = linear_model.LassoLars()
    lasso.fit(X, y)
    # Refitting with an alpha taken from inside the first path should
    # reproduce exactly the truncated prefix of that path.
    lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
    lasso2.fit(X, y)
    assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
    # Also check that the sequence of alphas is always decreasing
    assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
    # Test lasso lars on a very ill-conditioned design, and check that
    # it does not blow up, and stays somewhat close to a solution given
    # by the coordinate descent solver
    # Also test that lasso_path (using lars_path output style) gives
    # the same result as lars_path and previous lasso output style
    # under these conditions.
    rng = np.random.RandomState(42)

    # Generate data: n samples, m features, k-sparse ground-truth weights.
    n, m = 70, 100
    k = 5
    X = rng.randn(n, m)
    w = np.zeros((m, 1))
    i = np.arange(0, m)
    rng.shuffle(i)
    supp = i[:k]
    # Non-zero weights with magnitude in [1, 2) and random sign.
    w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
    y = np.dot(X, w)
    sigma = 0.2
    y += sigma * rng.rand(*y.shape)
    y = y.squeeze()

    lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
    # Evaluate lasso_path on exactly the breakpoints found by lars_path.
    _, lasso_coef2, _ = linear_model.lasso_path(X, y,
                                                alphas=lars_alphas,
                                                tol=1e-6,
                                                fit_intercept=False)
    assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
    # Create an ill-conditioned situation in which the LARS has to go
    # far in the path to converge, and check that LARS and coordinate
    # descent give the same answers
    # Note it used to be the case that Lars had to use the drop for good
    # strategy for this but this is no longer the case with the
    # equality_tolerance checks
    X = [[1e20, 1e20, 0],
         [-1e-32, 0, 0],
         [1, 1, 1]]
    y = [10, 10, 1]
    alpha = .0001

    def objective_function(coef):
        # Standard lasso objective: (1/2n)||y - Xw||^2 + alpha * ||w||_1.
        return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
                + alpha * linalg.norm(coef, 1))

    lars = linear_model.LassoLars(alpha=alpha, normalize=False)
    # Convergence on this design is expected to be hard; a warning is OK.
    assert_warns(ConvergenceWarning, lars.fit, X, y)
    lars_coef_ = lars.coef_
    lars_obj = objective_function(lars_coef_)
    coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
    cd_coef_ = coord_descent.fit(X, y).coef_
    cd_obj = objective_function(cd_coef_)
    assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
    # assure that at least some features get added if necessary
    # test for 6d2b4c
    # Hilbert matrix: a classic ill-conditioned design.
    n = 5
    H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
    clf = linear_model.Lars(fit_intercept=False).fit(
        H, np.arange(n))
    assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
    lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
    lars.fit(X, y)
    assert_equal(len(lars.coef_.nonzero()[0]), 6)
    # The path should be of length 6 + 1 in a Lars going down to 6
    # non-zero coefs
    assert_equal(len(lars.alphas_), 7)
def test_multitarget():
    # Assure that estimators receiving multidimensional y do the right thing
    X = diabetes.data
    # Two targets: the raw target and its square.
    Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
    n_targets = Y.shape[1]

    for estimator in (linear_model.LassoLars(), linear_model.Lars()):
        estimator.fit(X, Y)
        Y_pred = estimator.predict(X)
        Y_dec = estimator.decision_function(X)
        assert_array_almost_equal(Y_pred, Y_dec)
        alphas, active, coef, path = (estimator.alphas_, estimator.active_,
                                      estimator.coef_, estimator.coef_path_)
        # Fitting each column separately must match the multi-target fit.
        for k in range(n_targets):
            estimator.fit(X, Y[:, k])
            y_pred = estimator.predict(X)
            assert_array_almost_equal(alphas[k], estimator.alphas_)
            assert_array_almost_equal(active[k], estimator.active_)
            assert_array_almost_equal(coef[k], estimator.coef_)
            assert_array_almost_equal(path[k], estimator.coef_path_)
            assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
    # Test the LassoLarsCV object by checking that the optimal alpha
    # increases as the number of samples increases.
    # This property is not actually garantied in general and is just a
    # property of the given dataset, with the given steps chosen.
    old_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    # Iterate from many samples to few: each step should raise alpha_.
    for length in (400, 200, 100):
        X = diabetes.data[:length]
        y = diabetes.target[:length]
        lars_cv.fit(X, y)
        np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
        old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
    # Test the LassoLarsIC object by checking that
    # - some good features are selected.
    # - alpha_bic > alpha_aic
    # - n_nonzero_bic < n_nonzero_aic
    lars_bic = linear_model.LassoLarsIC('bic')
    lars_aic = linear_model.LassoLarsIC('aic')
    rng = np.random.RandomState(42)
    X = diabetes.data
    y = diabetes.target
    X = np.c_[X, rng.randn(X.shape[0], 4)]  # add 4 bad features
    lars_bic.fit(X, y)
    lars_aic.fit(X, y)
    nonzero_bic = np.where(lars_bic.coef_)[0]
    nonzero_aic = np.where(lars_aic.coef_)[0]
    # BIC penalizes model size harder than AIC, hence larger alpha
    # and fewer selected features.
    assert_greater(lars_bic.alpha_, lars_aic.alpha_)
    assert_less(len(nonzero_bic), len(nonzero_aic))
    # BIC must not select any of the appended noise features.
    assert_less(np.max(nonzero_bic), diabetes.data.shape[1])

    # test error on unknown IC
    lars_broken = linear_model.LassoLarsIC('<unknown>')
    assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
    # LassoLarsIC should not warn for log of zero MSE.
    # X predicts y exactly (y = x), giving zero residual.
    y = np.arange(10, dtype=float)
    X = y.reshape(-1, 1)
    lars = linear_model.LassoLarsIC(normalize=False)
    assert_no_warnings(lars.fit, X, y)
    assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
    # When using automated memory mapping on large input, the
    # fold data is in read-only mode
    # This is a non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/4597
    splitted_data = train_test_split(X, y, random_state=42)
    # TempMemmap exposes the arrays as read-only memory maps.
    with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
        # The following should not fail despite copy=False
        _lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
    # this is the main test for the positive parameter on the lars_path method
    # the estimator classes just make use of this function

    # we do the test on the diabetes dataset

    # ensure that we get negative coefficients when positive=False
    # and all positive when positive=True
    # for method 'lar' (default) and lasso
    for method in ['lar', 'lasso']:
        alpha, active, coefs = \
            linear_model.lars_path(diabetes['data'], diabetes['target'],
                                   return_path=True, method=method,
                                   positive=False)
        assert_true(coefs.min() < 0)

        alpha, active, coefs = \
            linear_model.lars_path(diabetes['data'], diabetes['target'],
                                   return_path=True, method=method,
                                   positive=True)
        assert_true(coefs.min() >= 0)
# now we gonna test the positive option for all estimator classes
# Shared defaults plus per-estimator keyword overrides used by
# test_estimatorclasses_positive_constraint below.
default_parameter = {'fit_intercept': False}

estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
                           'LassoLars': {'alpha': 0.1},
                           'LarsCV': {},
                           'LassoLarsCV': {},
                           'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
    # testing the transmissibility for the positive option of all estimator
    # classes in this same function here
    for estname in estimator_parameter_map:
        params = default_parameter.copy()
        params.update(estimator_parameter_map[estname])
        estimator = getattr(linear_model, estname)(positive=False, **params)
        estimator.fit(diabetes['data'], diabetes['target'])
        assert_true(estimator.coef_.min() < 0)
        estimator = getattr(linear_model, estname)(positive=True, **params)
        estimator.fit(diabetes['data'], diabetes['target'])
        # Use the same .min() idiom as the negative-branch assertion above
        # (was the inconsistent builtin `min(estimator.coef_)`).
        assert_true(estimator.coef_.min() >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when using the positive option

    # This test is basically a copy of the above with additional positive
    # option. However for the middle part, the comparison of coefficient values
    # for a range of alphas, we had to make an adaptations. See below.

    # not normalized data
    X = 3 * diabetes.data

    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                   positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            # alpha == 0 is unpenalized OLS; skip (CD cannot solve it).
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)

    # The range of alphas chosen for coefficient comparison here is restricted
    # as compared with the above test without the positive option. This is due
    # to the circumstance that the Lars-Lasso algorithm does not converge to
    # the least-squares-solution for small alphas, see 'Least Angle Regression'
    # by Efron et al 2004. The coefficients are typically in congruence up to
    # the smallest alpha reached by the Lars-Lasso algorithm and start to
    # diverge thereafter.  See
    # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff

    for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
                                      normalize=False, positive=True).fit(X, y)
        clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
                                  normalize=False, positive=True).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert_less(err, 1e-3)

    # normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                   positive=True)
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8, positive=True)
    for c, a in zip(lasso_path.T[:-1], alphas[:-1]):  # don't include alpha=0
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)
|
bsd-3-clause
|
PanDAWMS/panda-server
|
pandaserver/daemons/scripts/recover_lost_files_daemon.py
|
1
|
4606
|
import json
import glob
import time
import os.path
import datetime
import threading
import traceback
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
from pandaserver.config import panda_config
from pandaserver.dataservice import RecoverLostFilesCore
# logger
# Module-level logger shared by main() and the worker threads.
_logger = PandaLogger().getLogger('recover_lost_files')
# main
def main(tbuf=None, **kwargs):
    """Scan the cache directory for 'recov.*' request files and process
    each one with RecoverLostFilesCore, one worker thread at a time
    (the semaphore is created with a count of 1).
    """
    _logger.debug("===================== start =====================")
    # overall timeout value (minutes) for the whole scanning session
    overallTimeout = 300
    # prefix of the files
    prefixEVP = 'recov.'
    # file pattern of evp files
    evpFilePatt = panda_config.cache_dir + '/' + prefixEVP + '*'
    from pandaserver.taskbuffer.TaskBuffer import taskBuffer
    taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=1)

    # thread pool: tracks live worker threads so we can join them at the end
    class ThreadPool:
        def __init__(self):
            self.lock = threading.Lock()
            self.list = []

        def add(self, obj):
            # Register a worker thread.
            self.lock.acquire()
            self.list.append(obj)
            self.lock.release()

        def remove(self, obj):
            # Deregister a finished worker thread.
            self.lock.acquire()
            self.list.remove(obj)
            self.lock.release()

        def join(self):
            # Wait for every currently-registered thread to finish.
            self.lock.acquire()
            thrlist = tuple(self.list)
            self.lock.release()
            for thr in thrlist:
                thr.join()

    # thread to ev-pd2p: processes one request file, then releases the
    # shared semaphore so the main loop can dispatch the next file
    class EvpThr(threading.Thread):
        def __init__(self, lock, pool, tb_if, file_name, to_delete):
            threading.Thread.__init__(self)
            self.lock = lock
            self.pool = pool
            self.fileName = file_name
            self.to_delete = to_delete
            self.taskBuffer = tb_if
            self.pool.add(self)

        def run(self):
            # NOTE(review): if json.load or RecoverLostFilesCore.main raises,
            # the semaphore is never released and the pool entry leaks —
            # consider try/finally here; confirm before changing behavior.
            self.lock.acquire()
            with open(self.fileName) as f:
                ops = json.load(f)
            tmpLog = LogWrapper(_logger, '< jediTaskID={} >'.format(ops['jediTaskID']))
            tmpLog.info('start {}'.format(self.fileName))
            s, o = RecoverLostFilesCore.main(self.taskBuffer, ops, tmpLog)
            tmpLog.info('status={}. {}'.format(s, o))
            # Delete the request file on success, or when flagged as the
            # final ("last chance") attempt.
            if s is not None or self.to_delete:
                tmpLog.debug('delete {}'.format(self.fileName))
                try:
                    os.remove(self.fileName)
                except Exception:
                    pass
            self.pool.remove(self)
            self.lock.release()

    # get files
    timeNow = datetime.datetime.utcnow()
    timeInt = datetime.datetime.utcnow()
    fileList = glob.glob(evpFilePatt)
    fileList.sort()

    # create thread pool and semaphore (count 1 => one worker at a time)
    adderLock = threading.Semaphore(1)
    adderThreadPool = ThreadPool()

    # add
    while len(fileList) != 0:
        # time limit to avoid too many recovery attempts running at the
        # same time
        if (datetime.datetime.utcnow() - timeNow) > datetime.timedelta(minutes=overallTimeout):
            _logger.debug("time over in main session")
            break
        # try to get Semaphore (released by the previous worker thread)
        adderLock.acquire()
        # refresh fileList every 15 minutes to pick up new request files
        if (datetime.datetime.utcnow() - timeInt) > datetime.timedelta(minutes=15):
            timeInt = datetime.datetime.utcnow()
            # get file
            fileList = glob.glob(evpFilePatt)
            fileList.sort()
        # choose a file
        fileName = fileList.pop(0)
        # release lock
        adderLock.release()
        if not os.path.exists(fileName):
            continue
        try:
            # NOTE(review): gmtime()[:7] passes tm_wday as the datetime
            # microsecond argument; [:6] is the conventional slice — confirm.
            modTime = datetime.datetime(*(time.gmtime(os.path.getmtime(fileName))[:7]))
            if (timeNow - modTime) > datetime.timedelta(hours=2):
                # last chance: delete the request file even on failure
                _logger.debug("Last attempt : %s" % fileName)
                thr = EvpThr(adderLock, adderThreadPool, taskBuffer, fileName, False)
                thr.start()
            elif (timeInt - modTime) > datetime.timedelta(seconds=5):
                # try: file is old enough to no longer be mid-write
                _logger.debug("Normal attempt : %s" % fileName)
                thr = EvpThr(adderLock, adderThreadPool, taskBuffer, fileName, True)
                thr.start()
            else:
                _logger.debug("Wait %s : %s" % ((timeInt - modTime), fileName))
        except Exception as e:
            _logger.error("{} {}".format(str(e), traceback.format_exc()))

    # join all threads
    adderThreadPool.join()
    _logger.debug("===================== end =====================")
# run
# Script entry point (the daemon framework may also call main() directly).
if __name__ == '__main__':
    main()
|
apache-2.0
|
justanotherbrain/HebbLearn
|
objcat-demo.py
|
1
|
5010
|
import sys
import scipy.ndimage
import os.path
import HebbLearn as hl
import numpy as np
import matplotlib.pyplot as plt
# Module-level learner instance used by realign() and the script below.
fl = hl.NonlinearGHA()
# STL-10 class indices selected for the two-way comparison.
cat_a = '1'  # monkey
cat_b = '2'  # truck
def resize(data):
    """Unpack flat STL-10 rows of shape (n, 96*96*3) into a (96, 96, 3, n)
    image stack.

    Each row is reshaped Fortran-style into a 96x96x3 image, and the
    sample axis is then moved from the front to the back.
    """
    n_images = np.shape(data)[0]
    images = np.reshape(data, (n_images, 96, 96, 3), order='F')
    # Three successive pairwise swaps (0,1), (1,2), (2,3) are equivalent
    # to a single permutation that cycles axis 0 to the end.
    return np.transpose(images, (1, 2, 3, 0))
def realign(image, filter_size, step_size):
    """Flatten an image into a single column vector of raster-scanned patches.

    The image is first resized (via the module-level `fl` helper) so that
    `filter_size` x `filter_size` patches taken every `step_size` pixels
    tile it; each patch is flattened column-major and the patches are
    stacked into one (row_steps * col_steps * filter_size**2, 1) vector.
    """
    im = fl.ResizeImage(image, filter_size)
    # Floor division keeps the step counts integral under Python 3
    # (true division returns floats, which range() rejects).
    row_steps = ((im.shape[0] - filter_size) // step_size) + 1
    col_steps = ((im.shape[1] - filter_size) // step_size) + 1
    fs2 = filter_size * filter_size
    realigned = np.zeros((row_steps * col_steps * fs2, 1))
    s = 0
    for c in range(col_steps):
        for r in range(row_steps):
            rs = r * step_size
            re = rs + filter_size
            cs = c * step_size
            ce = cs + filter_size
            patch = im[rs:re, cs:ce]
            # (renamed from `map`, which shadowed the builtin)
            flat_patch = np.reshape(patch, (fs2, 1))
            realigned[s * fs2:(s + 1) * fs2] = flat_patch
            # BUG FIX: `s` was never incremented, so every patch
            # overwrote slot 0 and the rest of the vector stayed zero.
            s += 1
    return realigned
# ---------------------------------------------------------------------------
# Script body: load two STL-10 categories, train one Hebbian filter bank per
# category, and classify held-out images by the sign of a linear score.
# ---------------------------------------------------------------------------
# Load cached data (there is no else-branch: later code assumes these loads
# happened).
if os.path.isfile('stl-data/c1_train.npy'):
    print('==> Load previously saved textures data')
    tmp_a_train = resize(np.load('stl-data/c'+cat_a+'_train.npy'))/255.
    tmp_b_train = resize(np.load('stl-data/c'+cat_b+'_train.npy'))/255.
    tmp_a_test = resize(np.load('stl-data/c'+cat_a+'_test.npy'))/255.
    tmp_b_test = resize(np.load('stl-data/c'+cat_b+'_test.npy'))/255.
# number of train/test images per category (last axis after resize())
n_train = np.shape(tmp_a_train)[3]
n_test = np.shape(tmp_a_test)[3]
print('==> preprocessing data')
# grayscale stacks: one 96x96 image per slice along axis 2
a_train = np.zeros((96,96,n_train))
b_train = np.zeros((96,96,n_train))
a_test = np.zeros((96,96,n_test))
b_test = np.zeros((96,96,n_test))
for i in range(n_train):
    a_train[:,:,i] = hl.rgb2gray(tmp_a_train[:,:,:,i])
    b_train[:,:,i] = hl.rgb2gray(tmp_b_train[:,:,:,i])
for i in range(n_test):
    a_test[:,:,i] = hl.rgb2gray(tmp_a_test[:,:,:,i])
    b_test[:,:,i] = hl.rgb2gray(tmp_b_test[:,:,:,i])
print('==> mean centering data')
# center and scale with statistics computed from the training images only,
# applied to both train and test sets
pop_mean = np.mean(np.concatenate((a_train,b_train),axis=2))
a_train = a_train - pop_mean
b_train = b_train - pop_mean
a_test = a_test - pop_mean
b_test = b_test - pop_mean
pop_std = np.std(np.concatenate((a_train,b_train),axis=2))
a_train = a_train/pop_std
b_train = b_train/pop_std
a_test = a_test/pop_std
b_test = b_test/pop_std
# Hyper-parameters: from the command line if given, else defaults.
if len(sys.argv)>1:
    filter_size = int(sys.argv[1])
    step_size = int(sys.argv[2])
    out_dimension = int(sys.argv[3])
    LR = float(sys.argv[4])
    n_samples = int(sys.argv[5])
else:
    filter_size = 4
    step_size = 2
    out_dimension = 1
    LR = 1
    n_samples = 500
nonlinearity = hl.TANH
#LR=.000000001
# NOTE(review): this unconditionally overrides the LR parsed from sys.argv
LR=1
#LR=.5
print('==> Classification performance')
# vectorize images: one flattened 96*96 image per row, training rows first
a_vex = np.reshape(np.concatenate((a_train,a_test),axis=2), (96*96,n_train+n_test), order='F').T
b_vex = np.reshape(np.concatenate((b_train,b_test),axis=2), (96*96,n_train+n_test), order='F').T
# NOTE(review): this diff_mean is dead code -- it is recomputed from the
# trained filters below before its first use.
diff_mean = (np.mean(a_vex[:n_train,:], axis=0) - np.mean(b_vex[:n_train,:], axis=0))
# held-out set: category-a rows then category-b rows, labels +1 / -1, shuffled
test = np.concatenate((a_vex[n_train:], b_vex[n_train:]), axis=0)
y = np.ones((np.shape(test)[0],1))
y[n_test:]=-1
shuff = np.random.permutation(np.shape(test)[0])
test = test[shuff,:]
y = y[shuff]
corr = 0
print('==> Training')
# one Hebbian filter bank per category
k_a = fl.Train(a_train[:,:,:n_train], filter_size, step_size, out_dimension, LR, nonlinearity)
k_b = fl.Train(b_train[:,:,:n_train], filter_size, step_size, out_dimension, LR, nonlinearity)
#k_a = k_a[:,:,0]
#k_b = k_b[:,:,0]
kdim = np.shape(k_a)
ka = np.zeros((kdim[0],kdim[1]*kdim[2])) # realign filters: flatten axis 2 into axis 1
kb = np.zeros((kdim[0],kdim[1]*kdim[2]))
for i in range(kdim[2]):
    for j in range(kdim[0]):
        ka[j,i*kdim[1]:(i+1)*kdim[1]]=k_a[j,:,i]
        kb[j,i*kdim[1]:(i+1)*kdim[1]]=k_b[j,:,i]
# per-category filter responses accumulated over all training images
ma = np.zeros((kdim[0],kdim[1]*kdim[2]))
mb = np.zeros((kdim[0],kdim[1]*kdim[2]))
for j in range(kdim[0]):
    for i in range(n_train):
        ma[j,:] = ma[j,:] + np.multiply(ka[j,0], np.tanh(realign(np.reshape(a_vex[i,:], (96,96)), filter_size, step_size).T))
        mb[j,:] = mb[j,:] + np.multiply(kb[j,0], np.tanh(realign(np.reshape(b_vex[i,:], (96,96)), filter_size, step_size).T))
diff_mean = ma-mb
# average filter bank, transposed so columns index the output dimension
k = np.multiply(0.5,ka+kb).T
#w = np.dot(k[:,0],np.diag(diff_mean))
w = np.zeros((kdim[0], kdim[1]*kdim[2]))
for i in range(kdim[0]):
    w[i,:] = np.multiply(k[:,i],diff_mean[i,:]) # works since both are vectors
#w = np.multiply(k[:,0],diff_mean)
print('')
print('')
print('==> Testing')
# NOTE(review): n_test is re-bound here to the *total* number of test rows
# (both categories), shadowing the per-category count used above.
n_test = np.shape((test))[0]
# NOTE(review): yhs is allocated but never written in this script
yhs = np.zeros((np.shape(test)[0],1))
for i in range(n_test):
    x = realign(np.reshape(test[i,:],(96,96)), filter_size, step_size)
    yhat = np.sign(np.dot(w,x))
    #yhat = np.sign(np.sum(np.dot(w,x.T)))
    if (yhat == y[i]):
        corr = corr+1.
    # simple textual progress meter
    pt = ((i+.0)/n_test)*100
    sys.stdout.write("\rTesting is %f percent complete" % pt)
    sys.stdout.flush()
pc = corr/np.shape(test)[0]
print('')
print('')
# the linear rule is sign-symmetric, so report accuracy above chance
if (pc < 0.5):
    print('flipped')
    pc = 1.0-pc
print('==> Percent Correct')
print(pc)
|
mit
|
jakobworldpeace/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
105
|
29653
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """Public entry point for one C-step run of the FastMCD algorithm.

    Looks for the ``n_support`` observations of ``X`` whose scatter matrix
    has minimum determinant, following the C-step procedure of
    [Rouseeuw1984]_.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations from which to compute the robust location
        and covariance estimates.

    remaining_iterations : int, optional
        Maximum number of iterations; per [Rouseeuw1999]_, 30 is more than
        enough to reach convergence.

    initial_estimates : 2-tuple, optional
        Optional (location, covariance) starting point for the procedure.

    verbose : boolean, optional
        Verbose mode.

    cov_computation_method : callable, default empirical_covariance
        Function used to compute the covariance; must return an array of
        shape (n_features, n_features).

    random_state : integer or numpy.RandomState, optional
        Seed or generator used to draw the initial random support.

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.

    support : array-like, shape (n_samples,)
        Mask of the `n_support` observations with minimum-determinant
        scatter matrix.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)
    data = np.asarray(X)
    return _c_step(data, n_support,
                   remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates,
                   verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    """Core C-step loop behind :func:`c_step`.

    Starting from either a random subset of ``n_support`` observations or
    the given ``initial_estimates``, alternate between estimating
    (location, covariance) on the current support and re-selecting the
    support as the ``n_support`` smallest Mahalanobis distances, until the
    covariance log-determinant stops decreasing or ``remaining_iterations``
    is exhausted.

    Returns the tuple ``(location, covariance, det, support, dist)``.
    """
    n_samples, n_features = X.shape

    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support)
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates
        support[np.argsort(dist)[:n_support]] = True

    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)

    # Iterative procedure for Minimum Covariance Determinant computation
    det = fast_logdet(covariance)
    previous_det = np.inf
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values (returned if the determinant increases)
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates on the n_support closest observations
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1

    previous_dist = dist
    # final distances: w.r.t. the last location, using the precision computed
    # from the previous iteration's covariance (the loop exits before
    # refreshing precision)
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)

    # Catch computation errors (logdet of a singular covariance is -inf/inf)
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen): fall back to the
        # previous iteration's estimates
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist
    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist

    return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Finds the best pure subset of observations to compute MCD from it.

    Runs the c_step procedure from several starting points (either random
    supports or user-provided initial estimates) and keeps the ``select``
    candidates whose covariance matrix has the smallest determinant.  The
    retained observations form the "pure" data set (the `support`), as in
    Rousseeuw and Van Driessen [Rouseeuw1999]_.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.

    n_trials : int (> 0) or 2-tuple
        Number of different initial sets of observations from which to run
        the algorithm.  Alternatively, a 2-tuple of initial estimates:
        - n_trials[0] : array-like, shape (n_trials, n_features)
          initial location estimates
        - n_trials[1] : array-like, shape (n_trials, n_features, n_features)
          initial covariance estimates

    select : int, select > 0
        Number of best candidates results to return.

    n_iter : int, n_iter > 0
        Maximum number of iterations for the c_step procedure
        (2 is enough to be close to the final solution; "never" exceeds 20).

    verbose : boolean, default False
        Control the output verbosity.

    cov_computation_method : callable, default empirical_covariance
        Function used to compute the covariance; must return an array of
        shape (n_features, n_features).

    random_state : integer or numpy.RandomState, default None
        Seed or generator used to draw the random initial supports.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : array-like, shape (select, n_features)
    best_covariances : array-like, shape (select, n_features, n_features)
    best_supports : array-like, shape (select, n_samples)
    best_ds : array-like, shape (select, n_samples)
        The `select` best location / covariance / support / distance
        candidates found in `X`, ordered by covariance determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)
    n_samples, n_features = X.shape

    # Dispatch on n_trials: a count of random restarts, or a tuple of
    # explicit (locations, covariances) starting points.
    if isinstance(n_trials, numbers.Integral):
        estimates_list = None
    elif isinstance(n_trials, tuple):
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))

    # Run `n_trials` c_step procedures.
    if estimates_list is None:
        # from random initial supports
        all_estimates = [
            _c_step(X, n_support, remaining_iterations=n_iter,
                    verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            for _ in range(n_trials)]
    else:
        # from every given initial estimate
        all_estimates = [
            _c_step(X, n_support, remaining_iterations=n_iter,
                    initial_estimates=(estimates_list[0][j],
                                       estimates_list[1][j]),
                    verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            for j in range(n_trials)]
    all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
        zip(*all_estimates)

    # Keep the `select` candidates with the smallest determinant.
    index_best = np.argsort(all_dets_sub)[:select]
    return (np.asarray(all_locs_sub)[index_best],
            np.asarray(all_covs_sub)[index_best],
            np.asarray(all_supports_sub)[index_best],
            np.asarray(all_ds_sub)[index_best])
def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.

    random_state : integer or numpy.RandomState, optional
        The generator used to randomly subsample. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.

    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    dist : array-like, shape (n_samples,)
        Mahalanobis distances of all observations w.r.t. the raw estimates.
    """
    random_state = check_random_state(random_state)
    X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
    n_samples, n_features = X.shape

    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    #  Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = 0.5 * (X_sorted[n_support + halves_start]
                              + X_sorted[halves_start]).mean()
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # the support is the whole data set: plain mean / variance
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)

    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets *
                               (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal). BUGFIX: shrink the candidate pool *before*
            # re-allocating -- the previous code re-allocated the same
            # oversized array first, so this branch raised MemoryError again.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset, h_subset, n_trials,
                select=n_best_sub, n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        #    (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged *
                               (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = \
            select_candidates(
                X[selection], h_merged,
                n_trials=(all_best_locations, all_best_covariances),
                select=n_best_merged,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = \
                select_candidates(
                    X, n_support,
                    n_trials=(locations_merged, covariances_merged),
                    select=1,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # small sample (n_samples <= 500):
        # 1. Find the 10 best couples (location, covariance)
        #    considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state)
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X, n_support, n_trials=(locations_best, covariances_best),
            select=1, cov_computation_method=cov_computation_method,
            random_state=random_state)
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]

    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.

    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.

    assume_centered : Boolean
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        [n_sample + n_features + 1] / 2

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    raw_location_ : array-like, shape (n_features,)
        The raw robust estimated location before correction and re-weighting.

    raw_covariance_ : array-like, shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.

    raw_support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.

    location_ : array-like, shape (n_features,)
        Estimated robust location

    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.

    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the training set (on which `fit` is called)
        observations.

    References
    ----------

    .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
    .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS`
    .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`

    """
    # non-robust covariance estimator applied to the selected supports
    _nonrobust_covariance = staticmethod(empirical_covariance)

    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, random_state=None):
        # store constructor parameters as-is (sklearn convention)
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : not used, present for API consistence purpose.

        Returns
        -------
        self : object
            Returns self.

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            # discard the location estimate and recompute covariance and
            # distances around the origin
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models (rescales covariance_ / dist_)
        self.correct_covariance(X)
        # re-weight estimator (sets covariance_, location_, support_, dist_)
        self.reweight_covariance(X)
        return self

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1984]_.

        Note that ``self.dist_`` is rescaled in place as a side effect.

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        covariance_corrected : array-like, shape (n_features, n_features)
            Corrected robust covariance estimate.

        """
        # scale so the median squared distance matches the chi2 median
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        self.dist_ /= correction
        return covariance_corrected

    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.

        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates). [Rouseeuw1984]_

        Updates ``location_``, ``covariance_``, ``support_`` and ``dist_``
        in place.

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        location_reweighted : array-like, shape (n_features, )
            Re-weighted robust location estimate.

        covariance_reweighted : array-like, shape (n_features, n_features)
            Re-weighted robust covariance estimate.

        support_reweighted : array-like, type boolean, shape (n_samples,)
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.

        """
        n_samples, n_features = data.shape
        # keep observations within the 97.5% chi2 quantile of the distances
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered)
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        # recompute distances with the re-weighted estimates
        X_centered = data - self.location_
        self.dist_ = np.sum(
            np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
|
bsd-3-clause
|
henrykironde/scikit-learn
|
sklearn/qda.py
|
140
|
7682
|
"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)
    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.
    The model fits a Gaussian density to each class.
    Read more in the :ref:`User Guide <lda_qda>`.
    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes
    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.
    means_ : array-like, shape = [n_classes, n_features]
        Class means.
    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).
    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.
    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.
    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None, reg_param=0.0)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """
    def __init__(self, priors=None, reg_param=0.):
        # Store priors as an ndarray (or None to estimate them from data).
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.
        tol : float, optional, default 1.0e-4
            Threshold used for rank estimation.
        """
        X, y = check_X_y(X, y)
        # Encode targets as 0..n_classes-1 so they can index per-class lists.
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # Empirical class priors: fraction of samples per class.
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # Fit one Gaussian per class via SVD of the centered class data;
        # this factors each covariance without forming it explicitly.
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # Variances along the principal axes (eigenvalues of the
            # class covariance), then shrunk towards the identity by
            # reg_param as documented in the class docstring.
            S2 = (S ** 2) / (len(Xg) - 1)
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self
    def _decision_function(self, X):
        # Per-class log-posterior (up to a shared constant): Mahalanobis
        # distance computed in each class's rotated/whitened coordinates,
        # plus the log-determinant term and the log prior.
        check_is_fitted(self, 'classes_')
        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # Whiten: project onto principal axes and scale by 1/sqrt(var).
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        # u = log|Sigma_k| expressed through the per-axis variances.
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))
    def decision_function(self, X):
        """Apply decision function to an array of samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).
        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        # argmax over classes, mapped back to the original labels.
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred
    def predict_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # Subtracting the row max keeps exp() from overflowing.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.
        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
|
bsd-3-clause
|
siva82kb/SPARC
|
scripts/for_paper.py
|
1
|
7687
|
"""Module for generating plots for the paper."""
import numpy as np
import matplotlib.pyplot as plt
from smoothness import sparc
from smoothness import log_dimensionless_jerk
def sine_rhythmic_movement(T_m, T_r, T_t, ts, skill=1):
    """Generate a rhythmic movement lasting (approximately) T_t seconds.

    Converts the total duration into a whole number of move/rest cycles
    and delegates to sine_rhythmic_movement_by_number.
    """
    # Time axis for the requested duration (the delegate recomputes it
    # from the actual sample count).
    t = np.arange(0, T_t, ts)
    # Number of complete cycles (move + rest, both directions) in T_t.
    n_moves = int(np.floor(T_t / (2 * T_m + 2 * T_r)))
    result = sine_rhythmic_movement_by_number(T_m, T_r, n_moves, ts, skill)
    t, _movement, _vel, _speed, _move_tag, _rest_tag = result
    return t, _movement, _vel, _speed, _move_tag, _rest_tag, n_moves
def sine_rhythmic_movement_by_number(T_m, T_r, N, ts, skill=1):
    """Generate N cycles of a sinusoidal point-to-point rhythmic movement.

    Each cycle is: move P->Q (duration T_m), dwell at Q (T_r),
    move Q->P (T_m), dwell at P (T_r), sampled every ts seconds.

    Parameters
    ----------
    T_m : float
        Duration of one movement segment (seconds).
    T_r : float
        Duration of one rest (dwell) segment (seconds).
    N : int
        Number of complete cycles to generate.
    ts : float
        Sampling period (seconds).
    skill : float, optional
        Skill level in [0, 1]; values below 1 superimpose random
        high-frequency components on the movement segments.

    Returns
    -------
    (t, movement, vel, speed, move_tag, rest_tag) : tuple of ndarrays
        move_tag is +1 during P->Q, -1 during Q->P, 0 at rest;
        rest_tag is -1 while dwelling at Q, +1 while dwelling at P.
    """
    # Segment lengths in samples.  Explicit int casts are required:
    # T_m/ts and T_r/ts are floats, and numpy size arguments must be
    # integers on Python 3 (np.ones(T_r/ts) raises TypeError there).
    n_m = int(round(T_m / ts))
    n_r = int(round(T_r / ts))
    # One period: P -> Q, dwell at Q, Q -> P, dwell at P.
    _temp = np.concatenate(
        (0.5 - 0.5 * np.cos(np.pi * np.arange(0, T_m, ts) / T_m),
         np.ones(n_r),
         0.5 + 0.5 * np.cos(np.pi * np.arange(0, T_m, ts) / T_m),
         np.zeros(n_r)))
    _movement = np.tile(_temp, N)
    # Movement direction tag: +1 for P->Q, -1 for Q->P, 0 elsewhere.
    _temp = np.concatenate(
        (np.ones(n_m),
         np.zeros(n_r),
         -1.0 * np.ones(n_m),
         np.zeros(n_r)))
    _move_tag = np.tile(_temp, N)
    # Rest location tag: -1 at Q, +1 at P, 0 elsewhere.
    _temp = np.concatenate(
        (np.zeros(n_m),
         -1 * np.ones(n_r),
         np.zeros(n_m),
         np.ones(n_r)))
    _rest_tag = np.tile(_temp, N)
    # If skill < 1, add random high-frequency components to the movement
    # segments to mimic unskilled performance.  (With skill == 1 the
    # amplitudes are exactly zero, so the movement stays deterministic.)
    _noise_parts = []
    for _ in range(N):
        _amp = (1 - skill) * np.random.rand(2)
        _freq = np.random.randint(3, 6, 2)
        _noise_parts.append(np.concatenate(
            (_amp[0] * np.sin(_freq[0] * np.pi * np.arange(0, T_m, ts) / T_m),
             np.zeros(n_r),
             _amp[1] * np.sin(_freq[1] * np.pi * np.arange(0, T_m, ts) / T_m),
             np.zeros(n_r))))
    if _noise_parts:  # guard: N == 0 previously raised NameError
        _movement = _movement + np.concatenate(_noise_parts)
    # Velocity (first difference) and speed of the movement.
    _vel = np.zeros(len(_movement))
    _vel[1:] = np.diff(_movement) / ts
    _speed = np.abs(_vel)
    # Time axis; computed from the sample count so len(t) always
    # matches len(_movement) (arange with a float stop can be off by one).
    t = np.arange(len(_movement)) * ts
    return t, _movement, _vel, _speed, _move_tag, _rest_tag
def changing_sine_rhythmic_movement(T_t, ts, skill=1):
    """Generate a rhythmic movement whose speed and dwell-time change
    over time, by chaining single cycles with parameters drawn from
    quadratic time trends."""
    t = np.arange(0, T_t, ts)
    # movement and rest time trends.
    # Quadratic trends centred at t = 15 s; constants appear to be
    # tuned for a 30 s record (TODO confirm against the paper figures).
    _tr_trend = 0.52 * np.power(t - 15.0, 2.0) / 225.
    _tm_trend = 0.4 * np.power(t - 15.0, 2.0) / 225. + 0.6
    # First movement
    _, m, _, _, _, _, _ = sine_rhythmic_movement(T_m=_tm_trend[0],
                                                 T_r=_tr_trend[0],
                                                 T_t=2 * (_tm_trend[0] +
                                                          _tr_trend[0]),
                                                 ts=0.01)
    # create successive movements and append them to m
    _t = len(m) * ts
    while _t <= t[-1]:
        # Pick the trend values at the current elapsed time; the
        # tolerance match compensates for floating-point drift in _t.
        _inx = np.nonzero(np.abs(t - _t) < 0.001)[0][0]
        _T_m = _tm_trend[_inx]
        _T_r = _tr_trend[_inx]
        _T_t = 2 * (_T_m + _T_r)
        _, _temp, _, _, _, _, _ = sine_rhythmic_movement(T_m=_T_m,
                                                         T_r=_T_r,
                                                         T_t=_T_t,
                                                         ts=0.01)
        _t += len(_temp) * ts
        m = np.append(m, _temp)
    # Time for movement, velocity and speed
    t = np.arange(0, len(m) * ts, ts)
    v = np.zeros(len(m))
    v[1:] = np.diff(m) / ts
    s = np.abs(v)
    return t, m, v, s
def plot_different_tasks(t1, m1, s1, t2, m2, s2, t3, m3, s3):
    """Plot three rhythmic movements (position + normalized speed) as a
    3-row figure for the paper; returns the matplotlib Figure.

    Each (t, m, s) triple is (time, position, speed); speed is rescaled
    to 0.75 * s / max(s) so it fits under the position trace.
    """
    fig = plt.figure(figsize=(10, 7))
    plt.subplot(311)
    plt.plot(t1, m1, '0.0', lw=2, label="Position")
    plt.plot(t1, 0.75 * s1 / np.max(s1), '0.4', lw=1, label="Speed")
    plt.ylabel('Position', fontsize=18)
    plt.xticks([], fontsize=18)
    plt.yticks([0, 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.5)
    plt.title('M1: Rhythmic movement with some dwell-time', fontsize=20)
    plt.text(0.5, 1.1, "(A)", fontsize=20)
    plt.legend(ncol=2, fontsize=20)
    plt.subplot(312)
    plt.plot(t2, m2, '0.0', lw=2, label="Position")
    plt.plot(t2, 0.75 * s2 / np.max(s2), '0.4', lw=1, label="Speed")
    plt.ylabel('Position', fontsize=18)
    plt.xticks([], fontsize=18)
    plt.yticks([0, 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.5)
    plt.title('M2: Rhythmic movement with zero dwell-time', fontsize=20)
    plt.text(0.5, 1.1, "(B)", fontsize=20)
    plt.legend(ncol=2, fontsize=20)
    plt.subplot(313)
    plt.plot(t3, m3, '0.0', lw=2, label="Position")
    plt.plot(t3, 0.75 * s3 / np.max(s3), '0.4', lw=1, label="Speed")
    # Only the bottom panel carries the shared x axis.
    plt.xlabel('Time (sec)', fontsize=20)
    plt.ylabel('Position', fontsize=20)
    plt.xticks(fontsize=18)
    plt.yticks([0, 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.5)
    plt.xlim(0, 30.)
    plt.title('M3: Rhythmic movement with changing speed and dwell-time',
              fontsize=20)
    plt.text(0.5, 1.1, "(C)", fontsize=20)
    plt.legend(ncol=2, fontsize=20)
    plt.tight_layout()
    return fig
def plot_skilled_unskilled_tasks(t1, m1, t2, m2):
    """Plot a skilled (t1, m1) and a novice (t2, m2) movement as a
    2-row figure for the paper; returns the matplotlib Figure."""
    fig = plt.figure(figsize=(10, 5))
    plt.subplot(211)
    plt.plot(t1, m1, '0.0', lw=2, label="Position")
    plt.ylabel('Position', fontsize=20)
    plt.xticks(fontsize=18)
    plt.yticks([0., 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.3)
    plt.title('M1a: Rhythmic movement performed by a skilled subject',
              fontsize=20)
    plt.text(0.5, 1.1, "(A)", fontsize=20)
    plt.subplot(212)
    plt.plot(t2, m2, '0.0', lw=2, label="Position")
    plt.xlabel('Time (sec)', fontsize=20)
    plt.ylabel('Position', fontsize=20)
    plt.xticks(fontsize=18)
    plt.yticks([0., 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.3)
    plt.title('M1b: Rhythmic movement performed by a novice subject',
              fontsize=20)
    plt.text(0.5, 1.1, "(B)", fontsize=20)
    plt.tight_layout()
    return fig
def plot_three_simple_tasks(t1, m1, t2, m2, t3, m3):
    """Plot three movements (expert 1, novice, expert 2) on a shared
    x-range as a 3-row figure for the paper; returns the Figure."""
    # Common right edge so all panels are directly comparable.
    t_max = np.max([t1[-1], t2[-1], t3[-1]])
    fig = plt.figure(figsize=(10, 7))
    plt.subplot(311)
    plt.plot(t1, m1, '0.0', lw=2, label="Position")
    plt.ylabel('Position', fontsize=20)
    plt.xticks([], fontsize=18)
    plt.yticks([0., 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.3)
    plt.xlim(0, np.max([t1[-1], t2[-1], t3[-1]]))
    plt.title('Ma: Rhythmic movement (expert 1)',
              fontsize=20)
    plt.text(0.5, 1.1, "(A)", fontsize=20)
    plt.subplot(312)
    plt.plot(t2, m2, '0.0', lw=2, label="Position")
    plt.ylabel('Position', fontsize=20)
    plt.xticks([], fontsize=18)
    plt.yticks([0., 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.3)
    plt.xlim(0, np.max([t1[-1], t2[-1], t3[-1]]))
    plt.title('Mb: Rhythmic movement (novice)',
              fontsize=20)
    plt.text(0.5, 1.1, "(B)", fontsize=20)
    plt.subplot(313)
    plt.plot(t3, m3, '0.0', lw=2, label="Position")
    # # Line for targets P and Q.
    # plt.plot([0, t_max], [-0.025, -0.025], '0.0', linestyle='--', lw=0.5)
    # plt.plot([0, t_max], [0.025, 0.025], '0.0', linestyle='--', lw=0.5)
    # plt.plot([0, t_max], [1.025, 1.025], '0.0', linestyle='--', lw=0.5)
    # plt.plot([0, t_max], [0.975, 0.975], '0.0', linestyle='--', lw=0.5)
    plt.xlabel('Time (sec)', fontsize=20)
    plt.ylabel('Position', fontsize=20)
    plt.xticks(fontsize=18)
    plt.yticks([0., 0.5, 1.0], fontsize=18)
    plt.ylim(-0.1, 1.3)
    plt.xlim(0, t_max)
    plt.title('Mc: Rhythmic movement (expert 2)',
              fontsize=20)
    plt.text(0.5, 1.1, "(C)", fontsize=20)
    plt.tight_layout()
    return fig
|
isc
|
stylianos-kampakis/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py
|
227
|
5170
|
"""
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
                  n_features=n_features, centers=10,
                  random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
# Brute-force cosine k-NN gives the ground truth used to score accuracy.
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
                        metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
# NOTE(review): `np.int` is removed in numpy >= 1.24; this script targets
# the old scikit-learn versions that still shipped LSHForest — confirm
# the pinned numpy before running.
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
                            n_candidates_values.shape[0]),
                           dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
                         n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
    for i, n_candidates in enumerate(n_candidates_values):
        accuracy_c = []
        for seed in range(n_iter):
            lshf = LSHForest(n_estimators=value,
                             n_candidates=n_candidates, n_neighbors=1,
                             random_state=seed)
            # Build the LSH Forest index
            lshf.fit(X_index)
            # Get neighbors
            neighbors_approx = lshf.kneighbors(X_query,
                                               return_distance=False)
            # Accuracy = fraction of queries whose approximate neighbor
            # matches the exact one.
            accuracy_c.append(np.sum(np.equal(neighbors_approx,
                                              neighbors_exact)) /
                              n_queries)
        stds_accuracies[j, i] = np.std(accuracy_c)
        accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
# (n_candidates is left at the LSHForest default here).
for i, n_estimators in enumerate(n_estimators_values):
    lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
    # Build the LSH Forest index
    lshf.fit(X_index)
    # Get neighbors
    neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
    accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
                                          neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
    label = 'n_estimators = %d ' % n_estimators
    plt.plot(n_candidates_values, accuracies_c[i, :],
             'o-', c=colors[i], label=label)
    plt.errorbar(n_candidates_values, accuracies_c[i, :],
                 stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/pandas/computation/ops.py
|
7
|
15881
|
"""Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.types.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
    """NameError raised when an eval expression references a name that
    cannot be resolved (local ``@variable`` or global)."""
    def __init__(self, name, is_local):
        template = ('local variable {0!r} is not defined' if is_local
                    else 'name {0!r} is not defined')
        super(UndefinedVariableError, self).__init__(template.format(name))
class Term(StringMixin):
    """A leaf node of an eval expression: a named variable resolved
    against a Scope, or (via __new__ dispatch) a literal Constant."""
    def __new__(cls, name, env, side=None, encoding=None):
        # Non-string "names" are literals; dispatch to Constant.
        # (Constant is defined later in this module; resolved at call time.)
        klass = Constant if not isinstance(name, string_types) else cls
        supr_new = super(Term, klass).__new__
        return supr_new(klass)
    def __init__(self, name, env, side=None, encoding=None):
        self._name = name
        self.env = env
        self.side = side
        tname = text_type(name)
        # Local (@-prefixed) names carry the _LOCAL_TAG prefix; names in
        # _DEFAULT_GLOBALS are treated as local too.
        self.is_local = (tname.startswith(_LOCAL_TAG) or
                         tname in _DEFAULT_GLOBALS)
        self._value = self._resolve_name()
        self.encoding = encoding
    @property
    def local_name(self):
        # Name with the local-variable prefix stripped.
        return self.name.replace(_LOCAL_TAG, '')
    def __unicode__(self):
        return pprint_thing(self.name)
    def __call__(self, *args, **kwargs):
        # Evaluating a term in "python" engine mode just yields its value.
        return self.value
    def evaluate(self, *args, **kwargs):
        return self
    def _resolve_name(self):
        """Look the name up in the environment and cache the value."""
        res = self.env.resolve(self.local_name, is_local=self.is_local)
        self.update(res)
        if hasattr(res, 'ndim') and res.ndim > 2:
            raise NotImplementedError("N-dimensional objects, where N > 2,"
                                      " are not supported with eval")
        return res
    def update(self, value):
        """
        search order for local (i.e., @variable) variables:
        scope, key_variable
        [('locals', 'local_name'),
         ('globals', 'local_name'),
         ('locals', 'key'),
         ('globals', 'key')]
        """
        key = self.name
        # if it's a variable name (otherwise a constant)
        if isinstance(key, string_types):
            self.env.swapkey(self.local_name, key, new_value=value)
        self.value = value
    @property
    def isscalar(self):
        # True for scalar values (as opposed to arrays/Series/frames).
        return is_scalar(self._value)
    @property
    def type(self):
        """The dtype (or Python type, for scalars) of the term's value."""
        try:
            # potentially very slow for large, mixed dtype frames
            return self._value.values.dtype
        except AttributeError:
            try:
                # ndarray
                return self._value.dtype
            except AttributeError:
                # scalar
                return type(self._value)
    return_type = type
    @property
    def raw(self):
        # Debug representation: class, name and resolved type.
        return pprint_thing('{0}(name={1!r}, type={2})'
                            ''.format(self.__class__.__name__, self.name,
                                      self.type))
    @property
    def is_datetime(self):
        """True if the term's type is datetime-like."""
        try:
            t = self.type.type
        except AttributeError:
            t = self.type
        return issubclass(t, (datetime, np.datetime64))
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, new_value):
        self._value = new_value
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, new_name):
        self._name = new_name
    @property
    def ndim(self):
        return self._value.ndim
class Constant(Term):
    """A literal value in an eval expression; its "name" is the value
    itself and resolution is a no-op."""
    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env, side=side,
                                       encoding=encoding)
    def _resolve_name(self):
        # Constants resolve to themselves; nothing to look up in env.
        return self._name
    @property
    def name(self):
        return self.value
    def __unicode__(self):
        # in python 2 str() of float
        # can truncate shorter than repr()
        return repr(self.name)
# Map Python boolean keywords to their elementwise (bitwise) operators,
# which is how boolean logic is expressed on arrays.
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
    """Hold an operator of arbitrary arity
    """
    def __init__(self, op, operands, *args, **kwargs):
        # Normalize keyword boolean ops to their array equivalents.
        self.op = _bool_op_map.get(op, op)
        self.operands = operands
        self.encoding = kwargs.get('encoding', None)
    def __iter__(self):
        return iter(self.operands)
    def __unicode__(self):
        """Print a generic n-ary operator and its operands using infix
        notation"""
        # recurse over the operands
        parened = ('({0})'.format(pprint_thing(opr))
                   for opr in self.operands)
        return pprint_thing(' {0} '.format(self.op).join(parened))
    @property
    def return_type(self):
        # clobber types to bool if the op is a boolean operator
        # (_cmp_ops_syms/_bool_ops_syms are module-level tables defined
        # below; resolved at call time.)
        if self.op in (_cmp_ops_syms + _bool_ops_syms):
            return np.bool_
        return _result_type_many(*(term.type for term in com.flatten(self)))
    @property
    def has_invalid_return_type(self):
        # object dtype mixed with any non-object operand is invalid.
        types = self.operand_types
        obj_dtype_set = frozenset([np.dtype('object')])
        return self.return_type == object and types - obj_dtype_set
    @property
    def operand_types(self):
        return frozenset(term.type for term in com.flatten(self))
    @property
    def isscalar(self):
        # An op is scalar only when every operand is scalar.
        return all(operand.isscalar for operand in self.operands)
    @property
    def is_datetime(self):
        """True if the op's result type is datetime-like."""
        try:
            t = self.return_type.type
        except AttributeError:
            t = self.return_type
        return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
# Comparison operators: symbol -> implementing callable.
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
# Boolean operators; keyword and bitwise forms share the same functions.
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
# Arithmetic operators; '/' is true division on Python 3.
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
                    op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
# Ops that need special-case handling by some evaluation engines.
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
                                        _special_case_arith_ops_funcs))
# Union of all binary operator tables, used by BinOp for dispatch.
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
    _binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
    """Return True if ``obj`` is a Term (leaf node) of an expression."""
    return isinstance(obj, Term)
class BinOp(Op):
    """Hold a binary operator and its operands
    Parameters
    ----------
    op : str
    left : Term or Op
    right : Term or Op
    """
    def __init__(self, op, lhs, rhs, **kwargs):
        super(BinOp, self).__init__(op, (lhs, rhs))
        self.lhs = lhs
        self.rhs = rhs
        # Validate/normalize operands before looking up the operator:
        # scalar-only boolean ops are rejected, datetimes get coerced.
        self._disallow_scalar_only_bool_ops()
        self.convert_values()
        try:
            self.func = _binary_ops_dict[op]
        except KeyError:
            # has to be made a list for python3
            keys = list(_binary_ops_dict.keys())
            raise ValueError('Invalid binary operator {0!r}, valid'
                             ' operators are {1}'.format(op, keys))
    def __call__(self, env):
        """Recursively evaluate an expression in Python space.
        Parameters
        ----------
        env : Scope
        Returns
        -------
        object
            The result of an evaluated expression.
        """
        # handle truediv
        if self.op == '/' and env.scope['truediv']:
            self.func = op.truediv
        # recurse over the left/right nodes
        left = self.lhs(env)
        right = self.rhs(env)
        return self.func(left, right)
    def evaluate(self, env, engine, parser, term_type, eval_in_python):
        """Evaluate a binary operation *before* being passed to the engine.
        Parameters
        ----------
        env : Scope
        engine : str
        parser : str
        term_type : type
        eval_in_python : list
            Operator symbols that must be evaluated in Python space even
            when a non-python engine is used (e.g. 'in' / 'not in').
        Returns
        -------
        term_type
            The "pre-evaluated" expression as an instance of ``term_type``
        """
        if engine == 'python':
            res = self(env)
        else:
            # recurse over the left/right nodes
            left = self.lhs.evaluate(env, engine=engine, parser=parser,
                                     term_type=term_type,
                                     eval_in_python=eval_in_python)
            right = self.rhs.evaluate(env, engine=engine, parser=parser,
                                      term_type=term_type,
                                      eval_in_python=eval_in_python)
            # base cases
            if self.op in eval_in_python:
                res = self.func(left.value, right.value)
            else:
                res = pd.eval(self, local_dict=env, engine=engine,
                              parser=parser)
        # Store the intermediate result in the scope under a temp name
        # so the enclosing expression can reference it as a Term.
        name = env.add_tmp(res)
        return term_type(name, env=env)
    def convert_values(self):
        """Convert datetimes to a comparable value in an expression.
        """
        def stringify(value):
            # Encode values consistently with the expression's encoding.
            if self.encoding is not None:
                encoder = partial(pprint_thing_encoded,
                                  encoding=self.encoding)
            else:
                encoder = pprint_thing
            return encoder(value)
        lhs, rhs = self.lhs, self.rhs
        # If one side is a datetime-like term and the other a scalar,
        # coerce the scalar to a UTC Timestamp so comparison works.
        if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
            v = rhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.rhs.update(v)
        # Symmetric case: datetime on the right, scalar on the left.
        if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
            v = lhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.lhs.update(v)
    def _disallow_scalar_only_bool_ops(self):
        # Boolean ops (&, |) on scalars only make sense when both sides
        # are actually boolean; otherwise reject up front.
        if ((self.lhs.isscalar or self.rhs.isscalar) and
            self.op in _bool_ops_dict and
                (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
                      issubclass(self.lhs.return_type, (bool, np.bool_))))):
            raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
    """Return True when ``dtype`` describes a numeric numpy dtype."""
    scalar_type = np.dtype(dtype).type
    return issubclass(scalar_type, np.number)
class Div(BinOp):
    """Div operator to special case casting.
    Parameters
    ----------
    lhs, rhs : Term or Op
        The Terms or Ops in the ``/`` expression.
    truediv : bool
        Whether or not to use true division. With Python 3 this happens
        regardless of the value of ``truediv``.
    """
    def __init__(self, lhs, rhs, truediv, *args, **kwargs):
        super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
        # Division is only defined for numeric operands.
        if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
            raise TypeError("unsupported operand type(s) for {0}:"
                            " '{1}' and '{2}'".format(self.op,
                                                      lhs.return_type,
                                                      rhs.return_type))
        if truediv or PY3:
            # do not upcast float32s to float64 un-necessarily
            acceptable_dtypes = [np.float32, np.float_]
            _cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
# Unary operators: symbol -> implementing callable.  'not' maps to
# bitwise invert, which is elementwise logical not on boolean arrays.
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
    """Hold a unary operator and its operands
    Parameters
    ----------
    op : str
        The token used to represent the operator.
    operand : Term or Op
        The Term or Op operand to the operator.
    Raises
    ------
    ValueError
        * If no function associated with the passed operator token is found.
    """
    def __init__(self, op, operand):
        super(UnaryOp, self).__init__(op, (operand,))
        self.operand = operand
        try:
            self.func = _unary_ops_dict[op]
        except KeyError:
            raise ValueError('Invalid unary operator {0!r}, valid operators '
                             'are {1}'.format(op, _unary_ops_syms))
    def __call__(self, env):
        # Evaluate the operand in the scope, then apply the operator.
        operand = self.operand(env)
        return self.func(operand)
    def __unicode__(self):
        return pprint_thing('{0}({1})'.format(self.op, self.operand))
    @property
    def return_type(self):
        # Boolean operands, and comparison/boolean sub-expressions,
        # keep a boolean result; everything else is treated as int
        # (e.g. ~ on an integer array).
        operand = self.operand
        if operand.return_type == np.dtype('bool'):
            return np.dtype('bool')
        if (isinstance(operand, Op) and
                (operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
            return np.dtype('bool')
        return np.dtype('int')
class MathCall(Op):
    """A call to a whitelisted numpy math function (see FuncNode) with
    expression operands as arguments."""
    def __init__(self, func, args):
        # ``func`` is a FuncNode; its name doubles as the op symbol.
        super(MathCall, self).__init__(func.name, args)
        self.func = func
    def __call__(self, env):
        # Evaluate each argument, then apply the numpy function with
        # floating-point warnings suppressed (e.g. log of 0).
        operands = [op(env) for op in self.operands]
        with np.errstate(all='ignore'):
            return self.func.func(*operands)
    def __unicode__(self):
        return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
    """A whitelisted numpy math function usable inside an eval
    expression; calling it builds a MathCall node."""
    def __init__(self, name):
        if name in _mathops:
            self.name = name
            self.func = getattr(np, name)
        else:
            raise ValueError(
                "\"{0}\" is not a supported function".format(name))
    def __call__(self, *args):
        return MathCall(self, args)
|
mit
|
Guokr1991/seaborn
|
setup.py
|
6
|
3621
|
#! /usr/bin/env python
#
# Copyright (C) 2012-2014 Michael Waskom <mwaskom@stanford.edu>
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
# Package metadata passed to setup() below.
DESCRIPTION = "Seaborn: statistical data visualization"
LONG_DESCRIPTION = """\
Seaborn is a library for making attractive and informative statistical graphics in Python. It is built on top of matplotlib and tightly integrated with the PyData stack, including support for numpy and pandas data structures and statistical routines from scipy and statsmodels.
Some of the features that seaborn offers are
- Several built-in themes that improve on the default matplotlib aesthetics
- Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
- Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
- Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
- Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
- A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
- High-level abstractions for structuring grids of plots that let you easily build complex visualizations
"""
DISTNAME = 'seaborn'
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = 'mwaskom@stanford.edu'
URL = 'http://stanford.edu/~mwaskom/software/seaborn/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mwaskom/seaborn/'
VERSION = '0.6.dev'
# Prefer setuptools, but fall back to distutils where it is unavailable.
try:
    from setuptools import setup
    _has_setuptools = True
except ImportError:
    from distutils.core import setup
def check_dependencies():
    """Return the list of required packages that cannot be imported.

    Only presence is checked, not minimum versions; the result is fed
    to setup() as install_requires.
    """
    missing = []
    for package in ("numpy", "scipy", "matplotlib", "pandas"):
        try:
            __import__(package)
        except ImportError:
            missing.append(package)
    return missing
# Script entry point: gather missing dependencies and run setup().
if __name__ == "__main__":
    install_requires = check_dependencies()
    setup(name=DISTNAME,
          author=MAINTAINER,
          author_email=MAINTAINER_EMAIL,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          install_requires=install_requires,
          packages=['seaborn', 'seaborn.external', 'seaborn.tests'],
          classifiers=[
              'Intended Audience :: Science/Research',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
              'License :: OSI Approved :: BSD License',
              'Topic :: Scientific/Engineering :: Visualization',
              'Topic :: Multimedia :: Graphics',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS'],
          )
|
bsd-3-clause
|
ucsd-progsys/nate
|
learning/input.py
|
2
|
1922
|
from functools import reduce

import numpy as np
import pandas as pd
def load_csv(path, filter_no_labels=False, balance_labels=True, only_slice=False, no_slice=False):
    '''Load feature vectors from a csv file.

    Expects a header row with feature columns prefixed with 'F-' and
    label columns prefixed with 'L-'.

    @param filter_no_labels: if True, filter out samples where all labels are 0.
    @param balance_labels: currently unused (balancing code is disabled);
        kept for interface compatibility.
    @param only_slice: if True, keep only rows inside the program slice
        (F-InSlice == 1); the dataframe is returned as None when the
        slice is degenerate (no changes, a single row, or no overlap).
    @param no_slice: if True, drop 'F-InSlice' from the feature names.
    @return: (dataframe, feature names, label names); dataframe may be
        None when the requested filtering leaves no usable samples.
    '''
    df = pd.read_csv(path)
    label_names = [c for c in df.columns if c[0] == 'L']
    feature_names = [c for c in df.columns if c[0] == 'F']
    if filter_no_labels:
        # Keep only rows where at least one label column fires.
        # functools.reduce (rather than the Python-2-only builtin) keeps
        # this working on Python 3.
        criteria = (df[l] == 1.0 for l in label_names)
        df = df[reduce(lambda x, acc: x | acc, criteria)]
    if only_slice:
        if len(df[df['L-DidChange'] == 1]) == 0:
            print('no changes', path)
            df = None
            return (df, feature_names, label_names)
        df = df[df['F-InSlice'] == 1].reset_index(drop=True)
        if len(df) == 1:
            print('1 sliced', path)
            df = None
            return (df, feature_names, label_names)
        if len(df[df['L-DidChange'] == 1]) == 0:
            print('no overlap', path)
            df = None
    if no_slice or only_slice:
        # The slice indicator is constant (or meaningless) in these
        # modes, so drop it from the features used for learning.
        feature_names = [f for f in feature_names if f != 'F-InSlice']
    return (df, feature_names, label_names)
|
bsd-3-clause
|
Cadair/ginga
|
ginga/cmap.py
|
2
|
507866
|
#
# cmap.py -- color maps for fits viewing
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import numpy
from ginga.util.six.moves import map, zip
# Some built in colormaps
cmap_soss = (
(0.000000, 0.000000, 0.000000),
(0.003922, 0.003007, 0.000000),
(0.007843, 0.006013, 0.000000),
(0.011765, 0.009020, 0.000000),
(0.015686, 0.012026, 0.000000),
(0.019608, 0.015033, 0.000000),
(0.023529, 0.018039, 0.000000),
(0.027451, 0.021046, 0.000000),
(0.031373, 0.024052, 0.000000),
(0.035294, 0.027059, 0.000000),
(0.039216, 0.030065, 0.000000),
(0.043137, 0.033072, 0.000000),
(0.047059, 0.036078, 0.000000),
(0.050980, 0.039085, 0.000000),
(0.054902, 0.042092, 0.000000),
(0.058824, 0.045098, 0.000000),
(0.062745, 0.048105, 0.000000),
(0.066667, 0.051111, 0.000000),
(0.070588, 0.054118, 0.000000),
(0.074510, 0.057124, 0.000000),
(0.078431, 0.060131, 0.000000),
(0.082353, 0.063137, 0.000000),
(0.086275, 0.066144, 0.000000),
(0.090196, 0.069150, 0.000000),
(0.094118, 0.072157, 0.000000),
(0.098039, 0.075163, 0.000000),
(0.101961, 0.078170, 0.000000),
(0.105882, 0.081176, 0.000000),
(0.109804, 0.084183, 0.000000),
(0.113725, 0.087190, 0.000000),
(0.117647, 0.090196, 0.000000),
(0.121569, 0.093203, 0.000000),
(0.125490, 0.096209, 0.000000),
(0.129412, 0.099216, 0.000000),
(0.133333, 0.102222, 0.000000),
(0.137255, 0.105229, 0.000000),
(0.141176, 0.108235, 0.000000),
(0.145098, 0.111242, 0.000000),
(0.149020, 0.114248, 0.000000),
(0.152941, 0.117255, 0.000000),
(0.156863, 0.120261, 0.000000),
(0.160784, 0.123268, 0.000000),
(0.164706, 0.126275, 0.000000),
(0.168627, 0.129281, 0.000000),
(0.172549, 0.132288, 0.000000),
(0.176471, 0.135294, 0.000000),
(0.180392, 0.138301, 0.000000),
(0.184314, 0.141307, 0.000000),
(0.188235, 0.144314, 0.000000),
(0.192157, 0.147320, 0.000000),
(0.196078, 0.150327, 0.000000),
(0.200000, 0.153333, 0.000000),
(0.203922, 0.156340, 0.000000),
(0.207843, 0.159346, 0.000000),
(0.211765, 0.162353, 0.000000),
(0.215686, 0.165359, 0.000000),
(0.219608, 0.168366, 0.000000),
(0.223529, 0.171373, 0.000000),
(0.227451, 0.174379, 0.000000),
(0.231373, 0.177386, 0.000000),
(0.235294, 0.180392, 0.000000),
(0.239216, 0.183399, 0.000000),
(0.243137, 0.186405, 0.000000),
(0.247059, 0.189412, 0.000000),
(0.250980, 0.192418, 0.000000),
(0.254902, 0.195425, 0.000000),
(0.258824, 0.198431, 0.000000),
(0.262745, 0.201438, 0.000000),
(0.266667, 0.204444, 0.000000),
(0.270588, 0.207451, 0.000000),
(0.274510, 0.210458, 0.000000),
(0.278431, 0.213464, 0.000000),
(0.282353, 0.216471, 0.000000),
(0.286275, 0.219477, 0.000000),
(0.290196, 0.222484, 0.000000),
(0.294118, 0.225490, 0.000000),
(0.298039, 0.228497, 0.000000),
(0.301961, 0.231503, 0.000000),
(0.305882, 0.234510, 0.000000),
(0.309804, 0.237516, 0.000000),
(0.313725, 0.240523, 0.000000),
(0.317647, 0.243529, 0.000000),
(0.321569, 0.246536, 0.000000),
(0.325490, 0.249542, 0.000000),
(0.329412, 0.252549, 0.000000),
(0.333333, 0.255556, 0.000000),
(0.337255, 0.258562, 0.000000),
(0.341176, 0.261569, 0.000000),
(0.345098, 0.264575, 0.000000),
(0.349020, 0.267582, 0.000000),
(0.352941, 0.270588, 0.000000),
(0.356863, 0.273595, 0.000000),
(0.360784, 0.276601, 0.000000),
(0.364706, 0.279608, 0.000000),
(0.368627, 0.282614, 0.000000),
(0.372549, 0.285621, 0.000000),
(0.376471, 0.288627, 0.000000),
(0.380392, 0.291634, 0.000000),
(0.384314, 0.294641, 0.000000),
(0.388235, 0.297647, 0.000000),
(0.392157, 0.300654, 0.000000),
(0.396078, 0.303660, 0.000000),
(0.400000, 0.306667, 0.000000),
(0.403922, 0.309673, 0.000000),
(0.407843, 0.312680, 0.000000),
(0.411765, 0.315686, 0.000000),
(0.415686, 0.318693, 0.000000),
(0.419608, 0.321699, 0.000000),
(0.423529, 0.324706, 0.000000),
(0.427451, 0.327712, 0.000000),
(0.431373, 0.330719, 0.000000),
(0.435294, 0.333725, 0.000000),
(0.439216, 0.336732, 0.000000),
(0.443137, 0.339739, 0.000000),
(0.447059, 0.342745, 0.000000),
(0.450980, 0.345752, 0.000000),
(0.454902, 0.348758, 0.000000),
(0.458824, 0.351765, 0.000000),
(0.462745, 0.354771, 0.000000),
(0.466667, 0.357778, 0.000000),
(0.470588, 0.360784, 0.000000),
(0.474510, 0.363791, 0.000000),
(0.478431, 0.366797, 0.000000),
(0.482353, 0.369804, 0.000000),
(0.486275, 0.372810, 0.000000),
(0.490196, 0.375817, 0.000000),
(0.494118, 0.378824, 0.000000),
(0.498039, 0.381830, 0.000000),
(0.501961, 0.384837, 0.000000),
(0.505882, 0.387843, 0.000000),
(0.509804, 0.390850, 0.000000),
(0.513725, 0.393856, 0.000000),
(0.517647, 0.396863, 0.000000),
(0.521569, 0.399869, 0.000000),
(0.525490, 0.402876, 0.000000),
(0.529412, 0.405882, 0.000000),
(0.533333, 0.408889, 0.000000),
(0.537255, 0.411895, 0.000000),
(0.541176, 0.414902, 0.000000),
(0.545098, 0.417908, 0.000000),
(0.549020, 0.420915, 0.000000),
(0.552941, 0.423922, 0.000000),
(0.556863, 0.426928, 0.000000),
(0.560784, 0.429935, 0.000000),
(0.564706, 0.432941, 0.000000),
(0.568627, 0.435948, 0.000000),
(0.572549, 0.438954, 0.000000),
(0.576471, 0.441961, 0.000000),
(0.580392, 0.444967, 0.000000),
(0.584314, 0.447974, 0.000000),
(0.588235, 0.450980, 0.000000),
(0.592157, 0.453987, 0.000000),
(0.596078, 0.456993, 0.000000),
(0.600000, 0.460000, 0.000000),
(0.603922, 0.463007, 0.000000),
(0.607843, 0.466013, 0.000000),
(0.611765, 0.469020, 0.000000),
(0.615686, 0.472026, 0.000000),
(0.619608, 0.475033, 0.000000),
(0.623529, 0.478039, 0.000000),
(0.627451, 0.481046, 0.000000),
(0.631373, 0.484052, 0.000000),
(0.635294, 0.487059, 0.000000),
(0.639216, 0.490065, 0.000000),
(0.643137, 0.493072, 0.000000),
(0.647059, 0.496078, 0.000000),
(0.650980, 0.499085, 0.000000),
(0.654902, 0.502092, 0.000000),
(0.658824, 0.505098, 0.000000),
(0.662745, 0.508105, 0.000000),
(0.666667, 0.511111, 0.000000),
(0.670588, 0.514118, 0.000000),
(0.674510, 0.517124, 0.000000),
(0.678431, 0.520131, 0.000000),
(0.682353, 0.523137, 0.000000),
(0.686275, 0.526144, 0.000000),
(0.690196, 0.529150, 0.000000),
(0.694118, 0.532157, 0.000000),
(0.698039, 0.535163, 0.000000),
(0.701961, 0.538170, 0.000000),
(0.705882, 0.541176, 0.000000),
(0.709804, 0.544183, 0.000000),
(0.713725, 0.547190, 0.000000),
(0.717647, 0.550196, 0.000000),
(0.721569, 0.553203, 0.000000),
(0.725490, 0.556209, 0.000000),
(0.729412, 0.559216, 0.000000),
(0.733333, 0.562222, 0.000000),
(0.737255, 0.565229, 0.000000),
(0.741176, 0.568235, 0.000000),
(0.745098, 0.571242, 0.000000),
(0.749020, 0.574248, 0.000000),
(0.752941, 0.577255, 0.000000),
(0.756863, 0.580261, 0.000000),
(0.760784, 0.583268, 0.000000),
(0.764706, 0.586275, 0.000000),
(0.768627, 0.589281, 0.000000),
(0.772549, 0.592288, 0.000000),
(0.776471, 0.595294, 0.000000),
(0.780392, 0.598301, 0.000000),
(0.784314, 0.601307, 0.000000),
(0.788235, 0.604314, 0.000000),
(0.792157, 0.607320, 0.000000),
(0.796078, 0.610327, 0.000000),
(0.800000, 0.613333, 0.000000),
(0.803922, 0.616340, 0.000000),
(0.807843, 0.619346, 0.000000),
(0.811765, 0.622353, 0.000000),
(0.815686, 0.625359, 0.000000),
(0.819608, 0.628366, 0.000000),
(0.823529, 0.631373, 0.000000),
(0.827451, 0.634379, 0.000000),
(0.831373, 0.637386, 0.000000),
(0.835294, 0.640392, 0.000000),
(0.839216, 0.643399, 0.000000),
(0.843137, 0.646405, 0.000000),
(0.847059, 0.649412, 0.000000),
(0.850980, 0.652418, 0.000000),
(0.854902, 0.655425, 0.000000),
(0.858824, 0.658431, 0.000000),
(0.862745, 0.661438, 0.000000),
(0.866667, 0.664444, 0.000000),
(0.870588, 0.677451, 0.000000),
(0.874510, 0.680458, 0.000000),
(0.878431, 0.693464, 0.000000),
(0.882353, 0.706471, 0.000000),
(0.886275, 0.719477, 0.000000),
(0.890196, 0.722484, 0.000000),
(0.894118, 0.735490, 0.000000),
(0.898039, 0.748497, 0.000000),
(0.901961, 0.751503, 0.000000),
(0.905882, 0.764510, 0.000000),
(0.909804, 0.777516, 0.000000),
(0.913725, 0.780523, 0.000000),
(0.917647, 0.793529, 0.000000),
(0.921569, 0.806536, 0.000000),
(0.925490, 0.819542, 0.000000),
(0.929412, 0.822549, 0.000000),
(0.933333, 0.835556, 0.000000),
(0.937255, 0.848562, 0.000000),
(0.941176, 0.851569, 0.000000),
(0.945098, 0.864575, 0.000000),
(0.949020, 0.877582, 0.000000),
(0.952941, 0.880588, 0.000000),
(0.956863, 0.893595, 0.000000),
(0.960784, 0.906601, 0.000000),
(0.964706, 0.919608, 0.000000),
(0.968627, 0.922614, 0.000000),
(0.972549, 0.935621, 0.000000),
(0.976471, 0.948627, 0.000000),
(0.980392, 0.951634, 0.000000),
(0.984314, 0.964641, 0.000000),
(0.988235, 0.977647, 0.000000),
(0.992157, 0.980654, 0.000000),
(0.996078, 0.993660, 0.000000),
(1.00000, 1.00000, 1.00000),
)
cmap_idl2 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.14118, 0.00000),
(0.00000, 0.28235, 0.00000),
(0.00000, 0.29412, 0.00000),
(0.00000, 0.30980, 0.00000),
(0.00000, 0.32157, 0.00000),
(0.00000, 0.33725, 0.00000),
(0.00000, 0.35294, 0.00000),
(0.00000, 0.36471, 0.00000),
(0.00000, 0.38039, 0.00000),
(0.00000, 0.39216, 0.00000),
(0.00000, 0.40784, 0.00000),
(0.00000, 0.42353, 0.00000),
(0.00000, 0.45882, 0.00000),
(0.00000, 0.49412, 0.00000),
(0.00000, 0.52941, 0.00000),
(0.00000, 0.56471, 0.00000),
(0.00000, 0.60000, 0.00000),
(0.00000, 0.63529, 0.00000),
(0.00000, 0.67059, 0.00000),
(0.00000, 0.70588, 0.00000),
(0.00000, 0.74118, 0.00000),
(0.00000, 0.77647, 0.00000),
(0.00000, 0.81176, 0.00000),
(0.00000, 0.84706, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.02353, 0.97647, 0.00000),
(0.04706, 0.96471, 0.00000),
(0.07059, 0.95294, 0.00000),
(0.09412, 0.94118, 0.00000),
(0.11765, 0.91765, 0.00000),
(0.14118, 0.89412, 0.00000),
(0.16471, 0.87059, 0.00000),
(0.18824, 0.84706, 0.00000),
(0.21176, 0.82353, 0.00000),
(0.23529, 0.80000, 0.00000),
(0.25882, 0.77647, 0.00000),
(0.28235, 0.75294, 0.00000),
(0.30588, 0.72941, 0.00000),
(0.32941, 0.70588, 0.00000),
(0.35294, 0.68235, 0.00000),
(0.37647, 0.65882, 0.00000),
(0.40000, 0.63529, 0.00000),
(0.42353, 0.61176, 0.00000),
(0.44706, 0.58824, 0.00000),
(0.47059, 0.56471, 0.00000),
(0.49412, 0.54118, 0.00000),
(0.51765, 0.51765, 0.00000),
(0.54118, 0.49412, 0.00000),
(0.56471, 0.47059, 0.00000),
(0.58824, 0.44706, 0.00000),
(0.61176, 0.42353, 0.00000),
(0.63529, 0.40000, 0.00000),
(0.65882, 0.37647, 0.00000),
(0.68235, 0.35294, 0.00000),
(0.70588, 0.32941, 0.00000),
(0.72941, 0.30588, 0.00000),
(0.75294, 0.28235, 0.00000),
(0.77647, 0.25882, 0.00000),
(0.80000, 0.23529, 0.00000),
(0.82353, 0.21176, 0.00000),
(0.84706, 0.18824, 0.00000),
(0.87059, 0.16471, 0.00000),
(0.89412, 0.14118, 0.00000),
(0.91765, 0.11765, 0.00000),
(0.94118, 0.09412, 0.00000),
(0.95294, 0.07059, 0.00000),
(0.96471, 0.04706, 0.00000),
(0.97647, 0.02353, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00000),
(0.98824, 0.00000, 0.00392),
(0.98431, 0.00000, 0.01176),
(0.98039, 0.00000, 0.01961),
(0.97647, 0.00000, 0.02745),
(0.97255, 0.00000, 0.03529),
(0.97255, 0.00000, 0.03922),
(0.97255, 0.00000, 0.04706),
(0.97255, 0.00000, 0.05490),
(0.97255, 0.00000, 0.06275),
(0.96863, 0.00000, 0.07059),
(0.96471, 0.00000, 0.07843),
(0.96078, 0.00000, 0.08627),
(0.95686, 0.00000, 0.09804),
(0.95294, 0.00000, 0.10588),
(0.94902, 0.00000, 0.11373),
(0.94510, 0.00000, 0.12157),
(0.94118, 0.00000, 0.13333),
(0.94118, 0.00000, 0.13725),
(0.93725, 0.00000, 0.14510),
(0.93333, 0.00000, 0.15294),
(0.92941, 0.00000, 0.16078),
(0.92549, 0.00000, 0.16863),
(0.92549, 0.00000, 0.17647),
(0.92549, 0.00000, 0.18431),
(0.92549, 0.00000, 0.19608),
(0.92157, 0.00000, 0.20392),
(0.91765, 0.00000, 0.21176),
(0.91373, 0.00000, 0.21961),
(0.90980, 0.00000, 0.23137),
(0.90588, 0.00000, 0.23922),
(0.90196, 0.00000, 0.24706),
(0.89804, 0.00000, 0.25490),
(0.89412, 0.00000, 0.26275),
(0.89412, 0.00000, 0.26667),
(0.89412, 0.00000, 0.27451),
(0.89412, 0.00000, 0.28235),
(0.89412, 0.00000, 0.29020),
(0.89020, 0.00000, 0.29804),
(0.88627, 0.00000, 0.30588),
(0.88235, 0.00000, 0.31373),
(0.87843, 0.00000, 0.32549),
(0.87451, 0.00000, 0.33333),
(0.87059, 0.00000, 0.34118),
(0.86667, 0.00000, 0.34902),
(0.86275, 0.00392, 0.36078),
(0.85882, 0.00392, 0.36863),
(0.85490, 0.00392, 0.37647),
(0.85098, 0.00392, 0.38431),
(0.84706, 0.00000, 0.39608),
(0.84706, 0.00000, 0.40000),
(0.84706, 0.00000, 0.40784),
(0.84706, 0.00000, 0.41569),
(0.84706, 0.00000, 0.42353),
(0.84314, 0.00000, 0.43137),
(0.83922, 0.00000, 0.43922),
(0.83529, 0.00000, 0.44706),
(0.83137, 0.00000, 0.45490),
(0.82745, 0.00000, 0.46275),
(0.82353, 0.00000, 0.47059),
(0.81961, 0.00000, 0.47843),
(0.81569, 0.00000, 0.49020),
(0.81176, 0.00000, 0.49804),
(0.80784, 0.00000, 0.50588),
(0.80392, 0.00000, 0.51373),
(0.80000, 0.00000, 0.52549),
(0.80000, 0.00000, 0.52941),
(0.80000, 0.00000, 0.53725),
(0.80000, 0.00000, 0.54510),
(0.80000, 0.00000, 0.55294),
(0.79608, 0.00000, 0.56078),
(0.79216, 0.00000, 0.56863),
(0.78824, 0.00000, 0.57647),
(0.78431, 0.00000, 0.58824),
(0.78039, 0.00000, 0.59608),
(0.77647, 0.00000, 0.60392),
(0.77255, 0.00000, 0.61176),
(0.76863, 0.00000, 0.62353),
(0.76863, 0.00000, 0.62745),
(0.76863, 0.00000, 0.63529),
(0.76863, 0.00000, 0.63922),
(0.76863, 0.00000, 0.64706),
(0.76471, 0.00000, 0.65490),
(0.76078, 0.00000, 0.66275),
(0.75686, 0.00000, 0.67059),
(0.75294, 0.00000, 0.68235),
(0.74902, 0.00000, 0.69020),
(0.74510, 0.00000, 0.69804),
(0.74118, 0.00000, 0.70588),
(0.73725, 0.00000, 0.71765),
(0.73333, 0.00000, 0.72549),
(0.72941, 0.00000, 0.73333),
(0.72549, 0.00000, 0.74118),
(0.72157, 0.00000, 0.75294),
(0.72157, 0.00000, 0.75686),
(0.72157, 0.00000, 0.76471),
(0.72157, 0.00000, 0.77255),
(0.72157, 0.00000, 0.78039),
(0.71765, 0.00000, 0.78824),
(0.71373, 0.00000, 0.79608),
(0.70980, 0.00000, 0.80392),
(0.70588, 0.00000, 0.81569),
(0.70196, 0.00000, 0.82353),
(0.69804, 0.00000, 0.83137),
(0.69412, 0.00000, 0.83922),
(0.69020, 0.00000, 0.84706),
(0.69020, 0.00000, 0.85098),
(0.69020, 0.00000, 0.85882),
(0.69020, 0.00000, 0.86667),
(0.69020, 0.00000, 0.87451),
(0.68627, 0.00000, 0.88235),
(0.68235, 0.00000, 0.89020),
(0.67843, 0.00000, 0.89804),
(0.67451, 0.00000, 0.90980),
(0.67059, 0.00000, 0.91765),
(0.66667, 0.00000, 0.92549),
(0.66275, 0.00000, 0.93333),
(0.65882, 0.00000, 0.94510),
(0.65490, 0.00000, 0.95294),
(0.65098, 0.00000, 0.96078),
(0.64706, 0.00000, 0.96863),
(0.64314, 0.00000, 0.98039),
(0.64314, 0.00000, 0.98431),
(0.64314, 0.00000, 0.98824),
(0.64314, 0.00000, 0.99216),
(0.64314, 0.00000, 1.00000),
(0.63922, 0.00000, 1.00000),
(0.63529, 0.00000, 1.00000),
(0.63137, 0.00000, 1.00000),
(0.62745, 0.00000, 1.00000),
(0.62353, 0.00000, 1.00000),
(0.61961, 0.00000, 1.00000),
(0.61569, 0.00000, 1.00000),
(0.61176, 0.00000, 1.00000),
(0.60784, 0.00000, 1.00000),
(0.60392, 0.00000, 1.00000),
(0.60000, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59608, 0.00000, 1.00000),
(0.59216, 0.00000, 1.00000),
(0.58824, 0.00000, 1.00000),
(0.58431, 0.00000, 1.00000),
(0.58039, 0.00000, 1.00000),
(0.59216, 0.03137, 1.00000),
(0.60392, 0.06275, 1.00000),
(0.61569, 0.09412, 1.00000),
(0.62745, 0.12549, 1.00000),
(0.63922, 0.15686, 1.00000),
(0.65098, 0.18824, 1.00000),
(0.66275, 0.21961, 1.00000),
(0.67451, 0.25098, 1.00000),
(0.69020, 0.28235, 1.00000),
(0.70588, 0.31373, 1.00000),
(0.72157, 0.34510, 1.00000),
(0.73725, 0.37647, 1.00000),
(0.74902, 0.40784, 1.00000),
(0.76078, 0.43922, 1.00000),
(0.77255, 0.47059, 1.00000),
(0.78431, 0.50196, 1.00000),
(0.79608, 0.52941, 1.00000),
(0.80784, 0.55686, 1.00000),
(0.81961, 0.58431, 1.00000),
(0.83137, 0.61176, 1.00000),
(0.84314, 0.64314, 1.00000),
(0.85490, 0.67451, 1.00000),
(0.86667, 0.70588, 1.00000),
(0.87843, 0.73725, 1.00000),
(0.89412, 0.76863, 1.00000),
(0.90980, 0.80000, 1.00000),
(0.92549, 0.83137, 1.00000),
(0.94118, 0.86275, 1.00000),
(0.95294, 0.89412, 1.00000),
(0.96471, 0.92549, 1.00000),
(0.97647, 0.95686, 1.00000),
(0.98824, 0.98824, 1.00000),
(0.99216, 0.99216, 1.00000),
(0.99608, 0.99608, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow4 = (
(0.00000, 0.00000, 0.01176),
(0.00000, 0.00000, 0.02745),
(0.00000, 0.00000, 0.04314),
(0.00000, 0.00000, 0.05882),
(0.00000, 0.00000, 0.07451),
(0.00000, 0.00000, 0.09020),
(0.00000, 0.00000, 0.10588),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.13725),
(0.00000, 0.00000, 0.15294),
(0.00000, 0.00000, 0.16863),
(0.00000, 0.00000, 0.18431),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.21176),
(0.00000, 0.00000, 0.22745),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.25882),
(0.00000, 0.00000, 0.27451),
(0.00000, 0.00000, 0.29020),
(0.00000, 0.00000, 0.30588),
(0.00000, 0.00000, 0.32157),
(0.00000, 0.00000, 0.33725),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.36863),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.41176),
(0.00000, 0.00000, 0.42745),
(0.00000, 0.00000, 0.44314),
(0.00000, 0.00000, 0.45882),
(0.00000, 0.00000, 0.47451),
(0.00000, 0.00000, 0.49020),
(0.00000, 0.00000, 0.50588),
(0.00000, 0.00000, 0.52157),
(0.00000, 0.00000, 0.53725),
(0.00000, 0.00000, 0.55294),
(0.00000, 0.00000, 0.56863),
(0.00000, 0.00000, 0.58431),
(0.00000, 0.00000, 0.60000),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.62745),
(0.00000, 0.00000, 0.64314),
(0.00000, 0.00000, 0.65882),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.69020),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.72157),
(0.00000, 0.00000, 0.73725),
(0.00000, 0.00000, 0.75294),
(0.00000, 0.00000, 0.76863),
(0.00000, 0.00000, 0.78431),
(0.00000, 0.00000, 0.80000),
(0.00000, 0.00000, 0.81176),
(0.00000, 0.00000, 0.82745),
(0.00000, 0.00000, 0.84314),
(0.00000, 0.00000, 0.85882),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.89020),
(0.00000, 0.00000, 0.90588),
(0.00000, 0.00000, 0.92157),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.95294),
(0.00000, 0.00000, 0.96863),
(0.00000, 0.00000, 0.98431),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03529, 1.00000),
(0.00000, 0.07059, 1.00000),
(0.00000, 0.10980, 1.00000),
(0.00000, 0.14510, 1.00000),
(0.00000, 0.18039, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.29412, 1.00000),
(0.00000, 0.32941, 1.00000),
(0.00000, 0.36471, 1.00000),
(0.00000, 0.40392, 1.00000),
(0.00000, 0.43922, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.50196, 1.00000),
(0.00000, 0.52549, 1.00000),
(0.00000, 0.54902, 1.00000),
(0.00000, 0.57255, 1.00000),
(0.00000, 0.59608, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.64314, 1.00000),
(0.00000, 0.66667, 1.00000),
(0.00000, 0.69020, 1.00000),
(0.00000, 0.71373, 1.00000),
(0.00000, 0.73725, 1.00000),
(0.00000, 0.76078, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.80000, 1.00000),
(0.00000, 0.81569, 1.00000),
(0.00000, 0.83137, 1.00000),
(0.00000, 0.84706, 1.00000),
(0.00000, 0.86667, 1.00000),
(0.00000, 0.88235, 1.00000),
(0.00000, 0.89804, 1.00000),
(0.00000, 0.91373, 1.00000),
(0.00000, 0.93333, 1.00000),
(0.00000, 0.94902, 1.00000),
(0.00000, 0.96471, 1.00000),
(0.00000, 0.98039, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.97647),
(0.00000, 1.00000, 0.95294),
(0.00000, 1.00000, 0.92941),
(0.00000, 1.00000, 0.90588),
(0.00000, 1.00000, 0.88627),
(0.00000, 1.00000, 0.86275),
(0.00000, 1.00000, 0.83922),
(0.00000, 1.00000, 0.81569),
(0.00000, 1.00000, 0.79608),
(0.00000, 1.00000, 0.77255),
(0.00000, 1.00000, 0.74902),
(0.00000, 1.00000, 0.72549),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.65098),
(0.00000, 1.00000, 0.59608),
(0.00000, 1.00000, 0.54118),
(0.00000, 1.00000, 0.48627),
(0.00000, 1.00000, 0.43137),
(0.00000, 1.00000, 0.37647),
(0.00000, 1.00000, 0.32549),
(0.00000, 1.00000, 0.27059),
(0.00000, 1.00000, 0.21569),
(0.00000, 1.00000, 0.16078),
(0.00000, 1.00000, 0.10588),
(0.00000, 1.00000, 0.05098),
(0.00000, 1.00000, 0.00000),
(0.05098, 1.00000, 0.00000),
(0.10588, 1.00000, 0.00000),
(0.16078, 1.00000, 0.00000),
(0.21569, 1.00000, 0.00000),
(0.27059, 1.00000, 0.00000),
(0.32549, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.43137, 1.00000, 0.00000),
(0.48627, 1.00000, 0.00000),
(0.54118, 1.00000, 0.00000),
(0.59608, 1.00000, 0.00000),
(0.65098, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.72549, 1.00000, 0.00000),
(0.74902, 1.00000, 0.00000),
(0.77255, 1.00000, 0.00000),
(0.79608, 1.00000, 0.00000),
(0.81569, 1.00000, 0.00000),
(0.83922, 1.00000, 0.00000),
(0.86275, 1.00000, 0.00000),
(0.88627, 1.00000, 0.00000),
(0.90588, 1.00000, 0.00000),
(0.92941, 1.00000, 0.00000),
(0.95294, 1.00000, 0.00000),
(0.97647, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 0.97647, 0.00000),
(0.99608, 0.95686, 0.00000),
(0.99608, 0.93333, 0.00000),
(0.99608, 0.91373, 0.00000),
(0.99216, 0.89412, 0.00000),
(0.99216, 0.87059, 0.00000),
(0.99216, 0.85098, 0.00000),
(0.99216, 0.82745, 0.00000),
(0.98824, 0.80784, 0.00000),
(0.98824, 0.78824, 0.00000),
(0.98824, 0.76471, 0.00000),
(0.98824, 0.74510, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.70588, 0.00000),
(0.98824, 0.68627, 0.00000),
(0.98824, 0.66667, 0.00000),
(0.98824, 0.64706, 0.00000),
(0.99216, 0.62745, 0.00000),
(0.99216, 0.60784, 0.00000),
(0.99216, 0.58824, 0.00000),
(0.99216, 0.56863, 0.00000),
(0.99608, 0.54902, 0.00000),
(0.99608, 0.52941, 0.00000),
(0.99608, 0.50980, 0.00000),
(0.99608, 0.49020, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.43137, 0.00000),
(1.00000, 0.39608, 0.00000),
(1.00000, 0.36078, 0.00000),
(1.00000, 0.32549, 0.00000),
(1.00000, 0.28627, 0.00000),
(1.00000, 0.25098, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 0.18039, 0.00000),
(1.00000, 0.14118, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.07059, 0.00000),
(1.00000, 0.03529, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.05098),
(1.00000, 0.00000, 0.10588),
(1.00000, 0.00000, 0.16078),
(1.00000, 0.00000, 0.21569),
(1.00000, 0.00000, 0.27059),
(1.00000, 0.00000, 0.32549),
(1.00000, 0.00000, 0.37647),
(1.00000, 0.00000, 0.43137),
(1.00000, 0.00000, 0.48627),
(1.00000, 0.00000, 0.54118),
(1.00000, 0.00000, 0.59608),
(1.00000, 0.00000, 0.65098),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.72549),
(1.00000, 0.00000, 0.74902),
(1.00000, 0.00000, 0.77255),
(1.00000, 0.00000, 0.79608),
(1.00000, 0.00000, 0.81569),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.88627),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.92941),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.97647),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.03529, 1.00000),
(1.00000, 0.07059, 1.00000),
(1.00000, 0.10588, 1.00000),
(1.00000, 0.14118, 1.00000),
(1.00000, 0.18039, 1.00000),
(1.00000, 0.21569, 1.00000),
(1.00000, 0.25098, 1.00000),
(1.00000, 0.28627, 1.00000),
(1.00000, 0.32549, 1.00000),
(1.00000, 0.36078, 1.00000),
(1.00000, 0.39608, 1.00000),
(1.00000, 0.43137, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.48627, 1.00000),
(1.00000, 0.50588, 1.00000),
(1.00000, 0.52157, 1.00000),
(1.00000, 0.54118, 1.00000),
(1.00000, 0.56078, 1.00000),
(1.00000, 0.57647, 1.00000),
(1.00000, 0.59608, 1.00000),
(1.00000, 0.61176, 1.00000),
(1.00000, 0.63137, 1.00000),
(1.00000, 0.65098, 1.00000),
(1.00000, 0.66667, 1.00000),
(1.00000, 0.68627, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.74510, 1.00000),
(1.00000, 0.78824, 1.00000),
(1.00000, 0.83137, 1.00000),
(1.00000, 0.87059, 1.00000),
(1.00000, 0.91373, 1.00000),
(1.00000, 0.95686, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_idl4 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00784),
(0.00000, 0.00000, 0.01569),
(0.00000, 0.00000, 0.02353),
(0.00000, 0.00000, 0.03137),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.04706),
(0.00000, 0.00000, 0.05490),
(0.00000, 0.00000, 0.06275),
(0.00000, 0.00000, 0.07059),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.08627),
(0.00000, 0.00000, 0.09804),
(0.00000, 0.00000, 0.10588),
(0.00000, 0.00000, 0.11373),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.12941),
(0.00000, 0.00000, 0.13725),
(0.00000, 0.00000, 0.14510),
(0.00000, 0.00000, 0.15294),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.16863),
(0.00000, 0.00000, 0.17647),
(0.00000, 0.00000, 0.18431),
(0.00000, 0.00000, 0.19608),
(0.00000, 0.00000, 0.20392),
(0.00000, 0.00000, 0.21176),
(0.00000, 0.00000, 0.21961),
(0.00000, 0.00000, 0.22745),
(0.00000, 0.00000, 0.23529),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.25098),
(0.00000, 0.00000, 0.25882),
(0.00000, 0.01176, 0.26667),
(0.00000, 0.02353, 0.27451),
(0.00000, 0.03529, 0.28235),
(0.00000, 0.04706, 0.29412),
(0.00000, 0.05882, 0.30196),
(0.00000, 0.07059, 0.30980),
(0.00000, 0.08235, 0.31765),
(0.00000, 0.09804, 0.32549),
(0.00000, 0.10980, 0.33333),
(0.00000, 0.12157, 0.34118),
(0.00000, 0.13333, 0.34902),
(0.00000, 0.14510, 0.35686),
(0.00000, 0.15686, 0.36471),
(0.00000, 0.16863, 0.37255),
(0.00000, 0.18039, 0.38039),
(0.00000, 0.19608, 0.39216),
(0.00000, 0.20784, 0.39216),
(0.00000, 0.21961, 0.39216),
(0.00000, 0.23137, 0.39216),
(0.00000, 0.24314, 0.39216),
(0.00000, 0.25490, 0.39216),
(0.00000, 0.26667, 0.39216),
(0.00000, 0.27843, 0.39216),
(0.00000, 0.29412, 0.39216),
(0.00000, 0.30588, 0.39216),
(0.00000, 0.31765, 0.39216),
(0.00000, 0.32941, 0.39216),
(0.00000, 0.34118, 0.39216),
(0.00000, 0.35294, 0.39216),
(0.00000, 0.36471, 0.39216),
(0.00000, 0.37647, 0.39216),
(0.00000, 0.39216, 0.39216),
(0.00000, 0.40392, 0.39216),
(0.00000, 0.41569, 0.39216),
(0.00000, 0.42745, 0.39216),
(0.00000, 0.43922, 0.39216),
(0.00000, 0.45098, 0.39216),
(0.00000, 0.46275, 0.39216),
(0.00000, 0.47451, 0.39216),
(0.00000, 0.49020, 0.39216),
(0.00000, 0.50196, 0.39216),
(0.00000, 0.51373, 0.39216),
(0.00000, 0.52549, 0.39216),
(0.00000, 0.53725, 0.39216),
(0.00000, 0.54902, 0.39216),
(0.00000, 0.56078, 0.39216),
(0.00000, 0.57255, 0.39216),
(0.00000, 0.58824, 0.39216),
(0.00000, 0.58824, 0.37647),
(0.00000, 0.58824, 0.36471),
(0.00000, 0.58824, 0.35294),
(0.00000, 0.58824, 0.34118),
(0.00000, 0.58824, 0.32941),
(0.00000, 0.58824, 0.31765),
(0.00000, 0.58824, 0.30588),
(0.00000, 0.58824, 0.29412),
(0.00000, 0.58824, 0.27843),
(0.00000, 0.58824, 0.26667),
(0.00000, 0.58824, 0.25490),
(0.00000, 0.58824, 0.24314),
(0.00000, 0.58824, 0.23137),
(0.00000, 0.58824, 0.21961),
(0.00000, 0.58824, 0.20784),
(0.00000, 0.58824, 0.19608),
(0.00000, 0.58431, 0.18039),
(0.00000, 0.58039, 0.16863),
(0.00000, 0.58039, 0.15686),
(0.00000, 0.57647, 0.14510),
(0.00000, 0.57255, 0.13333),
(0.00000, 0.57255, 0.12157),
(0.00000, 0.56863, 0.10980),
(0.00000, 0.56863, 0.09804),
(0.00000, 0.56471, 0.08235),
(0.00000, 0.56078, 0.07059),
(0.00000, 0.56078, 0.05882),
(0.00000, 0.55686, 0.04706),
(0.00000, 0.55294, 0.03529),
(0.00000, 0.55294, 0.02353),
(0.00000, 0.54902, 0.01176),
(0.00000, 0.54902, 0.00000),
(0.02745, 0.53725, 0.00000),
(0.05882, 0.52941, 0.00000),
(0.08627, 0.51765, 0.00000),
(0.11765, 0.50980, 0.00000),
(0.14510, 0.49804, 0.00000),
(0.17647, 0.49020, 0.00000),
(0.20392, 0.47843, 0.00000),
(0.23529, 0.47059, 0.00000),
(0.26275, 0.45882, 0.00000),
(0.29412, 0.45098, 0.00000),
(0.32157, 0.43922, 0.00000),
(0.35294, 0.43137, 0.00000),
(0.38039, 0.41961, 0.00000),
(0.41176, 0.41176, 0.00000),
(0.43922, 0.40000, 0.00000),
(0.47059, 0.39216, 0.00000),
(0.49020, 0.36471, 0.00000),
(0.50980, 0.34118, 0.00000),
(0.52941, 0.31765, 0.00000),
(0.54902, 0.29412, 0.00000),
(0.56863, 0.26667, 0.00000),
(0.58824, 0.24314, 0.00000),
(0.60784, 0.21961, 0.00000),
(0.62745, 0.19608, 0.00000),
(0.64706, 0.16863, 0.00000),
(0.66667, 0.14510, 0.00000),
(0.68627, 0.12157, 0.00000),
(0.70588, 0.09804, 0.00000),
(0.72549, 0.07059, 0.00000),
(0.74510, 0.04706, 0.00000),
(0.76471, 0.02353, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00784, 0.00000),
(0.78824, 0.01569, 0.00000),
(0.78824, 0.02353, 0.00000),
(0.79216, 0.03529, 0.00000),
(0.79216, 0.04314, 0.00000),
(0.79608, 0.05098, 0.00000),
(0.79608, 0.06275, 0.00000),
(0.80000, 0.07059, 0.00000),
(0.80000, 0.07843, 0.00000),
(0.80392, 0.09020, 0.00000),
(0.80392, 0.09804, 0.00000),
(0.80784, 0.10588, 0.00000),
(0.80784, 0.11373, 0.00000),
(0.81176, 0.12549, 0.00000),
(0.81176, 0.13333, 0.00000),
(0.81569, 0.14118, 0.00000),
(0.81569, 0.15294, 0.00000),
(0.81961, 0.16078, 0.00000),
(0.81961, 0.16863, 0.00000),
(0.82353, 0.18039, 0.00000),
(0.82353, 0.18824, 0.00000),
(0.82745, 0.19608, 0.00000),
(0.82745, 0.20784, 0.00000),
(0.83137, 0.21569, 0.00000),
(0.83137, 0.22353, 0.00000),
(0.83529, 0.23137, 0.00000),
(0.83529, 0.24314, 0.00000),
(0.83922, 0.25098, 0.00000),
(0.83922, 0.25882, 0.00000),
(0.84314, 0.27059, 0.00000),
(0.84314, 0.27843, 0.00000),
(0.84706, 0.28627, 0.00000),
(0.84706, 0.29804, 0.00000),
(0.85098, 0.30588, 0.00000),
(0.85098, 0.31373, 0.00000),
(0.85490, 0.32549, 0.00000),
(0.85490, 0.33333, 0.00000),
(0.85882, 0.34118, 0.00000),
(0.85882, 0.34902, 0.00000),
(0.86275, 0.36078, 0.00000),
(0.86275, 0.36863, 0.00000),
(0.86667, 0.37647, 0.00000),
(0.86667, 0.38824, 0.00000),
(0.87059, 0.39608, 0.00000),
(0.87059, 0.40392, 0.00000),
(0.87451, 0.41569, 0.00000),
(0.87451, 0.42353, 0.00000),
(0.87843, 0.43137, 0.00000),
(0.87843, 0.44314, 0.00000),
(0.88235, 0.45098, 0.00000),
(0.88235, 0.45882, 0.00000),
(0.88627, 0.46667, 0.00000),
(0.88627, 0.47843, 0.00000),
(0.89020, 0.48627, 0.00000),
(0.89020, 0.49412, 0.00000),
(0.89412, 0.50588, 0.00000),
(0.89412, 0.51373, 0.00000),
(0.89804, 0.52157, 0.00000),
(0.89804, 0.53333, 0.00000),
(0.90196, 0.54118, 0.00000),
(0.90196, 0.54902, 0.00000),
(0.90588, 0.55686, 0.00000),
(0.90588, 0.56863, 0.00000),
(0.90980, 0.57647, 0.00000),
(0.90980, 0.58431, 0.00000),
(0.91373, 0.59608, 0.00000),
(0.91373, 0.60392, 0.00000),
(0.91765, 0.61176, 0.00000),
(0.91765, 0.62353, 0.00000),
(0.92157, 0.63137, 0.00000),
(0.92157, 0.63922, 0.00000),
(0.92549, 0.65098, 0.00000),
(0.92549, 0.65882, 0.00000),
(0.92941, 0.66667, 0.00000),
(0.92941, 0.67451, 0.00000),
(0.93333, 0.68627, 0.00000),
(0.93333, 0.69412, 0.00000),
(0.93725, 0.70196, 0.00000),
(0.93725, 0.71373, 0.00000),
(0.94118, 0.72157, 0.00000),
(0.94118, 0.72941, 0.00000),
(0.94510, 0.74118, 0.00000),
(0.94510, 0.74902, 0.00000),
(0.94902, 0.75686, 0.00000),
(0.94902, 0.76863, 0.00000),
(0.95294, 0.77647, 0.00000),
(0.95294, 0.78431, 0.00000),
(0.95686, 0.79216, 0.00000),
(0.95686, 0.80392, 0.00000),
(0.96078, 0.81176, 0.00000),
(0.96078, 0.81961, 0.00000),
(0.96471, 0.83137, 0.00000),
(0.96471, 0.83922, 0.00000),
(0.96863, 0.84706, 0.00000),
(0.96863, 0.85882, 0.00000),
(0.97255, 0.86667, 0.00000),
(0.97255, 0.87451, 0.00000),
(0.97647, 0.88627, 0.00000),
(0.97647, 0.89412, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90980, 0.00000),
(0.98431, 0.92157, 0.00000),
(0.98431, 0.92941, 0.00000),
(0.98824, 0.93725, 0.00000),
(0.98824, 0.94902, 0.00000),
(0.99216, 0.95686, 0.00000),
(0.99216, 0.96471, 0.00000),
(0.99608, 0.97647, 0.00000),
(0.99608, 0.98431, 0.00000),
(1.00000, 0.99216, 0.00000),
(1.00000, 1.00000, 0.00000),
)
cmap_idl5 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.01961),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.05882),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.10196),
(0.00000, 0.00000, 0.12157),
(0.00000, 0.00000, 0.14118),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.18039),
(0.00000, 0.00000, 0.20392),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.24314),
(0.00000, 0.00000, 0.26275),
(0.00000, 0.00000, 0.28235),
(0.00000, 0.00000, 0.30588),
(0.00000, 0.00000, 0.32549),
(0.00000, 0.00000, 0.34510),
(0.00000, 0.00000, 0.36471),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.40784),
(0.00000, 0.00000, 0.42745),
(0.00000, 0.00000, 0.44706),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.48627),
(0.00000, 0.00000, 0.50980),
(0.00000, 0.00000, 0.52941),
(0.00000, 0.00000, 0.54902),
(0.00000, 0.00000, 0.56863),
(0.00000, 0.00000, 0.58824),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.63137),
(0.00000, 0.00000, 0.65098),
(0.00000, 0.00000, 0.67059),
(0.00000, 0.00000, 0.69020),
(0.00000, 0.00000, 0.71373),
(0.00000, 0.00000, 0.73333),
(0.00000, 0.00000, 0.75294),
(0.00000, 0.00000, 0.77255),
(0.00000, 0.00000, 0.79216),
(0.00000, 0.00000, 0.81569),
(0.00000, 0.00000, 0.83529),
(0.00000, 0.00000, 0.85490),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.89412),
(0.00000, 0.00000, 0.91765),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.95686),
(0.01569, 0.00000, 0.97647),
(0.03529, 0.00000, 1.00000),
(0.05490, 0.00000, 0.98039),
(0.07451, 0.00000, 0.96078),
(0.09020, 0.00000, 0.93725),
(0.10980, 0.00000, 0.91765),
(0.12941, 0.00000, 0.89412),
(0.14902, 0.00000, 0.87451),
(0.16471, 0.00000, 0.85490),
(0.18431, 0.00000, 0.83137),
(0.20392, 0.00000, 0.81176),
(0.22353, 0.00000, 0.78824),
(0.23922, 0.00000, 0.76863),
(0.25882, 0.00000, 0.74510),
(0.27843, 0.00000, 0.72549),
(0.29804, 0.00000, 0.70588),
(0.31765, 0.00000, 0.68235),
(0.31765, 0.00000, 0.66275),
(0.31765, 0.00000, 0.63922),
(0.31765, 0.00000, 0.61961),
(0.31765, 0.00000, 0.59608),
(0.31765, 0.00000, 0.57647),
(0.31765, 0.00000, 0.55686),
(0.31765, 0.00000, 0.53333),
(0.31373, 0.00000, 0.51373),
(0.31373, 0.00000, 0.49020),
(0.31373, 0.00000, 0.47059),
(0.31373, 0.00000, 0.44706),
(0.31373, 0.00000, 0.42745),
(0.31373, 0.00000, 0.40784),
(0.31373, 0.00000, 0.38431),
(0.30980, 0.00000, 0.36471),
(0.32941, 0.00000, 0.34118),
(0.34902, 0.00000, 0.32157),
(0.36863, 0.00000, 0.29804),
(0.38824, 0.00000, 0.27843),
(0.40784, 0.00000, 0.25882),
(0.42745, 0.00000, 0.23529),
(0.44706, 0.00000, 0.21569),
(0.46667, 0.00000, 0.19216),
(0.48627, 0.00000, 0.17255),
(0.50588, 0.00000, 0.14902),
(0.52549, 0.00000, 0.12941),
(0.54510, 0.00000, 0.10980),
(0.56471, 0.00000, 0.08627),
(0.58431, 0.00000, 0.06667),
(0.60392, 0.00000, 0.04314),
(0.62353, 0.00000, 0.02353),
(0.64314, 0.00000, 0.00000),
(0.66275, 0.00000, 0.00000),
(0.68235, 0.00000, 0.00000),
(0.70588, 0.00000, 0.00000),
(0.72549, 0.00000, 0.00000),
(0.74510, 0.00000, 0.00000),
(0.76863, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.80784, 0.00000, 0.00000),
(0.83137, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.87059, 0.00000, 0.00000),
(0.89412, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.01961, 0.00000),
(1.00000, 0.03922, 0.00000),
(1.00000, 0.06275, 0.00000),
(1.00000, 0.08235, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.12549, 0.00000),
(1.00000, 0.14510, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.18824, 0.00000),
(1.00000, 0.21176, 0.00000),
(1.00000, 0.23137, 0.00000),
(1.00000, 0.25098, 0.00000),
(1.00000, 0.27451, 0.00000),
(1.00000, 0.29412, 0.00000),
(1.00000, 0.31765, 0.00000),
(1.00000, 0.33333, 0.01569),
(1.00000, 0.35294, 0.03529),
(1.00000, 0.37255, 0.05490),
(1.00000, 0.39216, 0.07451),
(1.00000, 0.41176, 0.09412),
(1.00000, 0.42745, 0.10980),
(1.00000, 0.44706, 0.12941),
(1.00000, 0.46667, 0.14902),
(1.00000, 0.48627, 0.16863),
(1.00000, 0.50588, 0.18824),
(1.00000, 0.52549, 0.20784),
(1.00000, 0.54118, 0.22353),
(1.00000, 0.56078, 0.24314),
(1.00000, 0.58039, 0.26275),
(1.00000, 0.60000, 0.28235),
(1.00000, 0.61961, 0.30196),
(1.00000, 0.63922, 0.32157),
(1.00000, 0.63922, 0.30196),
(1.00000, 0.63922, 0.27843),
(1.00000, 0.63922, 0.25490),
(1.00000, 0.63922, 0.23137),
(1.00000, 0.63922, 0.20784),
(1.00000, 0.63922, 0.18431),
(1.00000, 0.63922, 0.16078),
(1.00000, 0.63922, 0.14118),
(1.00000, 0.63922, 0.11765),
(1.00000, 0.63922, 0.09412),
(1.00000, 0.63922, 0.07059),
(1.00000, 0.63922, 0.04706),
(1.00000, 0.63922, 0.02353),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.63922, 0.00000),
(0.97255, 0.63922, 0.00000),
(0.94118, 0.63922, 0.00000),
(0.90980, 0.63922, 0.00000),
(0.88235, 0.63922, 0.00000),
(0.85098, 0.63922, 0.00000),
(0.81961, 0.63922, 0.00000),
(0.79216, 0.63922, 0.00000),
(0.76078, 0.63922, 0.00000),
(0.72941, 0.63922, 0.00000),
(0.70196, 0.63922, 0.00000),
(0.67059, 0.63922, 0.00000),
(0.63922, 0.63922, 0.00000),
(0.65882, 0.63922, 0.00000),
(0.67843, 0.63922, 0.00000),
(0.69804, 0.66275, 0.01176),
(0.71765, 0.68627, 0.02353),
(0.73725, 0.70980, 0.03529),
(0.75686, 0.73333, 0.04706),
(0.77647, 0.75686, 0.06275),
(0.79608, 0.78039, 0.07451),
(0.81961, 0.80392, 0.08627),
(0.83922, 0.83137, 0.09804),
(0.85882, 0.85490, 0.11373),
(0.87843, 0.87843, 0.12549),
(0.89804, 0.90196, 0.13725),
(0.91765, 0.92549, 0.14902),
(0.93725, 0.94902, 0.16078),
(0.95686, 0.97255, 0.17647),
(0.97647, 1.00000, 0.18824),
(1.00000, 1.00000, 0.20000),
(1.00000, 1.00000, 0.21176),
(1.00000, 1.00000, 0.22745),
(1.00000, 1.00000, 0.23922),
(1.00000, 1.00000, 0.25098),
(1.00000, 1.00000, 0.26275),
(1.00000, 1.00000, 0.27843),
(1.00000, 1.00000, 0.29020),
(1.00000, 1.00000, 0.30196),
(1.00000, 1.00000, 0.31373),
(1.00000, 1.00000, 0.32549),
(1.00000, 1.00000, 0.34118),
(1.00000, 1.00000, 0.35294),
(1.00000, 1.00000, 0.36471),
(1.00000, 1.00000, 0.37647),
(1.00000, 1.00000, 0.39216),
(1.00000, 1.00000, 0.40392),
(1.00000, 1.00000, 0.41569),
(1.00000, 1.00000, 0.42745),
(1.00000, 1.00000, 0.43922),
(1.00000, 1.00000, 0.45490),
(1.00000, 1.00000, 0.46667),
(1.00000, 1.00000, 0.47843),
(1.00000, 1.00000, 0.49020),
(1.00000, 1.00000, 0.50588),
(1.00000, 1.00000, 0.51765),
(1.00000, 1.00000, 0.52941),
(1.00000, 1.00000, 0.54118),
(1.00000, 1.00000, 0.55686),
(1.00000, 1.00000, 0.56863),
(1.00000, 1.00000, 0.58039),
(1.00000, 1.00000, 0.59216),
(1.00000, 1.00000, 0.60392),
(1.00000, 1.00000, 0.61961),
(1.00000, 1.00000, 0.63137),
(1.00000, 1.00000, 0.64314),
(1.00000, 1.00000, 0.65490),
(1.00000, 1.00000, 0.67059),
(1.00000, 1.00000, 0.68235),
(1.00000, 1.00000, 0.69412),
(1.00000, 1.00000, 0.70588),
(1.00000, 1.00000, 0.71765),
(1.00000, 1.00000, 0.73333),
(1.00000, 1.00000, 0.74510),
(1.00000, 1.00000, 0.75686),
(1.00000, 1.00000, 0.76863),
(1.00000, 1.00000, 0.78431),
(1.00000, 1.00000, 0.79608),
(1.00000, 1.00000, 0.80784),
(1.00000, 1.00000, 0.81961),
(1.00000, 1.00000, 0.83529),
(1.00000, 1.00000, 0.84706),
(1.00000, 1.00000, 0.85882),
(1.00000, 1.00000, 0.87059),
(1.00000, 1.00000, 0.88235),
(1.00000, 1.00000, 0.89804),
(1.00000, 1.00000, 0.90980),
(1.00000, 1.00000, 0.92157),
(1.00000, 1.00000, 0.93333),
(1.00000, 1.00000, 0.94902),
(1.00000, 1.00000, 0.96078),
(1.00000, 1.00000, 0.97255),
(1.00000, 1.00000, 0.98431),
(1.00000, 1.00000, 1.00000),
)
cmap_idl6 = (
(0.00000, 0.00000, 0.00000),
(0.01176, 0.00000, 0.00000),
(0.02745, 0.00000, 0.00000),
(0.04314, 0.00000, 0.00000),
(0.05882, 0.00000, 0.00000),
(0.07451, 0.00000, 0.00000),
(0.08627, 0.00000, 0.00000),
(0.10196, 0.00000, 0.00000),
(0.11765, 0.00000, 0.00000),
(0.13333, 0.00000, 0.00000),
(0.14902, 0.00000, 0.00000),
(0.16078, 0.00000, 0.00000),
(0.17647, 0.00000, 0.00000),
(0.19216, 0.00000, 0.00000),
(0.20784, 0.00000, 0.00000),
(0.22353, 0.00000, 0.00000),
(0.23529, 0.00000, 0.00000),
(0.25098, 0.00000, 0.00000),
(0.26667, 0.00000, 0.00000),
(0.28235, 0.00000, 0.00000),
(0.29804, 0.00000, 0.00000),
(0.30980, 0.00000, 0.00000),
(0.32549, 0.00000, 0.00000),
(0.34118, 0.00000, 0.00000),
(0.35686, 0.00000, 0.00000),
(0.37255, 0.00000, 0.00000),
(0.38431, 0.00000, 0.00000),
(0.40000, 0.00000, 0.00000),
(0.41569, 0.00000, 0.00000),
(0.43137, 0.00000, 0.00000),
(0.44706, 0.00000, 0.00000),
(0.45882, 0.00000, 0.00000),
(0.47451, 0.00000, 0.00000),
(0.49020, 0.00000, 0.00000),
(0.50588, 0.00000, 0.00000),
(0.52157, 0.00000, 0.00000),
(0.53725, 0.00000, 0.00000),
(0.54902, 0.00000, 0.00000),
(0.56471, 0.00000, 0.00000),
(0.58039, 0.00000, 0.00000),
(0.59608, 0.00000, 0.00000),
(0.61176, 0.00000, 0.00000),
(0.62353, 0.00000, 0.00000),
(0.63922, 0.00000, 0.00000),
(0.65490, 0.00000, 0.00000),
(0.67059, 0.00000, 0.00000),
(0.68627, 0.00000, 0.00000),
(0.69804, 0.00000, 0.00000),
(0.71373, 0.00000, 0.00000),
(0.72941, 0.00000, 0.00000),
(0.74510, 0.00000, 0.00000),
(0.76078, 0.00000, 0.00000),
(0.77255, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.80392, 0.00000, 0.00000),
(0.81961, 0.00000, 0.00000),
(0.83529, 0.00000, 0.00000),
(0.84706, 0.00000, 0.00000),
(0.86275, 0.00000, 0.00000),
(0.87843, 0.00000, 0.00000),
(0.89412, 0.00000, 0.00000),
(0.90980, 0.00000, 0.00000),
(0.92157, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.95294, 0.00000, 0.00000),
(0.96863, 0.01176, 0.00000),
(0.98431, 0.02745, 0.00000),
(1.00000, 0.04314, 0.00000),
(0.98431, 0.05882, 0.00000),
(0.96863, 0.07451, 0.00000),
(0.95294, 0.09020, 0.00000),
(0.93725, 0.10588, 0.00000),
(0.92157, 0.12157, 0.00000),
(0.90196, 0.13725, 0.00000),
(0.88627, 0.15294, 0.00000),
(0.87059, 0.16863, 0.00000),
(0.85490, 0.18431, 0.00000),
(0.83922, 0.20000, 0.00000),
(0.82353, 0.21569, 0.00000),
(0.80392, 0.23137, 0.00000),
(0.78824, 0.24706, 0.00000),
(0.77255, 0.26275, 0.00000),
(0.75686, 0.27843, 0.00000),
(0.74118, 0.29412, 0.00000),
(0.72157, 0.30980, 0.00000),
(0.70588, 0.32549, 0.00000),
(0.69020, 0.34118, 0.00000),
(0.67451, 0.35686, 0.00000),
(0.65882, 0.37255, 0.00000),
(0.64314, 0.38824, 0.00000),
(0.62353, 0.40392, 0.00000),
(0.60784, 0.41961, 0.00000),
(0.59216, 0.43529, 0.00000),
(0.57647, 0.45098, 0.00000),
(0.56078, 0.46667, 0.00000),
(0.54118, 0.48235, 0.00000),
(0.52549, 0.49804, 0.00000),
(0.50980, 0.51373, 0.00000),
(0.49412, 0.52941, 0.00000),
(0.47843, 0.54510, 0.00000),
(0.46275, 0.56078, 0.00000),
(0.44314, 0.57647, 0.00000),
(0.42745, 0.59216, 0.00000),
(0.41176, 0.60784, 0.00000),
(0.39608, 0.62353, 0.00000),
(0.38039, 0.63922, 0.00000),
(0.36078, 0.65490, 0.00000),
(0.34510, 0.67059, 0.00000),
(0.32941, 0.68627, 0.00000),
(0.31373, 0.70196, 0.00000),
(0.29804, 0.71765, 0.00000),
(0.28235, 0.73333, 0.00000),
(0.26275, 0.74902, 0.00000),
(0.24706, 0.76471, 0.00000),
(0.23137, 0.78039, 0.00000),
(0.21569, 0.79608, 0.00000),
(0.20000, 0.81176, 0.00000),
(0.18039, 0.82745, 0.00000),
(0.16471, 0.84314, 0.00000),
(0.14902, 0.85882, 0.00000),
(0.13333, 0.87451, 0.00000),
(0.11765, 0.89020, 0.00000),
(0.10196, 0.90588, 0.00000),
(0.08235, 0.92157, 0.00000),
(0.06667, 0.93725, 0.00000),
(0.05098, 0.95294, 0.00000),
(0.03529, 0.96863, 0.00000),
(0.01961, 0.98431, 0.01176),
(0.00000, 1.00000, 0.02745),
(0.00000, 0.98431, 0.04314),
(0.00000, 0.96863, 0.05882),
(0.00000, 0.95294, 0.07451),
(0.00000, 0.93725, 0.09020),
(0.00000, 0.92157, 0.10588),
(0.00000, 0.90588, 0.11765),
(0.00000, 0.89020, 0.13333),
(0.00000, 0.87451, 0.14902),
(0.00000, 0.85882, 0.16471),
(0.00000, 0.84314, 0.18039),
(0.00000, 0.82745, 0.19608),
(0.00000, 0.81176, 0.21176),
(0.00000, 0.79608, 0.22353),
(0.00000, 0.78039, 0.23922),
(0.00000, 0.76471, 0.25490),
(0.00000, 0.74902, 0.27059),
(0.00000, 0.73333, 0.28627),
(0.00000, 0.71765, 0.30196),
(0.00000, 0.70196, 0.31765),
(0.00000, 0.68627, 0.33333),
(0.00000, 0.66667, 0.34510),
(0.00000, 0.65098, 0.36078),
(0.00000, 0.63529, 0.37647),
(0.00000, 0.61961, 0.39216),
(0.00000, 0.60392, 0.40784),
(0.00000, 0.58824, 0.42353),
(0.00000, 0.57255, 0.43922),
(0.00000, 0.55686, 0.45098),
(0.00000, 0.54118, 0.46667),
(0.00000, 0.52549, 0.48235),
(0.00000, 0.50980, 0.49804),
(0.00000, 0.49412, 0.51373),
(0.00000, 0.47843, 0.52941),
(0.00000, 0.46275, 0.54510),
(0.00000, 0.44706, 0.55686),
(0.00000, 0.43137, 0.57255),
(0.00000, 0.41569, 0.58824),
(0.00000, 0.40000, 0.60392),
(0.00000, 0.38431, 0.61961),
(0.00000, 0.36863, 0.63529),
(0.00000, 0.35294, 0.65098),
(0.00000, 0.33333, 0.66667),
(0.00000, 0.31765, 0.67843),
(0.00000, 0.30196, 0.69412),
(0.00000, 0.28627, 0.70980),
(0.00000, 0.27059, 0.72549),
(0.00000, 0.25490, 0.74118),
(0.00000, 0.23922, 0.75686),
(0.00000, 0.22353, 0.77255),
(0.00000, 0.20784, 0.78431),
(0.00000, 0.19216, 0.80000),
(0.00000, 0.17647, 0.81569),
(0.00000, 0.16078, 0.83137),
(0.00000, 0.14510, 0.84706),
(0.00000, 0.12941, 0.86275),
(0.00000, 0.11373, 0.87843),
(0.00000, 0.09804, 0.89020),
(0.00000, 0.08235, 0.90588),
(0.00000, 0.06667, 0.92157),
(0.00000, 0.05098, 0.93725),
(0.00000, 0.03529, 0.95294),
(0.00000, 0.01961, 0.96863),
(0.00000, 0.00000, 0.98431),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.98431),
(0.00000, 0.00000, 0.96863),
(0.00000, 0.00000, 0.95294),
(0.00000, 0.00000, 0.93725),
(0.00000, 0.00000, 0.92157),
(0.00000, 0.00000, 0.90588),
(0.00000, 0.00000, 0.89020),
(0.00000, 0.00000, 0.87451),
(0.00000, 0.00000, 0.85882),
(0.00000, 0.00000, 0.84314),
(0.00000, 0.00000, 0.82745),
(0.00000, 0.00000, 0.81176),
(0.00000, 0.00000, 0.79608),
(0.00000, 0.00000, 0.78039),
(0.00000, 0.00000, 0.76471),
(0.00000, 0.00000, 0.74902),
(0.00000, 0.00000, 0.73333),
(0.00000, 0.00000, 0.71765),
(0.00000, 0.00000, 0.70196),
(0.00000, 0.00000, 0.68627),
(0.00000, 0.00000, 0.66667),
(0.00000, 0.00000, 0.65098),
(0.00000, 0.00000, 0.63529),
(0.00000, 0.00000, 0.61961),
(0.00000, 0.00000, 0.60392),
(0.00000, 0.00000, 0.58824),
(0.00000, 0.00000, 0.57255),
(0.00000, 0.00000, 0.55686),
(0.00000, 0.00000, 0.54118),
(0.00000, 0.00000, 0.52549),
(0.00000, 0.00000, 0.50980),
(0.00000, 0.00000, 0.49412),
(0.00000, 0.00000, 0.47843),
(0.00000, 0.00000, 0.46275),
(0.00000, 0.00000, 0.44706),
(0.00000, 0.00000, 0.43137),
(0.00000, 0.00000, 0.41569),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.36863),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.31765),
(0.00000, 0.00000, 0.30196),
(0.00000, 0.00000, 0.28627),
(0.00000, 0.00000, 0.27059),
(0.00000, 0.00000, 0.25490),
(0.00000, 0.00000, 0.23922),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.20784),
(0.00000, 0.00000, 0.19216),
(0.00000, 0.00000, 0.17647),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.14510),
(0.00000, 0.00000, 0.12941),
(0.00000, 0.00000, 0.11373),
(0.00000, 0.00000, 0.09804),
(0.00000, 0.00000, 0.08235),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.05098),
(0.00000, 0.00000, 0.03529),
(0.00000, 0.00000, 0.01961),
(0.00000, 0.00000, 0.00000),
)
cmap_smooth1 = (
(0.30980, 0.29020, 0.22353),
(0.32157, 0.30196, 0.23922),
(0.33333, 0.31765, 0.25490),
(0.34510, 0.32941, 0.27059),
(0.35686, 0.34510, 0.29020),
(0.36863, 0.36078, 0.30588),
(0.38039, 0.37647, 0.32549),
(0.39216, 0.38824, 0.34510),
(0.40392, 0.40392, 0.36471),
(0.41569, 0.41961, 0.38431),
(0.42745, 0.43529, 0.40392),
(0.43922, 0.45098, 0.42353),
(0.45098, 0.46667, 0.44314),
(0.46275, 0.48235, 0.46667),
(0.47451, 0.49804, 0.48627),
(0.49020, 0.51765, 0.50980),
(0.50196, 0.53333, 0.53333),
(0.51373, 0.54902, 0.55686),
(0.52549, 0.56863, 0.58039),
(0.54118, 0.58431, 0.60392),
(0.55294, 0.60000, 0.62745),
(0.56863, 0.61961, 0.65098),
(0.58039, 0.63529, 0.67843),
(0.59216, 0.65490, 0.70196),
(0.60784, 0.67059, 0.72941),
(0.61961, 0.69020, 0.75686),
(0.63529, 0.70980, 0.78431),
(0.64706, 0.72549, 0.75686),
(0.66275, 0.74510, 0.72941),
(0.67843, 0.76471, 0.70588),
(0.69020, 0.78431, 0.68235),
(0.70588, 0.80392, 0.65882),
(0.71765, 0.82353, 0.64314),
(0.73333, 0.80392, 0.62353),
(0.74902, 0.78824, 0.60392),
(0.76471, 0.77255, 0.58824),
(0.77647, 0.75686, 0.57255),
(0.79216, 0.74118, 0.55686),
(0.80784, 0.73333, 0.54118),
(0.82353, 0.71765, 0.52941),
(0.83922, 0.70588, 0.51373),
(0.85490, 0.69804, 0.50588),
(0.87059, 0.68235, 0.49412),
(0.85490, 0.67451, 0.48627),
(0.83922, 0.66667, 0.47843),
(0.82745, 0.65882, 0.47059),
(0.81569, 0.65098, 0.46275),
(0.80392, 0.63922, 0.45882),
(0.79216, 0.63529, 0.45490),
(0.78431, 0.62745, 0.45098),
(0.77255, 0.61961, 0.44706),
(0.76078, 0.61176, 0.44706),
(0.74902, 0.60784, 0.44706),
(0.74510, 0.60392, 0.44706),
(0.73333, 0.60000, 0.44706),
(0.72941, 0.59608, 0.45098),
(0.71765, 0.59216, 0.45490),
(0.70980, 0.58824, 0.45882),
(0.70588, 0.58824, 0.46275),
(0.69412, 0.58431, 0.47059),
(0.69020, 0.58039, 0.47843),
(0.68235, 0.58039, 0.48627),
(0.67843, 0.58039, 0.49412),
(0.67451, 0.57647, 0.50588),
(0.66667, 0.58039, 0.51373),
(0.66275, 0.57647, 0.52941),
(0.65490, 0.58039, 0.54118),
(0.65098, 0.58039, 0.55686),
(0.64706, 0.58039, 0.57255),
(0.64314, 0.58431, 0.58824),
(0.63529, 0.58824, 0.60392),
(0.63529, 0.58824, 0.62353),
(0.63137, 0.59216, 0.64314),
(0.62745, 0.59608, 0.65882),
(0.62745, 0.60000, 0.68235),
(0.62353, 0.60392, 0.70588),
(0.62353, 0.60784, 0.72941),
(0.61961, 0.61176, 0.75686),
(0.61961, 0.61961, 0.78431),
(0.61569, 0.62745, 0.75686),
(0.61569, 0.63529, 0.72941),
(0.61176, 0.63922, 0.70588),
(0.61176, 0.65098, 0.68235),
(0.61176, 0.65882, 0.65882),
(0.61176, 0.66667, 0.64314),
(0.61176, 0.67451, 0.62353),
(0.61176, 0.68235, 0.60392),
(0.61176, 0.69804, 0.58824),
(0.61176, 0.70588, 0.57255),
(0.61569, 0.71765, 0.55686),
(0.61569, 0.73333, 0.54118),
(0.61961, 0.74118, 0.52941),
(0.61961, 0.75686, 0.51373),
(0.62353, 0.77255, 0.50588),
(0.62353, 0.78824, 0.49412),
(0.62745, 0.80392, 0.48627),
(0.62745, 0.82353, 0.47843),
(0.63137, 0.80392, 0.47059),
(0.63529, 0.78824, 0.46275),
(0.63529, 0.77255, 0.45882),
(0.64314, 0.75686, 0.45490),
(0.64706, 0.74118, 0.45098),
(0.65098, 0.73333, 0.44706),
(0.65490, 0.71765, 0.44706),
(0.66275, 0.70588, 0.44706),
(0.66667, 0.69804, 0.44706),
(0.67451, 0.68235, 0.44706),
(0.67843, 0.67451, 0.45098),
(0.68235, 0.66667, 0.45490),
(0.69020, 0.65882, 0.45882),
(0.69412, 0.65098, 0.46275),
(0.70588, 0.63922, 0.47059),
(0.70980, 0.63529, 0.47843),
(0.71765, 0.62745, 0.48627),
(0.72941, 0.61961, 0.49412),
(0.73333, 0.61176, 0.50588),
(0.74510, 0.60784, 0.51373),
(0.74902, 0.60392, 0.52941),
(0.76078, 0.60000, 0.54118),
(0.77647, 0.59608, 0.55686),
(0.79216, 0.59216, 0.57255),
(0.80392, 0.58824, 0.58824),
(0.81961, 0.58824, 0.60392),
(0.83922, 0.58431, 0.62353),
(0.85490, 0.58039, 0.64314),
(0.87451, 0.58039, 0.65882),
(0.89804, 0.58039, 0.68235),
(0.92157, 0.57647, 0.70588),
(0.91373, 0.58039, 0.72941),
(0.90588, 0.57647, 0.75686),
(0.89804, 0.58039, 0.78431),
(0.89412, 0.58039, 0.75686),
(0.88627, 0.58039, 0.72941),
(0.88235, 0.58431, 0.70588),
(0.87843, 0.58824, 0.68235),
(0.87451, 0.58824, 0.65882),
(0.87059, 0.59216, 0.64314),
(0.86275, 0.59608, 0.62353),
(0.86275, 0.60000, 0.60392),
(0.85882, 0.60392, 0.58824),
(0.85882, 0.60784, 0.57255),
(0.85490, 0.61176, 0.55686),
(0.85098, 0.61961, 0.54118),
(0.85490, 0.62745, 0.52941),
(0.85098, 0.63529, 0.51373),
(0.85098, 0.63922, 0.50588),
(0.85098, 0.65490, 0.49412),
(0.85098, 0.67059, 0.48627),
(0.85490, 0.68235, 0.47843),
(0.85098, 0.69804, 0.47059),
(0.85490, 0.71373, 0.46275),
(0.85098, 0.74118, 0.45882),
(0.85490, 0.75686, 0.45490),
(0.85882, 0.78039, 0.45098),
(0.86275, 0.80392, 0.44706),
(0.86275, 0.82353, 0.44706),
(0.86667, 0.85098, 0.44706),
(0.87059, 0.87843, 0.44706),
(0.87451, 0.90980, 0.44706),
(0.87843, 0.93725, 0.45098),
(0.88235, 0.97255, 0.45490),
(0.88627, 0.96471, 0.45882),
(0.89020, 0.96078, 0.46275),
(0.89804, 0.95686, 0.47059),
(0.90196, 0.95294, 0.47843),
(0.90588, 0.94902, 0.48627),
(0.91373, 0.94902, 0.49412),
(0.91765, 0.94510, 0.50588),
(0.92549, 0.94118, 0.51373),
(0.92941, 0.94510, 0.52941),
(0.94118, 0.94118, 0.54118),
(0.94902, 0.94510, 0.55686),
(0.95294, 0.94510, 0.57255),
(0.96078, 0.94510, 0.58824),
(0.97255, 0.94902, 0.60392),
(0.98039, 0.94902, 0.62745),
(0.98824, 0.95294, 0.65098),
(0.99608, 0.95686, 0.67451),
(1.00000, 0.95686, 0.70588),
(1.00000, 0.96078, 0.73333),
(1.00000, 0.96863, 0.76863),
(1.00000, 0.97255, 0.80392),
(1.00000, 0.98039, 0.84314),
(1.00000, 0.98431, 0.82353),
(0.98824, 0.99216, 0.80784),
(0.96078, 0.99608, 0.79608),
(0.93333, 1.00000, 0.78431),
(0.90980, 1.00000, 0.77255),
(0.88235, 1.00000, 0.76471),
(0.85882, 1.00000, 0.75686),
(0.83137, 1.00000, 0.74902),
(0.80784, 1.00000, 0.74118),
(0.78039, 1.00000, 0.73725),
(0.75686, 1.00000, 0.73333),
(0.73333, 1.00000, 0.72941),
(0.70588, 1.00000, 0.72941),
(0.69020, 1.00000, 0.72941),
(0.66667, 0.97255, 0.72941),
(0.64314, 0.93725, 0.73333),
(0.62353, 0.90196, 0.73725),
(0.60392, 0.86667, 0.74118),
(0.58431, 0.83137, 0.74510),
(0.56078, 0.79608, 0.75294),
(0.54510, 0.76078, 0.75686),
(0.52941, 0.73333, 0.76863),
(0.51373, 0.69804, 0.77647),
(0.50196, 0.67059, 0.78824),
(0.48627, 0.63922, 0.80392),
(0.47059, 0.61176, 0.81569),
(0.45490, 0.58039, 0.83137),
(0.44314, 0.55294, 0.84706),
(0.43529, 0.52941, 0.86667),
(0.42353, 0.50588, 0.88235),
(0.41569, 0.47843, 0.90196),
(0.41176, 0.45490, 0.92549),
(0.40392, 0.43529, 0.87843),
(0.40000, 0.41569, 0.83922),
(0.39216, 0.39216, 0.79608),
(0.38824, 0.37647, 0.75686),
(0.38431, 0.35686, 0.72157),
(0.38039, 0.34118, 0.67843),
(0.38039, 0.32941, 0.65098),
(0.37255, 0.31765, 0.61569),
(0.37255, 0.30588, 0.58431),
(0.37255, 0.29804, 0.55686),
(0.37255, 0.29412, 0.52549),
(0.37647, 0.28627, 0.50196),
(0.37647, 0.28235, 0.47843),
(0.38039, 0.28235, 0.45882),
(0.38431, 0.27843, 0.43922),
(0.38824, 0.27843, 0.41961),
(0.39608, 0.27843, 0.40784),
(0.40000, 0.28235, 0.40000),
(0.40784, 0.28627, 0.38824),
(0.41569, 0.29020, 0.38431),
(0.42353, 0.29804, 0.38039),
(0.43529, 0.30588, 0.37647),
(0.44706, 0.31765, 0.37647),
(0.45882, 0.32549, 0.38039),
(0.47059, 0.34118, 0.38431),
(0.48235, 0.35294, 0.39216),
(0.50196, 0.37255, 0.40000),
(0.51765, 0.39216, 0.40784),
(0.53725, 0.41176, 0.42353),
(0.55686, 0.43922, 0.43529),
(0.57647, 0.46667, 0.45098),
(0.60000, 0.49804, 0.47451),
(0.62745, 0.52941, 0.49804),
(0.65490, 0.56078, 0.52549),
(0.68235, 0.59216, 0.54902),
(0.70980, 0.62745, 0.58431),
(0.74118, 0.66275, 0.61569),
(0.77255, 0.70196, 0.65098),
(0.80392, 0.74118, 0.69020),
(0.83529, 0.78039, 0.73333),
(0.87059, 0.82353, 0.78431),
)
cmap_smooth = (
(0.00000, 0.00000, 1.00000),
(0.01569, 0.00000, 0.98431),
(0.03529, 0.00000, 0.96471),
(0.05098, 0.00000, 0.94902),
(0.06667, 0.00000, 0.93333),
(0.08627, 0.00000, 0.91373),
(0.10196, 0.00000, 0.89804),
(0.11765, 0.00000, 0.88235),
(0.13725, 0.00000, 0.86275),
(0.15294, 0.00000, 0.84706),
(0.16863, 0.00000, 0.83137),
(0.18824, 0.00000, 0.81176),
(0.20392, 0.00000, 0.79608),
(0.21961, 0.00000, 0.78039),
(0.23922, 0.00000, 0.76078),
(0.25490, 0.00000, 0.74510),
(0.27059, 0.00000, 0.72941),
(0.28627, 0.00000, 0.71373),
(0.30588, 0.00000, 0.69412),
(0.32157, 0.00000, 0.67843),
(0.33725, 0.00000, 0.66275),
(0.35686, 0.00000, 0.64314),
(0.37255, 0.00000, 0.62745),
(0.38824, 0.00000, 0.61176),
(0.40784, 0.00000, 0.59216),
(0.42353, 0.00000, 0.57647),
(0.43922, 0.00000, 0.56078),
(0.45882, 0.00000, 0.54118),
(0.47451, 0.00000, 0.52549),
(0.49020, 0.00000, 0.50980),
(0.50980, 0.00000, 0.49020),
(0.52549, 0.00000, 0.47451),
(0.54118, 0.00000, 0.45882),
(0.56078, 0.00000, 0.43922),
(0.57647, 0.00000, 0.42353),
(0.59216, 0.00000, 0.40784),
(0.61176, 0.00000, 0.38824),
(0.62745, 0.00000, 0.37255),
(0.64314, 0.00000, 0.35686),
(0.66275, 0.00000, 0.33725),
(0.67843, 0.00000, 0.32157),
(0.69412, 0.00000, 0.30588),
(0.71373, 0.00000, 0.28627),
(0.72941, 0.00000, 0.27059),
(0.74510, 0.00000, 0.25490),
(0.76078, 0.00000, 0.23922),
(0.78039, 0.00000, 0.21961),
(0.79608, 0.00000, 0.20392),
(0.81176, 0.00000, 0.18824),
(0.83137, 0.00000, 0.16863),
(0.84706, 0.00000, 0.15294),
(0.86275, 0.00000, 0.13725),
(0.88235, 0.00000, 0.11765),
(0.89804, 0.00000, 0.10196),
(0.91373, 0.00000, 0.08627),
(0.93333, 0.00000, 0.06667),
(0.94902, 0.00000, 0.05098),
(0.96471, 0.00000, 0.03529),
(0.98431, 0.00000, 0.01569),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.01176, 0.00000),
(1.00000, 0.01961, 0.00000),
(1.00000, 0.03137, 0.00000),
(1.00000, 0.03922, 0.00000),
(1.00000, 0.05098, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.07059, 0.00000),
(1.00000, 0.08235, 0.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.10196, 0.00000),
(1.00000, 0.10980, 0.00000),
(1.00000, 0.12157, 0.00000),
(1.00000, 0.12941, 0.00000),
(1.00000, 0.14118, 0.00000),
(0.99608, 0.15294, 0.00000),
(0.99608, 0.16078, 0.00000),
(0.99608, 0.17255, 0.00000),
(0.99608, 0.18039, 0.00000),
(0.99608, 0.19216, 0.00000),
(0.99608, 0.20392, 0.00000),
(0.99608, 0.21176, 0.00000),
(0.99608, 0.22353, 0.00000),
(0.99608, 0.23137, 0.00000),
(0.99608, 0.24314, 0.00000),
(0.99608, 0.25098, 0.00000),
(0.99608, 0.26275, 0.00000),
(0.99608, 0.27451, 0.00000),
(0.99608, 0.28235, 0.00000),
(0.99608, 0.29412, 0.00000),
(0.99608, 0.30196, 0.00000),
(0.99608, 0.31373, 0.00000),
(0.99608, 0.32157, 0.00000),
(0.99608, 0.33333, 0.00000),
(0.99608, 0.34510, 0.00000),
(0.99608, 0.35294, 0.00000),
(0.99608, 0.36471, 0.00000),
(0.99608, 0.37255, 0.00000),
(0.99608, 0.38431, 0.00000),
(0.99608, 0.39216, 0.00000),
(0.99608, 0.40392, 0.00000),
(0.99608, 0.41569, 0.00000),
(0.99608, 0.42353, 0.00000),
(0.99608, 0.43529, 0.00000),
(0.99608, 0.44314, 0.00000),
(0.99216, 0.45490, 0.00000),
(0.99216, 0.46667, 0.00000),
(0.99216, 0.47451, 0.00000),
(0.99216, 0.48627, 0.00000),
(0.99216, 0.49412, 0.00000),
(0.99216, 0.50588, 0.00000),
(0.99216, 0.51373, 0.00000),
(0.99216, 0.52549, 0.00000),
(0.99216, 0.53725, 0.00000),
(0.99216, 0.54510, 0.00000),
(0.99216, 0.55686, 0.00000),
(0.99216, 0.56471, 0.00000),
(0.99216, 0.57647, 0.00000),
(0.99216, 0.58431, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.60000, 0.00000),
(0.99216, 0.60784, 0.00000),
(0.99216, 0.61176, 0.00000),
(0.99216, 0.61569, 0.00000),
(0.99216, 0.61961, 0.00000),
(0.99216, 0.62745, 0.00000),
(0.99216, 0.63137, 0.00000),
(0.99216, 0.63529, 0.00000),
(0.99216, 0.64314, 0.00000),
(0.98824, 0.64706, 0.00000),
(0.98824, 0.65098, 0.00000),
(0.98824, 0.65882, 0.00000),
(0.98824, 0.66275, 0.00000),
(0.98824, 0.66667, 0.00000),
(0.98824, 0.67451, 0.00000),
(0.98824, 0.67843, 0.00000),
(0.98824, 0.68235, 0.00000),
(0.98824, 0.68627, 0.00000),
(0.98824, 0.69412, 0.00000),
(0.98824, 0.69804, 0.00000),
(0.98824, 0.70196, 0.00000),
(0.98824, 0.70980, 0.00000),
(0.98824, 0.71373, 0.00000),
(0.98824, 0.71765, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72941, 0.00000),
(0.98824, 0.73333, 0.00000),
(0.98824, 0.73725, 0.00000),
(0.98824, 0.74510, 0.00000),
(0.98824, 0.74902, 0.00000),
(0.98431, 0.75294, 0.00000),
(0.98431, 0.76078, 0.00000),
(0.98431, 0.76471, 0.00000),
(0.98431, 0.76863, 0.00000),
(0.98431, 0.77255, 0.00000),
(0.98431, 0.78039, 0.00000),
(0.98431, 0.78431, 0.00000),
(0.98431, 0.78824, 0.00000),
(0.98431, 0.79608, 0.00000),
(0.98431, 0.80000, 0.00000),
(0.98431, 0.80392, 0.00000),
(0.98431, 0.81176, 0.00000),
(0.98431, 0.81569, 0.00000),
(0.98431, 0.81961, 0.00000),
(0.98431, 0.82745, 0.00000),
(0.98431, 0.83137, 0.00000),
(0.98431, 0.83529, 0.00000),
(0.98431, 0.83922, 0.00000),
(0.98431, 0.84706, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98039, 0.85490, 0.00000),
(0.98039, 0.86275, 0.00000),
(0.98039, 0.86667, 0.00000),
(0.98039, 0.87059, 0.00000),
(0.98039, 0.87843, 0.00000),
(0.98039, 0.88235, 0.00000),
(0.98039, 0.88627, 0.00000),
(0.98039, 0.89020, 0.00000),
(0.98039, 0.89804, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.96471, 0.88627, 0.00000),
(0.94902, 0.87059, 0.00000),
(0.92941, 0.85490, 0.00000),
(0.91373, 0.83922, 0.00000),
(0.89804, 0.82745, 0.00000),
(0.88235, 0.81176, 0.00000),
(0.86275, 0.79608, 0.00000),
(0.84706, 0.78039, 0.00000),
(0.83137, 0.76471, 0.00000),
(0.81569, 0.74902, 0.00000),
(0.79608, 0.73333, 0.00000),
(0.78039, 0.71765, 0.00000),
(0.76471, 0.70196, 0.00000),
(0.74902, 0.68627, 0.00000),
(0.72941, 0.67451, 0.00000),
(0.71373, 0.65882, 0.00000),
(0.69804, 0.64314, 0.00000),
(0.68235, 0.62745, 0.00000),
(0.66275, 0.61176, 0.00000),
(0.64706, 0.59608, 0.00000),
(0.63137, 0.58039, 0.00000),
(0.61569, 0.56471, 0.00000),
(0.60000, 0.54902, 0.00000),
(0.58039, 0.53333, 0.00000),
(0.56471, 0.52157, 0.00000),
(0.54902, 0.50588, 0.00000),
(0.53333, 0.49020, 0.00000),
(0.51373, 0.47451, 0.00000),
(0.49804, 0.45882, 0.00000),
(0.48235, 0.44314, 0.00000),
(0.46667, 0.42745, 0.00000),
(0.44706, 0.41176, 0.00000),
(0.43137, 0.39608, 0.00000),
(0.41569, 0.38039, 0.00000),
(0.40000, 0.36863, 0.00000),
(0.38039, 0.35294, 0.00000),
(0.36471, 0.33725, 0.00000),
(0.34902, 0.32157, 0.00000),
(0.33333, 0.30588, 0.00000),
(0.31765, 0.29020, 0.00000),
(0.29804, 0.27451, 0.00000),
(0.28235, 0.25882, 0.00000),
(0.26667, 0.24314, 0.00000),
(0.25098, 0.22745, 0.00000),
(0.23137, 0.21569, 0.00000),
(0.21569, 0.20000, 0.00000),
(0.20000, 0.18431, 0.00000),
(0.18431, 0.16863, 0.00000),
(0.16471, 0.15294, 0.00000),
(0.14902, 0.13725, 0.00000),
(0.13333, 0.12157, 0.00000),
(0.11765, 0.10588, 0.00000),
(0.09804, 0.09020, 0.00000),
(0.08235, 0.07451, 0.00000),
(0.06667, 0.06275, 0.00000),
(0.05098, 0.04706, 0.00000),
(0.03137, 0.03137, 0.00000),
(0.01569, 0.01569, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
)
cmap_isophot = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.11765),
(0.00000, 0.00000, 0.15686),
(0.00000, 0.00000, 0.19608),
(0.00000, 0.00000, 0.23529),
(0.00000, 0.00000, 0.27843),
(0.00000, 0.00000, 0.31765),
(0.00000, 0.00000, 0.35686),
(0.00000, 0.00000, 0.39608),
(0.00000, 0.00000, 0.43529),
(0.00000, 0.00000, 0.47451),
(0.00000, 0.00000, 0.51765),
(0.00000, 0.00000, 0.55686),
(0.00000, 0.00000, 0.59608),
(0.00000, 0.00000, 0.63529),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.71765),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 0.00000, 0.87843),
(0.00000, 0.00000, 0.91765),
(0.00000, 0.00000, 0.95686),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03137, 1.00000),
(0.00000, 0.06275, 1.00000),
(0.00000, 0.09412, 1.00000),
(0.00000, 0.12549, 1.00000),
(0.00000, 0.15686, 1.00000),
(0.00000, 0.18824, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.28627, 1.00000),
(0.00000, 0.31765, 1.00000),
(0.00000, 0.34902, 1.00000),
(0.00000, 0.38039, 1.00000),
(0.00000, 0.41176, 1.00000),
(0.00000, 0.44314, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.49804, 1.00000),
(0.00000, 0.51765, 1.00000),
(0.00000, 0.53725, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.63922, 1.00000),
(0.00000, 0.65882, 1.00000),
(0.00000, 0.67843, 1.00000),
(0.00000, 0.70196, 1.00000),
(0.00000, 0.72157, 1.00000),
(0.00000, 0.74118, 1.00000),
(0.00000, 0.76078, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.79608, 1.00000),
(0.00000, 0.81176, 1.00000),
(0.00000, 0.82353, 1.00000),
(0.00000, 0.83922, 1.00000),
(0.00000, 0.85490, 1.00000),
(0.00000, 0.86667, 1.00000),
(0.00000, 0.88235, 1.00000),
(0.00000, 0.89412, 1.00000),
(0.00000, 0.90980, 1.00000),
(0.00000, 0.92549, 1.00000),
(0.00000, 0.93725, 1.00000),
(0.00000, 0.95294, 1.00000),
(0.00000, 0.96863, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.96078),
(0.00000, 1.00000, 0.94118),
(0.00000, 1.00000, 0.92157),
(0.00000, 1.00000, 0.90196),
(0.00000, 1.00000, 0.88235),
(0.00000, 1.00000, 0.86275),
(0.00000, 1.00000, 0.84314),
(0.00000, 1.00000, 0.82353),
(0.00000, 1.00000, 0.80392),
(0.00000, 1.00000, 0.78431),
(0.00000, 1.00000, 0.76471),
(0.00000, 1.00000, 0.74510),
(0.00000, 1.00000, 0.72549),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.65490),
(0.00000, 1.00000, 0.60784),
(0.00000, 1.00000, 0.56078),
(0.00000, 1.00000, 0.51373),
(0.00000, 1.00000, 0.46667),
(0.00000, 1.00000, 0.41961),
(0.00000, 1.00000, 0.37255),
(0.00000, 1.00000, 0.32549),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.13725),
(0.00000, 1.00000, 0.09020),
(0.00000, 1.00000, 0.04314),
(0.00000, 1.00000, 0.00000),
(0.04706, 1.00000, 0.00000),
(0.09412, 1.00000, 0.00000),
(0.14118, 1.00000, 0.00000),
(0.18824, 1.00000, 0.00000),
(0.23529, 1.00000, 0.00000),
(0.28235, 1.00000, 0.00000),
(0.32941, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.42353, 1.00000, 0.00000),
(0.47059, 1.00000, 0.00000),
(0.51765, 1.00000, 0.00000),
(0.56471, 1.00000, 0.00000),
(0.61176, 1.00000, 0.00000),
(0.65882, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.72549, 1.00000, 0.00000),
(0.74510, 1.00000, 0.00000),
(0.76471, 1.00000, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.84314, 1.00000, 0.00000),
(0.86275, 1.00000, 0.00000),
(0.88235, 1.00000, 0.00000),
(0.90196, 1.00000, 0.00000),
(0.92157, 1.00000, 0.00000),
(0.94118, 1.00000, 0.00000),
(0.96078, 1.00000, 0.00000),
(0.98039, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 0.98039, 0.00000),
(0.99608, 0.96078, 0.00000),
(0.99608, 0.94118, 0.00000),
(0.99608, 0.92549, 0.00000),
(0.99216, 0.90588, 0.00000),
(0.99216, 0.88627, 0.00000),
(0.99216, 0.87059, 0.00000),
(0.99216, 0.85098, 0.00000),
(0.98824, 0.83137, 0.00000),
(0.98824, 0.81569, 0.00000),
(0.98824, 0.79608, 0.00000),
(0.98824, 0.77647, 0.00000),
(0.98824, 0.76078, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.98824, 0.69020, 0.00000),
(0.98824, 0.67059, 0.00000),
(0.98824, 0.65490, 0.00000),
(0.98824, 0.63922, 0.00000),
(0.98824, 0.61961, 0.00000),
(0.99216, 0.60392, 0.00000),
(0.99216, 0.58824, 0.00000),
(0.99216, 0.56863, 0.00000),
(0.99216, 0.55294, 0.00000),
(0.99608, 0.53725, 0.00000),
(0.99608, 0.51765, 0.00000),
(0.99608, 0.50196, 0.00000),
(0.99608, 0.48627, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.43529, 0.00000),
(1.00000, 0.40392, 0.00000),
(1.00000, 0.37255, 0.00000),
(1.00000, 0.34118, 0.00000),
(1.00000, 0.30980, 0.00000),
(1.00000, 0.27843, 0.00000),
(1.00000, 0.24706, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.02745, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.04706),
(1.00000, 0.00000, 0.09412),
(1.00000, 0.00000, 0.14118),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.00000, 0.32941),
(1.00000, 0.00000, 0.37647),
(1.00000, 0.00000, 0.42353),
(1.00000, 0.00000, 0.47059),
(1.00000, 0.00000, 0.51765),
(1.00000, 0.00000, 0.56471),
(1.00000, 0.00000, 0.61176),
(1.00000, 0.00000, 0.65882),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.72549),
(1.00000, 0.00000, 0.74902),
(1.00000, 0.00000, 0.77255),
(1.00000, 0.00000, 0.79608),
(1.00000, 0.00000, 0.81569),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.88627),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.92941),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.97647),
(1.00000, 0.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.14118, 1.00000),
(1.00000, 0.17647, 1.00000),
(1.00000, 0.21176, 1.00000),
(1.00000, 0.25098, 1.00000),
(1.00000, 0.28627, 1.00000),
(1.00000, 0.32157, 1.00000),
(1.00000, 0.36078, 1.00000),
(1.00000, 0.39608, 1.00000),
(1.00000, 0.43137, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.48627, 1.00000),
(1.00000, 0.50588, 1.00000),
(1.00000, 0.52157, 1.00000),
(1.00000, 0.54118, 1.00000),
(1.00000, 0.56078, 1.00000),
(1.00000, 0.57647, 1.00000),
(1.00000, 0.59608, 1.00000),
(1.00000, 0.61176, 1.00000),
(1.00000, 0.63137, 1.00000),
(1.00000, 0.65098, 1.00000),
(1.00000, 0.66667, 1.00000),
(1.00000, 0.68627, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.74510, 1.00000),
(1.00000, 0.78824, 1.00000),
(1.00000, 0.83137, 1.00000),
(1.00000, 0.87059, 1.00000),
(1.00000, 0.91373, 1.00000),
(1.00000, 0.95686, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_smooth2 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.06667),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.13333),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.26667),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.33333),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.46667),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.00000, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.06667, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.13333, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.20000, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.26667, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.33333, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.40000, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.46667, 0.00000, 0.53333),
(0.53333, 0.00000, 0.53333),
(0.53333, 0.00000, 0.53333),
(0.53333, 0.00000, 0.53333),
(0.53333, 0.00000, 0.53333),
(0.53333, 0.00000, 0.46667),
(0.53333, 0.00000, 0.46667),
(0.53333, 0.00000, 0.46667),
(0.53333, 0.00000, 0.46667),
(0.60000, 0.00000, 0.40000),
(0.60000, 0.00000, 0.40000),
(0.60000, 0.00000, 0.40000),
(0.60000, 0.00000, 0.40000),
(0.60000, 0.00000, 0.33333),
(0.60000, 0.00000, 0.33333),
(0.60000, 0.00000, 0.33333),
(0.60000, 0.00000, 0.33333),
(0.66667, 0.00000, 0.26667),
(0.66667, 0.00000, 0.26667),
(0.66667, 0.00000, 0.26667),
(0.66667, 0.00000, 0.26667),
(0.66667, 0.00000, 0.20000),
(0.66667, 0.00000, 0.20000),
(0.66667, 0.00000, 0.20000),
(0.66667, 0.00000, 0.20000),
(0.73333, 0.00000, 0.13333),
(0.73333, 0.00000, 0.13333),
(0.73333, 0.00000, 0.13333),
(0.73333, 0.00000, 0.13333),
(0.73333, 0.00000, 0.06667),
(0.73333, 0.00000, 0.06667),
(0.73333, 0.00000, 0.06667),
(0.73333, 0.00000, 0.06667),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.80000, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(0.93333, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.06667, 0.00000),
(1.00000, 0.06667, 0.00000),
(1.00000, 0.13333, 0.00000),
(1.00000, 0.13333, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.26667, 0.00000),
(1.00000, 0.26667, 0.00000),
(1.00000, 0.33333, 0.00000),
(1.00000, 0.33333, 0.00000),
(1.00000, 0.40000, 0.00000),
(1.00000, 0.40000, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.53333, 0.00000),
(1.00000, 0.53333, 0.00000),
(1.00000, 0.60000, 0.00000),
(1.00000, 0.60000, 0.00000),
(1.00000, 0.66667, 0.00000),
(1.00000, 0.66667, 0.00000),
(1.00000, 0.73333, 0.00000),
(1.00000, 0.73333, 0.00000),
(1.00000, 0.80000, 0.00000),
(1.00000, 0.80000, 0.00000),
(1.00000, 0.86667, 0.00000),
(1.00000, 0.86667, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.06667),
(1.00000, 1.00000, 0.06667),
(1.00000, 1.00000, 0.13333),
(1.00000, 1.00000, 0.13333),
(1.00000, 1.00000, 0.20000),
(1.00000, 1.00000, 0.20000),
(1.00000, 1.00000, 0.26667),
(1.00000, 1.00000, 0.26667),
(1.00000, 1.00000, 0.33333),
(1.00000, 1.00000, 0.33333),
(1.00000, 1.00000, 0.40000),
(1.00000, 1.00000, 0.40000),
(1.00000, 1.00000, 0.46667),
(1.00000, 1.00000, 0.46667),
(1.00000, 1.00000, 0.53333),
(1.00000, 1.00000, 0.53333),
(1.00000, 1.00000, 0.60000),
(1.00000, 1.00000, 0.60000),
(1.00000, 1.00000, 0.66667),
(1.00000, 1.00000, 0.66667),
(1.00000, 1.00000, 0.73333),
(1.00000, 1.00000, 0.73333),
(1.00000, 1.00000, 0.80000),
(1.00000, 1.00000, 0.80000),
(1.00000, 1.00000, 0.86667),
(1.00000, 1.00000, 1.00000),
)
cmap_heat = (
(0.00000, 0.00000, 0.00000),
(0.01176, 0.00392, 0.00000),
(0.02353, 0.00784, 0.00000),
(0.03529, 0.01176, 0.00000),
(0.04706, 0.01569, 0.00000),
(0.05882, 0.01961, 0.00000),
(0.07059, 0.02353, 0.00000),
(0.08235, 0.02745, 0.00000),
(0.09412, 0.03137, 0.00000),
(0.10588, 0.03529, 0.00000),
(0.11765, 0.03922, 0.00000),
(0.12941, 0.04314, 0.00000),
(0.14118, 0.04706, 0.00000),
(0.15294, 0.05098, 0.00000),
(0.16471, 0.05490, 0.00000),
(0.17647, 0.05882, 0.00000),
(0.18824, 0.06275, 0.00000),
(0.20000, 0.06667, 0.00000),
(0.21176, 0.07059, 0.00000),
(0.22353, 0.07451, 0.00000),
(0.23529, 0.07843, 0.00000),
(0.24706, 0.08235, 0.00000),
(0.25882, 0.08627, 0.00000),
(0.27059, 0.09020, 0.00000),
(0.28235, 0.09412, 0.00000),
(0.29412, 0.09804, 0.00000),
(0.30588, 0.10196, 0.00000),
(0.31765, 0.10588, 0.00000),
(0.32941, 0.10980, 0.00000),
(0.34118, 0.11373, 0.00000),
(0.35294, 0.11765, 0.00000),
(0.36471, 0.12157, 0.00000),
(0.37647, 0.12549, 0.00000),
(0.38824, 0.12941, 0.00000),
(0.40000, 0.13333, 0.00000),
(0.41176, 0.13725, 0.00000),
(0.42353, 0.14118, 0.00000),
(0.43529, 0.14510, 0.00000),
(0.44706, 0.14902, 0.00000),
(0.45882, 0.15294, 0.00000),
(0.47059, 0.15686, 0.00000),
(0.48235, 0.16078, 0.00000),
(0.49412, 0.16471, 0.00000),
(0.50588, 0.16863, 0.00000),
(0.51765, 0.17255, 0.00000),
(0.52941, 0.17647, 0.00000),
(0.54118, 0.18039, 0.00000),
(0.55294, 0.18431, 0.00000),
(0.56471, 0.18824, 0.00000),
(0.57647, 0.19216, 0.00000),
(0.58824, 0.19608, 0.00000),
(0.60000, 0.20000, 0.00000),
(0.61176, 0.20392, 0.00000),
(0.62353, 0.20784, 0.00000),
(0.63529, 0.21176, 0.00000),
(0.64706, 0.21569, 0.00000),
(0.65882, 0.21961, 0.00000),
(0.67059, 0.22353, 0.00000),
(0.68235, 0.22745, 0.00000),
(0.69412, 0.23137, 0.00000),
(0.70588, 0.23529, 0.00000),
(0.71765, 0.23922, 0.00000),
(0.72941, 0.24314, 0.00000),
(0.74118, 0.24706, 0.00000),
(0.75294, 0.25098, 0.00000),
(0.76471, 0.25490, 0.00000),
(0.77647, 0.25882, 0.00000),
(0.78824, 0.26275, 0.00000),
(0.80000, 0.26667, 0.00000),
(0.81176, 0.27059, 0.00000),
(0.82353, 0.27451, 0.00000),
(0.83529, 0.27843, 0.00000),
(0.84706, 0.28235, 0.00000),
(0.85882, 0.28627, 0.00000),
(0.87059, 0.29020, 0.00000),
(0.88235, 0.29412, 0.00000),
(0.89412, 0.29804, 0.00000),
(0.90588, 0.30196, 0.00000),
(0.91765, 0.30588, 0.00000),
(0.92941, 0.30980, 0.00000),
(0.94118, 0.31373, 0.00000),
(0.95294, 0.31765, 0.00000),
(0.96471, 0.32157, 0.00000),
(0.97647, 0.32549, 0.00000),
(0.98824, 0.32941, 0.00000),
(1.00000, 0.33333, 0.00000),
(1.00000, 0.33725, 0.00000),
(1.00000, 0.34118, 0.00000),
(1.00000, 0.34510, 0.00000),
(1.00000, 0.34902, 0.00000),
(1.00000, 0.35294, 0.00000),
(1.00000, 0.35686, 0.00000),
(1.00000, 0.36078, 0.00000),
(1.00000, 0.36471, 0.00000),
(1.00000, 0.36863, 0.00000),
(1.00000, 0.37255, 0.00000),
(1.00000, 0.37647, 0.00000),
(1.00000, 0.38039, 0.00000),
(1.00000, 0.38431, 0.00000),
(1.00000, 0.38824, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39608, 0.00000),
(1.00000, 0.40000, 0.00000),
(1.00000, 0.40392, 0.00000),
(1.00000, 0.40784, 0.00000),
(1.00000, 0.41176, 0.00000),
(1.00000, 0.41569, 0.00000),
(1.00000, 0.41961, 0.00000),
(1.00000, 0.42353, 0.00000),
(1.00000, 0.42745, 0.00000),
(1.00000, 0.43137, 0.00000),
(1.00000, 0.43529, 0.00000),
(1.00000, 0.43922, 0.00000),
(1.00000, 0.44314, 0.00000),
(1.00000, 0.44706, 0.00000),
(1.00000, 0.45098, 0.00000),
(1.00000, 0.45490, 0.00000),
(1.00000, 0.45882, 0.00000),
(1.00000, 0.46275, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47451, 0.00000),
(1.00000, 0.47843, 0.00000),
(1.00000, 0.48235, 0.00000),
(1.00000, 0.48627, 0.00000),
(1.00000, 0.49020, 0.00000),
(1.00000, 0.49412, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.50196, 0.00000),
(1.00000, 0.50588, 0.00000),
(1.00000, 0.50980, 0.00000),
(1.00000, 0.51373, 0.00000),
(1.00000, 0.51765, 0.00000),
(1.00000, 0.52157, 0.00000),
(1.00000, 0.52549, 0.00000),
(1.00000, 0.52941, 0.00000),
(1.00000, 0.53333, 0.00000),
(1.00000, 0.53725, 0.00000),
(1.00000, 0.54118, 0.00000),
(1.00000, 0.54510, 0.00000),
(1.00000, 0.54902, 0.00000),
(1.00000, 0.55294, 0.00000),
(1.00000, 0.55686, 0.00000),
(1.00000, 0.56078, 0.00000),
(1.00000, 0.56471, 0.00000),
(1.00000, 0.56863, 0.00000),
(1.00000, 0.57255, 0.00000),
(1.00000, 0.57647, 0.00000),
(1.00000, 0.58039, 0.00000),
(1.00000, 0.58431, 0.00000),
(1.00000, 0.58824, 0.00000),
(1.00000, 0.59216, 0.00000),
(1.00000, 0.59608, 0.00000),
(1.00000, 0.60000, 0.00000),
(1.00000, 0.60392, 0.00000),
(1.00000, 0.60784, 0.00000),
(1.00000, 0.61176, 0.00000),
(1.00000, 0.61569, 0.00000),
(1.00000, 0.61961, 0.00000),
(1.00000, 0.62353, 0.00000),
(1.00000, 0.62745, 0.00000),
(1.00000, 0.63137, 0.00000),
(1.00000, 0.63529, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.64314, 0.00000),
(1.00000, 0.64706, 0.00000),
(1.00000, 0.65098, 0.01176),
(1.00000, 0.65490, 0.02353),
(1.00000, 0.65882, 0.03529),
(1.00000, 0.66275, 0.04706),
(1.00000, 0.66667, 0.05882),
(1.00000, 0.67059, 0.07059),
(1.00000, 0.67451, 0.08235),
(1.00000, 0.67843, 0.09412),
(1.00000, 0.68235, 0.10588),
(1.00000, 0.68627, 0.11765),
(1.00000, 0.69020, 0.12941),
(1.00000, 0.69412, 0.14118),
(1.00000, 0.69804, 0.15294),
(1.00000, 0.70196, 0.16471),
(1.00000, 0.70588, 0.17647),
(1.00000, 0.70980, 0.18824),
(1.00000, 0.71373, 0.20000),
(1.00000, 0.71765, 0.21176),
(1.00000, 0.72157, 0.22353),
(1.00000, 0.72549, 0.23529),
(1.00000, 0.72941, 0.24706),
(1.00000, 0.73333, 0.25882),
(1.00000, 0.73725, 0.27059),
(1.00000, 0.74118, 0.28235),
(1.00000, 0.74510, 0.29412),
(1.00000, 0.74902, 0.30588),
(1.00000, 0.75294, 0.31765),
(1.00000, 0.75686, 0.32941),
(1.00000, 0.76078, 0.34118),
(1.00000, 0.76471, 0.35294),
(1.00000, 0.76863, 0.36471),
(1.00000, 0.77255, 0.37647),
(1.00000, 0.77647, 0.38824),
(1.00000, 0.78039, 0.40000),
(1.00000, 0.78431, 0.41176),
(1.00000, 0.78824, 0.42353),
(1.00000, 0.79216, 0.43529),
(1.00000, 0.79608, 0.44706),
(1.00000, 0.80000, 0.45882),
(1.00000, 0.80392, 0.47059),
(1.00000, 0.80784, 0.48235),
(1.00000, 0.81176, 0.49412),
(1.00000, 0.81569, 0.50588),
(1.00000, 0.81961, 0.51765),
(1.00000, 0.82353, 0.52941),
(1.00000, 0.82745, 0.54118),
(1.00000, 0.83137, 0.55294),
(1.00000, 0.83529, 0.56471),
(1.00000, 0.83922, 0.57647),
(1.00000, 0.84314, 0.58824),
(1.00000, 0.84706, 0.60000),
(1.00000, 0.85098, 0.61176),
(1.00000, 0.85490, 0.62353),
(1.00000, 0.85882, 0.63529),
(1.00000, 0.86275, 0.64706),
(1.00000, 0.86667, 0.65882),
(1.00000, 0.87059, 0.67059),
(1.00000, 0.87451, 0.68235),
(1.00000, 0.87843, 0.69412),
(1.00000, 0.88235, 0.70588),
(1.00000, 0.88627, 0.71765),
(1.00000, 0.89020, 0.72941),
(1.00000, 0.89412, 0.74118),
(1.00000, 0.89804, 0.75294),
(1.00000, 0.90196, 0.76471),
(1.00000, 0.90588, 0.77647),
(1.00000, 0.90980, 0.78824),
(1.00000, 0.91373, 0.80000),
(1.00000, 0.91765, 0.81176),
(1.00000, 0.92157, 0.82353),
(1.00000, 0.92549, 0.83529),
(1.00000, 0.92941, 0.84706),
(1.00000, 0.93333, 0.85882),
(1.00000, 0.93725, 0.87059),
(1.00000, 0.94118, 0.88235),
(1.00000, 0.94510, 0.89412),
(1.00000, 0.94902, 0.90588),
(1.00000, 0.95294, 0.91765),
(1.00000, 0.95686, 0.92941),
(1.00000, 0.96078, 0.94118),
(1.00000, 0.96471, 0.95294),
(1.00000, 0.96863, 0.96471),
(1.00000, 0.97255, 0.97647),
(1.00000, 0.97647, 0.98824),
(1.00000, 0.98039, 1.00000),
(1.00000, 0.98431, 1.00000),
(1.00000, 0.98824, 1.00000),
(1.00000, 0.99216, 1.00000),
(1.00000, 0.99608, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_smooth3 = (
(0.00000, 0.00000, 0.00784),
(0.00000, 0.00000, 0.01795),
(0.00000, 0.00000, 0.03087),
(0.00000, 0.00000, 0.04434),
(0.00000, 0.00000, 0.05781),
(0.00000, 0.00000, 0.07128),
(0.00000, 0.00000, 0.08475),
(0.00000, 0.00000, 0.09822),
(0.00000, 0.00000, 0.11170),
(0.00000, 0.00000, 0.12231),
(0.00000, 0.00000, 0.13472),
(0.00000, 0.00000, 0.14819),
(0.00000, 0.00000, 0.16166),
(0.00000, 0.00000, 0.17513),
(0.00000, 0.00000, 0.18851),
(0.00000, 0.00000, 0.19862),
(0.00000, 0.00000, 0.21163),
(0.00000, 0.00000, 0.22510),
(0.00000, 0.00000, 0.23857),
(0.00000, 0.00000, 0.25080),
(0.00000, 0.00000, 0.26228),
(0.00000, 0.00000, 0.27885),
(0.00000, 0.00000, 0.28895),
(0.00000, 0.00000, 0.30201),
(0.00000, 0.00000, 0.31308),
(0.00000, 0.00000, 0.32503),
(0.00000, 0.00000, 0.33850),
(0.00000, 0.00000, 0.35197),
(0.00000, 0.00000, 0.36526),
(0.00000, 0.00000, 0.37536),
(0.00000, 0.00000, 0.39146),
(0.00000, 0.00000, 0.40341),
(0.00000, 0.00000, 0.41541),
(0.00000, 0.00000, 0.42754),
(0.00000, 0.00000, 0.43922),
(0.00000, 0.00000, 0.45559),
(0.00000, 0.00000, 0.46570),
(0.00000, 0.00000, 0.47885),
(0.00000, 0.00000, 0.49232),
(0.00000, 0.00000, 0.50579),
(0.00000, 0.00000, 0.51788),
(0.00000, 0.00000, 0.52881),
(0.00000, 0.00000, 0.54228),
(0.00000, 0.00000, 0.55576),
(0.00000, 0.00000, 0.56923),
(0.00000, 0.00000, 0.58016),
(0.00397, 0.00000, 0.59225),
(0.01356, 0.00000, 0.60572),
(0.02791, 0.00000, 0.61919),
(0.04507, 0.00000, 0.63234),
(0.06528, 0.00000, 0.64245),
(0.08235, 0.00000, 0.65569),
(0.10178, 0.00000, 0.66916),
(0.12198, 0.00000, 0.68263),
(0.14219, 0.00000, 0.69462),
(0.16240, 0.00000, 0.70565),
(0.18261, 0.00000, 0.71912),
(0.20281, 0.00000, 0.73259),
(0.21984, 0.00000, 0.74607),
(0.23668, 0.00000, 0.75954),
(0.25559, 0.00000, 0.77093),
(0.27580, 0.00000, 0.78256),
(0.29504, 0.00000, 0.79603),
(0.31063, 0.00000, 0.80909),
(0.31737, 0.00000, 0.81919),
(0.31765, 0.00000, 0.83575),
(0.31765, 0.00000, 0.84992),
(0.31765, 0.00000, 0.86339),
(0.31765, 0.00000, 0.87686),
(0.31765, 0.00000, 0.88932),
(0.31719, 0.00000, 0.89942),
(0.31382, 0.00000, 0.90953),
(0.31373, 0.00000, 0.92291),
(0.31373, 0.00000, 0.93638),
(0.31373, 0.00000, 0.94985),
(0.31373, 0.00000, 0.96332),
(0.31373, 0.00000, 0.97573),
(0.40250, 0.05175, 0.86307),
(0.99189, 0.39528, 0.05813),
(1.00000, 0.40000, 0.04706),
(0.84776, 0.30312, 0.04983),
(0.55402, 0.11438, 0.05319),
(0.38644, 0.00000, 0.05988),
(0.40664, 0.00000, 0.07442),
(0.42685, 0.00000, 0.09799),
(0.44706, 0.00000, 0.12157),
(0.46727, 0.00000, 0.13841),
(0.48466, 0.00000, 0.15806),
(0.50376, 0.00000, 0.17827),
(0.52397, 0.00000, 0.20018),
(0.54417, 0.00000, 0.22261),
(0.56438, 0.00000, 0.24281),
(0.58459, 0.00000, 0.26302),
(0.60480, 0.00000, 0.28323),
(0.62501, 0.00000, 0.30344),
(0.64521, 0.00000, 0.32364),
(0.66542, 0.00000, 0.34385),
(0.68563, 0.00000, 0.36406),
(0.70584, 0.00000, 0.38491),
(0.72604, 0.00000, 0.40840),
(0.74625, 0.00000, 0.42860),
(0.76937, 0.00000, 0.44881),
(0.79059, 0.00000, 0.46667),
(0.81260, 0.00000, 0.48711),
(0.83493, 0.00000, 0.50944),
(0.85513, 0.00000, 0.52964),
(0.87576, 0.00000, 0.54985),
(0.90607, 0.00000, 0.57006),
(0.93933, 0.00000, 0.59027),
(0.96821, 0.00000, 0.61047),
(0.98777, 0.00000, 0.63068),
(0.99737, 0.00000, 0.65089),
(1.00000, 0.00000, 0.67110),
(1.00000, 0.00092, 0.69149),
(1.00000, 0.01776, 0.71506),
(1.00000, 0.03760, 0.73564),
(1.00000, 0.05781, 0.75585),
(1.00000, 0.07802, 0.77606),
(1.00000, 0.09822, 0.79626),
(1.00000, 0.11922, 0.81647),
(1.00000, 0.14279, 0.83668),
(1.00000, 0.16637, 0.85689),
(1.00000, 0.18690, 0.88014),
(1.00000, 0.20960, 0.90122),
(1.00000, 0.23124, 0.92143),
(1.00000, 0.25144, 0.94164),
(1.00000, 0.27165, 0.96185),
(1.00000, 0.29214, 0.98178),
(1.00000, 0.31571, 0.99862),
(1.00000, 0.33310, 0.98454),
(1.00000, 0.35248, 0.96517),
(1.00000, 0.37269, 0.94496),
(1.00000, 0.39290, 0.92332),
(1.00000, 0.41223, 0.89974),
(1.00000, 0.42907, 0.87649),
(1.00000, 0.44591, 0.85628),
(1.00000, 0.46588, 0.83294),
(1.00000, 0.48609, 0.81195),
(1.00000, 0.50630, 0.79174),
(1.00000, 0.52503, 0.77006),
(1.00000, 0.54279, 0.74740),
(1.00000, 0.56263, 0.72720),
(1.00000, 0.57947, 0.70699),
(1.00000, 0.59949, 0.68360),
(1.00000, 0.61707, 0.66265),
(1.00000, 0.62976, 0.64037),
(1.00000, 0.63682, 0.61831),
(1.00000, 0.63922, 0.59811),
(1.00000, 0.63922, 0.57790),
(1.00000, 0.63922, 0.55769),
(1.00000, 0.63922, 0.53425),
(1.00000, 0.63922, 0.51335),
(1.00000, 0.63922, 0.49102),
(1.00000, 0.63922, 0.46902),
(1.00000, 0.63922, 0.44881),
(1.00000, 0.63922, 0.42860),
(1.00000, 0.63922, 0.40839),
(1.00000, 0.63922, 0.38491),
(1.00000, 0.63922, 0.36406),
(1.00000, 0.63922, 0.34168),
(0.99838, 0.63922, 0.31972),
(0.99077, 0.63922, 0.29845),
(0.97241, 0.63922, 0.27589),
(0.94546, 0.63922, 0.25905),
(0.91520, 0.63922, 0.23557),
(0.88489, 0.63922, 0.21476),
(0.85457, 0.63922, 0.19234),
(0.82426, 0.63922, 0.16876),
(0.79395, 0.63922, 0.14629),
(0.76309, 0.63922, 0.12664),
(0.72941, 0.63922, 0.10980),
(0.70583, 0.63922, 0.08622),
(0.69070, 0.63922, 0.06265),
(0.67949, 0.63922, 0.04133),
(0.67793, 0.64092, 0.02113),
(0.68582, 0.64545, 0.00554),
(0.69772, 0.65278, 0.00000),
(0.71793, 0.66307, 0.00000),
(0.73814, 0.68665, 0.00000),
(0.75834, 0.71022, 0.00000),
(0.77855, 0.73380, 0.00000),
(0.79876, 0.75738, 0.00000),
(0.81897, 0.78095, 0.00000),
(0.83982, 0.80517, 0.00000),
(0.86330, 0.83202, 0.00000),
(0.88351, 0.85560, 0.00000),
(0.90372, 0.88208, 0.00000),
(0.92393, 0.90667, 0.00000),
(0.94413, 0.93025, 0.00000),
(0.96434, 0.95382, 0.00000),
(0.98386, 0.97740, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00305),
(1.00000, 1.00000, 0.01638),
(1.00000, 1.00000, 0.03322),
(1.00000, 1.00000, 0.05006),
(1.00000, 1.00000, 0.06773),
(1.00000, 1.00000, 0.08794),
(1.00000, 1.00000, 0.10815),
(1.00000, 1.00000, 0.12526),
(1.00000, 1.00000, 0.14464),
(1.00000, 1.00000, 0.16485),
(1.00000, 1.00000, 0.18506),
(1.00000, 1.00000, 0.20439),
(1.00000, 1.00000, 0.22155),
(1.00000, 1.00000, 0.24176),
(1.00000, 1.00000, 0.25883),
(1.00000, 1.00000, 0.27567),
(1.00000, 1.00000, 0.28642),
(1.00000, 1.00000, 0.28872),
(1.00000, 1.00000, 0.28350),
(1.00000, 1.00000, 0.27229),
(1.00000, 1.00000, 0.25208),
(1.00000, 1.00000, 0.22869),
(1.00000, 1.00000, 0.20511),
(1.00000, 1.00000, 0.18154),
(1.00000, 1.00000, 0.15796),
(1.00000, 1.00000, 0.13536),
(1.00000, 1.00000, 0.11473),
(1.00000, 1.00000, 0.09116),
(1.00000, 1.00000, 0.06435),
(1.00000, 1.00000, 0.04008),
(1.00000, 1.00000, 0.02076),
(1.00000, 1.00000, 0.00706),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.09305),
(1.00000, 1.00000, 0.33136),
(1.00000, 1.00000, 0.60966),
(1.00000, 1.00000, 0.83605),
(0.96674, 1.00000, 0.96343),
(0.75749, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
)
cmap_rainbow = (
(0.00000, 0.00000, 0.16471),
(0.02745, 0.00000, 0.18431),
(0.05882, 0.00000, 0.20000),
(0.08627, 0.00000, 0.21961),
(0.11373, 0.00000, 0.23922),
(0.14510, 0.00000, 0.25882),
(0.17647, 0.00000, 0.27843),
(0.20392, 0.00000, 0.29804),
(0.23137, 0.00000, 0.31765),
(0.26275, 0.00000, 0.33725),
(0.29412, 0.00000, 0.35686),
(0.32157, 0.00000, 0.37647),
(0.35294, 0.00000, 0.39608),
(0.38039, 0.00000, 0.41569),
(0.41176, 0.00000, 0.43529),
(0.43922, 0.00000, 0.45490),
(0.47059, 0.00000, 0.47451),
(0.49804, 0.00000, 0.49412),
(0.52941, 0.00000, 0.51373),
(0.55686, 0.00000, 0.53725),
(0.58824, 0.00000, 0.55686),
(0.55686, 0.00000, 0.57647),
(0.52941, 0.00000, 0.59608),
(0.49804, 0.00000, 0.61569),
(0.47059, 0.00000, 0.63922),
(0.43922, 0.00000, 0.65882),
(0.41176, 0.00000, 0.67843),
(0.38039, 0.00000, 0.70196),
(0.35294, 0.00000, 0.72157),
(0.32157, 0.00000, 0.74118),
(0.29412, 0.00000, 0.76471),
(0.26275, 0.00000, 0.78431),
(0.23137, 0.00000, 0.80392),
(0.20392, 0.00000, 0.82745),
(0.17647, 0.00000, 0.84706),
(0.14510, 0.00000, 0.87059),
(0.11373, 0.00000, 0.89020),
(0.08627, 0.00000, 0.91373),
(0.05882, 0.00000, 0.93333),
(0.02745, 0.00000, 0.95686),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.02353, 0.97647),
(0.00000, 0.04706, 0.95686),
(0.00000, 0.06275, 0.93333),
(0.00000, 0.08235, 0.91373),
(0.00000, 0.09804, 0.89020),
(0.00000, 0.11373, 0.87059),
(0.00000, 0.12941, 0.84706),
(0.00000, 0.14118, 0.82745),
(0.00000, 0.15686, 0.80392),
(0.00000, 0.16863, 0.78431),
(0.00000, 0.18431, 0.76471),
(0.00000, 0.19608, 0.74118),
(0.00000, 0.21176, 0.72157),
(0.00000, 0.22353, 0.70196),
(0.00000, 0.23529, 0.67843),
(0.00000, 0.25098, 0.65882),
(0.00000, 0.26275, 0.63922),
(0.00000, 0.27451, 0.61569),
(0.00000, 0.28627, 0.59608),
(0.00000, 0.29804, 0.57647),
(0.00000, 0.30980, 0.55686),
(0.00000, 0.32157, 0.53725),
(0.00000, 0.33333, 0.51373),
(0.00000, 0.34510, 0.49412),
(0.00000, 0.35686, 0.47451),
(0.00000, 0.36863, 0.45490),
(0.00000, 0.38039, 0.43529),
(0.00000, 0.39216, 0.41569),
(0.00000, 0.40392, 0.39608),
(0.00000, 0.41176, 0.37647),
(0.00000, 0.42353, 0.35686),
(0.00000, 0.43529, 0.33725),
(0.00000, 0.44706, 0.31765),
(0.00000, 0.45882, 0.29804),
(0.00000, 0.46667, 0.27843),
(0.00000, 0.47843, 0.25882),
(0.00000, 0.49020, 0.23922),
(0.00000, 0.49804, 0.21961),
(0.00000, 0.50980, 0.20000),
(0.00000, 0.52157, 0.18431),
(0.00000, 0.52941, 0.16471),
(0.00000, 0.54118, 0.14510),
(0.00000, 0.55294, 0.12941),
(0.00000, 0.56078, 0.10980),
(0.00000, 0.57255, 0.09412),
(0.00000, 0.58431, 0.07451),
(0.00000, 0.59216, 0.05882),
(0.00000, 0.60392, 0.04314),
(0.00000, 0.61176, 0.02745),
(0.00000, 0.62353, 0.01176),
(0.00000, 0.63137, 0.00000),
(0.00000, 0.64314, 0.00000),
(0.00000, 0.65098, 0.00000),
(0.00000, 0.66275, 0.00000),
(0.00000, 0.67059, 0.00000),
(0.00000, 0.68235, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.70196, 0.00000),
(0.00000, 0.70980, 0.00000),
(0.00000, 0.72157, 0.00000),
(0.00000, 0.72941, 0.00000),
(0.00000, 0.74118, 0.00000),
(0.00000, 0.74902, 0.00000),
(0.00000, 0.76078, 0.00000),
(0.00000, 0.76863, 0.00000),
(0.00000, 0.77647, 0.00000),
(0.00000, 0.78824, 0.00000),
(0.00000, 0.79608, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.81569, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.83529, 0.00000),
(0.00000, 0.84314, 0.00000),
(0.00000, 0.85490, 0.00000),
(0.00000, 0.86275, 0.00000),
(0.00000, 0.87059, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.89020, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.90980, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.94510, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.96078, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.98039, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.00000, 0.98039, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.96078, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.94510, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.90980, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89020, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.87059, 0.00000),
(0.00000, 0.86275, 0.00000),
(0.00000, 0.85490, 0.00000),
(0.00000, 0.84314, 0.00000),
(0.00000, 0.83529, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.81569, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.79608, 0.00000),
(0.00000, 0.78824, 0.00000),
(0.00000, 0.77647, 0.00000),
(0.00784, 0.76863, 0.00000),
(0.03529, 0.77647, 0.00000),
(0.06667, 0.78824, 0.00000),
(0.09804, 0.80000, 0.00000),
(0.12941, 0.81176, 0.00000),
(0.16471, 0.82745, 0.00000),
(0.20000, 0.84314, 0.00000),
(0.23529, 0.85882, 0.00000),
(0.26667, 0.87059, 0.00000),
(0.30588, 0.89020, 0.00000),
(0.34118, 0.90196, 0.00000),
(0.37647, 0.92157, 0.00000),
(0.41176, 0.93333, 0.00000),
(0.44706, 0.95294, 0.00000),
(0.48627, 0.96863, 0.00000),
(0.52157, 0.98824, 0.00000),
(0.56078, 1.00000, 0.00000),
(0.59608, 1.00000, 0.00000),
(0.63529, 1.00000, 0.00000),
(0.67059, 1.00000, 0.00000),
(0.70980, 1.00000, 0.00000),
(0.74902, 1.00000, 0.00000),
(0.78431, 1.00000, 0.00000),
(0.82353, 1.00000, 0.00000),
(0.85882, 1.00000, 0.00000),
(0.89804, 1.00000, 0.00000),
(0.93333, 1.00000, 0.00000),
(0.97647, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 1.00000, 0.00000),
(0.98039, 1.00000, 0.00000),
(0.96078, 0.97647, 0.00000),
(0.94510, 0.93725, 0.00000),
(0.92549, 0.89804, 0.00000),
(0.90980, 0.85882, 0.00000),
(0.89412, 0.81961, 0.00000),
(0.87451, 0.78039, 0.00000),
(0.85882, 0.74118, 0.00000),
(0.83922, 0.70196, 0.00000),
(0.82353, 0.66275, 0.00000),
(0.80392, 0.62353, 0.00000),
(0.78824, 0.58431, 0.00000),
(0.76863, 0.54510, 0.00000),
(0.75686, 0.50980, 0.00000),
(0.74118, 0.46667, 0.00000),
(0.72549, 0.43137, 0.00000),
(0.70980, 0.39216, 0.00000),
(0.69412, 0.35294, 0.00000),
(0.68235, 0.31765, 0.00000),
(0.66275, 0.27451, 0.00000),
(0.65098, 0.23922, 0.00000),
(0.63529, 0.20000, 0.00000),
(0.62745, 0.16863, 0.00000),
(0.61569, 0.12941, 0.00000),
(0.60784, 0.09804, 0.00000),
(0.61961, 0.08235, 0.00000),
(0.62745, 0.06275, 0.00000),
(0.63922, 0.04706, 0.00000),
(0.64706, 0.02353, 0.00000),
(0.65882, 0.00000, 0.00000),
(0.66667, 0.00000, 0.00000),
(0.67843, 0.00000, 0.00000),
(0.68627, 0.00000, 0.00000),
(0.69804, 0.00000, 0.00000),
(0.70980, 0.00000, 0.00000),
(0.71765, 0.00000, 0.00000),
(0.72941, 0.00000, 0.00000),
(0.73725, 0.00000, 0.00000),
(0.74902, 0.00000, 0.00000),
(0.75686, 0.00000, 0.00000),
(0.76863, 0.00000, 0.00000),
(0.77647, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.80000, 0.00784, 0.00784),
(0.80784, 0.02745, 0.02745),
(0.81961, 0.05098, 0.05098),
(0.82745, 0.08235, 0.08235),
(0.83922, 0.11373, 0.11373),
(0.84706, 0.14902, 0.14902),
(0.85882, 0.19216, 0.19216),
(0.86667, 0.23137, 0.23137),
(0.87843, 0.27843, 0.27843),
(0.88627, 0.32549, 0.32549),
(0.89804, 0.37647, 0.37647),
(0.90980, 0.43137, 0.43137),
(0.91765, 0.48627, 0.48627),
(0.92941, 0.54118, 0.54118),
(0.93725, 0.60000, 0.60000),
(0.94902, 0.66275, 0.66275),
(0.95686, 0.72549, 0.72549),
(0.96863, 0.79216, 0.79216),
(0.97647, 0.85882, 0.85882),
(0.98824, 0.92941, 0.92941),
(1.00000, 1.00000, 1.00000),
)
# Qualitative "many colours" map: 256 entries arranged as discrete bands of
# constant colour (grey, light blue, blue, green, red, orange, yellow,
# bright green, magenta, white), with the whole cycle repeated three times.
# NOTE(review): band widths are irregular (mostly 9 entries, some 8 or 10,
# and the final magenta band is truncated to 2 entries) -- presumably so the
# table comes out to exactly 256 rows. Verify against the generator before
# editing; the irregularity looks intentional padding, not data corruption.
cmap_manycol = (
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.34902, 0.34902, 0.34902),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.44706, 0.78431, 0.92549),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69020, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 0.69020, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.72549, 0.00000, 0.72549),
(0.72549, 0.00000, 0.72549),
)
# Linear 256-entry greyscale colour map: entry i is (i/255, i/255, i/255),
# rounded to 5 decimal places to match the precision of the other literal
# colour tables in this file.
#
# Generated programmatically rather than listed as a literal. This also
# fixes a glitch in the original table, whose entries 151-153 read
# 0.59608, 0.60000, 0.59608 (0.59216 missing and 0.59608 duplicated out of
# order), making the supposedly linear ramp non-monotonic at that point.
cmap_gray = tuple(
    (round(level / 255.0, 5),) * 3 for level in range(256)
)
# Like cmap_gray, but flags clipping: the lowest 5 intensity levels are
# rendered blue and the highest 5 red, so saturated pixels stand out.
# (The original comment said "top and bottom 5%"; it is actually 5 of 256
# levels, roughly 2%.)
#
# Generated programmatically rather than listed as a literal. This also
# fixes the same glitch as in cmap_gray: the original table's entries
# 151-153 read 0.59608, 0.60000, 0.59608 instead of the monotonic
# 0.59216, 0.59608, 0.60000.
cmap_grayclip = tuple(
    (0.0, 0.0, 1.0) if level < 5 else
    (1.0, 0.0, 0.0) if level > 250 else
    (round(level / 255.0, 5),) * 3
    for level in range(256)
)
# "Pastel" colour map: 256 entries running black -> blue -> desaturated
# magenta/red midtones -> orange -> yellow. The red channel rises and the
# blue channel falls roughly symmetrically while green rises slowly, giving
# the washed-out midrange look.
# NOTE(review): the ramp is nonlinear and contains deliberate-looking
# duplicate rows and uneven steps (e.g. consecutive identical entries in the
# 0.65-0.80 range); this appears hand-tuned or generated from a nonlinear
# function, so do not "fix" apparent irregularities without the generator.
cmap_pastel = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 1.00000),
(0.01961, 0.00000, 0.98039),
(0.05490, 0.00000, 0.94510),
(0.08627, 0.00392, 0.91373),
(0.10980, 0.00392, 0.89020),
(0.13725, 0.00392, 0.86275),
(0.15686, 0.00392, 0.84314),
(0.18039, 0.00392, 0.81961),
(0.20000, 0.00784, 0.80000),
(0.21569, 0.00784, 0.78431),
(0.23529, 0.00784, 0.76471),
(0.25098, 0.00784, 0.74902),
(0.26275, 0.01176, 0.73725),
(0.28235, 0.01176, 0.71765),
(0.29412, 0.01176, 0.70588),
(0.30588, 0.01176, 0.69412),
(0.31765, 0.01176, 0.68235),
(0.33333, 0.01569, 0.66667),
(0.34118, 0.01569, 0.65882),
(0.35294, 0.01569, 0.64706),
(0.36078, 0.01569, 0.63922),
(0.37255, 0.01961, 0.62745),
(0.38431, 0.01961, 0.61569),
(0.39216, 0.01961, 0.60784),
(0.40000, 0.01961, 0.60000),
(0.41176, 0.02353, 0.58824),
(0.41961, 0.02353, 0.58039),
(0.43137, 0.02353, 0.56863),
(0.43529, 0.02745, 0.56471),
(0.44314, 0.02745, 0.55686),
(0.45098, 0.02745, 0.54902),
(0.45882, 0.02745, 0.54118),
(0.46667, 0.03137, 0.53333),
(0.47059, 0.03137, 0.52941),
(0.48235, 0.03137, 0.51765),
(0.48627, 0.03529, 0.51373),
(0.49412, 0.03529, 0.50588),
(0.50196, 0.03529, 0.49804),
(0.50588, 0.03529, 0.49412),
(0.50980, 0.04314, 0.49020),
(0.51765, 0.04314, 0.48235),
(0.52157, 0.04314, 0.47843),
(0.53333, 0.04706, 0.46667),
(0.53725, 0.04706, 0.46275),
(0.54118, 0.04706, 0.45882),
(0.54902, 0.05098, 0.45098),
(0.55294, 0.05098, 0.44706),
(0.55686, 0.05098, 0.44314),
(0.56078, 0.05490, 0.43922),
(0.56471, 0.05490, 0.43529),
(0.57255, 0.05490, 0.42745),
(0.58039, 0.05882, 0.41961),
(0.58431, 0.05882, 0.41569),
(0.58824, 0.05882, 0.41176),
(0.59216, 0.06275, 0.40784),
(0.59608, 0.06275, 0.40392),
(0.60000, 0.06275, 0.40000),
(0.60392, 0.06667, 0.39608),
(0.60784, 0.06667, 0.39216),
(0.61176, 0.06667, 0.38824),
(0.61569, 0.07059, 0.38431),
(0.61961, 0.07059, 0.38039),
(0.62745, 0.07451, 0.37255),
(0.63137, 0.07451, 0.36863),
(0.63529, 0.07451, 0.36471),
(0.63922, 0.07843, 0.36078),
(0.64314, 0.07843, 0.35686),
(0.64706, 0.08235, 0.35294),
(0.65098, 0.08235, 0.34902),
(0.65490, 0.08235, 0.34510),
(0.65490, 0.08627, 0.34510),
(0.65882, 0.08627, 0.34118),
(0.66275, 0.09020, 0.33725),
(0.66667, 0.09020, 0.33333),
(0.67059, 0.09020, 0.32941),
(0.67843, 0.09412, 0.32157),
(0.67843, 0.09412, 0.32157),
(0.68235, 0.09804, 0.31765),
(0.68627, 0.09804, 0.31373),
(0.69020, 0.10196, 0.30980),
(0.69020, 0.10196, 0.30980),
(0.69412, 0.10588, 0.30588),
(0.69804, 0.10588, 0.30196),
(0.70196, 0.10980, 0.29804),
(0.70196, 0.10980, 0.29804),
(0.70588, 0.10980, 0.29412),
(0.70980, 0.11765, 0.29020),
(0.71373, 0.11765, 0.28627),
(0.71373, 0.12157, 0.28627),
(0.71765, 0.12157, 0.28235),
(0.72157, 0.12549, 0.27843),
(0.72157, 0.12549, 0.27843),
(0.72941, 0.12941, 0.27059),
(0.73333, 0.12941, 0.26667),
(0.73333, 0.13333, 0.26667),
(0.73725, 0.13725, 0.26275),
(0.74118, 0.13725, 0.25882),
(0.74118, 0.14118, 0.25882),
(0.74510, 0.14118, 0.25490),
(0.74510, 0.14510, 0.25490),
(0.74902, 0.14510, 0.25098),
(0.75294, 0.14902, 0.24706),
(0.75294, 0.14902, 0.24706),
(0.75686, 0.15294, 0.24314),
(0.76078, 0.15686, 0.23922),
(0.76078, 0.15686, 0.23922),
(0.76471, 0.16078, 0.23529),
(0.76471, 0.16078, 0.23529),
(0.76863, 0.16471, 0.23137),
(0.76863, 0.16863, 0.23137),
(0.77255, 0.16863, 0.22745),
(0.78039, 0.17255, 0.21961),
(0.78039, 0.17255, 0.21961),
(0.78431, 0.17647, 0.21569),
(0.78431, 0.18039, 0.21569),
(0.78824, 0.18039, 0.21176),
(0.78824, 0.18431, 0.21176),
(0.79216, 0.18824, 0.20784),
(0.79216, 0.18824, 0.20784),
(0.79608, 0.19608, 0.20392),
(0.79608, 0.20000, 0.20392),
(0.80000, 0.20000, 0.20000),
(0.80000, 0.20392, 0.20000),
(0.80392, 0.20784, 0.19608),
(0.80392, 0.20784, 0.19608),
(0.80784, 0.21176, 0.19216),
(0.80784, 0.21569, 0.19216),
(0.81176, 0.21961, 0.18824),
(0.81176, 0.21961, 0.18824),
(0.81569, 0.22353, 0.18431),
(0.81569, 0.22745, 0.18431),
(0.81961, 0.23137, 0.18039),
(0.81961, 0.23137, 0.18039),
(0.82745, 0.23529, 0.17255),
(0.82745, 0.23922, 0.17255),
(0.83137, 0.24314, 0.16863),
(0.83137, 0.24314, 0.16863),
(0.83529, 0.24706, 0.16471),
(0.83529, 0.25098, 0.16471),
(0.83922, 0.25490, 0.16078),
(0.83922, 0.25882, 0.16078),
(0.83922, 0.26275, 0.16078),
(0.84314, 0.26275, 0.15686),
(0.84314, 0.27059, 0.15686),
(0.84706, 0.27451, 0.15294),
(0.84706, 0.27843, 0.15294),
(0.85098, 0.28235, 0.14902),
(0.85098, 0.28627, 0.14902),
(0.85490, 0.29020, 0.14510),
(0.85490, 0.29412, 0.14510),
(0.85490, 0.29804, 0.14510),
(0.85882, 0.29804, 0.14118),
(0.85882, 0.30196, 0.14118),
(0.86275, 0.30588, 0.13725),
(0.86275, 0.30980, 0.13725),
(0.86275, 0.31373, 0.13725),
(0.86667, 0.31765, 0.13333),
(0.86667, 0.32157, 0.13333),
(0.87059, 0.32549, 0.12941),
(0.87059, 0.33333, 0.12941),
(0.87059, 0.33725, 0.12941),
(0.87843, 0.34118, 0.12157),
(0.87843, 0.34510, 0.12157),
(0.88235, 0.34902, 0.11765),
(0.88235, 0.35294, 0.11765),
(0.88235, 0.35686, 0.11765),
(0.88627, 0.36078, 0.11373),
(0.88627, 0.36471, 0.11373),
(0.89020, 0.37255, 0.10980),
(0.89020, 0.37647, 0.10980),
(0.89020, 0.38039, 0.10980),
(0.89412, 0.38431, 0.10588),
(0.89412, 0.38824, 0.10588),
(0.89412, 0.39216, 0.10588),
(0.89804, 0.40000, 0.10196),
(0.89804, 0.40392, 0.10196),
(0.89804, 0.40784, 0.10196),
(0.90196, 0.41176, 0.09804),
(0.90196, 0.41961, 0.09804),
(0.90588, 0.42353, 0.09412),
(0.90588, 0.42745, 0.09412),
(0.90588, 0.43529, 0.09412),
(0.90980, 0.43922, 0.09020),
(0.90980, 0.44314, 0.09020),
(0.90980, 0.45098, 0.09020),
(0.91373, 0.45490, 0.08627),
(0.91373, 0.45882, 0.08627),
(0.91373, 0.46667, 0.08627),
(0.91765, 0.47059, 0.08235),
(0.91765, 0.47843, 0.08235),
(0.91765, 0.48235, 0.08235),
(0.92157, 0.49020, 0.07843),
(0.92157, 0.49412, 0.07843),
(0.92157, 0.50196, 0.07843),
(0.92941, 0.50588, 0.07059),
(0.92941, 0.51373, 0.07059),
(0.92941, 0.51765, 0.07059),
(0.93333, 0.52549, 0.06667),
(0.93333, 0.52941, 0.06667),
(0.93333, 0.53725, 0.06667),
(0.93725, 0.54118, 0.06275),
(0.93725, 0.54902, 0.06275),
(0.93725, 0.55686, 0.06275),
(0.94118, 0.56078, 0.05882),
(0.94118, 0.56863, 0.05882),
(0.94118, 0.57647, 0.05882),
(0.94118, 0.58039, 0.05882),
(0.94510, 0.58824, 0.05490),
(0.94510, 0.59608, 0.05490),
(0.94510, 0.60000, 0.05490),
(0.94902, 0.60784, 0.05098),
(0.94902, 0.61569, 0.05098),
(0.94902, 0.62353, 0.05098),
(0.95294, 0.63137, 0.04706),
(0.95294, 0.63529, 0.04706),
(0.95294, 0.64314, 0.04706),
(0.95686, 0.65098, 0.04314),
(0.95686, 0.65882, 0.04314),
(0.95686, 0.66667, 0.04314),
(0.95686, 0.67451, 0.04314),
(0.96078, 0.68235, 0.03922),
(0.96078, 0.69020, 0.03922),
(0.96078, 0.69804, 0.03922),
(0.96471, 0.70588, 0.03529),
(0.96471, 0.71373, 0.03529),
(0.96471, 0.72157, 0.03529),
(0.96471, 0.72941, 0.03529),
(0.96863, 0.73725, 0.03137),
(0.96863, 0.74510, 0.03137),
(0.96863, 0.75294, 0.03137),
(0.97255, 0.76078, 0.02745),
(0.97255, 0.77255, 0.02745),
(0.97255, 0.78039, 0.02745),
(0.97255, 0.78824, 0.02745),
(0.98039, 0.79608, 0.01961),
(0.98039, 0.80392, 0.01961),
(0.98039, 0.81569, 0.01961),
(0.98039, 0.82353, 0.01961),
(0.98431, 0.83137, 0.01569),
(0.98431, 0.84314, 0.01569),
(0.98431, 0.85098, 0.01569),
(0.98824, 0.86275, 0.01176),
(0.98824, 0.87059, 0.01176),
(0.98824, 0.87843, 0.01176),
(0.98824, 0.89020, 0.01176),
(0.99216, 0.89804, 0.00784),
(0.99216, 0.90980, 0.00784),
(0.99216, 0.91765, 0.00784),
(0.99216, 0.92941, 0.00784),
(0.99608, 0.94118, 0.00392),
(0.99608, 0.94902, 0.00392),
(0.99608, 0.96078, 0.00392),
(0.99608, 0.97255, 0.00392),
(1.00000, 0.98039, 0.00000),
(1.00000, 0.99216, 0.00000),
)
cmap_light = (
(0.00000, 0.00392, 0.00000),
(0.00000, 0.00784, 0.01961),
(0.00000, 0.01176, 0.05490),
(0.00000, 0.01569, 0.08627),
(0.00000, 0.01961, 0.10980),
(0.00000, 0.02353, 0.13725),
(0.00000, 0.02745, 0.15686),
(0.00000, 0.03137, 0.18039),
(0.00000, 0.03529, 0.20000),
(0.00000, 0.03922, 0.21569),
(0.00000, 0.04314, 0.23529),
(0.00000, 0.04706, 0.25098),
(0.00000, 0.05098, 0.26275),
(0.00000, 0.05490, 0.28235),
(0.00000, 0.05882, 0.29412),
(0.00000, 0.06275, 0.30588),
(0.00000, 0.06667, 0.31765),
(0.00000, 0.07059, 0.33333),
(0.00000, 0.07451, 0.34118),
(0.00000, 0.07843, 0.35294),
(0.00000, 0.08235, 0.36078),
(0.00000, 0.08627, 0.37255),
(0.00000, 0.09020, 0.38431),
(0.00000, 0.09412, 0.39216),
(0.00392, 0.09804, 0.40000),
(0.00784, 0.10196, 0.41176),
(0.01176, 0.10588, 0.41961),
(0.01569, 0.10980, 0.43137),
(0.01569, 0.11373, 0.43529),
(0.01961, 0.11765, 0.44314),
(0.02353, 0.12157, 0.45098),
(0.02745, 0.12549, 0.45882),
(0.02745, 0.12941, 0.46667),
(0.03137, 0.13333, 0.47059),
(0.03529, 0.13725, 0.48235),
(0.04314, 0.14118, 0.48627),
(0.04706, 0.14510, 0.49412),
(0.05098, 0.14902, 0.50196),
(0.05882, 0.15294, 0.50588),
(0.06667, 0.15686, 0.50980),
(0.07451, 0.16078, 0.51765),
(0.08235, 0.16471, 0.52157),
(0.09020, 0.16863, 0.53333),
(0.09804, 0.17255, 0.53725),
(0.10588, 0.17647, 0.54118),
(0.11765, 0.18039, 0.54902),
(0.12941, 0.18431, 0.55294),
(0.14118, 0.18824, 0.55686),
(0.15294, 0.19216, 0.56078),
(0.16471, 0.19608, 0.56471),
(0.18039, 0.20000, 0.57255),
(0.18824, 0.20392, 0.58039),
(0.20000, 0.20784, 0.58431),
(0.21569, 0.21176, 0.58824),
(0.23137, 0.21569, 0.59216),
(0.24706, 0.21961, 0.59608),
(0.26275, 0.22353, 0.60000),
(0.27843, 0.22745, 0.60392),
(0.29412, 0.23137, 0.60784),
(0.30980, 0.23529, 0.61176),
(0.32941, 0.23922, 0.61569),
(0.34902, 0.24314, 0.61961),
(0.36863, 0.24706, 0.62745),
(0.38431, 0.25098, 0.63137),
(0.40392, 0.25490, 0.63529),
(0.41569, 0.25882, 0.63922),
(0.43529, 0.26275, 0.64314),
(0.45490, 0.26667, 0.64706),
(0.47059, 0.27059, 0.65098),
(0.48627, 0.27451, 0.65490),
(0.50196, 0.27843, 0.65490),
(0.51765, 0.28235, 0.65882),
(0.52941, 0.28627, 0.66275),
(0.54902, 0.29020, 0.66667),
(0.56471, 0.29412, 0.67059),
(0.58039, 0.29804, 0.67843),
(0.59216, 0.30196, 0.67843),
(0.60392, 0.30588, 0.68235),
(0.61961, 0.30980, 0.68627),
(0.63137, 0.31373, 0.69020),
(0.63922, 0.31765, 0.69020),
(0.65098, 0.32157, 0.69412),
(0.65882, 0.32549, 0.69804),
(0.67059, 0.32941, 0.70196),
(0.67843, 0.33333, 0.70196),
(0.69020, 0.33725, 0.70588),
(0.69804, 0.34118, 0.70980),
(0.70588, 0.34510, 0.71373),
(0.71373, 0.34902, 0.71373),
(0.72157, 0.35294, 0.71765),
(0.72941, 0.35686, 0.72157),
(0.73725, 0.36078, 0.72157),
(0.74510, 0.36471, 0.72941),
(0.75294, 0.36863, 0.73333),
(0.76078, 0.37255, 0.73333),
(0.76863, 0.37647, 0.73725),
(0.77647, 0.38039, 0.74118),
(0.78039, 0.38431, 0.74118),
(0.78824, 0.38824, 0.74510),
(0.79608, 0.39216, 0.74510),
(0.80392, 0.39608, 0.74902),
(0.80784, 0.40000, 0.75294),
(0.81176, 0.40392, 0.75294),
(0.81961, 0.40784, 0.75686),
(0.82353, 0.41176, 0.76078),
(0.82745, 0.41569, 0.76078),
(0.83529, 0.41961, 0.76471),
(0.83922, 0.42353, 0.76471),
(0.84314, 0.42745, 0.76863),
(0.84706, 0.43137, 0.76863),
(0.85098, 0.43529, 0.77255),
(0.85490, 0.43922, 0.78039),
(0.85882, 0.44314, 0.78039),
(0.86275, 0.44706, 0.78431),
(0.86667, 0.45098, 0.78431),
(0.87059, 0.45490, 0.78824),
(0.87451, 0.45882, 0.78824),
(0.87843, 0.46275, 0.79216),
(0.88235, 0.46667, 0.79216),
(0.88627, 0.47059, 0.79608),
(0.89020, 0.47451, 0.79608),
(0.89412, 0.47843, 0.80000),
(0.89804, 0.48235, 0.80000),
(0.89804, 0.48627, 0.80392),
(0.90196, 0.49020, 0.80392),
(0.90588, 0.49412, 0.80784),
(0.90980, 0.49804, 0.80784),
(0.91373, 0.50196, 0.81176),
(0.91373, 0.50588, 0.81176),
(0.91765, 0.50980, 0.81569),
(0.92157, 0.51373, 0.81569),
(0.92157, 0.51765, 0.81961),
(0.92549, 0.52157, 0.81961),
(0.92941, 0.52549, 0.82745),
(0.92941, 0.52941, 0.82745),
(0.93333, 0.53333, 0.83137),
(0.93725, 0.53725, 0.83137),
(0.93725, 0.54118, 0.83529),
(0.93725, 0.54510, 0.83529),
(0.94118, 0.54902, 0.83922),
(0.94118, 0.55294, 0.83922),
(0.94510, 0.55686, 0.83922),
(0.94510, 0.56078, 0.84314),
(0.94902, 0.56471, 0.84314),
(0.94902, 0.56863, 0.84706),
(0.95294, 0.57255, 0.84706),
(0.95294, 0.57647, 0.85098),
(0.95294, 0.58039, 0.85098),
(0.95686, 0.58431, 0.85490),
(0.95686, 0.58824, 0.85490),
(0.96078, 0.59216, 0.85490),
(0.96078, 0.59608, 0.85882),
(0.96078, 0.60000, 0.85882),
(0.96471, 0.60392, 0.86275),
(0.96471, 0.60784, 0.86275),
(0.96471, 0.61176, 0.86275),
(0.96863, 0.61569, 0.86667),
(0.96863, 0.61961, 0.86667),
(0.97255, 0.62353, 0.87059),
(0.97255, 0.62745, 0.87059),
(0.97255, 0.63137, 0.87059),
(0.97647, 0.63529, 0.87843),
(0.97647, 0.63922, 0.87843),
(0.98039, 0.64314, 0.88235),
(0.98039, 0.64706, 0.88235),
(0.98039, 0.65098, 0.88235),
(0.98431, 0.65490, 0.88627),
(0.98431, 0.65882, 0.88627),
(0.98431, 0.66275, 0.89020),
(0.98824, 0.66667, 0.89020),
(0.98824, 0.67059, 0.89020),
(0.98824, 0.67451, 0.89412),
(0.99216, 0.67843, 0.89412),
(0.99216, 0.68235, 0.89412),
(0.99216, 0.68627, 0.89804),
(0.99216, 0.69020, 0.89804),
(0.99216, 0.69412, 0.89804),
(0.99608, 0.69804, 0.90196),
(0.99608, 0.70196, 0.90196),
(0.99608, 0.70588, 0.90588),
(0.99608, 0.70980, 0.90588),
(0.99608, 0.71373, 0.90588),
(0.99608, 0.71765, 0.90980),
(0.99608, 0.72157, 0.90980),
(0.99608, 0.72549, 0.90980),
(0.99608, 0.72941, 0.91373),
(0.99608, 0.73333, 0.91373),
(0.99608, 0.73725, 0.91373),
(0.99608, 0.74118, 0.91765),
(0.99608, 0.74510, 0.91765),
(0.99608, 0.74902, 0.91765),
(0.99608, 0.75294, 0.92157),
(0.99608, 0.75686, 0.92157),
(0.99608, 0.76078, 0.92157),
(0.99608, 0.76471, 0.92941),
(0.99608, 0.76863, 0.92941),
(0.99608, 0.77255, 0.92941),
(0.99608, 0.77647, 0.93333),
(0.99608, 0.78039, 0.93333),
(0.99608, 0.78431, 0.93333),
(0.99608, 0.78824, 0.93725),
(1.00000, 0.79216, 0.93725),
(1.00000, 0.79608, 0.93725),
(1.00000, 0.80000, 0.94118),
(1.00000, 0.80392, 0.94118),
(1.00000, 0.80784, 0.94118),
(1.00000, 0.81176, 0.94118),
(1.00000, 0.81569, 0.94510),
(1.00000, 0.81961, 0.94510),
(1.00000, 0.82353, 0.94510),
(1.00000, 0.82745, 0.94902),
(1.00000, 0.83137, 0.94902),
(1.00000, 0.83529, 0.94902),
(1.00000, 0.83922, 0.95294),
(1.00000, 0.84314, 0.95294),
(1.00000, 0.84706, 0.95294),
(1.00000, 0.85098, 0.95686),
(1.00000, 0.85490, 0.95686),
(1.00000, 0.85882, 0.95686),
(1.00000, 0.86275, 0.95686),
(1.00000, 0.86667, 0.96078),
(1.00000, 0.87059, 0.96078),
(1.00000, 0.87451, 0.96078),
(1.00000, 0.87843, 0.96471),
(1.00000, 0.88235, 0.96471),
(1.00000, 0.88627, 0.96471),
(1.00000, 0.89020, 0.96471),
(1.00000, 0.89412, 0.96863),
(1.00000, 0.89804, 0.96863),
(1.00000, 0.90196, 0.96863),
(1.00000, 0.90588, 0.97255),
(1.00000, 0.90980, 0.97255),
(1.00000, 0.91373, 0.97255),
(1.00000, 0.91765, 0.97255),
(1.00000, 0.92157, 0.98039),
(1.00000, 0.92549, 0.98039),
(1.00000, 0.92941, 0.98039),
(1.00000, 0.93333, 0.98039),
(1.00000, 0.93725, 0.98431),
(1.00000, 0.94118, 0.98431),
(1.00000, 0.94510, 0.98431),
(1.00000, 0.94902, 0.98824),
(1.00000, 0.95294, 0.98824),
(1.00000, 0.95686, 0.98824),
(1.00000, 0.96078, 0.98824),
(1.00000, 0.96471, 0.99216),
(1.00000, 0.96863, 0.99216),
(1.00000, 0.97255, 0.99216),
(1.00000, 0.97647, 0.99216),
(1.00000, 0.98039, 0.99608),
(1.00000, 0.98431, 0.99608),
(1.00000, 0.98824, 0.99608),
(1.00000, 0.99216, 0.99608),
(1.00000, 0.99608, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random1 = (
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.00000, 0.00000, 0.16471),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.23137, 0.00000, 0.31765),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.47451),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.47059, 0.00000, 0.63922),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.23137, 0.00000, 0.80392),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.70980, 0.81176, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.09804, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
)
cmap_random2 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.47059, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.62745, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 0.78431, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 1.00000, 0.00392),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00392, 0.86275, 0.47059),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.78431, 0.62745),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.70588, 0.78431),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.47059, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.23529, 0.00392, 1.00000),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.47059, 0.00392, 0.78431),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.62745, 0.00392, 0.62745),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.78431, 0.00392, 0.47059),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(0.90196, 0.11765, 0.23529),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.23529, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.99216, 0.59608, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.90196, 0.00000),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(0.98039, 0.98039, 0.47059),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random3 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.47059),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random4 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_random5 = (
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.99216),
(0.00000, 0.00000, 0.98824),
(0.00392, 0.00000, 0.98431),
(0.00392, 0.00000, 0.98039),
(0.00784, 0.00000, 0.97647),
(0.00784, 0.00000, 0.96863),
(0.01176, 0.00000, 0.96471),
(0.01569, 0.00000, 0.96078),
(0.01569, 0.00000, 0.95686),
(0.01961, 0.00000, 0.95294),
(0.01961, 0.00000, 0.94510),
(0.02353, 0.00784, 0.94118),
(0.02745, 0.01569, 0.93725),
(0.02745, 0.02745, 0.93333),
(0.03137, 0.03922, 0.92941),
(0.03529, 0.05098, 0.92157),
(0.03529, 0.06667, 0.91765),
(0.03922, 0.08235, 0.91373),
(0.04314, 0.09804, 0.90980),
(0.04706, 0.11765, 0.90588),
(0.04706, 0.09804, 0.90196),
(0.05098, 0.08235, 0.89412),
(0.05490, 0.06667, 0.89020),
(0.05490, 0.05098, 0.88627),
(0.05882, 0.03922, 0.88235),
(0.06275, 0.02745, 0.87843),
(0.06667, 0.01569, 0.87059),
(0.07059, 0.00784, 0.86667),
(0.07059, 0.00000, 0.86275),
(0.07451, 0.00000, 0.85882),
(0.07843, 0.00392, 0.85490),
(0.08235, 0.01176, 0.85098),
(0.08235, 0.02353, 0.84314),
(0.08627, 0.03922, 0.83922),
(0.09020, 0.05490, 0.83529),
(0.09412, 0.07059, 0.83137),
(0.09804, 0.09020, 0.82745),
(0.09804, 0.10980, 0.82353),
(0.10196, 0.13333, 0.81569),
(0.10588, 0.15686, 0.81176),
(0.10980, 0.13333, 0.80784),
(0.11373, 0.10980, 0.80392),
(0.11765, 0.09020, 0.80000),
(0.11765, 0.07059, 0.79608),
(0.12157, 0.05490, 0.79216),
(0.12549, 0.03922, 0.78431),
(0.12941, 0.02353, 0.78039),
(0.13333, 0.01176, 0.77647),
(0.13725, 0.00392, 0.77255),
(0.14118, 0.00000, 0.76863),
(0.14118, 0.00392, 0.76471),
(0.14510, 0.01569, 0.75686),
(0.14902, 0.03137, 0.75294),
(0.15294, 0.04706, 0.74902),
(0.15686, 0.06667, 0.74510),
(0.16078, 0.09020, 0.74118),
(0.16471, 0.11373, 0.73725),
(0.16863, 0.13725, 0.73333),
(0.17255, 0.16471, 0.72549),
(0.17255, 0.19608, 0.72157),
(0.17647, 0.16471, 0.71765),
(0.18039, 0.13725, 0.71373),
(0.18431, 0.11373, 0.70980),
(0.18824, 0.09020, 0.70588),
(0.19216, 0.06667, 0.70196),
(0.19608, 0.04706, 0.69804),
(0.20000, 0.03137, 0.69020),
(0.20392, 0.01569, 0.68627),
(0.20784, 0.00392, 0.68235),
(0.21176, 0.00000, 0.67843),
(0.21176, 0.00784, 0.67451),
(0.21569, 0.02353, 0.67059),
(0.21961, 0.04314, 0.66667),
(0.22353, 0.06667, 0.66275),
(0.22745, 0.09412, 0.65490),
(0.23137, 0.12549, 0.65098),
(0.23529, 0.15686, 0.64706),
(0.23922, 0.19608, 0.64314),
(0.24314, 0.23137, 0.63922),
(0.24706, 0.27451, 0.63529),
(0.25098, 0.23137, 0.63137),
(0.25490, 0.19608, 0.62745),
(0.25882, 0.15686, 0.61961),
(0.26275, 0.12549, 0.61569),
(0.26667, 0.09412, 0.61176),
(0.27059, 0.06667, 0.60784),
(0.27451, 0.04314, 0.60392),
(0.27843, 0.02353, 0.60000),
(0.28235, 0.00784, 0.59608),
(0.28627, 0.00000, 0.59216),
(0.29020, 0.00784, 0.58824),
(0.29412, 0.03137, 0.58431),
(0.29804, 0.05490, 0.57647),
(0.29804, 0.08627, 0.57255),
(0.30196, 0.12157, 0.56863),
(0.30588, 0.16078, 0.56471),
(0.30980, 0.20392, 0.56078),
(0.31373, 0.25098, 0.55686),
(0.31765, 0.29804, 0.55294),
(0.32157, 0.35294, 0.54902),
(0.32549, 0.29804, 0.54510),
(0.32941, 0.25098, 0.54118),
(0.33333, 0.20392, 0.53725),
(0.33725, 0.16078, 0.52941),
(0.34118, 0.12157, 0.52549),
(0.34510, 0.08627, 0.52157),
(0.34902, 0.05490, 0.51765),
(0.35294, 0.03137, 0.51373),
(0.35686, 0.00784, 0.50980),
(0.36078, 0.00000, 0.50588),
(0.36471, 0.01176, 0.50196),
(0.37255, 0.03529, 0.49804),
(0.37647, 0.07059, 0.49412),
(0.38039, 0.10588, 0.49020),
(0.38431, 0.14902, 0.48627),
(0.38824, 0.20000, 0.48235),
(0.39216, 0.25098, 0.47843),
(0.39608, 0.30588, 0.47059),
(0.40000, 0.36471, 0.46667),
(0.40392, 0.43137, 0.46275),
(0.40784, 0.36471, 0.45882),
(0.41176, 0.30588, 0.45490),
(0.41569, 0.25098, 0.45098),
(0.41961, 0.20000, 0.44706),
(0.42353, 0.14902, 0.44314),
(0.42745, 0.10588, 0.43922),
(0.43137, 0.07059, 0.43529),
(0.43529, 0.03529, 0.43137),
(0.43922, 0.01176, 0.42745),
(0.44314, 0.00000, 0.42353),
(0.44706, 0.01569, 0.41961),
(0.45098, 0.04314, 0.41569),
(0.45490, 0.08235, 0.41176),
(0.45882, 0.12549, 0.40784),
(0.46275, 0.17647, 0.40392),
(0.46667, 0.23529, 0.40000),
(0.47059, 0.29804, 0.39608),
(0.47843, 0.36471, 0.39216),
(0.48235, 0.43137, 0.38824),
(0.48627, 0.50980, 0.38431),
(0.49020, 0.43137, 0.38039),
(0.49412, 0.36471, 0.37647),
(0.49804, 0.29804, 0.37255),
(0.50196, 0.23529, 0.36471),
(0.50588, 0.17647, 0.36078),
(0.50980, 0.12549, 0.35686),
(0.51373, 0.08235, 0.35294),
(0.51765, 0.04314, 0.34902),
(0.52157, 0.01569, 0.34510),
(0.52549, 0.00000, 0.34118),
(0.52941, 0.01569, 0.33725),
(0.53725, 0.05098, 0.33333),
(0.54118, 0.09412, 0.32941),
(0.54510, 0.14510, 0.32549),
(0.54902, 0.20784, 0.32157),
(0.55294, 0.27059, 0.31765),
(0.55686, 0.34118, 0.31373),
(0.56078, 0.41961, 0.30980),
(0.56471, 0.50196, 0.30588),
(0.56863, 0.58824, 0.30196),
(0.57255, 0.50196, 0.29804),
(0.57647, 0.41961, 0.29804),
(0.58431, 0.34118, 0.29412),
(0.58824, 0.27059, 0.29020),
(0.59216, 0.20784, 0.28627),
(0.59608, 0.14510, 0.28235),
(0.60000, 0.09412, 0.27843),
(0.60392, 0.05098, 0.27451),
(0.60784, 0.01569, 0.27059),
(0.61176, 0.00000, 0.26667),
(0.61569, 0.01961, 0.26275),
(0.61961, 0.05882, 0.25882),
(0.62745, 0.10588, 0.25490),
(0.63137, 0.16863, 0.25098),
(0.63529, 0.23529, 0.24706),
(0.63922, 0.30980, 0.24314),
(0.64314, 0.38824, 0.23922),
(0.64706, 0.47451, 0.23529),
(0.65098, 0.56863, 0.23137),
(0.65490, 0.66667, 0.22745),
(0.66275, 0.56863, 0.22353),
(0.66667, 0.47451, 0.21961),
(0.67059, 0.38824, 0.21569),
(0.67451, 0.30980, 0.21176),
(0.67843, 0.23529, 0.21176),
(0.68235, 0.16863, 0.20784),
(0.68627, 0.10588, 0.20392),
(0.69020, 0.05882, 0.20000),
(0.69804, 0.01961, 0.19608),
(0.70196, 0.00000, 0.19216),
(0.70588, 0.02353, 0.18824),
(0.70980, 0.06275, 0.18431),
(0.71373, 0.12157, 0.18039),
(0.71765, 0.18824, 0.17647),
(0.72157, 0.26275, 0.17255),
(0.72549, 0.34510, 0.17255),
(0.73333, 0.43529, 0.16863),
(0.73725, 0.52941, 0.16471),
(0.74118, 0.63529, 0.16078),
(0.74510, 0.74510, 0.15686),
(0.74902, 0.63529, 0.15294),
(0.75294, 0.52941, 0.14902),
(0.75686, 0.43529, 0.14510),
(0.76471, 0.34510, 0.14118),
(0.76863, 0.26275, 0.14118),
(0.77255, 0.18824, 0.13725),
(0.77647, 0.12157, 0.13333),
(0.78039, 0.06275, 0.12941),
(0.78431, 0.02353, 0.12549),
(0.79216, 0.00000, 0.12157),
(0.79608, 0.02353, 0.11765),
(0.80000, 0.07059, 0.11765),
(0.80392, 0.13333, 0.11373),
(0.80784, 0.20784, 0.10980),
(0.81176, 0.29020, 0.10588),
(0.81569, 0.38039, 0.10196),
(0.82353, 0.47843, 0.09804),
(0.82745, 0.58824, 0.09804),
(0.83137, 0.70196, 0.09412),
(0.83529, 0.82353, 0.09020),
(0.83922, 0.70196, 0.08627),
(0.84314, 0.58824, 0.08235),
(0.85098, 0.47843, 0.08235),
(0.85490, 0.38039, 0.07843),
(0.85882, 0.29020, 0.07451),
(0.86275, 0.20784, 0.07059),
(0.86667, 0.13333, 0.07059),
(0.87059, 0.07059, 0.06667),
(0.87843, 0.02353, 0.06275),
(0.88235, 0.00000, 0.05882),
(0.88627, 0.02745, 0.05490),
(0.89020, 0.07843, 0.05490),
(0.89412, 0.14510, 0.05098),
(0.90196, 0.22745, 0.04706),
(0.90588, 0.31765, 0.04706),
(0.90980, 0.41569, 0.04314),
(0.91373, 0.52549, 0.03922),
(0.91765, 0.64314, 0.03529),
(0.92157, 0.76863, 0.03529),
(0.92941, 0.90196, 0.03137),
(0.93333, 0.76863, 0.02745),
(0.93725, 0.64314, 0.02745),
(0.94118, 0.52549, 0.02353),
(0.94510, 0.41569, 0.01961),
(0.95294, 0.31765, 0.01961),
(0.95686, 0.25882, 0.01569),
(0.96078, 0.23137, 0.01569),
(0.96471, 0.23922, 0.01176),
(0.96863, 0.27843, 0.00784),
(0.97647, 0.35294, 0.00784),
(0.98039, 0.46275, 0.00392),
(0.98431, 0.58431, 0.00392),
(0.98824, 0.71373, 0.00000),
(0.99216, 0.85098, 0.00000),
(1.00000, 1.00000, 0.00000),
)
cmap_random6 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.14118, 0.00000),
(0.00000, 0.28235, 0.00000),
(0.00000, 0.42353, 0.00000),
(0.00000, 0.56471, 0.00000),
(0.00000, 0.70588, 0.00000),
(0.00000, 0.84706, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.00000, 0.00000, 0.14118),
(0.00000, 0.14118, 0.14118),
(0.00000, 0.28235, 0.14118),
(0.00000, 0.42353, 0.14118),
(0.00000, 0.56471, 0.14118),
(0.00000, 0.70588, 0.14118),
(0.00000, 0.84706, 0.14118),
(0.00000, 0.98824, 0.14118),
(0.00000, 0.00000, 0.28235),
(0.00000, 0.14118, 0.28235),
(0.00000, 0.28235, 0.28235),
(0.00000, 0.42353, 0.28235),
(0.00000, 0.56471, 0.28235),
(0.00000, 0.70588, 0.28235),
(0.00000, 0.84706, 0.28235),
(0.00000, 0.98824, 0.28235),
(0.00000, 0.00000, 0.42353),
(0.00000, 0.14118, 0.42353),
(0.00000, 0.28235, 0.42353),
(0.00000, 0.42353, 0.42353),
(0.00000, 0.56471, 0.42353),
(0.00000, 0.70588, 0.42353),
(0.00000, 0.84706, 0.42353),
(0.00000, 0.98824, 0.42353),
(0.00000, 0.00000, 0.56471),
(0.00000, 0.14118, 0.56471),
(0.00000, 0.28235, 0.56471),
(0.00000, 0.42353, 0.56471),
(0.00000, 0.56471, 0.56471),
(0.00000, 0.70588, 0.56471),
(0.00000, 0.84706, 0.56471),
(0.00000, 0.98824, 0.56471),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.14118, 0.70588),
(0.00000, 0.28235, 0.70588),
(0.00000, 0.42353, 0.70588),
(0.00000, 0.56471, 0.70588),
(0.00000, 0.70588, 0.70588),
(0.00000, 0.84706, 0.70588),
(0.00000, 0.98824, 0.70588),
(0.00000, 0.00000, 0.84706),
(0.00000, 0.14118, 0.84706),
(0.00000, 0.28235, 0.84706),
(0.00000, 0.42353, 0.84706),
(0.00000, 0.56471, 0.84706),
(0.00000, 0.70588, 0.84706),
(0.00000, 0.84706, 0.84706),
(0.00000, 0.98824, 0.84706),
(0.00000, 0.00000, 0.98824),
(0.00000, 0.14118, 0.98824),
(0.00000, 0.28235, 0.98824),
(0.00000, 0.42353, 0.98824),
(0.00000, 0.56471, 0.98824),
(0.00000, 0.70588, 0.98824),
(0.00000, 0.84706, 0.98824),
(0.00000, 0.98824, 0.98824),
(0.00000, 0.00000, 0.00000),
(0.32941, 0.14118, 0.00000),
(0.32941, 0.28235, 0.00000),
(0.32941, 0.42353, 0.00000),
(0.32941, 0.56471, 0.00000),
(0.32941, 0.70588, 0.00000),
(0.32941, 0.84706, 0.00000),
(0.32941, 0.98824, 0.00000),
(0.32941, 0.00000, 0.14118),
(0.32941, 0.14118, 0.14118),
(0.32941, 0.28235, 0.14118),
(0.32941, 0.42353, 0.14118),
(0.32941, 0.56471, 0.14118),
(0.32941, 0.70588, 0.14118),
(0.32941, 0.84706, 0.14118),
(0.32941, 0.98824, 0.14118),
(0.32941, 0.00000, 0.28235),
(0.32941, 0.14118, 0.28235),
(0.32941, 0.28235, 0.28235),
(0.32941, 0.42353, 0.28235),
(0.32941, 0.56471, 0.28235),
(0.32941, 0.70588, 0.28235),
(0.32941, 0.84706, 0.28235),
(0.32941, 0.98824, 0.28235),
(0.32941, 0.00000, 0.42353),
(0.32941, 0.14118, 0.42353),
(0.32941, 0.28235, 0.42353),
(0.32941, 0.42353, 0.42353),
(0.32941, 0.56471, 0.42353),
(0.32941, 0.70588, 0.42353),
(0.32941, 0.84706, 0.42353),
(0.32941, 0.98824, 0.42353),
(0.32941, 0.00000, 0.56471),
(0.32941, 0.14118, 0.56471),
(0.32941, 0.28235, 0.56471),
(0.32941, 0.42353, 0.56471),
(0.32941, 0.56471, 0.56471),
(0.32941, 0.70588, 0.56471),
(0.32941, 0.84706, 0.56471),
(0.32941, 0.98824, 0.56471),
(0.32941, 0.00000, 0.70588),
(0.32941, 0.14118, 0.70588),
(0.32941, 0.28235, 0.70588),
(0.32941, 0.42353, 0.70588),
(0.32941, 0.56471, 0.70588),
(0.32941, 0.70588, 0.70588),
(0.32941, 0.84706, 0.70588),
(0.32941, 0.98824, 0.70588),
(0.32941, 0.00000, 0.84706),
(0.32941, 0.14118, 0.84706),
(0.32941, 0.28235, 0.84706),
(0.32941, 0.42353, 0.84706),
(0.32941, 0.56471, 0.84706),
(0.32941, 0.70588, 0.84706),
(0.32941, 0.84706, 0.84706),
(0.32941, 0.98824, 0.84706),
(0.32941, 0.00000, 0.98824),
(0.32941, 0.14118, 0.98824),
(0.32941, 0.28235, 0.98824),
(0.32941, 0.42353, 0.98824),
(0.32941, 0.56471, 0.98824),
(0.32941, 0.70588, 0.98824),
(0.32941, 0.84706, 0.98824),
(0.32941, 0.98824, 0.98824),
(0.32941, 0.00000, 0.00000),
(0.65882, 0.14118, 0.00000),
(0.65882, 0.28235, 0.00000),
(0.65882, 0.42353, 0.00000),
(0.65882, 0.56471, 0.00000),
(0.65882, 0.70588, 0.00000),
(0.65882, 0.84706, 0.00000),
(0.65882, 0.98824, 0.00000),
(0.65882, 0.00000, 0.14118),
(0.65882, 0.14118, 0.14118),
(0.65882, 0.28235, 0.14118),
(0.65882, 0.42353, 0.14118),
(0.65882, 0.56471, 0.14118),
(0.65882, 0.70588, 0.14118),
(0.65882, 0.84706, 0.14118),
(0.65882, 0.98824, 0.14118),
(0.65882, 0.00000, 0.28235),
(0.65882, 0.14118, 0.28235),
(0.65882, 0.28235, 0.28235),
(0.65882, 0.42353, 0.28235),
(0.65882, 0.56471, 0.28235),
(0.65882, 0.70588, 0.28235),
(0.65882, 0.84706, 0.28235),
(0.65882, 0.98824, 0.28235),
(0.65882, 0.00000, 0.42353),
(0.65882, 0.14118, 0.42353),
(0.65882, 0.28235, 0.42353),
(0.65882, 0.42353, 0.42353),
(0.65882, 0.56471, 0.42353),
(0.65882, 0.70588, 0.42353),
(0.65882, 0.84706, 0.42353),
(0.65882, 0.98824, 0.42353),
(0.65882, 0.00000, 0.56471),
(0.65882, 0.14118, 0.56471),
(0.65882, 0.28235, 0.56471),
(0.65882, 0.42353, 0.56471),
(0.65882, 0.56471, 0.56471),
(0.65882, 0.70588, 0.56471),
(0.65882, 0.84706, 0.56471),
(0.65882, 0.98824, 0.56471),
(0.65882, 0.00000, 0.70588),
(0.65882, 0.14118, 0.70588),
(0.65882, 0.28235, 0.70588),
(0.65882, 0.42353, 0.70588),
(0.65882, 0.56471, 0.70588),
(0.65882, 0.70588, 0.70588),
(0.65882, 0.84706, 0.70588),
(0.65882, 0.98824, 0.70588),
(0.65882, 0.00000, 0.84706),
(0.65882, 0.14118, 0.84706),
(0.65882, 0.28235, 0.84706),
(0.65882, 0.42353, 0.84706),
(0.65882, 0.56471, 0.84706),
(0.65882, 0.70588, 0.84706),
(0.65882, 0.84706, 0.84706),
(0.65882, 0.98824, 0.84706),
(0.65882, 0.00000, 0.98824),
(0.65882, 0.14118, 0.98824),
(0.65882, 0.28235, 0.98824),
(0.65882, 0.42353, 0.98824),
(0.65882, 0.56471, 0.98824),
(0.65882, 0.70588, 0.98824),
(0.65882, 0.84706, 0.98824),
(0.65882, 0.98824, 0.98824),
(0.65882, 0.00000, 0.00000),
(0.98824, 0.14118, 0.00000),
(0.98824, 0.28235, 0.00000),
(0.98824, 0.42353, 0.00000),
(0.98824, 0.56471, 0.00000),
(0.98824, 0.70588, 0.00000),
(0.98824, 0.84706, 0.00000),
(0.98824, 0.98824, 0.00000),
(0.98824, 0.00000, 0.14118),
(0.98824, 0.14118, 0.14118),
(0.98824, 0.28235, 0.14118),
(0.98824, 0.42353, 0.14118),
(0.98824, 0.56471, 0.14118),
(0.98824, 0.70588, 0.14118),
(0.98824, 0.84706, 0.14118),
(0.98824, 0.98824, 0.14118),
(0.98824, 0.00000, 0.28235),
(0.98824, 0.14118, 0.28235),
(0.98824, 0.28235, 0.28235),
(0.98824, 0.42353, 0.28235),
(0.98824, 0.56471, 0.28235),
(0.98824, 0.70588, 0.28235),
(0.98824, 0.84706, 0.28235),
(0.98824, 0.98824, 0.28235),
(0.98824, 0.00000, 0.42353),
(0.98824, 0.14118, 0.42353),
(0.98824, 0.28235, 0.42353),
(0.98824, 0.42353, 0.42353),
(0.98824, 0.56471, 0.42353),
(0.98824, 0.70588, 0.42353),
(0.98824, 0.84706, 0.42353),
(0.98824, 0.98824, 0.42353),
(0.98824, 0.00000, 0.56471),
(0.98824, 0.14118, 0.56471),
(0.98824, 0.28235, 0.56471),
(0.98824, 0.42353, 0.56471),
(0.98824, 0.56471, 0.56471),
(0.98824, 0.70588, 0.56471),
(0.98824, 0.84706, 0.56471),
(0.98824, 0.98824, 0.56471),
(0.98824, 0.00000, 0.70588),
(0.98824, 0.14118, 0.70588),
(0.98824, 0.28235, 0.70588),
(0.98824, 0.42353, 0.70588),
(0.98824, 0.56471, 0.70588),
(0.98824, 0.70588, 0.70588),
(0.98824, 0.84706, 0.70588),
(0.98824, 0.98824, 0.70588),
(0.98824, 0.00000, 0.84706),
(0.98824, 0.14118, 0.84706),
(0.98824, 0.28235, 0.84706),
(0.98824, 0.42353, 0.84706),
(0.98824, 0.56471, 0.84706),
(0.98824, 0.70588, 0.84706),
(0.98824, 0.84706, 0.84706),
(0.98824, 0.98824, 0.84706),
(0.98824, 0.00000, 0.98824),
(0.98824, 0.14118, 0.98824),
(0.98824, 0.28235, 0.98824),
(0.98824, 0.42353, 0.98824),
(0.98824, 0.56471, 0.98824),
(0.98824, 0.70588, 0.98824),
(0.98824, 0.84706, 0.98824),
(0.98824, 0.98824, 0.98824),
)
cmap_color = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.18431, 0.18431, 0.18431),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.37255, 0.37255, 0.37255),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.56078, 0.56078, 0.56078),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.74902, 0.74902, 0.74902),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.93725, 0.93725, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.18431, 0.93725),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.37255, 0.74902),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.74902, 0.30980),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.30980, 0.62353, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.49804, 0.49804, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.62353, 0.30980, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
(0.74902, 0.00000, 0.30980),
)
cmap_standard = (
(0.00392, 0.00392, 0.33333),
(0.00784, 0.00784, 0.34118),
(0.01176, 0.01176, 0.34902),
(0.01569, 0.01569, 0.35686),
(0.01961, 0.01961, 0.36471),
(0.02353, 0.02353, 0.37255),
(0.02745, 0.02745, 0.38039),
(0.03137, 0.03137, 0.38824),
(0.03529, 0.03529, 0.39608),
(0.03922, 0.03922, 0.40392),
(0.04314, 0.04314, 0.41176),
(0.04706, 0.04706, 0.41961),
(0.05098, 0.05098, 0.42745),
(0.05490, 0.05490, 0.43529),
(0.05882, 0.05882, 0.44314),
(0.06275, 0.06275, 0.45098),
(0.06667, 0.06667, 0.45882),
(0.07059, 0.07059, 0.46667),
(0.07451, 0.07451, 0.47451),
(0.07843, 0.07843, 0.48235),
(0.08235, 0.08235, 0.49020),
(0.08627, 0.08627, 0.49804),
(0.09020, 0.09020, 0.50588),
(0.09412, 0.09412, 0.51373),
(0.09804, 0.09804, 0.52157),
(0.10196, 0.10196, 0.52941),
(0.10588, 0.10588, 0.53725),
(0.10980, 0.10980, 0.54510),
(0.11373, 0.11373, 0.55294),
(0.11765, 0.11765, 0.56078),
(0.12157, 0.12157, 0.56863),
(0.12549, 0.12549, 0.57647),
(0.12941, 0.12941, 0.58431),
(0.13333, 0.13333, 0.59216),
(0.13725, 0.13725, 0.60000),
(0.14118, 0.14118, 0.60784),
(0.14510, 0.14510, 0.61569),
(0.14902, 0.14902, 0.62353),
(0.15294, 0.15294, 0.63137),
(0.15686, 0.15686, 0.63922),
(0.16078, 0.16078, 0.64706),
(0.16471, 0.16471, 0.65490),
(0.16863, 0.16863, 0.66275),
(0.17255, 0.17255, 0.67059),
(0.17647, 0.17647, 0.67843),
(0.18039, 0.18039, 0.68627),
(0.18431, 0.18431, 0.69412),
(0.18824, 0.18824, 0.70196),
(0.19216, 0.19216, 0.70980),
(0.19608, 0.19608, 0.71765),
(0.20000, 0.20000, 0.72549),
(0.20392, 0.20392, 0.73333),
(0.20784, 0.20784, 0.74118),
(0.21176, 0.21176, 0.74902),
(0.21569, 0.21569, 0.75686),
(0.21961, 0.21961, 0.76471),
(0.22353, 0.22353, 0.77255),
(0.22745, 0.22745, 0.78039),
(0.23137, 0.23137, 0.78824),
(0.23529, 0.23529, 0.79608),
(0.23922, 0.23922, 0.80392),
(0.24314, 0.24314, 0.81176),
(0.24706, 0.24706, 0.81961),
(0.25098, 0.25098, 0.82745),
(0.25490, 0.25490, 0.83529),
(0.25882, 0.25882, 0.84314),
(0.26275, 0.26275, 0.85098),
(0.26667, 0.26667, 0.85882),
(0.27059, 0.27059, 0.86667),
(0.27451, 0.27451, 0.87451),
(0.27843, 0.27843, 0.88235),
(0.28235, 0.28235, 0.89020),
(0.28627, 0.28627, 0.89804),
(0.29020, 0.29020, 0.90588),
(0.29412, 0.29412, 0.91373),
(0.29804, 0.29804, 0.92157),
(0.30196, 0.30196, 0.92941),
(0.30588, 0.30588, 0.93725),
(0.30980, 0.30980, 0.94510),
(0.31373, 0.31373, 0.95294),
(0.31765, 0.31765, 0.96078),
(0.32157, 0.32157, 0.96863),
(0.32549, 0.32549, 0.97647),
(0.32941, 0.32941, 0.98431),
(0.33333, 0.33333, 0.99216),
(0.00392, 0.33333, 0.00392),
(0.00784, 0.34118, 0.00784),
(0.01176, 0.34902, 0.01176),
(0.01569, 0.35686, 0.01569),
(0.01961, 0.36471, 0.01961),
(0.02353, 0.37255, 0.02353),
(0.02745, 0.38039, 0.02745),
(0.03137, 0.38824, 0.03137),
(0.03529, 0.39608, 0.03529),
(0.03922, 0.40392, 0.03922),
(0.04314, 0.41176, 0.04314),
(0.04706, 0.41961, 0.04706),
(0.05098, 0.42745, 0.05098),
(0.05490, 0.43529, 0.05490),
(0.05882, 0.44314, 0.05882),
(0.06275, 0.45098, 0.06275),
(0.06667, 0.45882, 0.06667),
(0.07059, 0.46667, 0.07059),
(0.07451, 0.47451, 0.07451),
(0.07843, 0.48235, 0.07843),
(0.08235, 0.49020, 0.08235),
(0.08627, 0.49804, 0.08627),
(0.09020, 0.50588, 0.09020),
(0.09412, 0.51373, 0.09412),
(0.09804, 0.52157, 0.09804),
(0.10196, 0.52941, 0.10196),
(0.10588, 0.53725, 0.10588),
(0.10980, 0.54510, 0.10980),
(0.11373, 0.55294, 0.11373),
(0.11765, 0.56078, 0.11765),
(0.12157, 0.56863, 0.12157),
(0.12549, 0.57647, 0.12549),
(0.12941, 0.58431, 0.12941),
(0.13333, 0.59216, 0.13333),
(0.13725, 0.60000, 0.13725),
(0.14118, 0.60784, 0.14118),
(0.14510, 0.61569, 0.14510),
(0.14902, 0.62353, 0.14902),
(0.15294, 0.63137, 0.15294),
(0.15686, 0.63922, 0.15686),
(0.16078, 0.64706, 0.16078),
(0.16471, 0.65490, 0.16471),
(0.16863, 0.66275, 0.16863),
(0.17255, 0.67059, 0.17255),
(0.17647, 0.67843, 0.17647),
(0.18039, 0.68627, 0.18039),
(0.18431, 0.69412, 0.18431),
(0.18824, 0.70196, 0.18824),
(0.19216, 0.70980, 0.19216),
(0.19608, 0.71765, 0.19608),
(0.20000, 0.72549, 0.20000),
(0.20392, 0.73333, 0.20392),
(0.20784, 0.74118, 0.20784),
(0.21176, 0.74902, 0.21176),
(0.21569, 0.75686, 0.21569),
(0.21961, 0.76471, 0.21961),
(0.22353, 0.77255, 0.22353),
(0.22745, 0.78039, 0.22745),
(0.23137, 0.78824, 0.23137),
(0.23529, 0.79608, 0.23529),
(0.23922, 0.80392, 0.23922),
(0.24314, 0.81176, 0.24314),
(0.24706, 0.81961, 0.24706),
(0.25098, 0.82745, 0.25098),
(0.25490, 0.83529, 0.25490),
(0.25882, 0.84314, 0.25882),
(0.26275, 0.85098, 0.26275),
(0.26667, 0.85882, 0.26667),
(0.27059, 0.86667, 0.27059),
(0.27451, 0.87451, 0.27451),
(0.27843, 0.88235, 0.27843),
(0.28235, 0.89020, 0.28235),
(0.28627, 0.89804, 0.28627),
(0.29020, 0.90588, 0.29020),
(0.29412, 0.91373, 0.29412),
(0.29804, 0.92157, 0.29804),
(0.30196, 0.92941, 0.30196),
(0.30588, 0.93725, 0.30588),
(0.30980, 0.94510, 0.30980),
(0.31373, 0.95294, 0.31373),
(0.31765, 0.96078, 0.31765),
(0.32157, 0.96863, 0.32157),
(0.32549, 0.97647, 0.32549),
(0.32941, 0.98431, 0.32941),
(0.33333, 0.99216, 0.33333),
(0.33333, 0.00392, 0.00392),
(0.34118, 0.00784, 0.00784),
(0.34902, 0.01176, 0.01176),
(0.35686, 0.01569, 0.01569),
(0.36471, 0.01961, 0.01961),
(0.37255, 0.02353, 0.02353),
(0.38039, 0.02745, 0.02745),
(0.38824, 0.03137, 0.03137),
(0.39608, 0.03529, 0.03529),
(0.40392, 0.03922, 0.03922),
(0.41176, 0.04314, 0.04314),
(0.41961, 0.04706, 0.04706),
(0.42745, 0.05098, 0.05098),
(0.43529, 0.05490, 0.05490),
(0.44314, 0.05882, 0.05882),
(0.45098, 0.06275, 0.06275),
(0.45882, 0.06667, 0.06667),
(0.46667, 0.07059, 0.07059),
(0.47451, 0.07451, 0.07451),
(0.48235, 0.07843, 0.07843),
(0.49020, 0.08235, 0.08235),
(0.49804, 0.08627, 0.08627),
(0.50588, 0.09020, 0.09020),
(0.51373, 0.09412, 0.09412),
(0.52157, 0.09804, 0.09804),
(0.52941, 0.10196, 0.10196),
(0.53725, 0.10588, 0.10588),
(0.54510, 0.10980, 0.10980),
(0.55294, 0.11373, 0.11373),
(0.56078, 0.11765, 0.11765),
(0.56863, 0.12157, 0.12157),
(0.57647, 0.12549, 0.12549),
(0.58431, 0.12941, 0.12941),
(0.59216, 0.13333, 0.13333),
(0.60000, 0.13725, 0.13725),
(0.60784, 0.14118, 0.14118),
(0.61569, 0.14510, 0.14510),
(0.62353, 0.14902, 0.14902),
(0.63137, 0.15294, 0.15294),
(0.63922, 0.15686, 0.15686),
(0.64706, 0.16078, 0.16078),
(0.65490, 0.16471, 0.16471),
(0.66275, 0.16863, 0.16863),
(0.67059, 0.17255, 0.17255),
(0.67843, 0.17647, 0.17647),
(0.68627, 0.18039, 0.18039),
(0.69412, 0.18431, 0.18431),
(0.70196, 0.18824, 0.18824),
(0.70980, 0.19216, 0.19216),
(0.71765, 0.19608, 0.19608),
(0.72549, 0.20000, 0.20000),
(0.73333, 0.20392, 0.20392),
(0.74118, 0.20784, 0.20784),
(0.74902, 0.21176, 0.21176),
(0.75686, 0.21569, 0.21569),
(0.76471, 0.21961, 0.21961),
(0.77255, 0.22353, 0.22353),
(0.78039, 0.22745, 0.22745),
(0.78824, 0.23137, 0.23137),
(0.79608, 0.23529, 0.23529),
(0.80392, 0.23922, 0.23922),
(0.81176, 0.24314, 0.24314),
(0.81961, 0.24706, 0.24706),
(0.82745, 0.25098, 0.25098),
(0.83529, 0.25490, 0.25490),
(0.84314, 0.25882, 0.25882),
(0.85098, 0.26275, 0.26275),
(0.85882, 0.26667, 0.26667),
(0.86667, 0.27059, 0.27059),
(0.87451, 0.27451, 0.27451),
(0.88235, 0.27843, 0.27843),
(0.89020, 0.28235, 0.28235),
(0.89804, 0.28627, 0.28627),
(0.90588, 0.29020, 0.29020),
(0.91373, 0.29412, 0.29412),
(0.92157, 0.29804, 0.29804),
(0.92941, 0.30196, 0.30196),
(0.93725, 0.30588, 0.30588),
(0.94510, 0.30980, 0.30980),
(0.95294, 0.31373, 0.31373),
(0.96078, 0.31765, 0.31765),
(0.96863, 0.32157, 0.32157),
(0.97647, 0.32549, 0.32549),
(0.98431, 0.32941, 0.32941),
(0.99216, 0.33333, 0.33333),
(1.00000, 0.33725, 0.33725),
)
cmap_blulut = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00392),
(0.00000, 0.00000, 0.00784),
(0.00000, 0.00000, 0.01176),
(0.00000, 0.00000, 0.01569),
(0.00000, 0.00000, 0.01961),
(0.00000, 0.00000, 0.02353),
(0.00000, 0.00000, 0.02745),
(0.00000, 0.00000, 0.03137),
(0.00000, 0.00000, 0.03529),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.04314),
(0.00000, 0.00000, 0.04706),
(0.00001, 0.00001, 0.05098),
(0.00001, 0.00001, 0.05490),
(0.00001, 0.00001, 0.05882),
(0.00002, 0.00002, 0.06275),
(0.00002, 0.00002, 0.06667),
(0.00002, 0.00002, 0.07059),
(0.00003, 0.00003, 0.07451),
(0.00004, 0.00004, 0.07843),
(0.00005, 0.00005, 0.08235),
(0.00006, 0.00006, 0.08627),
(0.00007, 0.00007, 0.09020),
(0.00008, 0.00008, 0.09412),
(0.00009, 0.00009, 0.09804),
(0.00011, 0.00011, 0.10196),
(0.00013, 0.00013, 0.10588),
(0.00015, 0.00015, 0.10980),
(0.00017, 0.00017, 0.11373),
(0.00019, 0.00019, 0.11765),
(0.00022, 0.00022, 0.12157),
(0.00025, 0.00025, 0.12549),
(0.00028, 0.00028, 0.12941),
(0.00032, 0.00032, 0.13333),
(0.00035, 0.00035, 0.13725),
(0.00040, 0.00040, 0.14118),
(0.00044, 0.00044, 0.14510),
(0.00049, 0.00049, 0.14902),
(0.00055, 0.00055, 0.15294),
(0.00061, 0.00061, 0.15686),
(0.00067, 0.00067, 0.16078),
(0.00074, 0.00074, 0.16471),
(0.00081, 0.00081, 0.16863),
(0.00089, 0.00089, 0.17255),
(0.00097, 0.00097, 0.17647),
(0.00106, 0.00106, 0.18039),
(0.00115, 0.00115, 0.18431),
(0.00126, 0.00126, 0.18824),
(0.00136, 0.00136, 0.19216),
(0.00148, 0.00148, 0.19608),
(0.00160, 0.00160, 0.20000),
(0.00173, 0.00173, 0.20392),
(0.00187, 0.00187, 0.20784),
(0.00201, 0.00201, 0.21176),
(0.00216, 0.00216, 0.21569),
(0.00233, 0.00233, 0.21961),
(0.00250, 0.00250, 0.22353),
(0.00268, 0.00268, 0.22745),
(0.00287, 0.00287, 0.23137),
(0.00307, 0.00307, 0.23529),
(0.00327, 0.00327, 0.23922),
(0.00349, 0.00349, 0.24314),
(0.00373, 0.00373, 0.24706),
(0.00397, 0.00397, 0.25098),
(0.00422, 0.00422, 0.25490),
(0.00449, 0.00449, 0.25882),
(0.00477, 0.00477, 0.26275),
(0.00506, 0.00506, 0.26667),
(0.00536, 0.00536, 0.27059),
(0.00568, 0.00568, 0.27451),
(0.00601, 0.00601, 0.27843),
(0.00636, 0.00636, 0.28235),
(0.00672, 0.00672, 0.28627),
(0.00709, 0.00709, 0.29020),
(0.00748, 0.00748, 0.29412),
(0.00789, 0.00789, 0.29804),
(0.00831, 0.00831, 0.30196),
(0.00875, 0.00875, 0.30588),
(0.00921, 0.00921, 0.30980),
(0.00969, 0.00969, 0.31373),
(0.01018, 0.01018, 0.31765),
(0.01069, 0.01069, 0.32157),
(0.01122, 0.01122, 0.32549),
(0.01177, 0.01177, 0.32941),
(0.01235, 0.01235, 0.33333),
(0.01294, 0.01294, 0.33725),
(0.01355, 0.01355, 0.34118),
(0.01418, 0.01418, 0.34510),
(0.01484, 0.01484, 0.34902),
(0.01552, 0.01552, 0.35294),
(0.01622, 0.01622, 0.35686),
(0.01694, 0.01694, 0.36078),
(0.01769, 0.01769, 0.36471),
(0.01847, 0.01847, 0.36863),
(0.01926, 0.01926, 0.37255),
(0.02009, 0.02009, 0.37647),
(0.02094, 0.02094, 0.38039),
(0.02181, 0.02181, 0.38431),
(0.02272, 0.02272, 0.38824),
(0.02365, 0.02365, 0.39216),
(0.02461, 0.02461, 0.39608),
(0.02560, 0.02560, 0.40000),
(0.02662, 0.02662, 0.40392),
(0.02767, 0.02767, 0.40784),
(0.02875, 0.02875, 0.41176),
(0.02986, 0.02986, 0.41569),
(0.03100, 0.03100, 0.41961),
(0.03218, 0.03218, 0.42353),
(0.03338, 0.03338, 0.42745),
(0.03463, 0.03463, 0.43137),
(0.03590, 0.03590, 0.43529),
(0.03721, 0.03721, 0.43922),
(0.03856, 0.03856, 0.44314),
(0.03994, 0.03994, 0.44706),
(0.04136, 0.04136, 0.45098),
(0.04282, 0.04282, 0.45490),
(0.04432, 0.04432, 0.45882),
(0.04585, 0.04585, 0.46275),
(0.04743, 0.04743, 0.46667),
(0.04904, 0.04904, 0.47059),
(0.05070, 0.05070, 0.47451),
(0.05239, 0.05239, 0.47843),
(0.05413, 0.05413, 0.48235),
(0.05591, 0.05591, 0.48627),
(0.05774, 0.05774, 0.49020),
(0.05961, 0.05961, 0.49412),
(0.06153, 0.06153, 0.49804),
(0.06349, 0.06349, 0.50196),
(0.06549, 0.06549, 0.50588),
(0.06755, 0.06755, 0.50980),
(0.06965, 0.06965, 0.51373),
(0.07180, 0.07180, 0.51765),
(0.07400, 0.07400, 0.52157),
(0.07625, 0.07625, 0.52549),
(0.07856, 0.07856, 0.52941),
(0.08091, 0.08091, 0.53333),
(0.08331, 0.08331, 0.53725),
(0.08577, 0.08577, 0.54118),
(0.08829, 0.08829, 0.54510),
(0.09086, 0.09086, 0.54902),
(0.09348, 0.09348, 0.55294),
(0.09616, 0.09616, 0.55686),
(0.09890, 0.09890, 0.56078),
(0.10169, 0.10169, 0.56471),
(0.10455, 0.10455, 0.56863),
(0.10746, 0.10746, 0.57255),
(0.11044, 0.11044, 0.57647),
(0.11347, 0.11347, 0.58039),
(0.11657, 0.11657, 0.58431),
(0.11973, 0.11973, 0.58824),
(0.12296, 0.12296, 0.59216),
(0.12624, 0.12624, 0.59608),
(0.12960, 0.12960, 0.60000),
(0.13302, 0.13302, 0.60392),
(0.13651, 0.13651, 0.60784),
(0.14007, 0.14007, 0.61176),
(0.14369, 0.14369, 0.61569),
(0.14739, 0.14739, 0.61961),
(0.15116, 0.15116, 0.62353),
(0.15500, 0.15500, 0.62745),
(0.15891, 0.15891, 0.63137),
(0.16289, 0.16289, 0.63529),
(0.16695, 0.16695, 0.63922),
(0.17109, 0.17109, 0.64314),
(0.17530, 0.17530, 0.64706),
(0.17959, 0.17959, 0.65098),
(0.18395, 0.18395, 0.65490),
(0.18840, 0.18840, 0.65882),
(0.19292, 0.19292, 0.66275),
(0.19753, 0.19753, 0.66667),
(0.20222, 0.20222, 0.67059),
(0.20699, 0.20699, 0.67451),
(0.21185, 0.21185, 0.67843),
(0.21679, 0.21679, 0.68235),
(0.22182, 0.22182, 0.68627),
(0.22693, 0.22693, 0.69020),
(0.23213, 0.23213, 0.69412),
(0.23742, 0.23742, 0.69804),
(0.24280, 0.24280, 0.70196),
(0.24827, 0.24827, 0.70588),
(0.25384, 0.25384, 0.70980),
(0.25949, 0.25949, 0.71373),
(0.26524, 0.26524, 0.71765),
(0.27109, 0.27109, 0.72157),
(0.27703, 0.27703, 0.72549),
(0.28307, 0.28307, 0.72941),
(0.28920, 0.28920, 0.73333),
(0.29544, 0.29544, 0.73725),
(0.30178, 0.30178, 0.74118),
(0.30821, 0.30821, 0.74510),
(0.31476, 0.31476, 0.74902),
(0.32140, 0.32140, 0.75294),
(0.32815, 0.32815, 0.75686),
(0.33500, 0.33500, 0.76078),
(0.34196, 0.34196, 0.76471),
(0.34903, 0.34903, 0.76863),
(0.35621, 0.35621, 0.77255),
(0.36350, 0.36350, 0.77647),
(0.37090, 0.37090, 0.78039),
(0.37841, 0.37841, 0.78431),
(0.38603, 0.38603, 0.78824),
(0.39377, 0.39377, 0.79216),
(0.40163, 0.40163, 0.79608),
(0.40960, 0.40960, 0.80000),
(0.41769, 0.41769, 0.80392),
(0.42590, 0.42590, 0.80784),
(0.43423, 0.43423, 0.81176),
(0.44268, 0.44268, 0.81569),
(0.45126, 0.45126, 0.81961),
(0.45996, 0.45996, 0.82353),
(0.46878, 0.46878, 0.82745),
(0.47773, 0.47773, 0.83137),
(0.48681, 0.48681, 0.83529),
(0.49601, 0.49601, 0.83922),
(0.50535, 0.50535, 0.84314),
(0.51482, 0.51482, 0.84706),
(0.52442, 0.52442, 0.85098),
(0.53415, 0.53415, 0.85490),
(0.54402, 0.54402, 0.85882),
(0.55403, 0.55403, 0.86275),
(0.56417, 0.56417, 0.86667),
(0.57445, 0.57445, 0.87059),
(0.58487, 0.58487, 0.87451),
(0.59543, 0.59543, 0.87843),
(0.60613, 0.60613, 0.88235),
(0.61698, 0.61698, 0.88627),
(0.62798, 0.62798, 0.89020),
(0.63911, 0.63911, 0.89412),
(0.65040, 0.65040, 0.89804),
(0.66184, 0.66184, 0.90196),
(0.67342, 0.67342, 0.90588),
(0.68516, 0.68516, 0.90980),
(0.69705, 0.69705, 0.91373),
(0.70909, 0.70909, 0.91765),
(0.72129, 0.72129, 0.92157),
(0.73365, 0.73365, 0.92549),
(0.74616, 0.74616, 0.92941),
(0.75883, 0.75883, 0.93333),
(0.77167, 0.77167, 0.93725),
(0.78466, 0.78466, 0.94118),
(0.79782, 0.79782, 0.94510),
(0.81115, 0.81115, 0.94902),
(0.82464, 0.82464, 0.95294),
(0.83830, 0.83830, 0.95686),
(0.85213, 0.85213, 0.96078),
(0.86612, 0.86612, 0.96471),
(0.88029, 0.88029, 0.96863),
(0.89464, 0.89464, 0.97255),
(0.90915, 0.90915, 0.97647),
(0.92385, 0.92385, 0.98039),
(0.93872, 0.93872, 0.98431),
(0.95377, 0.95377, 0.98824),
(0.96899, 0.96899, 0.99216),
(0.98441, 0.98441, 0.99608),
(1.00000, 1.00000, 1.00000),
)
cmap_green = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00392, 0.00000),
(0.00000, 0.00784, 0.00000),
(0.00000, 0.01176, 0.00000),
(0.00000, 0.01569, 0.00000),
(0.00000, 0.01961, 0.00000),
(0.00000, 0.02353, 0.00000),
(0.00000, 0.02745, 0.00000),
(0.00000, 0.03137, 0.00000),
(0.00000, 0.03529, 0.00000),
(0.00000, 0.03922, 0.00000),
(0.00000, 0.04314, 0.00000),
(0.00000, 0.04706, 0.00000),
(0.00000, 0.05098, 0.00000),
(0.00000, 0.05490, 0.00000),
(0.00000, 0.05882, 0.00000),
(0.00000, 0.06275, 0.00000),
(0.00000, 0.06667, 0.00000),
(0.00000, 0.07059, 0.00000),
(0.00000, 0.07451, 0.00000),
(0.00000, 0.07843, 0.00000),
(0.00000, 0.08235, 0.00000),
(0.00000, 0.08627, 0.00000),
(0.00000, 0.09020, 0.00000),
(0.00000, 0.09412, 0.00000),
(0.00000, 0.09804, 0.00000),
(0.00000, 0.10196, 0.00000),
(0.00000, 0.10588, 0.00000),
(0.00000, 0.10980, 0.00000),
(0.00000, 0.11373, 0.00000),
(0.00000, 0.11765, 0.00000),
(0.00000, 0.12157, 0.00000),
(0.00000, 0.12549, 0.00000),
(0.00000, 0.12941, 0.00000),
(0.00000, 0.13333, 0.00000),
(0.00000, 0.13725, 0.00000),
(0.00000, 0.14118, 0.00000),
(0.00000, 0.14510, 0.00000),
(0.00000, 0.14902, 0.00000),
(0.00000, 0.15294, 0.00000),
(0.00000, 0.15686, 0.00000),
(0.00000, 0.16078, 0.00000),
(0.00000, 0.16471, 0.00000),
(0.00000, 0.16863, 0.00000),
(0.00000, 0.17255, 0.00000),
(0.00000, 0.17647, 0.00000),
(0.00000, 0.18039, 0.00000),
(0.00000, 0.18431, 0.00000),
(0.00000, 0.18824, 0.00000),
(0.00000, 0.19216, 0.00000),
(0.00000, 0.19608, 0.00000),
(0.00000, 0.20000, 0.00000),
(0.00000, 0.20392, 0.00000),
(0.00000, 0.20784, 0.00000),
(0.00000, 0.21176, 0.00000),
(0.00000, 0.21569, 0.00000),
(0.00000, 0.21961, 0.00000),
(0.00000, 0.22353, 0.00000),
(0.00000, 0.22745, 0.00000),
(0.00000, 0.23137, 0.00000),
(0.00000, 0.23529, 0.00000),
(0.00000, 0.23922, 0.00000),
(0.00000, 0.24314, 0.00000),
(0.00000, 0.24706, 0.00000),
(0.00000, 0.25098, 0.00000),
(0.00000, 0.25490, 0.00000),
(0.00000, 0.25882, 0.00000),
(0.00000, 0.26275, 0.00000),
(0.00000, 0.26667, 0.00000),
(0.00000, 0.27059, 0.00000),
(0.00000, 0.27451, 0.00000),
(0.00000, 0.27843, 0.00000),
(0.00000, 0.28235, 0.00000),
(0.00000, 0.28627, 0.00000),
(0.00000, 0.29020, 0.00000),
(0.00000, 0.29412, 0.00000),
(0.00000, 0.29804, 0.00000),
(0.00000, 0.30196, 0.00000),
(0.00000, 0.30588, 0.00000),
(0.00000, 0.30980, 0.00000),
(0.00000, 0.31373, 0.00000),
(0.00000, 0.31765, 0.00000),
(0.00000, 0.32157, 0.00000),
(0.00000, 0.32549, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.33333, 0.00000),
(0.00000, 0.33725, 0.00000),
(0.00000, 0.34118, 0.00000),
(0.00000, 0.34510, 0.00000),
(0.00000, 0.34902, 0.00000),
(0.00000, 0.35294, 0.00000),
(0.00000, 0.35686, 0.00000),
(0.00000, 0.36078, 0.00000),
(0.00000, 0.36471, 0.00000),
(0.00000, 0.36863, 0.00000),
(0.00000, 0.37255, 0.00000),
(0.00000, 0.37647, 0.00000),
(0.00000, 0.38039, 0.00000),
(0.00000, 0.38431, 0.00000),
(0.00000, 0.38824, 0.00000),
(0.00000, 0.39216, 0.00000),
(0.00000, 0.39608, 0.00000),
(0.00000, 0.40000, 0.00000),
(0.00000, 0.40392, 0.00000),
(0.00000, 0.40784, 0.00000),
(0.00000, 0.41176, 0.00000),
(0.00000, 0.41569, 0.00000),
(0.00000, 0.41961, 0.00000),
(0.00000, 0.42353, 0.00000),
(0.00000, 0.42745, 0.00000),
(0.00000, 0.43137, 0.00000),
(0.00000, 0.43529, 0.00000),
(0.00000, 0.43922, 0.00000),
(0.00000, 0.44314, 0.00000),
(0.00000, 0.44706, 0.00000),
(0.00000, 0.45098, 0.00000),
(0.00000, 0.45490, 0.00000),
(0.00000, 0.45882, 0.00000),
(0.00000, 0.46275, 0.00000),
(0.00000, 0.46667, 0.00000),
(0.00000, 0.47059, 0.00000),
(0.00000, 0.47451, 0.00000),
(0.00000, 0.47843, 0.00000),
(0.00000, 0.48235, 0.00000),
(0.00000, 0.48627, 0.00000),
(0.00000, 0.49020, 0.00000),
(0.00000, 0.49412, 0.00000),
(0.00000, 0.49804, 0.00000),
(0.00000, 0.50196, 0.00000),
(0.00000, 0.50588, 0.00000),
(0.00000, 0.50980, 0.00000),
(0.00000, 0.51373, 0.00000),
(0.00000, 0.51765, 0.00000),
(0.00000, 0.52157, 0.00000),
(0.00000, 0.52549, 0.00000),
(0.00000, 0.52941, 0.00000),
(0.00000, 0.53333, 0.00000),
(0.00000, 0.53725, 0.00000),
(0.00000, 0.54118, 0.00000),
(0.00000, 0.54510, 0.00000),
(0.00000, 0.54902, 0.00000),
(0.00000, 0.55294, 0.00000),
(0.00000, 0.55686, 0.00000),
(0.00000, 0.56078, 0.00000),
(0.00000, 0.56471, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.57255, 0.00000),
(0.00000, 0.57647, 0.00000),
(0.00000, 0.58039, 0.00000),
(0.00000, 0.58431, 0.00000),
(0.00000, 0.58824, 0.00000),
(0.00000, 0.59216, 0.00000),
(0.00000, 0.59608, 0.00000),
(0.00000, 0.60000, 0.00000),
(0.00000, 0.60392, 0.00000),
(0.00000, 0.60784, 0.00000),
(0.00000, 0.61176, 0.00000),
(0.00000, 0.61569, 0.00000),
(0.00000, 0.61961, 0.00000),
(0.00000, 0.62353, 0.00000),
(0.00000, 0.62745, 0.00000),
(0.00000, 0.63137, 0.00000),
(0.00000, 0.63529, 0.00000),
(0.00000, 0.63922, 0.00000),
(0.00000, 0.64314, 0.00000),
(0.00000, 0.64706, 0.00000),
(0.00000, 0.65098, 0.00000),
(0.00000, 0.65490, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.66275, 0.00000),
(0.00000, 0.66667, 0.00000),
(0.00000, 0.67059, 0.00000),
(0.00000, 0.67451, 0.00000),
(0.00000, 0.67843, 0.00000),
(0.00000, 0.68235, 0.00000),
(0.00000, 0.68627, 0.00000),
(0.00000, 0.69020, 0.00000),
(0.00000, 0.69412, 0.00000),
(0.00000, 0.69804, 0.00000),
(0.00000, 0.70196, 0.00000),
(0.00000, 0.70588, 0.00000),
(0.00000, 0.70980, 0.00000),
(0.00000, 0.71373, 0.00000),
(0.00000, 0.71765, 0.00000),
(0.00000, 0.72157, 0.00000),
(0.00000, 0.72549, 0.00000),
(0.00000, 0.72941, 0.00000),
(0.00000, 0.73333, 0.00000),
(0.00000, 0.73725, 0.00000),
(0.00000, 0.74118, 0.00000),
(0.00000, 0.74510, 0.00000),
(0.00000, 0.74902, 0.00000),
(0.00000, 0.75294, 0.00000),
(0.00000, 0.75686, 0.00000),
(0.00000, 0.76078, 0.00000),
(0.00000, 0.76471, 0.00000),
(0.00000, 0.76863, 0.00000),
(0.00000, 0.77255, 0.00000),
(0.00000, 0.77647, 0.00000),
(0.00000, 0.78039, 0.00000),
(0.00000, 0.78431, 0.00000),
(0.00000, 0.78824, 0.00000),
(0.00000, 0.79216, 0.00000),
(0.00000, 0.79608, 0.00000),
(0.00000, 0.80000, 0.00000),
(0.00000, 0.80392, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.81176, 0.00000),
(0.00000, 0.81569, 0.00000),
(0.00000, 0.81961, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.82745, 0.00000),
(0.00000, 0.83137, 0.00000),
(0.00000, 0.83529, 0.00000),
(0.00000, 0.83922, 0.00000),
(0.00000, 0.84314, 0.00000),
(0.00000, 0.84706, 0.00000),
(0.00000, 0.85098, 0.00000),
(0.00000, 0.85490, 0.00000),
(0.00000, 0.85882, 0.00000),
(0.00000, 0.86275, 0.00000),
(0.00000, 0.86667, 0.00000),
(0.00000, 0.87059, 0.00000),
(0.00000, 0.87451, 0.00000),
(0.00000, 0.87843, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.88627, 0.00000),
(0.00000, 0.89020, 0.00000),
(0.00000, 0.89412, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.90196, 0.00000),
(0.00000, 0.90588, 0.00000),
(0.00000, 0.90980, 0.00000),
(0.00000, 0.91373, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.92157, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.92941, 0.00000),
(0.00000, 0.93333, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.94118, 0.00000),
(0.00000, 0.94510, 0.00000),
(0.00000, 0.94902, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.95686, 0.00000),
(0.00000, 0.96078, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96863, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.97647, 0.00000),
(0.00000, 0.98039, 0.00000),
(0.00000, 0.98431, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.00000, 0.99216, 0.00000),
(0.00000, 0.99608, 0.00392),
(0.00000, 1.00000, 0.00784),
)
cmap_staircase = (
(0.00392, 0.00392, 0.31373),
(0.00784, 0.00784, 0.31373),
(0.01176, 0.01176, 0.31373),
(0.01569, 0.01569, 0.31373),
(0.01961, 0.01961, 0.31373),
(0.02353, 0.02353, 0.31373),
(0.02745, 0.02745, 0.31373),
(0.03137, 0.03137, 0.31373),
(0.03529, 0.03529, 0.31373),
(0.03922, 0.03922, 0.31373),
(0.04314, 0.04314, 0.31373),
(0.04706, 0.04706, 0.31373),
(0.05098, 0.05098, 0.31373),
(0.05490, 0.05490, 0.31373),
(0.05882, 0.05882, 0.31373),
(0.06275, 0.06275, 0.31373),
(0.06667, 0.06667, 0.47059),
(0.07059, 0.07059, 0.47059),
(0.07451, 0.07451, 0.47059),
(0.07843, 0.07843, 0.47059),
(0.08235, 0.08235, 0.47059),
(0.08627, 0.08627, 0.47059),
(0.09020, 0.09020, 0.47059),
(0.09412, 0.09412, 0.47059),
(0.09804, 0.09804, 0.47059),
(0.10196, 0.10196, 0.47059),
(0.10588, 0.10588, 0.47059),
(0.10980, 0.10980, 0.47059),
(0.11373, 0.11373, 0.47059),
(0.11765, 0.11765, 0.47059),
(0.12157, 0.12157, 0.47059),
(0.12549, 0.12549, 0.47059),
(0.12941, 0.12941, 0.62745),
(0.13333, 0.13333, 0.62745),
(0.13725, 0.13725, 0.62745),
(0.14118, 0.14118, 0.62745),
(0.14510, 0.14510, 0.62745),
(0.14902, 0.14902, 0.62745),
(0.15294, 0.15294, 0.62745),
(0.15686, 0.15686, 0.62745),
(0.16078, 0.16078, 0.62745),
(0.16471, 0.16471, 0.62745),
(0.16863, 0.16863, 0.62745),
(0.17255, 0.17255, 0.62745),
(0.17647, 0.17647, 0.62745),
(0.18039, 0.18039, 0.62745),
(0.18431, 0.18431, 0.62745),
(0.18824, 0.18824, 0.62745),
(0.19216, 0.19216, 0.78431),
(0.19608, 0.19608, 0.78431),
(0.20000, 0.20000, 0.78431),
(0.20392, 0.20392, 0.78431),
(0.20784, 0.20784, 0.78431),
(0.21176, 0.21176, 0.78431),
(0.21569, 0.21569, 0.78431),
(0.21961, 0.21961, 0.78431),
(0.22353, 0.22353, 0.78431),
(0.22745, 0.22745, 0.78431),
(0.23137, 0.23137, 0.78431),
(0.23529, 0.23529, 0.78431),
(0.23922, 0.23922, 0.78431),
(0.24314, 0.24314, 0.78431),
(0.24706, 0.24706, 0.78431),
(0.25098, 0.25098, 0.78431),
(0.25490, 0.25490, 0.94118),
(0.25882, 0.25882, 0.94118),
(0.26275, 0.26275, 0.94118),
(0.26667, 0.26667, 0.94118),
(0.27059, 0.27059, 0.94118),
(0.27451, 0.27451, 0.94118),
(0.27843, 0.27843, 0.94118),
(0.28235, 0.28235, 0.94118),
(0.28627, 0.28627, 0.94118),
(0.29020, 0.29020, 0.94118),
(0.29412, 0.29412, 0.94118),
(0.29804, 0.29804, 0.94118),
(0.30196, 0.30196, 0.94118),
(0.30588, 0.30588, 0.94118),
(0.30980, 0.30980, 0.94118),
(0.31373, 0.31373, 0.94118),
(0.31765, 0.31765, 0.95294),
(0.32157, 0.32157, 0.96471),
(0.32549, 0.32549, 0.97647),
(0.32941, 0.32941, 0.98824),
(0.33333, 0.33333, 1.00000),
(0.00392, 0.31373, 0.00392),
(0.00784, 0.31373, 0.00784),
(0.01176, 0.31373, 0.01176),
(0.01569, 0.31373, 0.01569),
(0.01961, 0.31373, 0.01961),
(0.02353, 0.31373, 0.02353),
(0.02745, 0.31373, 0.02745),
(0.03137, 0.31373, 0.03137),
(0.03529, 0.31373, 0.03529),
(0.03922, 0.31373, 0.03922),
(0.04314, 0.31373, 0.04314),
(0.04706, 0.31373, 0.04706),
(0.05098, 0.31373, 0.05098),
(0.05490, 0.31373, 0.05490),
(0.05882, 0.31373, 0.05882),
(0.06275, 0.31373, 0.06275),
(0.06667, 0.47059, 0.06667),
(0.07059, 0.47059, 0.07059),
(0.07451, 0.47059, 0.07451),
(0.07843, 0.47059, 0.07843),
(0.08235, 0.47059, 0.08235),
(0.08627, 0.47059, 0.08627),
(0.09020, 0.47059, 0.09020),
(0.09412, 0.47059, 0.09412),
(0.09804, 0.47059, 0.09804),
(0.10196, 0.47059, 0.10196),
(0.10588, 0.47059, 0.10588),
(0.10980, 0.47059, 0.10980),
(0.11373, 0.47059, 0.11373),
(0.11765, 0.47059, 0.11765),
(0.12157, 0.47059, 0.12157),
(0.12549, 0.47059, 0.12549),
(0.12941, 0.62745, 0.12941),
(0.13333, 0.62745, 0.13333),
(0.13725, 0.62745, 0.13725),
(0.14118, 0.62745, 0.14118),
(0.14510, 0.62745, 0.14510),
(0.14902, 0.62745, 0.14902),
(0.15294, 0.62745, 0.15294),
(0.15686, 0.62745, 0.15686),
(0.16078, 0.62745, 0.16078),
(0.16471, 0.62745, 0.16471),
(0.16863, 0.62745, 0.16863),
(0.17255, 0.62745, 0.17255),
(0.17647, 0.62745, 0.17647),
(0.18039, 0.62745, 0.18039),
(0.18431, 0.62745, 0.18431),
(0.18824, 0.62745, 0.18824),
(0.19216, 0.78431, 0.19216),
(0.19608, 0.78431, 0.19608),
(0.20000, 0.78431, 0.20000),
(0.20392, 0.78431, 0.20392),
(0.20784, 0.78431, 0.20784),
(0.21176, 0.78431, 0.21176),
(0.21569, 0.78431, 0.21569),
(0.21961, 0.78431, 0.21961),
(0.22353, 0.78431, 0.22353),
(0.22745, 0.78431, 0.22745),
(0.23137, 0.78431, 0.23137),
(0.23529, 0.78431, 0.23529),
(0.23922, 0.78431, 0.23922),
(0.24314, 0.78431, 0.24314),
(0.24706, 0.78431, 0.24706),
(0.25098, 0.78431, 0.25098),
(0.25490, 0.94118, 0.25490),
(0.25882, 0.94118, 0.25882),
(0.26275, 0.94118, 0.26275),
(0.26667, 0.94118, 0.26667),
(0.27059, 0.94118, 0.27059),
(0.27451, 0.94118, 0.27451),
(0.27843, 0.94118, 0.27843),
(0.28235, 0.94118, 0.28235),
(0.28627, 0.94118, 0.28627),
(0.29020, 0.94118, 0.29020),
(0.29412, 0.94118, 0.29412),
(0.29804, 0.94118, 0.29804),
(0.30196, 0.94118, 0.30196),
(0.30588, 0.94118, 0.30588),
(0.30980, 0.94118, 0.30980),
(0.31373, 0.94118, 0.31373),
(0.31765, 0.95294, 0.31765),
(0.32157, 0.96471, 0.32157),
(0.32549, 0.97647, 0.32549),
(0.32941, 0.98824, 0.32941),
(0.33333, 1.00000, 0.33333),
(0.31373, 0.00392, 0.00392),
(0.31373, 0.00784, 0.00784),
(0.31373, 0.01176, 0.01176),
(0.31373, 0.01569, 0.01569),
(0.31373, 0.01961, 0.01961),
(0.31373, 0.02353, 0.02353),
(0.31373, 0.02745, 0.02745),
(0.31373, 0.03137, 0.03137),
(0.31373, 0.03529, 0.03529),
(0.31373, 0.03922, 0.03922),
(0.31373, 0.04314, 0.04314),
(0.31373, 0.04706, 0.04706),
(0.31373, 0.05098, 0.05098),
(0.31373, 0.05490, 0.05490),
(0.31373, 0.05882, 0.05882),
(0.31373, 0.06275, 0.06275),
(0.47059, 0.06667, 0.06667),
(0.47059, 0.07059, 0.07059),
(0.47059, 0.07451, 0.07451),
(0.47059, 0.07843, 0.07843),
(0.47059, 0.08235, 0.08235),
(0.47059, 0.08627, 0.08627),
(0.47059, 0.09020, 0.09020),
(0.47059, 0.09412, 0.09412),
(0.47059, 0.09804, 0.09804),
(0.47059, 0.10196, 0.10196),
(0.47059, 0.10588, 0.10588),
(0.47059, 0.10980, 0.10980),
(0.47059, 0.11373, 0.11373),
(0.47059, 0.11765, 0.11765),
(0.47059, 0.12157, 0.12157),
(0.47059, 0.12549, 0.12549),
(0.62745, 0.12941, 0.12941),
(0.62745, 0.13333, 0.13333),
(0.62745, 0.13725, 0.13725),
(0.62745, 0.14118, 0.14118),
(0.62745, 0.14510, 0.14510),
(0.62745, 0.14902, 0.14902),
(0.62745, 0.15294, 0.15294),
(0.62745, 0.15686, 0.15686),
(0.62745, 0.16078, 0.16078),
(0.62745, 0.16471, 0.16471),
(0.62745, 0.16863, 0.16863),
(0.62745, 0.17255, 0.17255),
(0.62745, 0.17647, 0.17647),
(0.62745, 0.18039, 0.18039),
(0.62745, 0.18431, 0.18431),
(0.62745, 0.18824, 0.18824),
(0.78431, 0.19216, 0.19216),
(0.78431, 0.19608, 0.19608),
(0.78431, 0.20000, 0.20000),
(0.78431, 0.20392, 0.20392),
(0.78431, 0.20784, 0.20784),
(0.78431, 0.21176, 0.21176),
(0.78431, 0.21569, 0.21569),
(0.78431, 0.21961, 0.21961),
(0.78431, 0.22353, 0.22353),
(0.78431, 0.22745, 0.22745),
(0.78431, 0.23137, 0.23137),
(0.78431, 0.23529, 0.23529),
(0.78431, 0.23922, 0.23922),
(0.78431, 0.24314, 0.24314),
(0.78431, 0.24706, 0.24706),
(0.78431, 0.25098, 0.25098),
(0.94118, 0.25490, 0.25490),
(0.94118, 0.25882, 0.25882),
(0.94118, 0.26275, 0.26275),
(0.94118, 0.26667, 0.26667),
(0.94118, 0.27059, 0.27059),
(0.94118, 0.27451, 0.27451),
(0.94118, 0.27843, 0.27843),
(0.94118, 0.28235, 0.28235),
(0.94118, 0.28627, 0.28627),
(0.94118, 0.29020, 0.29020),
(0.94118, 0.29412, 0.29412),
(0.94118, 0.29804, 0.29804),
(0.94118, 0.30196, 0.30196),
(0.94118, 0.30588, 0.30588),
(0.94118, 0.30980, 0.30980),
(0.94118, 0.31373, 0.31373),
(0.94902, 0.39216, 0.39216),
(0.96078, 0.52941, 0.52941),
(0.97255, 0.66667, 0.66667),
(0.98431, 0.80392, 0.80392),
(0.99216, 0.80000, 0.80000),
(1.00000, 1.00000, 1.00000),
)
cmap_random = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.51765),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98431, 0.81176, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.76863, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 0.89804, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.86275, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.70588, 1.00000),
(0.92157, 0.61961, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.78431, 0.47059, 1.00000),
(0.65882, 0.59608, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.62745, 0.62745, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.47059, 0.78431, 1.00000),
(0.26275, 0.87843, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.74118),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.65490),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.36078, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.98431, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 0.97647, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98431, 0.85098, 0.00000),
(0.98824, 0.77647, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.72549, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.36863, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.46667),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(0.91373, 0.00000, 0.97255),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.70588, 0.00000, 0.90196),
(0.53333, 0.00000, 0.87451),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.47059, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.86275),
(0.00000, 0.00000, 0.80392),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.13725),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
)
# Blue colormap: 256 RGB triples, each channel rounded to 5 decimal places.
# Blue ramps linearly as i/255 over the full table; the green channel is zero
# except for a small ramp (1..4)/255 at indices 251-254 and 1/255 at index 255,
# matching the original generated table exactly.
cmap_blue = tuple(
    (0.00000, round(g / 255.0, 5), round(b / 255.0, 5))
    for b, g in enumerate((0,) * 251 + (1, 2, 3, 4, 1))
)
# Red colormap: 256 RGB triples, each channel rounded to 5 decimal places.
# Red ramps linearly as i/255 over the full table; the blue channel is zero
# except for a small tail of (1, 2)/255 at indices 254-255, matching the
# original generated table exactly.
cmap_red = tuple(
    (round(i / 255.0, 5), 0.00000, round(max(i - 253, 0) / 255.0, 5))
    for i in range(256)
)
# AIPS "0" colormap: 256 entries built from ten constant-colour runs
# (black x1, gray x31, then eight saturated colours x28 each).
# Expanded run-length form of the original flat 256-line literal.
cmap_aips0 = tuple(
    color
    for color, count in (
        ((0.00000, 0.00000, 0.00000), 1),   # black
        ((0.19608, 0.19608, 0.19608), 31),  # gray
        ((0.47451, 0.00000, 0.60784), 28),  # purple
        ((0.00000, 0.00000, 0.78431), 28),  # blue
        ((0.37255, 0.65490, 0.92549), 28),  # light blue
        ((0.00000, 0.56863, 0.00000), 28),  # green
        ((0.00000, 0.96471, 0.00000), 28),  # bright green
        ((1.00000, 1.00000, 0.00000), 28),  # yellow
        ((1.00000, 0.69412, 0.00000), 28),  # orange
        ((1.00000, 0.00000, 0.00000), 28),  # red
    )
    for _ in range(count)
)
# "Stairs" colormap: 8 flat colour steps of 32 entries each (256 total).
# Expanded run-length form of the original flat 256-line literal.
cmap_stairs8 = tuple(
    step
    for step in (
        (0.76471, 0.00000, 1.00000),  # violet
        (1.00000, 0.00000, 1.00000),  # magenta
        (0.00000, 0.00000, 1.00000),  # blue
        (0.00000, 1.00000, 0.00000),  # green
        (1.00000, 1.00000, 0.00000),  # yellow
        (1.00000, 0.49804, 0.00000),  # orange
        (1.00000, 0.00000, 0.00000),  # red
        (1.00000, 1.00000, 1.00000),  # white
    )
    for _ in range(32)
)
cmap_idl11 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00392, 0.00392),
(0.00000, 0.00784, 0.00784),
(0.00000, 0.01176, 0.01176),
(0.00000, 0.01569, 0.01569),
(0.00000, 0.03137, 0.03137),
(0.00000, 0.04706, 0.04706),
(0.00000, 0.06275, 0.06275),
(0.00000, 0.08235, 0.08235),
(0.00000, 0.09804, 0.09804),
(0.00000, 0.11373, 0.11373),
(0.00000, 0.12941, 0.12941),
(0.00000, 0.14902, 0.14902),
(0.00000, 0.16471, 0.16471),
(0.00000, 0.18039, 0.18039),
(0.00000, 0.19608, 0.19608),
(0.00000, 0.21569, 0.21569),
(0.00000, 0.23137, 0.23137),
(0.00000, 0.24706, 0.24706),
(0.00000, 0.26275, 0.26275),
(0.00000, 0.28235, 0.28235),
(0.00000, 0.29804, 0.29804),
(0.00000, 0.31373, 0.31373),
(0.00000, 0.32941, 0.32941),
(0.00000, 0.34902, 0.34902),
(0.00000, 0.36471, 0.36471),
(0.00000, 0.38039, 0.38039),
(0.00000, 0.39608, 0.39608),
(0.00000, 0.41569, 0.41569),
(0.00000, 0.43137, 0.43137),
(0.00000, 0.44706, 0.44706),
(0.00000, 0.46275, 0.46275),
(0.00000, 0.48235, 0.48235),
(0.00000, 0.49804, 0.49804),
(0.00000, 0.51373, 0.51373),
(0.00000, 0.52941, 0.52941),
(0.00000, 0.54902, 0.54902),
(0.00000, 0.56471, 0.56471),
(0.00000, 0.58039, 0.58039),
(0.00000, 0.59608, 0.59608),
(0.00000, 0.61569, 0.61569),
(0.00000, 0.63137, 0.63137),
(0.00000, 0.64706, 0.64706),
(0.00000, 0.66275, 0.66275),
(0.00000, 0.68235, 0.68235),
(0.00000, 0.69804, 0.69804),
(0.00000, 0.71373, 0.71373),
(0.00000, 0.72941, 0.72941),
(0.00000, 0.74902, 0.74902),
(0.00000, 0.76471, 0.76471),
(0.00000, 0.78039, 0.78039),
(0.00000, 0.79608, 0.79608),
(0.00000, 0.81569, 0.81569),
(0.00000, 0.83137, 0.83137),
(0.00000, 0.84706, 0.84706),
(0.00000, 0.86275, 0.86275),
(0.00000, 0.88235, 0.88235),
(0.00000, 0.89804, 0.89804),
(0.00000, 0.91373, 0.91373),
(0.00000, 0.92941, 0.92941),
(0.00000, 0.94902, 0.94902),
(0.00000, 0.96471, 0.96471),
(0.00000, 0.98039, 0.98039),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.98431, 1.00000),
(0.00000, 0.96863, 1.00000),
(0.00000, 0.95294, 1.00000),
(0.00000, 0.93725, 1.00000),
(0.00000, 0.92157, 1.00000),
(0.00000, 0.90588, 1.00000),
(0.00000, 0.89020, 1.00000),
(0.00000, 0.87451, 1.00000),
(0.00000, 0.85882, 1.00000),
(0.00000, 0.84314, 1.00000),
(0.00000, 0.82745, 1.00000),
(0.00000, 0.81176, 1.00000),
(0.00000, 0.79608, 1.00000),
(0.00000, 0.78039, 1.00000),
(0.00000, 0.76471, 1.00000),
(0.00000, 0.74902, 1.00000),
(0.00000, 0.73333, 1.00000),
(0.00000, 0.71765, 1.00000),
(0.00000, 0.70196, 1.00000),
(0.00000, 0.68627, 1.00000),
(0.00000, 0.66667, 1.00000),
(0.00000, 0.65098, 1.00000),
(0.00000, 0.63529, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.60392, 1.00000),
(0.00000, 0.58824, 1.00000),
(0.00000, 0.57255, 1.00000),
(0.00000, 0.55686, 1.00000),
(0.00000, 0.54118, 1.00000),
(0.00000, 0.52549, 1.00000),
(0.00000, 0.50980, 1.00000),
(0.00000, 0.49412, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.46275, 1.00000),
(0.00000, 0.44706, 1.00000),
(0.00000, 0.43137, 1.00000),
(0.00000, 0.41569, 1.00000),
(0.00000, 0.40000, 1.00000),
(0.00000, 0.38431, 1.00000),
(0.00000, 0.36863, 1.00000),
(0.00000, 0.35294, 1.00000),
(0.00000, 0.33333, 1.00000),
(0.00000, 0.31765, 1.00000),
(0.00000, 0.30196, 1.00000),
(0.00000, 0.28627, 1.00000),
(0.00000, 0.27059, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.23922, 1.00000),
(0.00000, 0.22353, 1.00000),
(0.00000, 0.20784, 1.00000),
(0.00000, 0.19216, 1.00000),
(0.00000, 0.17647, 1.00000),
(0.00000, 0.16078, 1.00000),
(0.00000, 0.14510, 1.00000),
(0.00000, 0.12941, 1.00000),
(0.00000, 0.11373, 1.00000),
(0.00000, 0.09804, 1.00000),
(0.00000, 0.08235, 1.00000),
(0.00000, 0.06667, 1.00000),
(0.00000, 0.05098, 1.00000),
(0.00000, 0.03529, 1.00000),
(0.00000, 0.01961, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.01569, 0.00000, 1.00000),
(0.03137, 0.00000, 1.00000),
(0.04706, 0.00000, 1.00000),
(0.06275, 0.00000, 1.00000),
(0.07843, 0.00000, 1.00000),
(0.09412, 0.00000, 1.00000),
(0.10980, 0.00000, 1.00000),
(0.12549, 0.00000, 1.00000),
(0.14118, 0.00000, 1.00000),
(0.15686, 0.00000, 1.00000),
(0.17255, 0.00000, 1.00000),
(0.18824, 0.00000, 1.00000),
(0.20392, 0.00000, 1.00000),
(0.21961, 0.00000, 1.00000),
(0.23529, 0.00000, 1.00000),
(0.25098, 0.00000, 1.00000),
(0.26667, 0.00000, 1.00000),
(0.28235, 0.00000, 1.00000),
(0.29804, 0.00000, 1.00000),
(0.31373, 0.00000, 1.00000),
(0.33333, 0.00000, 1.00000),
(0.34902, 0.00000, 1.00000),
(0.36471, 0.00000, 1.00000),
(0.38039, 0.00000, 1.00000),
(0.39608, 0.00000, 1.00000),
(0.41176, 0.00000, 1.00000),
(0.42745, 0.00000, 1.00000),
(0.44314, 0.00000, 1.00000),
(0.45882, 0.00000, 1.00000),
(0.47451, 0.00000, 1.00000),
(0.49020, 0.00000, 1.00000),
(0.50588, 0.00000, 1.00000),
(0.52157, 0.00000, 1.00000),
(0.53725, 0.00000, 1.00000),
(0.55294, 0.00000, 1.00000),
(0.56863, 0.00000, 1.00000),
(0.58431, 0.00000, 1.00000),
(0.60000, 0.00000, 1.00000),
(0.61569, 0.00000, 1.00000),
(0.63137, 0.00000, 1.00000),
(0.64706, 0.00000, 1.00000),
(0.66667, 0.00000, 1.00000),
(0.68235, 0.00000, 1.00000),
(0.69804, 0.00000, 1.00000),
(0.71373, 0.00000, 1.00000),
(0.72941, 0.00000, 1.00000),
(0.74510, 0.00000, 1.00000),
(0.76078, 0.00000, 1.00000),
(0.77647, 0.00000, 1.00000),
(0.79216, 0.00000, 1.00000),
(0.80784, 0.00000, 1.00000),
(0.82353, 0.00000, 1.00000),
(0.83922, 0.00000, 1.00000),
(0.85490, 0.00000, 1.00000),
(0.87059, 0.00000, 1.00000),
(0.88627, 0.00000, 1.00000),
(0.90196, 0.00000, 1.00000),
(0.91765, 0.00000, 1.00000),
(0.93333, 0.00000, 1.00000),
(0.94902, 0.00000, 1.00000),
(0.96471, 0.00000, 1.00000),
(0.98039, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.00000, 0.98431),
(1.00000, 0.00000, 0.96863),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.93725),
(1.00000, 0.00000, 0.92157),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.89020),
(1.00000, 0.00000, 0.87451),
(1.00000, 0.00000, 0.85490),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.82353),
(1.00000, 0.00000, 0.80784),
(1.00000, 0.00000, 0.79216),
(1.00000, 0.00000, 0.77647),
(1.00000, 0.00000, 0.76078),
(1.00000, 0.00000, 0.74510),
(1.00000, 0.00000, 0.72941),
(1.00000, 0.00000, 0.70980),
(1.00000, 0.00000, 0.69412),
(1.00000, 0.00000, 0.67843),
(1.00000, 0.00000, 0.66275),
(1.00000, 0.00000, 0.64706),
(1.00000, 0.00000, 0.63137),
(1.00000, 0.00000, 0.61569),
(1.00000, 0.00000, 0.60000),
(1.00000, 0.00000, 0.58431),
(1.00000, 0.00000, 0.56471),
(1.00000, 0.00000, 0.54902),
(1.00000, 0.00000, 0.53333),
(1.00000, 0.00000, 0.51765),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.48627),
(1.00000, 0.00000, 0.47059),
(1.00000, 0.00000, 0.45490),
(1.00000, 0.00000, 0.43922),
(1.00000, 0.00000, 0.41961),
(1.00000, 0.00000, 0.40392),
(1.00000, 0.00000, 0.38824),
(1.00000, 0.00000, 0.37255),
(1.00000, 0.00000, 0.35686),
(1.00000, 0.00000, 0.34118),
(1.00000, 0.00000, 0.32549),
(1.00000, 0.00000, 0.30980),
(1.00000, 0.00000, 0.29412),
(1.00000, 0.00000, 0.27451),
(1.00000, 0.00000, 0.25882),
(1.00000, 0.00000, 0.24314),
(1.00000, 0.00000, 0.22745),
(1.00000, 0.00000, 0.21176),
(1.00000, 0.00000, 0.19608),
(1.00000, 0.00000, 0.18039),
(1.00000, 0.00000, 0.16471),
(1.00000, 0.00000, 0.14902),
(1.00000, 0.00000, 0.12941),
(1.00000, 0.00000, 0.11373),
(1.00000, 0.00000, 0.09804),
(1.00000, 0.00000, 0.08235),
(1.00000, 0.00000, 0.06667),
(1.00000, 0.00000, 0.05098),
(1.00000, 0.00000, 0.03529),
(1.00000, 0.00000, 0.01961),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_stairs9 = (
(0.00000, 0.00000, 0.00000),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.19608, 0.19608, 0.19608),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.60784, 0.00000, 0.47451),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.78431, 0.00000, 0.00000),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.92549, 0.65490, 0.37255),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.56863, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.69412, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
)
cmap_backgr = (
(0.00000, 0.00000, 0.00000),
(0.01587, 0.01587, 0.01587),
(0.03174, 0.03174, 0.03174),
(0.04761, 0.04761, 0.04761),
(0.06348, 0.06348, 0.06348),
(0.07935, 0.07935, 0.07935),
(0.09522, 0.09522, 0.09522),
(0.11109, 0.11109, 0.11109),
(0.12696, 0.12696, 0.12696),
(0.14283, 0.14283, 0.14283),
(0.15870, 0.15870, 0.15870),
(0.17457, 0.17457, 0.17457),
(0.19044, 0.19044, 0.19044),
(0.20631, 0.20631, 0.20631),
(0.22218, 0.22218, 0.22218),
(0.23805, 0.23805, 0.23805),
(0.25392, 0.25392, 0.25392),
(0.26979, 0.26979, 0.26979),
(0.28566, 0.28566, 0.28566),
(0.30153, 0.30153, 0.30153),
(0.31740, 0.31740, 0.31740),
(0.33327, 0.33327, 0.33327),
(0.34914, 0.34914, 0.34914),
(0.36501, 0.36501, 0.36501),
(0.38088, 0.38088, 0.38088),
(0.39675, 0.39675, 0.39675),
(0.41262, 0.41262, 0.41262),
(0.42849, 0.42849, 0.42849),
(0.44436, 0.44436, 0.44436),
(0.46023, 0.46023, 0.46023),
(0.47610, 0.47610, 0.47610),
(0.49197, 0.49197, 0.49197),
(0.50784, 0.50784, 0.50784),
(0.52371, 0.52371, 0.52371),
(0.53958, 0.53958, 0.53958),
(0.55545, 0.55545, 0.55545),
(0.57132, 0.57132, 0.57132),
(0.58719, 0.58719, 0.58719),
(0.60306, 0.60306, 0.60306),
(0.61893, 0.61893, 0.61893),
(0.63480, 0.63480, 0.63480),
(0.65067, 0.65067, 0.65067),
(0.66654, 0.66654, 0.66654),
(0.68241, 0.68241, 0.68241),
(0.69828, 0.69828, 0.69828),
(0.71415, 0.71415, 0.71415),
(0.73002, 0.73002, 0.73002),
(0.74589, 0.74589, 0.74589),
(0.76176, 0.76176, 0.76176),
(0.77763, 0.77763, 0.77763),
(0.79350, 0.79350, 0.79350),
(0.80937, 0.80937, 0.80937),
(0.82524, 0.82524, 0.82524),
(0.84111, 0.84111, 0.84111),
(0.85698, 0.85698, 0.85698),
(0.87285, 0.87285, 0.87285),
(0.88872, 0.88872, 0.88872),
(0.90459, 0.90459, 0.90459),
(0.92046, 0.92046, 0.92046),
(0.93633, 0.93633, 0.93633),
(0.95220, 0.95220, 0.95220),
(0.96807, 0.96807, 0.96807),
(0.98394, 0.98394, 0.98394),
(0.99981, 0.99981, 0.99981),
(0.00000, 0.00000, 0.99981),
(0.00000, 0.01587, 0.98394),
(0.00000, 0.03174, 0.96807),
(0.00000, 0.04761, 0.95220),
(0.00000, 0.06348, 0.93633),
(0.00000, 0.07935, 0.92046),
(0.00000, 0.09522, 0.90459),
(0.00000, 0.11109, 0.88872),
(0.00000, 0.12696, 0.87285),
(0.00000, 0.14283, 0.85698),
(0.00000, 0.15870, 0.84111),
(0.00000, 0.17457, 0.82524),
(0.00000, 0.19044, 0.80937),
(0.00000, 0.20631, 0.79350),
(0.00000, 0.22218, 0.77763),
(0.00000, 0.23805, 0.76176),
(0.00000, 0.25392, 0.74589),
(0.00000, 0.26979, 0.73002),
(0.00000, 0.28566, 0.71415),
(0.00000, 0.30153, 0.69828),
(0.00000, 0.31740, 0.68241),
(0.00000, 0.33327, 0.66654),
(0.00000, 0.34914, 0.65067),
(0.00000, 0.36501, 0.63480),
(0.00000, 0.38088, 0.61893),
(0.00000, 0.39675, 0.60306),
(0.00000, 0.41262, 0.58719),
(0.00000, 0.42849, 0.57132),
(0.00000, 0.44436, 0.55545),
(0.00000, 0.46023, 0.53958),
(0.00000, 0.47610, 0.52371),
(0.00000, 0.49197, 0.50784),
(0.00000, 0.50784, 0.49197),
(0.00000, 0.52371, 0.47610),
(0.00000, 0.53958, 0.46023),
(0.00000, 0.55545, 0.44436),
(0.00000, 0.57132, 0.42849),
(0.00000, 0.58719, 0.41262),
(0.00000, 0.60306, 0.39675),
(0.00000, 0.61893, 0.38088),
(0.00000, 0.63480, 0.36501),
(0.00000, 0.65067, 0.34914),
(0.00000, 0.66654, 0.33327),
(0.00000, 0.68241, 0.31740),
(0.00000, 0.69828, 0.30153),
(0.00000, 0.71415, 0.28566),
(0.00000, 0.73002, 0.26979),
(0.00000, 0.74589, 0.25392),
(0.00000, 0.76176, 0.23805),
(0.00000, 0.77763, 0.22218),
(0.00000, 0.79350, 0.20631),
(0.00000, 0.80937, 0.19044),
(0.00000, 0.82524, 0.17457),
(0.00000, 0.84111, 0.15870),
(0.00000, 0.85698, 0.14283),
(0.00000, 0.87285, 0.12696),
(0.00000, 0.88872, 0.11109),
(0.00000, 0.90459, 0.09522),
(0.00000, 0.92046, 0.07935),
(0.00000, 0.93633, 0.06348),
(0.00000, 0.95220, 0.04761),
(0.00000, 0.96807, 0.03174),
(0.00000, 0.98394, 0.01587),
(0.00000, 0.99981, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.01587, 1.00000, 0.00000),
(0.03174, 1.00000, 0.00000),
(0.04761, 1.00000, 0.00000),
(0.06348, 1.00000, 0.00000),
(0.07935, 1.00000, 0.00000),
(0.09522, 1.00000, 0.00000),
(0.11109, 1.00000, 0.00000),
(0.12696, 1.00000, 0.00000),
(0.14283, 1.00000, 0.00000),
(0.15870, 1.00000, 0.00000),
(0.17457, 1.00000, 0.00000),
(0.19044, 1.00000, 0.00000),
(0.20631, 1.00000, 0.00000),
(0.22218, 1.00000, 0.00000),
(0.23805, 1.00000, 0.00000),
(0.25392, 1.00000, 0.00000),
(0.26979, 1.00000, 0.00000),
(0.28566, 1.00000, 0.00000),
(0.30153, 1.00000, 0.00000),
(0.31740, 1.00000, 0.00000),
(0.33327, 1.00000, 0.00000),
(0.34914, 1.00000, 0.00000),
(0.36501, 1.00000, 0.00000),
(0.38088, 1.00000, 0.00000),
(0.39675, 1.00000, 0.00000),
(0.41262, 1.00000, 0.00000),
(0.42849, 1.00000, 0.00000),
(0.44436, 1.00000, 0.00000),
(0.46023, 1.00000, 0.00000),
(0.47610, 1.00000, 0.00000),
(0.49197, 1.00000, 0.00000),
(0.50784, 1.00000, 0.00000),
(0.52371, 1.00000, 0.00000),
(0.53958, 1.00000, 0.00000),
(0.55545, 1.00000, 0.00000),
(0.57132, 1.00000, 0.00000),
(0.58719, 1.00000, 0.00000),
(0.60306, 1.00000, 0.00000),
(0.61893, 1.00000, 0.00000),
(0.63480, 1.00000, 0.00000),
(0.65067, 1.00000, 0.00000),
(0.66654, 1.00000, 0.00000),
(0.68241, 1.00000, 0.00000),
(0.69828, 1.00000, 0.00000),
(0.71415, 1.00000, 0.00000),
(0.73002, 1.00000, 0.00000),
(0.74589, 1.00000, 0.00000),
(0.76176, 1.00000, 0.00000),
(0.77763, 1.00000, 0.00000),
(0.79350, 1.00000, 0.00000),
(0.80937, 1.00000, 0.00000),
(0.82524, 1.00000, 0.00000),
(0.84111, 1.00000, 0.00000),
(0.85698, 1.00000, 0.00000),
(0.87285, 1.00000, 0.00000),
(0.88872, 1.00000, 0.00000),
(0.90459, 1.00000, 0.00000),
(0.92046, 1.00000, 0.00000),
(0.93633, 1.00000, 0.00000),
(0.95220, 1.00000, 0.00000),
(0.96807, 1.00000, 0.00000),
(0.98394, 1.00000, 0.00000),
(0.99981, 1.00000, 0.00000),
(1.00000, 0.99981, 0.00000),
(1.00000, 0.98394, 0.00000),
(1.00000, 0.96807, 0.00000),
(1.00000, 0.95220, 0.00000),
(1.00000, 0.93633, 0.00000),
(1.00000, 0.92046, 0.00000),
(1.00000, 0.90459, 0.00000),
(1.00000, 0.88872, 0.00000),
(1.00000, 0.87285, 0.00000),
(1.00000, 0.85698, 0.00000),
(1.00000, 0.84111, 0.00000),
(1.00000, 0.82524, 0.00000),
(1.00000, 0.80937, 0.00000),
(1.00000, 0.79350, 0.00000),
(1.00000, 0.77763, 0.00000),
(1.00000, 0.76176, 0.00000),
(1.00000, 0.74589, 0.00000),
(1.00000, 0.73002, 0.00000),
(1.00000, 0.71415, 0.00000),
(1.00000, 0.69828, 0.00000),
(1.00000, 0.68241, 0.00000),
(1.00000, 0.66654, 0.00000),
(1.00000, 0.65067, 0.00000),
(1.00000, 0.63480, 0.00000),
(1.00000, 0.61893, 0.00000),
(1.00000, 0.60306, 0.00000),
(1.00000, 0.58719, 0.00000),
(1.00000, 0.57132, 0.00000),
(1.00000, 0.55545, 0.00000),
(1.00000, 0.53958, 0.00000),
(1.00000, 0.52371, 0.00000),
(1.00000, 0.50784, 0.00000),
(1.00000, 0.49197, 0.00000),
(1.00000, 0.47610, 0.00000),
(1.00000, 0.46023, 0.00000),
(1.00000, 0.44436, 0.00000),
(1.00000, 0.42849, 0.00000),
(1.00000, 0.41262, 0.00000),
(1.00000, 0.39675, 0.00000),
(1.00000, 0.38088, 0.00000),
(1.00000, 0.36501, 0.00000),
(1.00000, 0.34914, 0.00000),
(1.00000, 0.33327, 0.00000),
(1.00000, 0.31740, 0.00000),
(1.00000, 0.30153, 0.00000),
(1.00000, 0.28566, 0.00000),
(1.00000, 0.26979, 0.00000),
(1.00000, 0.25392, 0.00000),
(1.00000, 0.23805, 0.00000),
(1.00000, 0.22218, 0.00000),
(1.00000, 0.20631, 0.00000),
(1.00000, 0.19044, 0.00000),
(1.00000, 0.17457, 0.00000),
(1.00000, 0.15870, 0.00000),
(1.00000, 0.14283, 0.00000),
(1.00000, 0.12696, 0.00000),
(1.00000, 0.11109, 0.00000),
(1.00000, 0.09522, 0.00000),
(1.00000, 0.07935, 0.00000),
(1.00000, 0.06348, 0.00000),
(1.00000, 0.04761, 0.00000),
(1.00000, 0.03174, 0.00000),
(1.00000, 0.01587, 0.00000),
(1.00000, 0.00000, 0.00000),
)
cmap_idl12 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.32941, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 0.65882, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.32941),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(0.50196, 0.00000, 1.00000),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.50196),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.25098),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86275, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.86667, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87059, 0.74510, 0.74510),
(0.87451, 0.74510, 0.74510),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(0.86275, 0.86275, 0.86275),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow1 = (
(0.00000, 0.00000, 0.16471),
(0.02745, 0.00000, 0.18431),
(0.05882, 0.00000, 0.20000),
(0.08627, 0.00000, 0.21961),
(0.11373, 0.00000, 0.23922),
(0.14510, 0.00000, 0.25882),
(0.17647, 0.00000, 0.27843),
(0.20392, 0.00000, 0.29804),
(0.23137, 0.00000, 0.31765),
(0.26275, 0.00000, 0.33725),
(0.29412, 0.00000, 0.35686),
(0.32157, 0.00000, 0.37647),
(0.35294, 0.00000, 0.39608),
(0.38039, 0.00000, 0.41569),
(0.41176, 0.00000, 0.43529),
(0.43922, 0.00000, 0.45490),
(0.47059, 0.00000, 0.47451),
(0.49804, 0.00000, 0.49412),
(0.52941, 0.00000, 0.51373),
(0.55686, 0.00000, 0.53725),
(0.58824, 0.00000, 0.55686),
(0.55686, 0.00000, 0.57647),
(0.52941, 0.00000, 0.59608),
(0.49804, 0.00000, 0.61569),
(0.47059, 0.00000, 0.63922),
(0.43922, 0.00000, 0.65882),
(0.41176, 0.00000, 0.67843),
(0.38039, 0.00000, 0.70196),
(0.35294, 0.00000, 0.72157),
(0.32157, 0.00000, 0.74118),
(0.29412, 0.00000, 0.76471),
(0.26275, 0.00000, 0.78431),
(0.23137, 0.00000, 0.80392),
(0.20392, 0.00000, 0.82745),
(0.17647, 0.00000, 0.84706),
(0.14510, 0.00000, 0.87059),
(0.11373, 0.00000, 0.89020),
(0.08627, 0.00000, 0.91373),
(0.05882, 0.00000, 0.93333),
(0.02745, 0.00000, 0.95686),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.97647),
(0.00000, 0.00784, 0.95686),
(0.00000, 0.01569, 0.93333),
(0.00000, 0.02353, 0.91373),
(0.00000, 0.03137, 0.89020),
(0.00000, 0.03922, 0.87059),
(0.00000, 0.05098, 0.85098),
(0.00000, 0.06275, 0.83137),
(0.00000, 0.07843, 0.81176),
(0.00000, 0.09804, 0.79216),
(0.00000, 0.11765, 0.77255),
(0.00000, 0.13725, 0.75294),
(0.00000, 0.15686, 0.73333),
(0.00000, 0.17647, 0.71373),
(0.00000, 0.19608, 0.69412),
(0.00000, 0.21569, 0.67451),
(0.00000, 0.23529, 0.65882),
(0.00000, 0.25490, 0.64314),
(0.00000, 0.27059, 0.62745),
(0.00000, 0.28627, 0.61176),
(0.00000, 0.30196, 0.59608),
(0.00000, 0.32157, 0.58039),
(0.00000, 0.33333, 0.56471),
(0.00000, 0.34510, 0.54902),
(0.00000, 0.35686, 0.53333),
(0.00000, 0.36863, 0.51765),
(0.00000, 0.38039, 0.50196),
(0.00000, 0.39216, 0.48627),
(0.00000, 0.40392, 0.47059),
(0.00000, 0.41176, 0.45882),
(0.00000, 0.42353, 0.44706),
(0.00000, 0.43529, 0.43529),
(0.00000, 0.44706, 0.42353),
(0.00000, 0.45882, 0.41176),
(0.00000, 0.46667, 0.40000),
(0.00000, 0.47843, 0.38824),
(0.00000, 0.49020, 0.37647),
(0.00000, 0.49804, 0.36471),
(0.00000, 0.50980, 0.35294),
(0.00000, 0.52157, 0.34118),
(0.00000, 0.52941, 0.32941),
(0.00000, 0.54118, 0.31765),
(0.00000, 0.55294, 0.30588),
(0.00000, 0.56078, 0.29412),
(0.00000, 0.57255, 0.28235),
(0.00000, 0.58431, 0.27059),
(0.00000, 0.59216, 0.25882),
(0.00000, 0.60392, 0.24706),
(0.00000, 0.61176, 0.23529),
(0.00000, 0.62353, 0.22353),
(0.00000, 0.63137, 0.21176),
(0.00000, 0.64314, 0.20000),
(0.00000, 0.65098, 0.18824),
(0.00000, 0.66275, 0.17647),
(0.00000, 0.67059, 0.16471),
(0.00000, 0.68235, 0.15294),
(0.00000, 0.69020, 0.14118),
(0.00000, 0.70196, 0.12941),
(0.00000, 0.70980, 0.11765),
(0.00000, 0.72157, 0.10196),
(0.00000, 0.72941, 0.08627),
(0.00000, 0.74118, 0.07059),
(0.00000, 0.74902, 0.05490),
(0.00000, 0.76078, 0.03922),
(0.00000, 0.76863, 0.02353),
(0.00000, 0.77647, 0.00000),
(0.00000, 0.78824, 0.00000),
(0.00000, 0.79608, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.81569, 0.00000),
(0.00000, 0.82353, 0.00000),
(0.00000, 0.83529, 0.00000),
(0.00000, 0.84314, 0.00000),
(0.00000, 0.85490, 0.00000),
(0.00000, 0.86275, 0.00000),
(0.00000, 0.87059, 0.00000),
(0.00000, 0.88235, 0.00000),
(0.00000, 0.89020, 0.00000),
(0.00000, 0.89804, 0.00000),
(0.00000, 0.90980, 0.00000),
(0.00000, 0.91765, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.93725, 0.00000),
(0.00000, 0.94510, 0.00000),
(0.00000, 0.95294, 0.00000),
(0.00000, 0.96078, 0.00000),
(0.00000, 0.97255, 0.00000),
(0.00000, 0.98039, 0.00000),
(0.00000, 0.98824, 0.00000),
(0.00784, 1.00000, 0.00000),
(0.01569, 0.98824, 0.00000),
(0.02353, 0.98039, 0.00000),
(0.03137, 0.97255, 0.00000),
(0.04314, 0.96078, 0.00000),
(0.05490, 0.95294, 0.00000),
(0.06667, 0.94510, 0.00000),
(0.07843, 0.93725, 0.00000),
(0.09020, 0.92549, 0.00000),
(0.10196, 0.91765, 0.00000),
(0.11373, 0.90980, 0.00000),
(0.12549, 0.89804, 0.00000),
(0.13725, 0.89020, 0.00000),
(0.14902, 0.88235, 0.00000),
(0.16471, 0.87059, 0.00000),
(0.20000, 0.86275, 0.00000),
(0.23529, 0.85490, 0.00000),
(0.26667, 0.84314, 0.00000),
(0.30588, 0.83529, 0.00000),
(0.34118, 0.82353, 0.00000),
(0.37647, 0.81569, 0.00000),
(0.41176, 0.80784, 0.00000),
(0.44706, 0.79608, 0.00000),
(0.48627, 0.78824, 0.00000),
(0.52157, 0.77647, 0.00000),
(0.56078, 0.76863, 0.00000),
(0.59608, 0.77647, 0.00000),
(0.63529, 0.78824, 0.00000),
(0.67059, 0.80000, 0.00000),
(0.70980, 0.81176, 0.00000),
(0.74902, 0.82745, 0.00000),
(0.78431, 0.84314, 0.00000),
(0.82353, 0.85882, 0.00000),
(0.85882, 0.87059, 0.00000),
(0.89804, 0.89020, 0.00000),
(0.93333, 0.90196, 0.00000),
(0.97647, 0.92157, 0.00000),
(1.00000, 0.93333, 0.00000),
(1.00000, 0.95294, 0.00000),
(1.00000, 0.96863, 0.00000),
(1.00000, 0.98824, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.97647, 0.00000),
(1.00000, 0.93725, 0.00000),
(1.00000, 0.89804, 0.00000),
(1.00000, 0.85882, 0.00000),
(1.00000, 0.81961, 0.00000),
(1.00000, 0.78039, 0.00000),
(1.00000, 0.74118, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.66275, 0.00000),
(1.00000, 0.62353, 0.00000),
(1.00000, 0.58431, 0.00000),
(1.00000, 0.54510, 0.00000),
(1.00000, 0.50980, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.43137, 0.00000),
(1.00000, 0.39216, 0.00000),
(1.00000, 0.35294, 0.00000),
(1.00000, 0.31765, 0.00000),
(1.00000, 0.27451, 0.00000),
(1.00000, 0.23922, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.12941, 0.00000),
(1.00000, 0.09804, 0.00000),
(1.00000, 0.08235, 0.00000),
(1.00000, 0.06275, 0.00000),
(1.00000, 0.04706, 0.00000),
(1.00000, 0.02353, 0.00000),
(1.00000, 0.00000, 0.00000),
(0.99216, 0.00000, 0.00000),
(0.98431, 0.00000, 0.00000),
(0.97647, 0.00000, 0.00000),
(0.96863, 0.00000, 0.00000),
(0.96078, 0.00000, 0.00000),
(0.95294, 0.00000, 0.00000),
(0.94510, 0.00000, 0.00000),
(0.93725, 0.00000, 0.00000),
(0.92941, 0.00000, 0.00000),
(0.92157, 0.00000, 0.00000),
(0.91373, 0.00000, 0.00000),
(0.90588, 0.00000, 0.00000),
(0.89804, 0.00000, 0.00000),
(0.89020, 0.00000, 0.00000),
(0.88235, 0.00000, 0.00000),
(0.87451, 0.00000, 0.00000),
(0.86667, 0.00000, 0.00000),
(0.85882, 0.00000, 0.00000),
(0.85098, 0.00000, 0.00000),
(0.84314, 0.00000, 0.00000),
(0.83529, 0.00000, 0.00000),
(0.82745, 0.00000, 0.00000),
(0.81961, 0.00000, 0.00000),
(0.81176, 0.00000, 0.00000),
(0.80392, 0.00000, 0.00000),
(0.79608, 0.00000, 0.00000),
(0.78824, 0.00000, 0.00000),
(0.78039, 0.00000, 0.00000),
(0.77255, 0.00000, 0.00000),
(0.76471, 0.00000, 0.00000),
(0.75686, 0.00000, 0.00000),
(0.74902, 0.00000, 0.00000),
(0.74118, 0.00000, 0.00000),
(0.73333, 0.00000, 0.00000),
)
cmap_idl14 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.16471, 0.00000),
(0.00000, 0.33333, 0.00000),
(0.00000, 0.49804, 0.00000),
(0.00000, 0.66667, 0.00000),
(0.00000, 0.83137, 0.00000),
(0.00000, 1.00000, 0.00000),
(0.00000, 0.96471, 0.00000),
(0.00000, 0.92549, 0.00000),
(0.00000, 0.88627, 0.00000),
(0.00000, 0.84706, 0.00000),
(0.00000, 0.80784, 0.00000),
(0.00000, 0.77255, 0.00000),
(0.00000, 0.73333, 0.00000),
(0.00000, 0.69412, 0.00000),
(0.00000, 0.65490, 0.00000),
(0.00000, 0.61569, 0.00000),
(0.00000, 0.58039, 0.00000),
(0.00000, 0.54118, 0.00000),
(0.00000, 0.50196, 0.00000),
(0.00000, 0.46275, 0.00000),
(0.00000, 0.42353, 0.00000),
(0.00000, 0.38824, 0.00000),
(0.00000, 0.34902, 0.00000),
(0.00000, 0.30980, 0.00000),
(0.00000, 0.27059, 0.00000),
(0.00000, 0.23137, 0.00000),
(0.00000, 0.19608, 0.00000),
(0.00000, 0.15686, 0.00000),
(0.00000, 0.11765, 0.00000),
(0.00000, 0.07843, 0.00000),
(0.00000, 0.03922, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.03137),
(0.00000, 0.00000, 0.06275),
(0.00000, 0.00000, 0.09412),
(0.00000, 0.00000, 0.12549),
(0.00000, 0.00000, 0.16078),
(0.00000, 0.00000, 0.19216),
(0.00000, 0.00000, 0.22353),
(0.00000, 0.00000, 0.25490),
(0.00000, 0.00000, 0.29020),
(0.00000, 0.00000, 0.32157),
(0.00000, 0.00000, 0.35294),
(0.00000, 0.00000, 0.38431),
(0.00000, 0.00000, 0.41569),
(0.00000, 0.00000, 0.45098),
(0.00000, 0.00000, 0.48235),
(0.00000, 0.00000, 0.51373),
(0.00000, 0.00000, 0.54510),
(0.00000, 0.00000, 0.58039),
(0.00000, 0.00000, 0.61176),
(0.00000, 0.00000, 0.64314),
(0.00000, 0.00000, 0.67451),
(0.00000, 0.00000, 0.70588),
(0.00000, 0.00000, 0.74118),
(0.00000, 0.00000, 0.77255),
(0.00000, 0.00000, 0.80392),
(0.00000, 0.00000, 0.83529),
(0.00000, 0.00000, 0.87059),
(0.00000, 0.00000, 0.90196),
(0.00000, 0.00000, 0.93333),
(0.00000, 0.00000, 0.96471),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.00000, 0.00000),
(0.02745, 0.00000, 0.01961),
(0.05882, 0.00000, 0.03922),
(0.09020, 0.00000, 0.05882),
(0.12157, 0.00000, 0.08235),
(0.15294, 0.00000, 0.10196),
(0.18431, 0.00000, 0.12157),
(0.21569, 0.00000, 0.14510),
(0.24706, 0.00000, 0.16471),
(0.27451, 0.00000, 0.18431),
(0.30588, 0.00000, 0.20784),
(0.33725, 0.00000, 0.22745),
(0.36863, 0.00000, 0.24706),
(0.40000, 0.00000, 0.27059),
(0.43137, 0.00000, 0.29020),
(0.46275, 0.00000, 0.30980),
(0.49412, 0.00000, 0.33333),
(0.52549, 0.00000, 0.34902),
(0.55686, 0.00000, 0.36863),
(0.59216, 0.00000, 0.38431),
(0.62353, 0.00000, 0.40392),
(0.65882, 0.00000, 0.42353),
(0.69020, 0.00000, 0.43922),
(0.72157, 0.00000, 0.45882),
(0.75686, 0.00000, 0.47451),
(0.78824, 0.00000, 0.49412),
(0.82353, 0.00000, 0.51373),
(0.85490, 0.00000, 0.52941),
(0.88627, 0.00000, 0.54902),
(0.92157, 0.00000, 0.56471),
(0.95294, 0.00000, 0.58431),
(0.98824, 0.00000, 0.60392),
(0.00000, 0.00000, 0.00000),
(0.00392, 0.00000, 0.00000),
(0.00784, 0.00000, 0.00000),
(0.01176, 0.00000, 0.00000),
(0.01569, 0.00000, 0.00000),
(0.01961, 0.00000, 0.00000),
(0.02353, 0.00000, 0.00000),
(0.02745, 0.00000, 0.00000),
(0.03137, 0.00000, 0.00000),
(0.03529, 0.00000, 0.00000),
(0.03922, 0.00000, 0.00000),
(0.04314, 0.00000, 0.00000),
(0.04706, 0.00000, 0.00000),
(0.05490, 0.00000, 0.00000),
(0.06275, 0.00000, 0.00000),
(0.07059, 0.00000, 0.00000),
(0.07843, 0.00000, 0.00000),
(0.09020, 0.00000, 0.00000),
(0.09804, 0.00000, 0.00000),
(0.10588, 0.00000, 0.00000),
(0.11373, 0.00000, 0.00000),
(0.12549, 0.00000, 0.00000),
(0.13333, 0.00000, 0.00000),
(0.14118, 0.00000, 0.00000),
(0.14902, 0.00000, 0.00000),
(0.16078, 0.00000, 0.00000),
(0.17255, 0.00000, 0.00000),
(0.18431, 0.00000, 0.00000),
(0.19608, 0.00000, 0.00000),
(0.20784, 0.00000, 0.00000),
(0.21961, 0.00000, 0.00000),
(0.23137, 0.00000, 0.00000),
(0.24706, 0.00000, 0.00000),
(0.25882, 0.00000, 0.00000),
(0.27059, 0.00000, 0.00000),
(0.28235, 0.00000, 0.00000),
(0.29412, 0.00000, 0.00000),
(0.30588, 0.00000, 0.00000),
(0.32157, 0.00000, 0.00392),
(0.33333, 0.00000, 0.00392),
(0.34902, 0.00000, 0.00392),
(0.36471, 0.00000, 0.00392),
(0.38039, 0.00000, 0.00392),
(0.39608, 0.00000, 0.00392),
(0.41176, 0.00000, 0.00392),
(0.42353, 0.00000, 0.00392),
(0.43922, 0.00000, 0.00392),
(0.45490, 0.00000, 0.00392),
(0.47059, 0.00000, 0.00392),
(0.48627, 0.00000, 0.00392),
(0.50196, 0.00392, 0.00392),
(0.51373, 0.00392, 0.00392),
(0.52941, 0.00392, 0.00392),
(0.54510, 0.00392, 0.00392),
(0.56078, 0.00392, 0.00392),
(0.57647, 0.00392, 0.00392),
(0.59216, 0.00392, 0.00392),
(0.60784, 0.00392, 0.00392),
(0.62353, 0.00392, 0.00392),
(0.63922, 0.00392, 0.00392),
(0.65490, 0.00392, 0.00392),
(0.67059, 0.00392, 0.00392),
(0.68627, 0.00392, 0.00392),
(0.69804, 0.00392, 0.00392),
(0.70980, 0.00392, 0.00392),
(0.72549, 0.00392, 0.00392),
(0.73725, 0.00392, 0.00392),
(0.75294, 0.00392, 0.00392),
(0.76471, 0.00392, 0.00392),
(0.77647, 0.00392, 0.00392),
(0.79216, 0.00392, 0.00392),
(0.80392, 0.00392, 0.00392),
(0.81961, 0.00392, 0.00392),
(0.83137, 0.00392, 0.00392),
(0.84706, 0.00392, 0.00784),
(0.85490, 0.00392, 0.00784),
(0.86275, 0.00392, 0.00784),
(0.87451, 0.00392, 0.00784),
(0.88235, 0.00392, 0.00784),
(0.89020, 0.00392, 0.00784),
(0.90196, 0.00392, 0.00784),
(0.90980, 0.00392, 0.00784),
(0.91765, 0.00392, 0.00784),
(0.92941, 0.00392, 0.00784),
(0.93725, 0.00392, 0.00784),
(0.94510, 0.00392, 0.00784),
(0.95686, 0.00784, 0.00784),
(0.95686, 0.00784, 0.00784),
(0.96078, 0.00784, 0.00784),
(0.96471, 0.00784, 0.00784),
(0.96863, 0.00784, 0.00784),
(0.96863, 0.00784, 0.00784),
(0.97255, 0.00784, 0.00784),
(0.97647, 0.00784, 0.00784),
(0.98039, 0.00784, 0.00784),
(0.98039, 0.00784, 0.00784),
(0.98431, 0.00784, 0.00784),
(0.98824, 0.00784, 0.00784),
(0.99216, 0.00784, 0.00784),
(0.99608, 0.00392, 0.00784),
(0.99608, 0.00392, 0.00784),
(0.99608, 0.01176, 0.00784),
(0.99608, 0.01961, 0.00784),
(0.99608, 0.03137, 0.00784),
(0.99608, 0.03922, 0.00784),
(0.99608, 0.04706, 0.00784),
(0.99608, 0.05882, 0.00784),
(0.99608, 0.06667, 0.00784),
(0.99608, 0.07451, 0.00784),
(0.99608, 0.08627, 0.00784),
(0.99608, 0.09412, 0.00784),
(0.99608, 0.10196, 0.00784),
(0.99608, 0.11373, 0.00784),
(0.99608, 0.12157, 0.00784),
(0.99608, 0.12941, 0.00784),
(0.99608, 0.14118, 0.00784),
(0.99608, 0.14118, 0.00784),
(0.99608, 0.14902, 0.01176),
(0.99608, 0.15686, 0.01569),
(0.99608, 0.16471, 0.01961),
(1.00000, 0.17647, 0.02745),
(1.00000, 0.18824, 0.03529),
(1.00000, 0.20000, 0.04706),
(1.00000, 0.21176, 0.05490),
(1.00000, 0.22745, 0.06667),
(1.00000, 0.23922, 0.07843),
(1.00000, 0.25098, 0.09020),
(1.00000, 0.26275, 0.10588),
(1.00000, 0.27451, 0.11765),
(1.00000, 0.28627, 0.13333),
(1.00000, 0.30196, 0.15294),
(1.00000, 0.32157, 0.17255),
(1.00000, 0.34118, 0.19216),
(1.00000, 0.36078, 0.21569),
(1.00000, 0.37647, 0.23529),
(1.00000, 0.39216, 0.25490),
(1.00000, 0.40784, 0.27843),
(1.00000, 0.42353, 0.29804),
(1.00000, 0.44314, 0.32157),
(1.00000, 0.46667, 0.34902),
(1.00000, 0.49020, 0.38039),
(1.00000, 0.51373, 0.40784),
(1.00000, 0.54118, 0.43922),
(1.00000, 0.56471, 0.47059),
(1.00000, 0.59216, 0.50196),
(1.00000, 0.61569, 0.53333),
(1.00000, 0.64314, 0.56863),
(1.00000, 0.67059, 0.60000),
(1.00000, 0.69804, 0.63529),
(1.00000, 0.72549, 0.67059),
(1.00000, 0.75686, 0.70588),
(1.00000, 0.78431, 0.74118),
(1.00000, 0.81569, 0.77647),
(1.00000, 0.84314, 0.81176),
(1.00000, 0.87451, 0.85098),
(1.00000, 0.89804, 0.87843),
(1.00000, 0.92157, 0.90980),
(1.00000, 0.94902, 0.93725),
(1.00000, 0.97255, 0.96863),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow2 = (
(0.00000, 0.00000, 0.00000),
(0.03137, 0.00000, 0.03137),
(0.06275, 0.00000, 0.06275),
(0.09412, 0.00000, 0.09412),
(0.12549, 0.00000, 0.12549),
(0.15686, 0.00000, 0.15686),
(0.18824, 0.00000, 0.18824),
(0.21961, 0.00000, 0.21961),
(0.25098, 0.00000, 0.25098),
(0.28235, 0.00000, 0.28235),
(0.31373, 0.00000, 0.31373),
(0.34510, 0.00000, 0.34510),
(0.37647, 0.00000, 0.37647),
(0.40784, 0.00000, 0.40784),
(0.43922, 0.00000, 0.43922),
(0.47059, 0.00000, 0.47059),
(0.50196, 0.00000, 0.50196),
(0.53333, 0.00000, 0.53333),
(0.56471, 0.00000, 0.56471),
(0.59608, 0.00000, 0.59608),
(0.62745, 0.00000, 0.62745),
(0.65882, 0.00000, 0.65882),
(0.69020, 0.00000, 0.69020),
(0.72157, 0.00000, 0.72157),
(0.75294, 0.00000, 0.75294),
(0.78431, 0.00000, 0.78431),
(0.81569, 0.00000, 0.81569),
(0.84706, 0.00000, 0.84706),
(0.87843, 0.00000, 0.87843),
(0.90980, 0.00000, 0.90980),
(0.94118, 0.00000, 0.94118),
(0.97255, 0.00000, 0.97255),
(1.00000, 0.00000, 1.00000),
(0.96863, 0.00000, 1.00000),
(0.93725, 0.00000, 1.00000),
(0.90588, 0.00000, 1.00000),
(0.87451, 0.00000, 1.00000),
(0.84314, 0.00000, 1.00000),
(0.81176, 0.00000, 1.00000),
(0.78039, 0.00000, 1.00000),
(0.74902, 0.00000, 1.00000),
(0.71765, 0.00000, 1.00000),
(0.68627, 0.00000, 1.00000),
(0.65490, 0.00000, 1.00000),
(0.62353, 0.00000, 1.00000),
(0.59216, 0.00000, 1.00000),
(0.56078, 0.00000, 1.00000),
(0.52941, 0.00000, 1.00000),
(0.49804, 0.00000, 1.00000),
(0.46667, 0.00000, 1.00000),
(0.43529, 0.00000, 1.00000),
(0.40392, 0.00000, 1.00000),
(0.37255, 0.00000, 1.00000),
(0.34118, 0.00000, 1.00000),
(0.30980, 0.00000, 1.00000),
(0.27843, 0.00000, 1.00000),
(0.24706, 0.00000, 1.00000),
(0.21569, 0.00000, 1.00000),
(0.18431, 0.00000, 1.00000),
(0.15294, 0.00000, 1.00000),
(0.12157, 0.00000, 1.00000),
(0.09020, 0.00000, 1.00000),
(0.05882, 0.00000, 1.00000),
(0.02745, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03137, 1.00000),
(0.00000, 0.06275, 1.00000),
(0.00000, 0.09412, 1.00000),
(0.00000, 0.12549, 1.00000),
(0.00000, 0.15686, 1.00000),
(0.00000, 0.18824, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25098, 1.00000),
(0.00000, 0.28235, 1.00000),
(0.00000, 0.31373, 1.00000),
(0.00000, 0.34510, 1.00000),
(0.00000, 0.37647, 1.00000),
(0.00000, 0.40784, 1.00000),
(0.00000, 0.43922, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.50196, 1.00000),
(0.00000, 0.53333, 1.00000),
(0.00000, 0.56471, 1.00000),
(0.00000, 0.59608, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.65882, 1.00000),
(0.00000, 0.69020, 1.00000),
(0.00000, 0.72157, 1.00000),
(0.00000, 0.75294, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.81569, 1.00000),
(0.00000, 0.84706, 1.00000),
(0.00000, 0.87843, 1.00000),
(0.00000, 0.90980, 1.00000),
(0.00000, 0.94118, 1.00000),
(0.00000, 0.97255, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.96863),
(0.00000, 1.00000, 0.93725),
(0.00000, 1.00000, 0.90588),
(0.00000, 1.00000, 0.87451),
(0.00000, 1.00000, 0.84314),
(0.00000, 1.00000, 0.81176),
(0.00000, 1.00000, 0.78039),
(0.00000, 1.00000, 0.74902),
(0.00000, 1.00000, 0.71765),
(0.00000, 1.00000, 0.68627),
(0.00000, 1.00000, 0.65490),
(0.00000, 1.00000, 0.62353),
(0.00000, 1.00000, 0.59216),
(0.00000, 1.00000, 0.56078),
(0.00000, 1.00000, 0.52941),
(0.00000, 1.00000, 0.49804),
(0.00000, 1.00000, 0.46667),
(0.00000, 1.00000, 0.43529),
(0.00000, 1.00000, 0.40392),
(0.00000, 1.00000, 0.37255),
(0.00000, 1.00000, 0.34118),
(0.00000, 1.00000, 0.30980),
(0.00000, 1.00000, 0.27843),
(0.00000, 1.00000, 0.24706),
(0.00000, 1.00000, 0.21569),
(0.00000, 1.00000, 0.18431),
(0.00000, 1.00000, 0.15294),
(0.00000, 1.00000, 0.12157),
(0.00000, 1.00000, 0.09020),
(0.00000, 1.00000, 0.05882),
(0.00000, 1.00000, 0.02745),
(0.00000, 1.00000, 0.00000),
(0.03137, 1.00000, 0.00000),
(0.06275, 1.00000, 0.00000),
(0.09412, 1.00000, 0.00000),
(0.12549, 1.00000, 0.00000),
(0.15686, 1.00000, 0.00000),
(0.18824, 1.00000, 0.00000),
(0.21961, 1.00000, 0.00000),
(0.25098, 1.00000, 0.00000),
(0.28235, 1.00000, 0.00000),
(0.31373, 1.00000, 0.00000),
(0.34510, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.40784, 1.00000, 0.00000),
(0.43922, 1.00000, 0.00000),
(0.47059, 1.00000, 0.00000),
(0.50196, 1.00000, 0.00000),
(0.53333, 1.00000, 0.00000),
(0.56471, 1.00000, 0.00000),
(0.59608, 1.00000, 0.00000),
(0.62745, 1.00000, 0.00000),
(0.65882, 1.00000, 0.00000),
(0.69020, 1.00000, 0.00000),
(0.72157, 1.00000, 0.00000),
(0.75294, 1.00000, 0.00000),
(0.78431, 1.00000, 0.00000),
(0.81569, 1.00000, 0.00000),
(0.84706, 1.00000, 0.00000),
(0.87843, 1.00000, 0.00000),
(0.90980, 1.00000, 0.00000),
(0.94118, 1.00000, 0.00000),
(0.97255, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.98431, 0.00000),
(1.00000, 0.96863, 0.00000),
(1.00000, 0.95294, 0.00000),
(1.00000, 0.93725, 0.00000),
(1.00000, 0.92157, 0.00000),
(1.00000, 0.90588, 0.00000),
(1.00000, 0.89020, 0.00000),
(1.00000, 0.87451, 0.00000),
(1.00000, 0.85882, 0.00000),
(1.00000, 0.84314, 0.00000),
(1.00000, 0.82745, 0.00000),
(1.00000, 0.81176, 0.00000),
(1.00000, 0.79608, 0.00000),
(1.00000, 0.78039, 0.00000),
(1.00000, 0.76471, 0.00000),
(1.00000, 0.74902, 0.00000),
(1.00000, 0.73333, 0.00000),
(1.00000, 0.71765, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.68627, 0.00000),
(1.00000, 0.67059, 0.00000),
(1.00000, 0.65490, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.62353, 0.00000),
(1.00000, 0.60784, 0.00000),
(1.00000, 0.59216, 0.00000),
(1.00000, 0.57647, 0.00000),
(1.00000, 0.56078, 0.00000),
(1.00000, 0.54510, 0.00000),
(1.00000, 0.52941, 0.00000),
(1.00000, 0.51373, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.48235, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.45098, 0.00000),
(1.00000, 0.43529, 0.00000),
(1.00000, 0.41961, 0.00000),
(1.00000, 0.40392, 0.00000),
(1.00000, 0.38824, 0.00000),
(1.00000, 0.37255, 0.00000),
(1.00000, 0.35686, 0.00000),
(1.00000, 0.34118, 0.00000),
(1.00000, 0.32549, 0.00000),
(1.00000, 0.30980, 0.00000),
(1.00000, 0.29412, 0.00000),
(1.00000, 0.27843, 0.00000),
(1.00000, 0.26275, 0.00000),
(1.00000, 0.24706, 0.00000),
(1.00000, 0.23137, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.18431, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.15294, 0.00000),
(1.00000, 0.13725, 0.00000),
(1.00000, 0.12157, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.07451, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.04314, 0.00000),
(1.00000, 0.02745, 0.00000),
(1.00000, 0.01176, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.03137, 0.03137),
(1.00000, 0.06275, 0.06275),
(1.00000, 0.09412, 0.09412),
(1.00000, 0.12549, 0.12549),
(1.00000, 0.15686, 0.15686),
(1.00000, 0.18824, 0.18824),
(1.00000, 0.21961, 0.21961),
(1.00000, 0.25098, 0.25098),
(1.00000, 0.28235, 0.28235),
(1.00000, 0.31373, 0.31373),
(1.00000, 0.34510, 0.34510),
(1.00000, 0.37647, 0.37647),
(1.00000, 0.40784, 0.40784),
(1.00000, 0.43922, 0.43922),
(1.00000, 0.47059, 0.47059),
(1.00000, 0.50196, 0.50196),
(1.00000, 0.53333, 0.53333),
(1.00000, 0.56471, 0.56471),
(1.00000, 0.59608, 0.59608),
(1.00000, 0.62745, 0.62745),
(1.00000, 0.65882, 0.65882),
(1.00000, 0.69020, 0.69020),
(1.00000, 0.72157, 0.72157),
(1.00000, 0.75294, 0.75294),
(1.00000, 0.78431, 0.78431),
(1.00000, 0.81569, 0.81569),
(1.00000, 0.84706, 0.84706),
(1.00000, 0.87843, 0.87843),
(1.00000, 0.90980, 0.90980),
(1.00000, 0.94118, 0.94118),
(1.00000, 0.97255, 0.97255),
)
cmap_real = (
(0.00784, 0.00392, 0.00000),
(0.01569, 0.00784, 0.00000),
(0.02353, 0.01176, 0.00000),
(0.03137, 0.01569, 0.00000),
(0.03922, 0.01961, 0.00000),
(0.04706, 0.02353, 0.00000),
(0.05490, 0.02745, 0.00000),
(0.06275, 0.03137, 0.00000),
(0.07059, 0.03529, 0.00000),
(0.07843, 0.03922, 0.00000),
(0.08627, 0.04314, 0.00000),
(0.09412, 0.04706, 0.00000),
(0.10196, 0.05098, 0.00000),
(0.10980, 0.05490, 0.00000),
(0.11765, 0.05882, 0.00000),
(0.12549, 0.06275, 0.00000),
(0.13333, 0.06667, 0.00000),
(0.14118, 0.07059, 0.00000),
(0.14902, 0.07451, 0.00000),
(0.15686, 0.07843, 0.00000),
(0.16471, 0.08235, 0.00000),
(0.17255, 0.08627, 0.00000),
(0.18039, 0.09020, 0.00000),
(0.18824, 0.09412, 0.00000),
(0.19608, 0.09804, 0.00000),
(0.20392, 0.10196, 0.00000),
(0.21176, 0.10588, 0.00000),
(0.21961, 0.10980, 0.00000),
(0.22745, 0.11373, 0.00000),
(0.23529, 0.11765, 0.00000),
(0.24314, 0.12157, 0.00000),
(0.25098, 0.12549, 0.00000),
(0.25882, 0.12941, 0.00000),
(0.26667, 0.13333, 0.00000),
(0.27451, 0.13725, 0.00000),
(0.28235, 0.14118, 0.00000),
(0.29020, 0.14510, 0.00000),
(0.29804, 0.14902, 0.00000),
(0.30588, 0.15294, 0.00000),
(0.31373, 0.15686, 0.00000),
(0.32157, 0.16078, 0.00000),
(0.32941, 0.16471, 0.00000),
(0.33725, 0.16863, 0.00000),
(0.34510, 0.17255, 0.00000),
(0.35294, 0.17647, 0.00000),
(0.36078, 0.18039, 0.00000),
(0.36863, 0.18431, 0.00000),
(0.37647, 0.18824, 0.00000),
(0.38431, 0.19216, 0.00000),
(0.39216, 0.19608, 0.00000),
(0.40000, 0.20000, 0.00000),
(0.40784, 0.20392, 0.00000),
(0.41569, 0.20784, 0.00000),
(0.42353, 0.21176, 0.00000),
(0.43137, 0.21569, 0.00000),
(0.43922, 0.21961, 0.00000),
(0.44706, 0.22353, 0.00000),
(0.45490, 0.22745, 0.00000),
(0.46275, 0.23137, 0.00000),
(0.47059, 0.23529, 0.00000),
(0.47843, 0.23922, 0.00000),
(0.48627, 0.24314, 0.00000),
(0.49412, 0.24706, 0.00000),
(0.50196, 0.25098, 0.00000),
(0.50980, 0.25490, 0.00000),
(0.51765, 0.25882, 0.00000),
(0.52549, 0.26275, 0.00000),
(0.53333, 0.26667, 0.00000),
(0.54118, 0.27059, 0.00000),
(0.54902, 0.27451, 0.00000),
(0.55686, 0.27843, 0.00000),
(0.56471, 0.28235, 0.00000),
(0.57255, 0.28627, 0.00000),
(0.58039, 0.29020, 0.00000),
(0.58824, 0.29412, 0.00000),
(0.59608, 0.29804, 0.00000),
(0.60392, 0.30196, 0.00000),
(0.61176, 0.30588, 0.00000),
(0.61961, 0.30980, 0.00000),
(0.62745, 0.31373, 0.00000),
(0.63529, 0.31765, 0.00000),
(0.64314, 0.32157, 0.00000),
(0.65098, 0.32549, 0.00000),
(0.65882, 0.32941, 0.00000),
(0.66667, 0.33333, 0.00000),
(0.67451, 0.33725, 0.00000),
(0.68235, 0.34118, 0.00000),
(0.69020, 0.34510, 0.00000),
(0.69804, 0.34902, 0.00000),
(0.70588, 0.35294, 0.00000),
(0.71373, 0.35686, 0.00000),
(0.72157, 0.36078, 0.00000),
(0.72941, 0.36471, 0.00000),
(0.73725, 0.36863, 0.00000),
(0.74510, 0.37255, 0.00000),
(0.75294, 0.37647, 0.00000),
(0.76078, 0.38039, 0.00000),
(0.76863, 0.38431, 0.00000),
(0.77647, 0.38824, 0.00000),
(0.78431, 0.39216, 0.00000),
(0.79216, 0.39608, 0.00000),
(0.80000, 0.40000, 0.00000),
(0.80784, 0.40392, 0.00000),
(0.81569, 0.40784, 0.00000),
(0.82353, 0.41176, 0.00000),
(0.83137, 0.41569, 0.00000),
(0.83922, 0.41961, 0.00000),
(0.84706, 0.42353, 0.00000),
(0.85490, 0.42745, 0.00000),
(0.86275, 0.43137, 0.00000),
(0.87059, 0.43529, 0.00000),
(0.87843, 0.43922, 0.00000),
(0.88627, 0.44314, 0.00000),
(0.89412, 0.44706, 0.00000),
(0.90196, 0.45098, 0.00000),
(0.90980, 0.45490, 0.00000),
(0.91765, 0.45882, 0.00000),
(0.92549, 0.46275, 0.00000),
(0.93333, 0.46667, 0.00000),
(0.94118, 0.47059, 0.00000),
(0.94902, 0.47451, 0.00000),
(0.95686, 0.47843, 0.00000),
(0.96471, 0.48235, 0.00000),
(0.97255, 0.48627, 0.00000),
(0.98039, 0.49020, 0.00000),
(0.98824, 0.49412, 0.00000),
(0.99608, 0.49804, 0.00000),
(1.00000, 0.50196, 0.00000),
(1.00000, 0.50588, 0.00784),
(1.00000, 0.50980, 0.01569),
(1.00000, 0.51373, 0.02353),
(1.00000, 0.51765, 0.03137),
(1.00000, 0.52157, 0.03922),
(1.00000, 0.52549, 0.04706),
(1.00000, 0.52941, 0.05490),
(1.00000, 0.53333, 0.06275),
(1.00000, 0.53725, 0.07059),
(1.00000, 0.54118, 0.07843),
(1.00000, 0.54510, 0.08627),
(1.00000, 0.54902, 0.09412),
(1.00000, 0.55294, 0.10196),
(1.00000, 0.55686, 0.10980),
(1.00000, 0.56078, 0.11765),
(1.00000, 0.56471, 0.12549),
(1.00000, 0.56863, 0.13333),
(1.00000, 0.57255, 0.14118),
(1.00000, 0.57647, 0.14902),
(1.00000, 0.58039, 0.15686),
(1.00000, 0.58431, 0.16471),
(1.00000, 0.58824, 0.17255),
(1.00000, 0.59216, 0.18039),
(1.00000, 0.59608, 0.18824),
(1.00000, 0.60000, 0.19608),
(1.00000, 0.60392, 0.20392),
(1.00000, 0.60784, 0.21176),
(1.00000, 0.61176, 0.21961),
(1.00000, 0.61569, 0.22745),
(1.00000, 0.61961, 0.23529),
(1.00000, 0.62353, 0.24314),
(1.00000, 0.62745, 0.25098),
(1.00000, 0.63137, 0.25882),
(1.00000, 0.63529, 0.26667),
(1.00000, 0.63922, 0.27451),
(1.00000, 0.64314, 0.28235),
(1.00000, 0.64706, 0.29020),
(1.00000, 0.65098, 0.29804),
(1.00000, 0.65490, 0.30588),
(1.00000, 0.65882, 0.31373),
(1.00000, 0.66275, 0.32157),
(1.00000, 0.66667, 0.32941),
(1.00000, 0.67059, 0.33725),
(1.00000, 0.67451, 0.34510),
(1.00000, 0.67843, 0.35294),
(1.00000, 0.68235, 0.36078),
(1.00000, 0.68627, 0.36863),
(1.00000, 0.69020, 0.37647),
(1.00000, 0.69412, 0.38431),
(1.00000, 0.69804, 0.39216),
(1.00000, 0.70196, 0.40000),
(1.00000, 0.70588, 0.40784),
(1.00000, 0.70980, 0.41569),
(1.00000, 0.71373, 0.42353),
(1.00000, 0.71765, 0.43137),
(1.00000, 0.72157, 0.43922),
(1.00000, 0.72549, 0.44706),
(1.00000, 0.72941, 0.45490),
(1.00000, 0.73333, 0.46275),
(1.00000, 0.73725, 0.47059),
(1.00000, 0.74118, 0.47843),
(1.00000, 0.74510, 0.48627),
(1.00000, 0.74902, 0.49412),
(1.00000, 0.75294, 0.50196),
(1.00000, 0.75686, 0.50980),
(1.00000, 0.76078, 0.51765),
(1.00000, 0.76471, 0.52549),
(1.00000, 0.76863, 0.53333),
(1.00000, 0.77255, 0.54118),
(1.00000, 0.77647, 0.54902),
(1.00000, 0.78039, 0.55686),
(1.00000, 0.78431, 0.56471),
(1.00000, 0.78824, 0.57255),
(1.00000, 0.79216, 0.58039),
(1.00000, 0.79608, 0.58824),
(1.00000, 0.80000, 0.59608),
(1.00000, 0.80392, 0.60392),
(1.00000, 0.80784, 0.61176),
(1.00000, 0.81176, 0.61961),
(1.00000, 0.81569, 0.62745),
(1.00000, 0.81961, 0.63529),
(1.00000, 0.82353, 0.64314),
(1.00000, 0.82745, 0.65098),
(1.00000, 0.83137, 0.65882),
(1.00000, 0.83529, 0.66667),
(1.00000, 0.83922, 0.67451),
(1.00000, 0.84314, 0.68235),
(1.00000, 0.84706, 0.69020),
(1.00000, 0.85098, 0.69804),
(1.00000, 0.85490, 0.70588),
(1.00000, 0.85882, 0.71373),
(1.00000, 0.86275, 0.72157),
(1.00000, 0.86667, 0.72941),
(1.00000, 0.87059, 0.73725),
(1.00000, 0.87451, 0.74510),
(1.00000, 0.87843, 0.75294),
(1.00000, 0.88235, 0.76078),
(1.00000, 0.88627, 0.76863),
(1.00000, 0.89020, 0.77647),
(1.00000, 0.89412, 0.78431),
(1.00000, 0.89804, 0.79216),
(1.00000, 0.90196, 0.80000),
(1.00000, 0.90588, 0.80784),
(1.00000, 0.90980, 0.81569),
(1.00000, 0.91373, 0.82353),
(1.00000, 0.91765, 0.83137),
(1.00000, 0.92157, 0.83922),
(1.00000, 0.92549, 0.84706),
(1.00000, 0.92941, 0.85490),
(1.00000, 0.93333, 0.86275),
(1.00000, 0.93725, 0.87059),
(1.00000, 0.94118, 0.87843),
(1.00000, 0.94510, 0.88627),
(1.00000, 0.94902, 0.89412),
(1.00000, 0.95294, 0.90196),
(1.00000, 0.95686, 0.90980),
(1.00000, 0.96078, 0.91765),
(1.00000, 0.96471, 0.92549),
(1.00000, 0.96863, 0.93333),
(1.00000, 0.97255, 0.94118),
(1.00000, 0.97647, 0.94902),
(1.00000, 0.98039, 0.95686),
(1.00000, 0.98431, 0.96471),
(1.00000, 0.98824, 0.97255),
(1.00000, 0.99216, 0.98039),
(1.00000, 0.99608, 0.98824),
(1.00000, 1.00000, 0.99608),
(1.00000, 1.00000, 1.00000),
)
cmap_idl15 = (
(0.00000, 0.00000, 0.00000),
(0.07059, 0.00392, 0.00392),
(0.14118, 0.00784, 0.01176),
(0.21176, 0.01176, 0.01961),
(0.28235, 0.01569, 0.02745),
(0.35294, 0.01961, 0.03529),
(0.42353, 0.02353, 0.04314),
(0.49804, 0.02745, 0.05098),
(0.56863, 0.03137, 0.05882),
(0.63922, 0.03529, 0.06667),
(0.70980, 0.03922, 0.07451),
(0.78039, 0.04314, 0.08235),
(0.85098, 0.04706, 0.09020),
(0.92157, 0.05098, 0.09804),
(0.99608, 0.05490, 0.10588),
(0.97647, 0.05882, 0.11373),
(0.95686, 0.06275, 0.12157),
(0.93725, 0.06667, 0.12941),
(0.91765, 0.07059, 0.13725),
(0.89804, 0.07451, 0.14510),
(0.87451, 0.07843, 0.15294),
(0.85490, 0.08235, 0.16078),
(0.83529, 0.08627, 0.16863),
(0.81569, 0.09020, 0.17647),
(0.79608, 0.09412, 0.18431),
(0.77255, 0.09804, 0.19216),
(0.75294, 0.10196, 0.20000),
(0.73333, 0.10588, 0.20784),
(0.71373, 0.10980, 0.21569),
(0.69412, 0.11373, 0.22353),
(0.67451, 0.11765, 0.23137),
(0.65098, 0.12157, 0.23922),
(0.63137, 0.12549, 0.24706),
(0.61176, 0.12941, 0.25490),
(0.59216, 0.13333, 0.26275),
(0.57255, 0.13725, 0.27059),
(0.54902, 0.14118, 0.27843),
(0.52941, 0.14510, 0.28627),
(0.50980, 0.14902, 0.29412),
(0.49020, 0.15294, 0.30196),
(0.47059, 0.15686, 0.30980),
(0.45098, 0.16078, 0.31765),
(0.42745, 0.16471, 0.32549),
(0.40784, 0.16863, 0.33333),
(0.38824, 0.17255, 0.34118),
(0.36863, 0.17647, 0.34902),
(0.34902, 0.18039, 0.35686),
(0.32549, 0.18431, 0.36471),
(0.30588, 0.18824, 0.37255),
(0.28627, 0.19216, 0.38039),
(0.26667, 0.19608, 0.38824),
(0.24706, 0.20000, 0.39608),
(0.22745, 0.20392, 0.40392),
(0.20392, 0.20784, 0.41176),
(0.18431, 0.21176, 0.41961),
(0.16471, 0.21569, 0.42745),
(0.14510, 0.21961, 0.43529),
(0.12549, 0.22353, 0.44314),
(0.10196, 0.22745, 0.45098),
(0.08235, 0.23137, 0.45882),
(0.06275, 0.23529, 0.46667),
(0.04314, 0.23922, 0.47451),
(0.02353, 0.24314, 0.48235),
(0.00000, 0.24706, 0.49020),
(0.25098, 0.25098, 0.49804),
(0.25490, 0.25490, 0.50588),
(0.25882, 0.25882, 0.51373),
(0.26275, 0.26275, 0.52157),
(0.26667, 0.26667, 0.52941),
(0.27059, 0.27059, 0.53725),
(0.27451, 0.27451, 0.54510),
(0.27843, 0.27843, 0.55294),
(0.28235, 0.28235, 0.56078),
(0.28627, 0.28627, 0.56863),
(0.29020, 0.29020, 0.57647),
(0.29412, 0.29412, 0.58431),
(0.29804, 0.29804, 0.59216),
(0.30196, 0.30196, 0.60000),
(0.30588, 0.30588, 0.60784),
(0.30980, 0.30980, 0.61569),
(0.31373, 0.31373, 0.62353),
(0.31765, 0.31765, 0.63137),
(0.32157, 0.32157, 0.63922),
(0.32549, 0.32549, 0.64706),
(0.32941, 0.32941, 0.65490),
(0.33333, 0.33333, 0.66275),
(0.33725, 0.33725, 0.67059),
(0.34118, 0.34118, 0.67843),
(0.34510, 0.34510, 0.68627),
(0.34902, 0.34902, 0.69412),
(0.35294, 0.35294, 0.70196),
(0.35686, 0.35686, 0.70980),
(0.36078, 0.36078, 0.71765),
(0.36471, 0.36471, 0.72549),
(0.36863, 0.36863, 0.73333),
(0.37255, 0.37255, 0.74118),
(0.37647, 0.37647, 0.74902),
(0.38039, 0.38039, 0.75686),
(0.38431, 0.38431, 0.76471),
(0.38824, 0.38824, 0.77255),
(0.39216, 0.39216, 0.78039),
(0.39608, 0.39608, 0.78824),
(0.40000, 0.40000, 0.79608),
(0.40392, 0.40392, 0.80392),
(0.40784, 0.40784, 0.81176),
(0.41176, 0.41176, 0.81961),
(0.41569, 0.41569, 0.82745),
(0.41961, 0.41961, 0.83529),
(0.42353, 0.42353, 0.84314),
(0.42745, 0.42745, 0.85098),
(0.43137, 0.43137, 0.85882),
(0.43529, 0.43529, 0.86667),
(0.43922, 0.43922, 0.87451),
(0.44314, 0.44314, 0.88235),
(0.44706, 0.44706, 0.89020),
(0.45098, 0.45098, 0.89804),
(0.45490, 0.45490, 0.90588),
(0.45882, 0.45882, 0.91373),
(0.46275, 0.46275, 0.92157),
(0.46667, 0.46667, 0.92941),
(0.47059, 0.47059, 0.93725),
(0.47451, 0.47451, 0.94510),
(0.47843, 0.47843, 0.95294),
(0.48235, 0.48235, 0.96078),
(0.48627, 0.48627, 0.96863),
(0.49020, 0.49020, 0.97647),
(0.49412, 0.49412, 0.98431),
(0.49804, 0.49804, 0.99216),
(0.50196, 0.50196, 1.00000),
(0.50588, 0.50588, 0.98431),
(0.50980, 0.50980, 0.96863),
(0.51373, 0.51373, 0.95294),
(0.51765, 0.51765, 0.93333),
(0.52157, 0.52157, 0.91765),
(0.52549, 0.52549, 0.90196),
(0.52941, 0.52941, 0.88627),
(0.53333, 0.53333, 0.86667),
(0.53725, 0.53725, 0.85098),
(0.54118, 0.54118, 0.83529),
(0.54510, 0.54510, 0.81961),
(0.54902, 0.54902, 0.80000),
(0.55294, 0.55294, 0.78431),
(0.55686, 0.55686, 0.76863),
(0.56078, 0.56078, 0.75294),
(0.56471, 0.56471, 0.73333),
(0.56863, 0.56863, 0.71765),
(0.57255, 0.57255, 0.70196),
(0.57647, 0.57647, 0.68627),
(0.58039, 0.58039, 0.66667),
(0.58431, 0.58431, 0.65098),
(0.58824, 0.58824, 0.63529),
(0.59216, 0.59216, 0.61961),
(0.59608, 0.59608, 0.60000),
(0.60000, 0.60000, 0.58431),
(0.60392, 0.60392, 0.56863),
(0.60784, 0.60784, 0.55294),
(0.61176, 0.61176, 0.53333),
(0.61569, 0.61569, 0.51765),
(0.61961, 0.61961, 0.50196),
(0.62353, 0.62353, 0.48627),
(0.62745, 0.62745, 0.46667),
(0.63137, 0.63137, 0.45098),
(0.63529, 0.63529, 0.43529),
(0.63922, 0.63922, 0.41961),
(0.64314, 0.64314, 0.40000),
(0.64706, 0.64706, 0.38431),
(0.65098, 0.65098, 0.36863),
(0.65490, 0.65490, 0.35294),
(0.65882, 0.65882, 0.33333),
(0.66275, 0.66275, 0.31765),
(0.66667, 0.66667, 0.30196),
(0.67059, 0.67059, 0.28627),
(0.67451, 0.67451, 0.26667),
(0.67843, 0.67843, 0.25098),
(0.68235, 0.68235, 0.23529),
(0.68627, 0.68627, 0.21961),
(0.69020, 0.69020, 0.20000),
(0.69412, 0.69412, 0.18431),
(0.69804, 0.69804, 0.16863),
(0.70196, 0.70196, 0.15294),
(0.70588, 0.70588, 0.13333),
(0.70980, 0.70980, 0.11765),
(0.71373, 0.71373, 0.10196),
(0.71765, 0.71765, 0.08627),
(0.72157, 0.72157, 0.06667),
(0.72549, 0.72549, 0.05098),
(0.72941, 0.72941, 0.03529),
(0.73333, 0.73333, 0.01961),
(0.73725, 0.73725, 0.00000),
(0.74118, 0.74118, 0.01176),
(0.74510, 0.74510, 0.02745),
(0.74902, 0.74902, 0.04314),
(0.75294, 0.75294, 0.05882),
(0.75686, 0.75686, 0.07451),
(0.76078, 0.76078, 0.08627),
(0.76471, 0.76471, 0.10196),
(0.76863, 0.76863, 0.11765),
(0.77255, 0.77255, 0.13333),
(0.77647, 0.77647, 0.14902),
(0.78039, 0.78039, 0.16078),
(0.78431, 0.78431, 0.17647),
(0.78824, 0.78824, 0.19216),
(0.79216, 0.79216, 0.20784),
(0.79608, 0.79608, 0.22353),
(0.80000, 0.80000, 0.23529),
(0.80392, 0.80392, 0.25098),
(0.80784, 0.80784, 0.26667),
(0.81176, 0.81176, 0.28235),
(0.81569, 0.81569, 0.29804),
(0.81961, 0.81961, 0.30980),
(0.82353, 0.82353, 0.32549),
(0.82745, 0.82745, 0.34118),
(0.83137, 0.83137, 0.35686),
(0.83529, 0.83529, 0.37255),
(0.83922, 0.83922, 0.38431),
(0.84314, 0.84314, 0.40000),
(0.84706, 0.84706, 0.41569),
(0.85098, 0.85098, 0.43137),
(0.85490, 0.85490, 0.44706),
(0.85882, 0.85882, 0.45882),
(0.86275, 0.86275, 0.47451),
(0.86667, 0.86667, 0.49020),
(0.87059, 0.87059, 0.50588),
(0.87451, 0.87451, 0.52157),
(0.87843, 0.87843, 0.53725),
(0.88235, 0.88235, 0.54902),
(0.88627, 0.88627, 0.56471),
(0.89020, 0.89020, 0.58039),
(0.89412, 0.89412, 0.59608),
(0.89804, 0.89804, 0.61176),
(0.90196, 0.90196, 0.62353),
(0.90588, 0.90588, 0.63922),
(0.90980, 0.90980, 0.65490),
(0.91373, 0.91373, 0.67059),
(0.91765, 0.91765, 0.68627),
(0.92157, 0.92157, 0.69804),
(0.92549, 0.92549, 0.71373),
(0.92941, 0.92941, 0.72941),
(0.93333, 0.93333, 0.74510),
(0.93725, 0.93725, 0.76078),
(0.94118, 0.94118, 0.77255),
(0.94510, 0.94510, 0.78824),
(0.94902, 0.94902, 0.80392),
(0.95294, 0.95294, 0.81961),
(0.95686, 0.95686, 0.83529),
(0.96078, 0.96078, 0.84706),
(0.96471, 0.96471, 0.86275),
(0.96863, 0.96863, 0.87843),
(0.97255, 0.97255, 0.89412),
(0.97647, 0.97647, 0.90980),
(0.98039, 0.98039, 0.92157),
(0.98431, 0.98431, 0.93725),
(0.98824, 0.98824, 0.95294),
(0.99216, 0.99216, 0.96863),
(0.99608, 0.99608, 0.98431),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow3 = (
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.00000),
(0.00000, 0.00000, 0.03922),
(0.00000, 0.00000, 0.07843),
(0.00000, 0.00000, 0.11765),
(0.00000, 0.00000, 0.15686),
(0.00000, 0.00000, 0.20000),
(0.00000, 0.00000, 0.23922),
(0.00000, 0.00000, 0.27843),
(0.00000, 0.00000, 0.31765),
(0.00000, 0.00000, 0.35686),
(0.00000, 0.00000, 0.40000),
(0.00000, 0.00000, 0.43922),
(0.00000, 0.00000, 0.47843),
(0.00000, 0.00000, 0.51765),
(0.00000, 0.00000, 0.55686),
(0.00000, 0.00000, 0.60000),
(0.00000, 0.00000, 0.63922),
(0.00000, 0.00000, 0.67843),
(0.00000, 0.00000, 0.71765),
(0.00000, 0.00000, 0.75686),
(0.00000, 0.00000, 0.80000),
(0.00000, 0.00000, 0.83922),
(0.00000, 0.00000, 0.87843),
(0.00000, 0.00000, 0.91765),
(0.00000, 0.00000, 0.95686),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03137, 1.00000),
(0.00000, 0.06275, 1.00000),
(0.00000, 0.09412, 1.00000),
(0.00000, 0.12549, 1.00000),
(0.00000, 0.15686, 1.00000),
(0.00000, 0.18824, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25490, 1.00000),
(0.00000, 0.28627, 1.00000),
(0.00000, 0.31765, 1.00000),
(0.00000, 0.34902, 1.00000),
(0.00000, 0.38039, 1.00000),
(0.00000, 0.41176, 1.00000),
(0.00000, 0.44314, 1.00000),
(0.00000, 0.47843, 1.00000),
(0.00000, 0.49804, 1.00000),
(0.00000, 0.51765, 1.00000),
(0.00000, 0.53725, 1.00000),
(0.00000, 0.55686, 1.00000),
(0.00000, 0.58039, 1.00000),
(0.00000, 0.60000, 1.00000),
(0.00000, 0.61961, 1.00000),
(0.00000, 0.63922, 1.00000),
(0.00000, 0.65882, 1.00000),
(0.00000, 0.68235, 1.00000),
(0.00000, 0.70196, 1.00000),
(0.00000, 0.72157, 1.00000),
(0.00000, 0.74118, 1.00000),
(0.00000, 0.76078, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.79608, 1.00000),
(0.00000, 0.81176, 1.00000),
(0.00000, 0.82745, 1.00000),
(0.00000, 0.83922, 1.00000),
(0.00000, 0.85490, 1.00000),
(0.00000, 0.87059, 1.00000),
(0.00000, 0.88235, 1.00000),
(0.00000, 0.89804, 1.00000),
(0.00000, 0.91373, 1.00000),
(0.00000, 0.92549, 1.00000),
(0.00000, 0.94118, 1.00000),
(0.00000, 0.95686, 1.00000),
(0.00000, 0.96863, 1.00000),
(0.00000, 0.98431, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.98039),
(0.00000, 1.00000, 0.96078),
(0.00000, 1.00000, 0.94118),
(0.00000, 1.00000, 0.92157),
(0.00000, 1.00000, 0.90196),
(0.00000, 1.00000, 0.88235),
(0.00000, 1.00000, 0.86275),
(0.00000, 1.00000, 0.84314),
(0.00000, 1.00000, 0.82353),
(0.00000, 1.00000, 0.80392),
(0.00000, 1.00000, 0.78431),
(0.00000, 1.00000, 0.76471),
(0.00000, 1.00000, 0.74510),
(0.00000, 1.00000, 0.72549),
(0.00000, 1.00000, 0.70588),
(0.00000, 1.00000, 0.65882),
(0.00000, 1.00000, 0.61176),
(0.00000, 1.00000, 0.56471),
(0.00000, 1.00000, 0.51765),
(0.00000, 1.00000, 0.47059),
(0.00000, 1.00000, 0.42353),
(0.00000, 1.00000, 0.37647),
(0.00000, 1.00000, 0.32549),
(0.00000, 1.00000, 0.27843),
(0.00000, 1.00000, 0.23137),
(0.00000, 1.00000, 0.18431),
(0.00000, 1.00000, 0.13725),
(0.00000, 1.00000, 0.09020),
(0.00000, 1.00000, 0.04314),
(0.00000, 1.00000, 0.00000),
(0.04706, 1.00000, 0.00000),
(0.09412, 1.00000, 0.00000),
(0.14118, 1.00000, 0.00000),
(0.18824, 1.00000, 0.00000),
(0.23529, 1.00000, 0.00000),
(0.28235, 1.00000, 0.00000),
(0.32941, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.42353, 1.00000, 0.00000),
(0.47059, 1.00000, 0.00000),
(0.51765, 1.00000, 0.00000),
(0.56471, 1.00000, 0.00000),
(0.61176, 1.00000, 0.00000),
(0.65882, 1.00000, 0.00000),
(0.70588, 1.00000, 0.00000),
(0.72549, 1.00000, 0.00000),
(0.74510, 1.00000, 0.00000),
(0.76471, 1.00000, 0.00000),
(0.78431, 1.00000, 0.00000),
(0.80392, 1.00000, 0.00000),
(0.82353, 1.00000, 0.00000),
(0.84314, 1.00000, 0.00000),
(0.86275, 1.00000, 0.00000),
(0.88235, 1.00000, 0.00000),
(0.90196, 1.00000, 0.00000),
(0.92157, 1.00000, 0.00000),
(0.94118, 1.00000, 0.00000),
(0.96078, 1.00000, 0.00000),
(0.98039, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(0.99608, 0.98039, 0.00000),
(0.99608, 0.96078, 0.00000),
(0.99608, 0.94510, 0.00000),
(0.99608, 0.92549, 0.00000),
(0.99608, 0.90588, 0.00000),
(0.99216, 0.89020, 0.00000),
(0.99216, 0.87059, 0.00000),
(0.99216, 0.85098, 0.00000),
(0.99216, 0.83529, 0.00000),
(0.99216, 0.81569, 0.00000),
(0.98824, 0.79608, 0.00000),
(0.98824, 0.78039, 0.00000),
(0.98824, 0.76078, 0.00000),
(0.98824, 0.74118, 0.00000),
(0.98824, 0.72549, 0.00000),
(0.98824, 0.70588, 0.00000),
(0.98824, 0.69020, 0.00000),
(0.98824, 0.67451, 0.00000),
(0.98824, 0.65490, 0.00000),
(0.99216, 0.63922, 0.00000),
(0.99216, 0.62353, 0.00000),
(0.99216, 0.60392, 0.00000),
(0.99216, 0.58824, 0.00000),
(0.99216, 0.57255, 0.00000),
(0.99608, 0.55294, 0.00000),
(0.99608, 0.53725, 0.00000),
(0.99608, 0.52157, 0.00000),
(0.99608, 0.50196, 0.00000),
(0.99608, 0.48627, 0.00000),
(1.00000, 0.47059, 0.00000),
(1.00000, 0.43922, 0.00000),
(1.00000, 0.40784, 0.00000),
(1.00000, 0.37647, 0.00000),
(1.00000, 0.34510, 0.00000),
(1.00000, 0.31373, 0.00000),
(1.00000, 0.28235, 0.00000),
(1.00000, 0.25098, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 0.18431, 0.00000),
(1.00000, 0.15294, 0.00000),
(1.00000, 0.12157, 0.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.02745, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.00000, 0.04706),
(1.00000, 0.00000, 0.09412),
(1.00000, 0.00000, 0.14118),
(1.00000, 0.00000, 0.18824),
(1.00000, 0.00000, 0.23529),
(1.00000, 0.00000, 0.28235),
(1.00000, 0.00000, 0.32941),
(1.00000, 0.00000, 0.37647),
(1.00000, 0.00000, 0.42353),
(1.00000, 0.00000, 0.47059),
(1.00000, 0.00000, 0.51765),
(1.00000, 0.00000, 0.56471),
(1.00000, 0.00000, 0.61176),
(1.00000, 0.00000, 0.65882),
(1.00000, 0.00000, 0.70588),
(1.00000, 0.00000, 0.72549),
(1.00000, 0.00000, 0.74902),
(1.00000, 0.00000, 0.77255),
(1.00000, 0.00000, 0.79608),
(1.00000, 0.00000, 0.81569),
(1.00000, 0.00000, 0.83922),
(1.00000, 0.00000, 0.86275),
(1.00000, 0.00000, 0.88627),
(1.00000, 0.00000, 0.90588),
(1.00000, 0.00000, 0.92941),
(1.00000, 0.00000, 0.95294),
(1.00000, 0.00000, 0.97647),
(1.00000, 0.00000, 1.00000),
(1.00000, 0.03529, 1.00000),
(1.00000, 0.07059, 1.00000),
(1.00000, 0.10588, 1.00000),
(1.00000, 0.14118, 1.00000),
(1.00000, 0.18039, 1.00000),
(1.00000, 0.21569, 1.00000),
(1.00000, 0.25098, 1.00000),
(1.00000, 0.28627, 1.00000),
(1.00000, 0.32549, 1.00000),
(1.00000, 0.36078, 1.00000),
(1.00000, 0.39608, 1.00000),
(1.00000, 0.43137, 1.00000),
(1.00000, 0.47059, 1.00000),
(1.00000, 0.48627, 1.00000),
(1.00000, 0.50588, 1.00000),
(1.00000, 0.52157, 1.00000),
(1.00000, 0.54118, 1.00000),
(1.00000, 0.56078, 1.00000),
(1.00000, 0.57647, 1.00000),
(1.00000, 0.59608, 1.00000),
(1.00000, 0.61176, 1.00000),
(1.00000, 0.63137, 1.00000),
(1.00000, 0.65098, 1.00000),
(1.00000, 0.66667, 1.00000),
(1.00000, 0.68627, 1.00000),
(1.00000, 0.70588, 1.00000),
(1.00000, 0.74510, 1.00000),
(1.00000, 0.78824, 1.00000),
(1.00000, 0.83137, 1.00000),
(1.00000, 0.87059, 1.00000),
(1.00000, 0.91373, 1.00000),
(1.00000, 0.95686, 1.00000),
(1.00000, 1.00000, 1.00000),
)
# Legacy alias for cmap_gray kept for backward compatibility --
# to be eventually deprecated.
cmap_ramp = cmap_gray
# Required number of (r, g, b) entries in a ginga color map.
min_cmap_len = 256
class ColorMapError(Exception):
    """Exception raised for color-map-related errors."""
    pass
class ColorMap(object):
    """A named color map.

    `clst` is a sequence of (r, g, b) tuples with float components
    (the literal maps in this module use values in [0.0, 1.0]).
    """
    def __init__(self, name, clst):
        self.name = name    # name under which this map is registered
        self.clst = clst    # sequence of (r, g, b) tuples
def add_cmap(name, clst):
    """Register color list `clst` as color map `name` in the global registry.

    Raises ValueError if `clst` does not have exactly `min_cmap_len`
    entries.
    """
    global cmaps
    # Previously this check was an `assert` (with a ValueError instance as
    # its message), which silently disappears under ``python -O``.  Raise
    # explicitly so the length requirement is always enforced.
    if len(clst) != min_cmap_len:
        raise ValueError("color map '%s' length mismatch %d != %d (needed)" % (
            name, len(clst), min_cmap_len))
    cmaps[name] = ColorMap(name, clst)
def get_cmap(name):
    """Get a color map array. May raise a KeyError if a map of the given name
    does not exist.
    """
    # Plain lookup into the module-level registry populated by
    # add_cmap() / add_matplotlib_cmap().
    return cmaps[name]
def get_names():
    """Return the registered color map names, sorted case-insensitively."""
    return sorted(cmaps.keys(), key=lambda nm: nm.lower())
def matplotlib_to_ginga_cmap(cm, name=None):
    """Convert matplotlib colormap `cm` into a ginga ColorMap.

    If `name` is not given, the matplotlib colormap's own name is used.
    """
    if name is None:
        name = cm.name
    # Sample the colormap at each required index; each sample row is
    # (r, g, b, a) -- keep only the first three components.
    samples = cm(numpy.arange(min_cmap_len))
    clst = tuple(tuple(row)[:3] for row in samples)
    return ColorMap(name, clst)
def add_matplotlib_cmap(cm, name=None):
    """Convert matplotlib colormap `cm` and register it in the global
    registry under its name.
    """
    global cmaps
    converted = matplotlib_to_ginga_cmap(cm, name=name)
    cmaps[converted.name] = converted
def add_matplotlib_cmaps():
    """Register every string-named matplotlib colormap with ginga.

    Conversion failures are reported but do not abort the loop
    (best-effort registration).
    """
    import matplotlib.pyplot as plt
    for name in plt.cm.datad.keys():
        # Some datad entries are keyed by non-string objects; skip those.
        if not isinstance(name, str):
            continue
        cm = plt.get_cmap(name)
        try:
            add_matplotlib_cmap(cm, name=name)
        except Exception as e:
            print("Error adding colormap '%s': %s" % (
                name, str(e)))
# Add colormaps from this file
# Registry of all available color maps, keyed by short name.
cmaps = {}
# Scan module globals for ``cmap_*`` tuples and register each one with the
# ``cmap_`` prefix stripped (e.g. ``cmap_gray`` -> 'gray').  The list()
# snapshot is needed because this module-level loop binds new globals
# (name, value, key) while iterating, which would otherwise change
# globals() during iteration.
for name, value in list(globals().items()):
    if name.startswith('cmap_'):
        key = name[5:]   # drop the 'cmap_' prefix
        add_cmap(key, value)
#END
|
bsd-3-clause
|
rtrwalker/geotecha
|
geotecha/consolidation/smear_zones.py
|
1
|
127140
|
# geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""Smear zones associated with vertical drain installation.
Smear zone permeability distributions etc.
"""
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
#import cmath
from numpy import log, sqrt
import scipy.special as special
def mu_ideal(n, *args):
    """Smear zone permeability/geometry parameter for ideal drain (no smear)

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    args : anything
        `args` does not contribute to any calculations; it merely allows
        this function to be called with the extra arguments (e.g. s and
        kappa) accepted by the other smear zone formulations.

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    Implements the classic equal-strain expression:

        mu = n^2/(n^2 - 1) * (ln(n) - 3/4) + (1 - 1/(4*n^2)) / (n^2 - 1)

    where n = re/rw.

    References
    ----------
    .. [1] Hansbo, S. 1981. "Consolidation of Fine-Grained Soils by
           Prefabricated Drains". In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.

    """
    n = np.asarray(n)
    if np.any(n <= 1):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    n2 = n ** 2
    denom = n2 - 1
    return n2 / denom * (log(n) - 0.75) + (1 - 1 / (4 * n2)) / denom
def mu_constant(n, s, kap):
    """Smear zone parameter for smear zone with constant permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to smear zone
        horizontal permeability (kh / ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    Implements Hansbo's expression for a smear zone of radius rs = s*rw
    with uniform permeability ks = kh/kap:

        mu = n^2/(n^2-1) * (ln(n/s) + kap*ln(s) - 3/4)
             + s^2/(n^2-1) * (1 - s^2/(4*n^2))
             + kap/(n^2-1) * ((s^4 - 1)/(4*n^2) - s^2 + 1)

    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.

    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if np.any(n <= 1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    if np.any(s < 1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    if np.any(s > n):
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(
                             ', '.join(str(v) for v in np.atleast_1d(s)),
                             ', '.join(str(v) for v in np.atleast_1d(n))))
    n2 = n ** 2
    denom = n2 - 1
    # The three additive terms of Hansbo's closed-form expression.
    term_ln = n2 / denom * (log(n / s) + kap * log(s) - 0.75)
    term_s = s ** 2 / denom * (1 - s ** 2 / (4 * n2))
    term_kap = kap / denom * ((s ** 4 - 1) / (4 * n2) - s ** 2 + 1)
    return term_ln + term_s + term_kap
def _sx(n, s):
"""Value of s=r/rw marking the start of overlapping linear smear zones
`s` is usually larger than `n` when considering overlapping smear zones
Parameters
----------
n : float or ndarray of float
Ratio of drain influence radius to drain radius (re/rw).
s : float or ndarray of float
Ratio of smear zone radius to drain radius (rs/rw).
Returns
-------
sx : float or ndarray of float
Value of s=r/rw marking the start of the overlapping zone
Notes
-----
.. math:: \\kappa_X= 1+\\frac{\\kappa-1}{s-1}\\left({s_X-1}\\right)
.. math:: s_X = 2n-s
See Also
--------
mu_overlapping_linear : uses _sx
_kapx : used in mu_overlapping_linear
"""
sx = 2 * n - s
return sx
def _kapx(n, s, kap):
    """Value of kap=kh/ks for the overlap part of intersecting linear smear
    zones.

    Assumes `s` is greater than `n`.

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to smear zone
        horizontal permeability (kh / ks).

    Returns
    -------
    kapx : float
        kappa_X = 1 + (kap - 1)/(s - 1) * (s_X - 1) with s_X = 2*n - s,
        i.e. the linear permeability profile evaluated where the
        overlapping zones intersect.

    See Also
    --------
    mu_overlapping_linear : uses _kapx
    _sx : used in mu_overlapping_linear

    """
    # Inline of _sx(n, s): the radius ratio where the two zones meet.
    sx = 2 * n - s
    return 1 + (kap - 1) / (s - 1) * (sx - 1)
def mu_overlapping_linear(n, s, kap):
    """Smear zone parameter for smear zone with overlapping linear
    permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    Each element falls into one of four cases:

    - ideal (s == 1 or kap == 1): no smear effect, mu_ideal applies;
    - non-overlapping (n >= s): mu_linear applies directly;
    - completely overlapping (2n - s <= 1): the whole unit cell is
      disturbed and mu = kap * mu_ideal(n);
    - partially overlapping (otherwise): mu_linear evaluated with the
      reduced extent s_X = 2n - s and ratio kappa_X (see _sx, _kapx),
      scaled by kap / kappa_X.

    See Also
    --------
    mu_linear : mu for non-overlapping smear zones
    mu_ideal : mu for ideal drain with no smear zone

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.

    """
    def _mu_partial_overlap(n_, s_, kap_):
        """mu for intersecting smear zones that do not completely overlap."""
        sx = _sx(n_, s_)
        kapx = _kapx(n_, s_, kap_)
        return mu_linear(n_, sx, kapx) * kap_ / kapx

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if np.any(n <= 1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    if np.any(s < 1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    # NOTE(review): np.asarray always returns an ndarray, so is_array is
    # True even for scalar inputs and a 1-element array is returned --
    # confirm whether scalar callers rely on this (same pattern is used
    # by the sibling mu_* functions).
    is_array = any(isinstance(v, np.ndarray) for v in (n, s, kap))
    n = np.atleast_1d(n)
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if len([v for v in [n, s] if v.shape == kap.shape]) != 2:
        raise ValueError('n, s, and kap must have the same shape. You have '
                         'lengths for n, s, kap of {}, {}, {}.'.format(
                             len(n), len(s), len(kap)))
    # Classify every element into exactly one of the four geometry cases.
    ideal = np.isclose(s, 1) | np.isclose(kap, 1)
    normal = (n >= s) & (~ideal)
    all_disturbed = (2 * n - s <= 1) & (~ideal)
    intersecting = ~(ideal | normal | all_disturbed)
    mu = np.empty_like(n, dtype=float)
    mu[ideal] = mu_ideal(n[ideal])
    mu[normal] = mu_linear(n[normal], s[normal], kap[normal])
    mu[all_disturbed] = kap[all_disturbed] * mu_ideal(n[all_disturbed])
    mu[intersecting] = _mu_partial_overlap(n[intersecting], s[intersecting],
                                           kap[intersecting])
    if is_array:
        return mu
    return mu[0]
def mu_linear(n, s, kap):
    """Smear zone parameter for smear zone linear variation of permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    Permeability is assumed to increase linearly from kh/kap at the
    drain-soil interface up to the undisturbed value kh at r = rs.  Two
    closed-form expressions exist: a general one for s != kap, and a
    separate limiting form for the special case s == kap where the
    general expression is singular.  Both are given in full in [1]_.

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.

    """
    def _mu_general(n, s, kap):
        """mu for the general case s != kap."""
        A = (kap - 1) / (s - 1)
        B = (s - kap) / (s - 1)
        term1 = n ** 2 / (n ** 2 - 1)
        term2 = (log(n / s) + s ** 2 / (n ** 2) *
                 (1 - s ** 2 / (4 * n ** 2)) - 3 / 4)
        term3 = kap * (1 - s ** 2 / n ** 2)
        term4 = (1 / B * log(s / kap)
                 - 1 / (n ** 2 * A ** 2) * (kap - 1 - B * log(kap)))
        term5 = term2 + term3 * term4
        term6 = 1 / (n ** 2 * B)
        term7 = (s ** 2 * log(s) - (s ** 2 - 1) / 2
                 + 1 / A ** 2 * ((kap ** 2 - 1) / 2 - kap ** 2 * log(kap) + 2 * B * (kap * log(kap) - (kap - 1))))
        term8 = -1 / (n ** 4 * A ** 2)
        term9 = (B / 3 * (s ** 2 - 1) + 2 / 3 * (s ** 2 * kap - 1) - (s ** 2 - 1)
                 + B / A ** 2 * ((kap ** 2 - 1) / 2 - kap ** 2 * log(kap) + 2 * B * (kap * log(kap) - (kap - 1))))
        term10 = kap * (term6 * term7 + term8 * term9)
        return term1 * (term5 + term10)

    def _mu_s_equals_kap(n, s):
        """mu for the limiting case s == kap."""
        term1 = n ** 2 / (n ** 2 - 1)
        term2 = (log(n / s)
                 + s ** 2 / (n ** 2) * (1 - s ** 2 / (4 * n ** 2)) - 3 / 4)
        term3 = (-s / n ** 2 * (1 - s ** 2 / n ** 2) * (s - 1)
                 + (1 - s ** 2 / n ** 2) * (s - 1))
        term4 = (s / n ** 4 * (s ** 2 - 1)
                 - 2 * s / (3 * n ** 4) * (s ** 3 - 1)
                 - (s / n ** 2 - s ** 2 / n ** 2) * (s - 1))
        return term1 * (term2 + term3 + term4)

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if np.any(n <= 1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    if np.any(s < 1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    if np.any(s >= n):
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(
                             ', '.join(str(v) for v in np.atleast_1d(s)),
                             ', '.join(str(v) for v in np.atleast_1d(n))))
    # NOTE(review): np.asarray always returns an ndarray, so is_array is
    # True even for scalar inputs and a 1-element array is returned --
    # confirm whether scalar callers rely on this.
    is_array = any(isinstance(v, np.ndarray) for v in (n, s, kap))
    n = np.atleast_1d(n)
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if len([v for v in [n, s] if v.shape == kap.shape]) != 2:
        raise ValueError('n, s, and kap must have the same shape. You have '
                         'lengths for n, s, kap of {}, {}, {}.'.format(
                             len(n), len(s), len(kap)))
    mu = np.empty_like(n, dtype=float)
    # Partition into the ideal case and the two analytic branches.
    ideal = np.isclose(s, 1) | np.isclose(kap, 1)
    s_eq_kap = np.isclose(s, kap) & ~ideal
    s_neq_kap = ~np.isclose(s, kap) & ~ideal
    mu[ideal] = mu_ideal(n[ideal])
    mu[s_eq_kap] = _mu_s_equals_kap(n[s_eq_kap], s[s_eq_kap])
    mu[s_neq_kap] = _mu_general(n[s_neq_kap], s[s_neq_kap], kap[s_neq_kap])
    if is_array:
        return mu
    return mu[0]
def mu_parabolic(n, s, kap):
    """Smear zone parameter for parabolic variation of permeability

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh/ks).

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    Permeability is assumed to vary parabolically from kh/kap at the
    drain-soil interface up to the undisturbed value kh at r = rs.  The
    closed-form expression for mu, written in terms of the intermediate
    constants A = sqrt(kap/(kap-1)), B = s/(s-1) and C = 1/(s-1) used
    below, is given in full in [1]_.

    References
    ----------
    .. [1] Walker, Rohan, and Buddhima Indraratna. 2006. 'Vertical Drain
           Consolidation with Parabolic Distribution of Permeability in
           Smear Zone'. Journal of Geotechnical and Geoenvironmental
           Engineering 132 (7): 937-41.
           doi:10.1061/(ASCE)1090-0241(2006)132:7(937).

    """
    def mu_p(n, s, kap):
        """mu for parabolic smear"""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        term1 = (log(n / s) - 3 / 4 +
                 s ** 2 / n ** 2 * (1 - s ** 2 / (4 * n ** 2)))
        term2 = (1 - s ** 2 / n ** 2) * A ** 2
        term3 = 1 / (A ** 2 - B ** 2)
        term4 = (log(s / sqrt(kap))) - (B / (2 * A) * log((A + 1) / (A - 1)))
        term5 = 1 / (n ** 2 * C ** 2)
        term6 = (log(sqrt(kap))) - (B / (2 * A) * log((A + 1) / (A - 1)))
        mu2 = term1 + term2 * ((term3 * term4) + (term5 * term6))
        term7 = (A ** 2 / n ** 2 * (1 / (A ** 2 - B ** 2)) * (s ** 2 * log(s)
                 - 1 / 2 * (s ** 2 - 1)))
        term8 = -1 / (n ** 2 * C ** 2) * A ** 2 * (1 / (A ** 2 - B ** 2))
        term9 = (A ** 2 / 2 * log(kap) + B * A / 2 * log((A + 1) / (A - 1))
                 + 1 / 2 - B - (A ** 2 - B ** 2) * log(kap))
        # NOTE: the original implementation also computed term12-term15
        # here; they duplicated the pieces of term9 and were never used,
        # so they have been removed as dead code.
        term10 = A ** 2 / (n ** 4 * C ** 4)
        term11 = (-(A ** 2 / 2 + B ** 2) * (log(kap)) +
                  3 / 2 * A * B * log((A + 1) / (A - 1)) + 1 / 2 - 3 * B)
        mu1 = term7 + (term8 * term9) + (term10 * term11)
        mu = n ** 2 / (n ** 2 - 1) * (mu1 + mu2)
        return mu
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if np.any(n <= 1.0):
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(n))))
    if np.any(s < 1.0):
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    if np.any(s > n):
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(
                             ', '.join(str(v) for v in np.atleast_1d(s)),
                             ', '.join(str(v) for v in np.atleast_1d(n))))
    # NOTE(review): np.asarray always returns an ndarray, so is_array is
    # True even for scalar inputs and a 1-element array is returned --
    # confirm whether scalar callers rely on this.
    is_array = any(isinstance(v, np.ndarray) for v in (n, s, kap))
    n = np.atleast_1d(n)
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if len([v for v in [n, s] if v.shape == kap.shape]) != 2:
        raise ValueError('n, s, and kap must have the same shape. You have '
                         'lengths for n, s, kap of {}, {}, {}.'.format(
                             len(n), len(s), len(kap)))
    mu = np.empty_like(n, dtype=float)
    # No smear effect when the smear zone vanishes (s=1) or the
    # permeability is undisturbed (kap=1).
    ideal = np.isclose(s, 1) | np.isclose(kap, 1)
    mu[ideal] = mu_ideal(n[ideal])
    mu[~ideal] = mu_p(n[~ideal], s[~ideal], kap[~ideal])
    if is_array:
        return mu
    else:
        return mu[0]
def mu_piecewise_constant(s, kap, n=None, kap_m=None):
    """Smear zone parameter for piecewise constant permeability distribution

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first
        value of s should be greater than 1, i.e. the first value should
        be s_1; s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in
        each segment kh/khi.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to
        `s` and `kap`. This allows the specification of a smear zone
        separate to the specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely
        described by `s` and `kap`. If n is given but kap_m is None then
        the last kappa value in kap will be used.

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Raises
    ------
    ValueError
        If `s` and `kap` differ in length, any s <= 1, any kap <= 0, or
        `s` is not strictly increasing.

    Notes
    -----
    Implements the summation formula of Walker [1]_ [2]_: segment i spans
    [s_{i-1}, s_i] with constant permeability ratio kappa_i = kh/khi, and
    the drain influence ratio is n = s_m (the last entry of `s`).

    References
    ----------
    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong. http://ro.uow.edu.au/theses/501
    .. [2] Walker, Rohan T. 2011. 'Vertical Drain Consolidation Analysis in
           One, Two and Three Dimensions'. Computers and
           Geotechnics 38 (8): 1069-77. doi:10.1016/j.compgeo.2011.07.006.

    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Extend the permeability description out to the drain influence
        # radius; the outermost zone takes kap_m (or the last kap given).
        s = np.append(s, n).astype(float)
        kap = np.append(kap, kap[-1] if kap_m is None else kap_m).astype(float)
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s <= 1.0):
        raise ValueError('must have all s>1. You have s = {}'.format(
            ', '.join(str(v) for v in np.atleast_1d(s))))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(kap))))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right you have s = '
                         '{}'.format(', '.join(str(v) for v in
                                               np.atleast_1d(s))))
    # The drain influence ratio is the outermost radius ratio.
    n = s[-1]
    # s_[i] is the inner radius ratio of segment i (s_0 = 1 at the drain).
    s_ = np.ones_like(s, dtype=float)
    s_[1:] = s[:-1]
    # psi_sum is the running total
    #   sum_{j < i} kap_j * (ln(s_j/s_{j-1}) - (s_j^2 - s_{j-1}^2)/(2 n^2))
    # maintained incrementally so the summation is O(m) instead of the
    # O(m^2) nested loop of the original implementation (same summation
    # order, so results are identical).
    psi_sum = 0.0
    sumi = 0.0
    for i in range(len(s)):
        psi = psi_sum / kap[i]
        sumi += kap[i] * (
            s[i] ** 2 / n ** 2 * log(s[i] / s_[i])
            + (psi - 0.5) * (s[i] ** 2 / n ** 2 - s_[i] ** 2 / n ** 2)
            - 0.25 * (s[i] ** 2 - s_[i] ** 2) ** 2 / n ** 4
            )
        psi_sum += kap[i] * (log(s[i] / s_[i])
                             - 0.5 * (s[i] ** 2 / n ** 2
                                      - s_[i] ** 2 / n ** 2))
    mu = sumi * n ** 2 / (n ** 2 - 1)
    return mu
def mu_piecewise_linear(s, kap, n=None, kap_m=None):
    """Smear zone parameter for piecewise linear permeability distribution.

    mu parameter in equal strain radial consolidation equations e.g.
    u = u0 * exp(-8*Th/mu)

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of radii to drain radius (r_i/r_0). The first value
        of s should be 1, i.e. at the drain soil interface.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at each
        value of s.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s`
        and `kap`. This allows the specification of a smear zone separate to
        the specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.

    Returns
    -------
    mu : float
        Smear zone permeability/geometry parameter.

    Raises
    ------
    ValueError
        If `s` and `kap` differ in length, any s < 1, any kap <= 0,
        s is not non-decreasing, or s[0] is not 1.

    Notes
    -----
    With permeability in the ith segment defined by:

    .. math:: \\frac{k_i}{k_{ref}}= \\frac{1}{\\kappa_{i-1}}
                \\left({A_ir/r_w+B_i}\\right)

    .. math:: A_i = \\frac{\\kappa_{i-1}/\\kappa_i-1}{s_i-s_{i-1}}

    .. math:: B_i = \\frac{s_i-s_{i-1}\\kappa_{i-1}/\\kappa_i}{s_i-s_{i-1}}

    the smear zone :math:`\\mu` parameter is given by:

    .. math:: \\mu = \\frac{n^2}{n^2-1}
                \\left[{
                \\sum\\limits_{i=1}^{m}\\kappa_{i-1}\\theta_i
                + \\Psi_i
                \\left({
                \\frac{s_i^2-s_{i-1}^2}{n^2}
                }\\right)
                +\\mu_w
                }\\right]

    where,

    .. math:: \\theta_i = \\left\\{
                \\begin{array}{lr}
                \\frac{s_i^2}{n^2}\\ln
                \\left[{\\frac{s_i}{s_{i-1}}}\\right]
                -\\frac{s_i^2-s_{i-1}^2}{2n^2}
                -\\frac{\\left({s_i^2-s_{i-1}^2}\\right)^2}{4n^4}
                & \\textrm{for } \\frac{\\kappa_{i-1}}{\\kappa_i}=1 \\\\
                \\frac{\\left({s_i^2-s_{i-1}^2}\\right)}{3n^4}
                \\left({3n^2-s_{i-1}^2-2s_{i-1}s_i}\\right)
                & \\textrm{for }\\frac{\\kappa_{i-1}}{\\kappa_i}=
                \\frac{s_i}{s_{i-1}} \\\\
                \\begin{multline}
                \\frac{s_i}{B_i n^2}\\ln\\left[{
                \\frac{\\kappa_i s_i}{\\kappa_{i-1}s_{i-1}}}\\right]
                -\\frac{s_i-s_{i-1}}{A_in^2}
                \\left({1-\\frac{B_i^2}{A_i^2n^4}}\\right)
                \\\\-\\frac{\\left({s_i-s_{i-1}}\\right)^2}{3A_in^2}
                \\left({2s_i+s_{i-1}}\\right)
                \\\\+\\frac{B_i}{A_i^2 n^4}\\ln\\left[{
                \\frac{\\kappa_{i-1}}{\\kappa_i}}\\right]
                \\left({1-\\frac{B_i^2}{A_i^2n^2}}\\right)
                \\\\+\\frac{B_i}{2A_i^2 n^4}
                \\left({
                2s_i^2\\ln\\left[{
                \\frac{\\kappa_{i-1}}{\\kappa_i}}\\right]
                -s_i^2 + s_{i-1}^2
                }\\right)
                \\end{multline}
                & \\textrm{otherwise}
                \\end{array}\\right.

    .. math:: \\Psi_i = \\sum\\limits_{j=1}^{i-1}\\kappa_{j-1}\\psi_j

    .. math:: \\psi_i = \\left\\{
                \\begin{array}{lr}
                \\ln\\left[{\\frac{s_j}{s_{j-1}}}\\right]
                - \\frac{s_j^2- s_{j-1}^2}{2n^2}
                & \\textrm{for } \\frac{\\kappa_{j-1}}{\\kappa_j}=1 \\\\
                \\frac{\\left({s_j - s_{j-1}}\\right)
                \\left({n^2-s_js_{j-1}}\\right)}{s_jn^2}
                & \\textrm{for }\\frac{\\kappa_{j-1}}{\\kappa_j}=
                \\frac{s_j}{s_{j-1}} \\\\
                \\begin{multline}
                \\frac{1}{B_i}\\ln\\left[{\\frac{s_j}{s_{j-1}}}\\right]
                +\\ln\\left[{\\frac{\\kappa_{j-1}}{\\kappa_j}}\\right]
                \\left({\\frac{B_j}{A_j^2n^2}-\\frac{1}{B_j}}\\right)
                \\\\-\\frac{s_j-s_{j-1}}{A_j^2n^2}
                \\end{multline}
                & \\textrm{otherwise}
                \\end{array}\\right.

    and:

    .. math:: n = \\frac{r_m}{r_0}

    .. math:: s_i = \\frac{r_i}{r_0}

    .. math:: \\kappa_i = \\frac{k_h}{k_{ref}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence
    radius, :math:`r_i` is the radius of the ith radial point,
    :math:`k_{ref}` is a convenient reference permeability, usually
    the undisturbed horizontal permeability, :math:`k_{hi}` is the
    horizontal permeability at the ith radial point.

    References
    ----------
    Derived by Rohan Walker in 2011 and 2014.
    Derivation steps are the same as for mu_piecewise_constant in appendix of
    [1]_ but permeability is linear in a segment as in [2]_.

    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong. http://ro.uow.edu.au/theses/501
    .. [2] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Append the influence radius (and its kappa) so the permeability
        # description extends all the way out to r = r_m.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            # Undisturbed zone takes the last specified kappa.
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
        n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s < 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) < 0):
        raise ValueError('All s must satisfy s[i]>s[i-1]. You have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if not np.isclose(s[0], 1):
        raise ValueError('First value of s should be 1. You '
                         'have s[0]={}'.format(s[0]))
    # The influence radius ratio n is, by definition, the outermost s value.
    n = s[-1]
    sumi = 0
    for i in range(1, len(s)):
        # sumj accumulates the psi contributions from segments nearer the
        # drain (the Psi_i term in the docstring formula).
        sumj = 0
        for j in range(1, i):
            if np.isclose(s[j - 1], s[j]):
                # Zero-width segment contributes nothing.
                term1 = 0
            elif np.isclose(kap[j - 1], kap[j]):
                # Constant permeability across segment j.
                term1 = (log(s[j] / s[j - 1])
                         - (s[j] ** 2 - s[j - 1] ** 2) / 2 / n ** 2)
            elif np.isclose(kap[j - 1] / kap[j], s[j] / s[j - 1]):
                # Special case where the general expression is singular.
                term1 = (s[j] - s[j - 1]) * (n ** 2 -
                         s[j - 1] * s[j]) / s[j] / n ** 2
            else:
                A = (kap[j - 1] / kap[j] - 1) / (s[j] - s[j - 1])
                B = (s[j] - kap[j - 1] / kap[j] * s[j - 1]) / (s[j] - s[j - 1])
                term1 = (1 / B * log(s[j] / s[j - 1])
                         + (B / A ** 2 / n ** 2 - 1 / B) * log(kap[j - 1] / kap[j])
                         - (s[j] - s[j - 1]) / A / n ** 2)
            sumj += kap[j - 1] * term1
        # theta_i contribution for segment i itself.
        if np.isclose(s[i - 1], s[i]):
            # Zero-width segment contributes nothing.
            term1 = 0
        elif np.isclose(kap[i - 1], kap[i]):
            # Constant permeability across segment i.
            term1 = (s[i] ** 2 / n ** 2 * log(s[i] / s[i - 1])
                     - (s[i] ** 2 - s[i - 1] ** 2) / 2 / n ** 2
                     - (s[i] ** 2 - s[i - 1] ** 2) ** 2 / 4 / n ** 4)
        elif np.isclose(kap[i - 1] / kap[i], s[i] / s[i - 1]):
            # Special case where the general expression is singular.
            term1 = ((s[i] - s[i - 1]) ** 2 / 3 / n ** 4 * (3 * n ** 2 -
                     s[i - 1] ** 2 - 2 * s[i - 1] * s[i]))
        else:
            A = (kap[i - 1] / kap[i] - 1) / (s[i] - s[i - 1])
            B = (s[i] - kap[i - 1] / kap[i] * s[i - 1]) / (s[i] - s[i - 1])
            term1 = (s[i] ** 2 / B / n ** 2 * log(kap[i] * s[i] /
                     kap[i - 1] / s[i - 1])
                     - (s[i] - s[i - 1]) / A / n ** 2 *
                     (1 - B ** 2 / A ** 2 / n ** 2)
                     - (s[i] - s[i - 1]) ** 2 / 3 / A / n ** 4 *
                     (s[i - 1] + 2 * s[i])
                     + B / A ** 2 / n ** 2 * log(kap[i - 1] / kap[i]) *
                     (1 - B ** 2 / A ** 2 / n ** 2)
                     + B / 2 / A ** 2 / n ** 4 * (s[i] ** 2 * (2 * log(kap[i - 1] /
                     kap[i]) - 1) + s[i - 1] ** 2))
        sumi += kap[i - 1] * term1 + sumj * (s[i] ** 2 - s[i - 1] ** 2) / n ** 2
    mu = sumi * n ** 2 / (n ** 2 - 1)
    return mu
def mu_well_resistance(kh, qw, n, H, z=None):
    """Additional smear zone parameter for well resistance.

    Parameters
    ----------
    kh : float
        The normalising permeability used in calculating kappa for smear
        zone calcs. Usually the undisturbed permeability, i.e. the kh in
        kappa = kh/ks.
    qw : float
        Drain discharge capacity, qw = kw * pi * rw**2. Make sure
        the kw used has the same units as kh.
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    H : float
        Length of drainage path.
    z : float, optional
        Evaluation depth. Default z=None, in which case the well
        resistance factor will be averaged over the drainage path.

    Returns
    -------
    mu : float
        mu parameter for well resistance.

    Raises
    ------
    ValueError
        If n <= 1.

    Notes
    -----
    The smear zone parameter :math:`\\mu_w` is given by:

    .. math:: \\mu_w = \\frac{k_h}{q_w}\\pi z
                \\left({2H-z}\\right)
                \\left({1-\\frac{1}{n^2}}\\right)

    when :math:`z` is None then the average :math:`\\mu_w` is given by:

    .. math:: \\mu_{w\\textrm{average}} = \\frac{2k_h H^2}{3q_w}\\pi
                \\left({1-\\frac{1}{n^2}}\\right)

    where,

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: q_w = k_w \\pi r_w^2

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`k_h` is the undisturbed horizontal permeability,
    :math:`k_w` is the drain permeability.

    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    n = np.asarray(n)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    # Geometry factor shared by both the averaged and at-depth expressions.
    geom = 1 - 1 / n ** 2
    if z is None:
        # Depth-averaged well resistance over the drainage path.
        return 2 * kh * H ** 2 / 3 / qw * np.pi * geom
    # Well resistance evaluated at depth z.
    return kh / qw * np.pi * z * (2 * H - z) * geom
def k_parabolic(n, s, kap, si):
    """Permeability distribution for smear zone with parabolic permeability.

    Normalised with respect to undisturbed permeability, i.e. if you want the
    actual permeability then multiply by whatever you used to determine kap.
    Permeability is parabolic with value 1/kap at the drain soil interface
    i.e. at s=1 k=k0=1/kap. For si>s, permeability=1.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the permeability
        i.e. si=ri/rw.

    Returns
    -------
    permeability : float or ndarray of float
        Normalised permeability (i.e. ki/kh) at the si values.

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, s > n, or any si outside [1, n].

    Notes
    -----
    Parabolic distribution of permeability in smear zone is given by:

    .. math:: \\frac{k_h^\\prime\\left({r}\\right)}{k_h}=
                \\frac{\\kappa-1}{\\kappa}
                \\left({A-B+C\\frac{r}{r_w}}\\right)
                \\left({A+B-C\\frac{r}{r_w}}\\right)

    where :math:`A`, :math:`B`, :math:`C` are:

    .. math:: A=\\sqrt{\\frac{\\kappa}{\\kappa-1}}

    .. math:: B=\\frac{s}{s-1}

    .. math:: C=\\frac{1}{s-1}

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    References
    ----------
    .. [1] Walker, Rohan, and Buddhima Indraratna. 2006. 'Vertical Drain
           Consolidation with Parabolic Distribution of Permeability in
           Smear Zone'. Journal of Geotechnical and Geoenvironmental
           Engineering 132 (7): 937-41.
           doi:10.1061/(ASCE)1090-0241(2006)132:7(937).
    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')

    def parabolic_part(n, s, kap, si):
        """Parabolic smear zone part, i.e. from si=1 to si=s."""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        # k0 is the permeability ratio at the drain-soil interface;
        # k0 * (kap - 1) == (kap - 1) / kap, matching the docstring formula.
        k0 = 1 / kap
        return k0 * (kap - 1) * (A - B + C * si) * (A + B - C * si)

    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone contrast: permeability is undisturbed everywhere.
        return np.ones_like(si, dtype=float)
    smear = (si < s)
    permeability = np.ones_like(si, dtype=float)
    permeability[smear] = parabolic_part(n, s, kap, si[smear])
    return permeability
def k_linear(n, s, kap, si):
    """Permeability distribution for smear zone with linear permeability.

    Normalised with respect to undisturbed permeability, i.e. if you want the
    actual permeability then multiply by whatever you used to determine kap.
    Permeability is linear with value 1/kap at the drain soil interface
    i.e. at s=1 k=k0=1/kap. For si>s, permeability=1.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the permeability
        i.e. si=ri/rw.

    Returns
    -------
    permeability : float or ndarray of float
        Normalised permeability (i.e. ki/kh) at the si values.

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, s > n, or any si outside [1, n].

    Notes
    -----
    Linear distribution of permeability in smear zone is given by:

    .. math::
        \\frac{k_h^\\prime\\left({r}\\right)}{k_h}=
            \\left\\{\\begin{array}{lr}
            \\frac{1}{\\kappa}
            \\left({A\\frac{r}{r_w}+B}\\right)
            & s\\neq\\kappa \\\\
            \\frac{r}{\\kappa r_w}
            & s=\\kappa \\end{array}\\right.

    where :math:`A` and :math:`B` are:

    .. math:: A=\\frac{\\kappa-1}{s-1}

    .. math:: B=\\frac{s-\\kappa}{s-1}

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    def s_neq_kap_part(n, s, kap, si):
        """Linear permeability in smear zone when s != kap."""
        A = (kap - 1) / (s - 1)
        B = (s - kap) / (s - 1)
        k0 = 1 / kap
        return k0 * (A * si + B)

    def s_eq_kap_part(n, s, si):
        """Linear permeability in smear zone for the special case s == kap.

        Note: uses `kap` from the enclosing scope.
        """
        k0 = 1 / kap
        return k0 * si

    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone contrast: permeability is undisturbed everywhere.
        return np.ones_like(si, dtype=float)
    smear = (si < s)
    permeability = np.ones_like(si, dtype=float)
    if np.isclose(s, kap):
        permeability[smear] = s_eq_kap_part(n, s, si[smear])
    else:
        permeability[smear] = s_neq_kap_part(n, s, kap, si[smear])
    return permeability
def k_overlapping_linear(n, s, kap, si):
    """Permeability for smear zone with overlapping linear permeability.

    Normalised with respect to undisturbed permeability, i.e. if you want the
    actual permeability then multiply by whatever you used to determine kap.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the permeability
        i.e. si=ri/rw.

    Returns
    -------
    permeability : float or ndarray of float
        Normalised permeability (i.e. ki/kh) at the si values.

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, or any si outside [1, n].

    Notes
    -----
    When :math:`n>s` the permeability is no different from the linear case.
    When :math:`n\\leq (s+1)/2` then all the soil is disturbed
    and the permeability everywhere is equal to :math:`1/\\kappa`.
    When :math:`(s+1)/2<n<s` then the smear zones overlap.
    The permeability for :math:`r/r_w<s_X` is given by:

    .. math:: \\frac{k_h^\\prime\\left({r}\\right)}{k_h}=
                \\left\\{\\begin{array}{lr}
                \\frac{1}{\\kappa}
                \\left({A\\frac{r}{r_w}+B}\\right)
                & s\\neq\\kappa \\\\
                \\frac{r}{\\kappa r_w}
                & s=\\kappa \\end{array}\\right.

    In the overlapping part, :math:`r/r_w>s_X`, the permeability is given by:

    .. math:: k_h(r)=\\kappa_X/\\kappa

    where :math:`A` and :math:`B` are:

    .. math:: A=\\frac{\\kappa-1}{s-1}

    .. math:: B=\\frac{s-\\kappa}{s-1}

    .. math:: \\kappa_X= 1+\\frac{\\kappa-1}{s-1}\\left({s_X-1}\\right)

    .. math:: s_X = 2n-s

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone contrast: permeability is undisturbed everywhere.
        permeability = np.ones_like(si, dtype=float)
    elif (2 * n - s <= 1):
        # Smear zones completely overlap: soil is fully disturbed, so the
        # permeability everywhere is the drain-interface value 1/kap.
        permeability = np.ones_like(si, dtype=float) / kap
    elif (n >= s):
        # Smear zones do not interact: ordinary linear distribution.
        permeability = k_linear(n, s, kap, si)
    else:
        # Partial overlap: linear up to sX, then constant at kapX/kap.
        sx = _sx(n, s)
        kapx = _kapx(n, s, kap)
        smear = (si < sx)
        permeability = np.ones_like(si, dtype=float)
        A = (kap - 1) / (s - 1)
        B = (s - kap) / (s - 1)
        permeability[smear] = 1 / kap * (A * si[smear] + B)
        permeability[~smear] = 1 / kap * kapx
    return permeability
def u_ideal(n, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for ideal drain with no smear zone.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If n <= 1 or any si outside [1, n].

    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.

    Noting that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    is given by:

    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{r}{r_w}}\\right)
                -\\frac{(r/r_w)^2-1}{2n^2}
                +\\mu_w
                }\\right]+u_w

    where:

    .. math:: n = \\frac{r_e}{r_w}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius.

    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    n = np.asarray(n)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')
    mu = mu_ideal(n)
    term1 = (uavg - uw) / (mu + muw)
    term2 = log(si) - 1 / (2 * n**2) * (si**2 - 1) + muw
    u = term1 * term2 + uw
    return u
def u_constant(n, s, kap, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for constant permeability smear zone.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, s > n, or any si outside [1, n].

    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.

    Noting that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    in the smear zone is given by:

    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa\\left({
                \\ln\\left({s_i}\\right)
                -\\frac{1}{2n^2}\\left({s_i^2-1}\\right)
                }\\right)
                +\\mu_w
                }\\right]+u_w

    The pore pressure in the undisturbed zone is:

    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                -\\frac{1}{2n^2}\\left({s_i^2-s^2}\\right)
                +\\kappa\\left[{
                \\ln\\left({s}\\right)
                -\\frac{1}{2n^2}\\left({s^2-1}\\right)
                }\\right]
                +\\mu_w
                }\\right]+u_w

    where:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    References
    ----------
    .. [1] Hansbo, S. 1981. 'Consolidation of Fine-Grained Soils by
           Prefabricated Drains'. In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    """
    def constant_part(n, s, kap, si):
        """u within the smear zone (1 <= si < s), constant permeability."""
        term2 = log(si) - 1 / (2 * n ** 2) * (si ** 2 - 1)
        u = kap * term2
        return u

    def undisturbed_part(n, s, kap, si):
        """u outside the smear zone (s <= si <= n), constant permeability."""
        term4 = (log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
                 + kap * (log(s) - 1 / (2 * n ** 2) * (s ** 2 - 1)))
        u = term4
        return u

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone contrast: reduces to the ideal drain solution.
        return u_ideal(n, si, uavg, uw, muw)
    mu = mu_constant(n, s, kap)
    term1 = (uavg - uw) / (mu + muw)
    term2 = np.empty_like(si, dtype=float)
    smear = (si < s)
    term2[smear] = constant_part(n, s, kap, si[smear])
    term2[~smear] = undisturbed_part(n, s, kap, si[~smear])
    u = term1 * (term2 + muw) + uw
    return u
def u_linear(n, s, kap, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for linear smear zone.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, s > n, or any si outside [1, n].

    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.

    Noting that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    in the smear zone is given by:

    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa\\left({\\frac{1}{B}\\ln\\left({s_i}\\right)
                +\\left({\\frac{B}{A^2n^2}-\\frac{1}{B}}\\right)
                \\ln\\left({B+As_i}\\right)
                +\\frac{1-s_i}{An^2}
                }\\right)
                +\\mu_w
                }\\right]+u_w

    The pore pressure in the undisturbed zone is:

    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                -\\frac{s_i^2-s^2}{2n^2}
                +\\kappa
                \\left[{
                \\frac{1}{B}\\ln\\left({s}\\right)
                +\\left({\\frac{B}{A^2n^2}-\\frac{1}{B}}\\right)
                \\ln\\left({\\kappa}\\right)
                +\\frac{1-s}{An^2}
                }\\right]
                +\\mu_w
                }\\right]+u_w

    for the special case where :math:`s=\\kappa` the pore pressure
    in the smear zone is:

    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                s\\frac{\\left({n^2-s_i}\\right)
                \\left({s_i-1}\\right)}{n^2s_i}
                +\\mu_w
                }\\right]+u_w

    The pore pressure in the undisturbed zone is:

    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                +s-1+\\frac{s}{n^2}
                -\\frac{s_i^2-s^2}{2n^2}
                +\\mu_w
                }\\right]+u_w

    where:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    If :math:`s=1` or :math:`\\kappa=1` then u_ideal will be used.

    References
    ----------
    .. [1] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    def linear_part(n, s, kap, si):
        """u within the smear zone (1 <= si < s), linear permeability."""
        if np.isclose(s, kap):
            # Special case s == kap where the general expression is singular.
            term2 = -1 / si - 1 / n ** 2 * (si - 1) + 1
            u = kap * term2
            return u
        else:
            A = (kap - 1) / (s - 1)
            B = (s - kap) / (s - 1)
            term2 = log(si) - log(A * si + B)
            term3 = A * si + B - 1 - B * log(A * si + B)
            u = (1 / B * term2 - 1 / (n ** 2 * A ** 2) * term3)
            return kap * u

    def undisturbed_part(n, s, kap, si):
        """u outside the smear zone (s <= si <= n), linear permeability."""
        if np.isclose(s, kap):
            # Special case s == kap where the general expression is singular.
            term2 = log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
            term3 = -1 / s - 1 / n ** 2 * (s - 1) + 1
            u = (term2 + kap * term3)
            return u
        else:
            A = (kap - 1) / (s - 1)
            B = (s - kap) / (s - 1)
            term2 = log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
            term3 = (1 / B * log(s / kap) - 1 / (n ** 2 * A ** 2) *
                     (kap - 1 - B * log(kap)))
            u = (term2 + kap * term3)
            return u

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone contrast: reduces to the ideal drain solution.
        return u_ideal(n, si, uavg, uw, muw)
    mu = mu_linear(n, s, kap)
    term1 = (uavg - uw) / (mu + muw)
    term2 = np.empty_like(si, dtype=float)
    smear = (si < s)
    term2[smear] = linear_part(n, s, kap, si[smear])
    term2[~smear] = undisturbed_part(n, s, kap, si[~smear])
    u = term1 * (term2 + muw) + uw
    return u
def u_parabolic(n, s, kap, si, uavg=1, uw=0, muw=0):
    """Pore pressure at radius for parabolic smear zone.

    Parameters
    ----------
    n : float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float
        Ratio of undisturbed horizontal permeability to permeability at
        the drain-soil interface (kh / ks).
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If n <= 1, s < 1, kap <= 0, s > n, or any si outside [1, n].

    Notes
    -----
    The uavg is calculated from the eta method. It is not the uavg used when
    considering the vacuum as an equivalent surcharge. You would have to do
    other manipulations for that.

    Noting that :math:`s_i=r_i/r_w`, the radial pore pressure distribution
    in the smear zone is given by:

    .. math:: u^\\prime(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\frac{\\kappa}{\\kappa-1}\\left\\{{
                \\frac{1}{A^2-B^2}
                \\left({
                \\ln\\left({s_i}\\right)
                -\\frac{1}{2A}
                \\left[{
                \\left({A-B}\\right)F
                +\\left({A+B}\\right)G
                }\\right]
                }\\right)
                +\\frac{1}{2n^2AC}
                \\left[{
                \\left({A+B}\\right)F
                +\\left({A-B}\\right)G
                }\\right]
                }\\right\\}
                +\\mu_w
                }\\right]+u_w

    The pore pressure in the undisturbed zone is:

    .. math:: u(r) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\ln\\left({\\frac{s_i}{s}}\\right)
                -\\frac{s_i^2-s^2}{2n^2}
                +A^2
                \\left[{
                \\frac{1}{A^2-B^2}
                \\left({
                \\ln\\left({s}\\right)
                -\\frac{1}{2}\\left[{
                \\ln\\left({\\kappa}\\right)
                +\\frac{BE}{A}}\\right]
                }\\right)
                +\\frac{1}{2n^2C^2}
                \\left({\\ln\\left({\\kappa}\\right)
                -\\frac{BE}{A}}\\right)
                }\\right]
                +\\mu_w
                }\\right]+u_w

    where :math:`A`, :math:`B`, :math:`C`, :math:`E`, :math:`F`, and
    :math:`G` are:

    .. math:: A=\\sqrt{\\frac{\\kappa}{\\kappa-1}}

    .. math:: B=\\frac{s}{s-1}

    .. math:: C=\\frac{1}{s-1}

    .. math:: E=\\ln\\left({\\frac{A+1}{A-1}}\\right)

    .. math:: F(r/r_w) = \\ln\\left({\\frac{A+B-Cs_i}{A+1}}\\right)

    .. math:: G(r/r_w) = \\ln\\left({\\frac{A-B+Cs_i}{A-1}}\\right)

    and:

    .. math:: n = \\frac{r_e}{r_w}

    .. math:: s = \\frac{r_s}{r_w}

    .. math:: \\kappa = \\frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    References
    ----------
    .. [1] Walker, Rohan, and Buddhima Indraratna. 2006. 'Vertical Drain
           Consolidation with Parabolic Distribution of Permeability in
           Smear Zone'. Journal of Geotechnical and Geoenvironmental
           Engineering 132 (7): 937-41.
           doi:10.1061/(ASCE)1090-0241(2006)132:7(937).
    """
    def parabolic_part(n, s, kap, si):
        """u within the smear zone (1 <= si < s), parabolic permeability."""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        F = log((A + B - C * si) / (A + 1))
        G = log((A - B + C * si) / (A - 1))
        term1 = kap / (kap - 1)
        term2 = 1 / (A ** 2 - B ** 2)
        term3 = log(si)
        term4 = -1 / (2 * A)
        term5 = (A - B) * F + (A + B) * G
        term6 = term2 * (term3 + term4 * term5)
        # NOTE(review): docstring formula shows 1/(2n^2 A C) while the code
        # uses C**2 here -- presumably a docstring typo; code left unchanged.
        term7 = 1 / (2 * n ** 2 * A * C ** 2)
        term8 = (A + B) * F + (A - B) * G
        term9 = term7 * term8
        u = term1 * (term6 + term9)
        return u

    def undisturbed_part(n, s, kap, si):
        """u outside the smear zone (s <= si <= n), parabolic permeability."""
        A = sqrt((kap / (kap - 1)))
        B = s / (s - 1)
        C = 1 / (s - 1)
        E = log((A + 1) / (A - 1))
        term2 = log(si / s) - 1 / (2 * n ** 2) * (si ** 2 - s ** 2)
        term3 = 1 / (A ** 2 - B ** 2)
        term4 = log(s) - 1 / 2 * (log(kap) + B / A * E)
        term5 = 1 / (2 * n ** 2 * C ** 2)
        term6 = (log(kap) - B / A * E)
        term7 = kap / (kap - 1) * (term3 * term4 + term5 * term6)
        u = term2 + term7
        return u

    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    if n <= 1.0:
        raise ValueError('n must be greater than 1. You have n = {}'.format(
            n))
    if s < 1.0:
        raise ValueError('s must be greater than 1. You have s = {}'.format(
            s))
    if kap <= 0.0:
        raise ValueError('kap must be greater than 0. You have kap = '
                         '{}'.format(kap))
    if s > n:
        raise ValueError('s must be less than n. You have s = '
                         '{} and n = {}'.format(s, n))
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # si values must lie between the drain wall (si=1) and the
        # influence radius (si=n).
        raise ValueError('si must satisfy 1 <= si <= n.')
    if np.isclose(s, 1) or np.isclose(kap, 1):
        # No smear zone contrast: reduces to the ideal drain solution.
        return u_ideal(n, si, uavg, uw, muw)
    mu = mu_parabolic(n, s, kap)
    term1 = (uavg - uw) / (mu + muw)
    term2 = np.empty_like(si, dtype=float)
    smear = (si < s)
    term2[smear] = parabolic_part(n, s, kap, si[smear])
    term2[~smear] = undisturbed_part(n, s, kap, si[~smear])
    u = term1 * (term2 + muw) + uw
    return u
def u_piecewise_constant(s, kap, si, uavg=1, uw=0, muw=0, n=None, kap_m=None):
    r"""Pore pressure at radius for piecewise constant permeability distribution

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first
        value of s should be greater than 1, i.e. the first value should be
        s_1; s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in each
        segment kh/khi.
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s`
        and `kap`. This allows the specification of a smear zone separate to
        the specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If `s` and `kap` have different lengths, if any s<=1, any kap<=0,
        `s` is not strictly increasing, or any si falls outside [1, s[-1]].

    Notes
    -----
    The pore pressure in the ith segment is given by:

    .. math:: u_i(r) = \frac{u_{avg}-u_w}{\mu+\mu_w}
                \left[{
                \kappa_i\left({\ln\left({\frac{r}{r_{i-1}}}\right)
                -\frac{r^2/r_0^2-s_{i-1}^2}{2n^2}}\right)
                +\psi_i+\mu_w
                }\right]+u_w

    where,

    .. math:: \psi_{i} = \sum\limits_{j=1}^{i-1}\kappa_j
                \left[{
                \ln\left({\frac{s_j}{s_{j-1}}}\right)
                -\frac{s_j^2-s_{j-1}^2}{2n^2}
                }\right]

    and:

    .. math:: n = \frac{r_m}{r_0}

    .. math:: s_i = \frac{r_i}{r_0}

    .. math:: \kappa_i = \frac{k_h}{k_{hi}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence
    radius, :math:`r_i` is the outer radius of the ith segment,
    :math:`k_h` is the undisturbed horizontal permeability,
    :math:`k_{hi}` is the horizontal permeability in the ith segment.

    References
    ----------
    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong. http://ro.uow.edu.au/theses/501
    .. [2] Walker, Rohan T. 2011. 'Vertical Drain Consolidation Analysis in
           One, Two and Three Dimensions'. Computers and
           Geotechnics 38 (8): 1069-77. doi:10.1016/j.compgeo.2011.07.006.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Append the influence radius (and matching kappa) so the piecewise
        # description extends all the way to the drain influence boundary.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                         len(s), len(kap)))
    if np.any(s <= 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right you have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    n = s[-1]
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: message previously read '1 >= si >= n' which is impossible.
        raise ValueError('si must satisfy 1 <= si <= s[-1]')
    # s_ holds the inner radius ratio of each segment: s_[i] = s[i-1], s_[0]=1.
    s_ = np.ones_like(s)
    s_[1:] = s[:-1]
    u = np.empty_like(si, dtype=float)
    # Index of the segment containing each si value.
    segment = np.searchsorted(s, si)
    mu = mu_piecewise_constant(s, kap)
    term1 = (uavg - uw) / (mu + muw)
    for ii, i in enumerate(segment):
        # psi term: accumulated contribution of all inner segments j < i,
        # normalised by kap[i] so it can sit inside the kap[i]*(...) factor.
        sumj = 0
        for j in range(i):
            sumj += (kap[j] * (log(s[j] / s_[j])
                     - 0.5 * (s[j] ** 2 / n ** 2 - s_[j] ** 2 / n ** 2)))
        sumj = sumj / kap[i]
        u[ii] = kap[i] * (
            log(si[ii] / s_[i])
            - 0.5 * (si[ii] ** 2 / n ** 2 - s_[i] ** 2 / n ** 2)
            + sumj
            ) + muw
    u *= term1
    u += uw
    return u
def u_piecewise_linear(s, kap, si, uavg=1, uw=0, muw=0, n=None, kap_m=None):
    r"""Pore pressure at radius for piecewise linear permeability distribution

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of radii to drain radius (r_i/r_0). The first value
        of s should be 1, i.e. at the drain soil interface.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability at each
        value of s.
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain. Default uw=0.
    muw : float, optional
        Well resistance mu parameter. Default muw=0.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s`
        and `kap`. This allows the specification of a smear zone separate to
        the specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If `s` and `kap` have different lengths, if any s<1, any kap<=0,
        `s` decreases anywhere, or any si falls outside [1, s[-1]].

    Notes
    -----
    With permeability in the ith segment defined by:

    .. math:: \frac{k_i}{k_{ref}}= \frac{1}{\kappa_{i-1}}
                \left({A_ir/r_w+B_i}\right)

    .. math:: A_i = \frac{\kappa_{i-1}/\kappa_i-1}{s_i-s_{i-1}}

    .. math:: B_i = \frac{s_i-s_{i-1}\kappa_{i-1}/\kappa_i}{s_i-s_{i-1}}

    the pore pressure in the ith segment is given by:

    .. math:: u_i(s) = \frac{u_{avg}-u_w}{\mu+\mu_w}
                \left[{
                \kappa_{i-1}\phi_i(s)
                + \Psi_i
                +\mu_w
                }\right]+u_w

    where :math:`\phi_i(s)` is the within-segment integral,

    .. math:: \phi_i(s) = \left\{
                \begin{array}{lr}
                \ln\left[{\frac{s}{s_{i-1}}}\right]
                - \frac{s^2- s_{i-1}^2}{2n^2}
                & \textrm{for } \frac{\kappa_{i-1}}{\kappa_i}=1 \\
                \frac{\left({s - s_{i-1}}\right)
                \left({n^2-ss_{i-1}}\right)}{sn^2}
                & \textrm{for }\frac{\kappa_{i-1}}{\kappa_i}=
                \frac{s_i}{s_{i-1}} \\
                \frac{1}{B_i}\ln\left[{\frac{s}{s_{i-1}}}\right]
                +\ln\left[{A_is+B_i}\right]
                \left({\frac{B_i}{A_i^2n^2}-\frac{1}{B_i}}\right)
                -\frac{s-s_{i-1}}{A_i^2n^2}
                & \textrm{otherwise}
                \end{array}\right.

    and :math:`\Psi_i` sums the complete inner segments, each term being
    :math:`\kappa_{j-1}\phi_j` evaluated at the segment's outer edge
    :math:`s=s_j`:

    .. math:: \Psi_i = \sum\limits_{j=1}^{i-1}\kappa_{j-1}\phi_j(s_j)

    with:

    .. math:: n = \frac{r_m}{r_0}

    .. math:: s_i = \frac{r_i}{r_0}

    .. math:: \kappa_i = \frac{k_h}{k_{ref}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence
    radius, :math:`r_i` is the radius of the ith radial point,
    :math:`k_{ref}` is a convenient reference permeability, usually the
    undisturbed horizontal permeability.

    References
    ----------
    Derived by Rohan Walker in 2011 and 2014. Derivation steps are the same
    as for mu_piecewise_constant in appendix of [1]_ but permeability is
    linear in a segment as in [2]_.

    .. [1] Walker, Rohan. 2006. 'Analytical Solutions for Modeling Soft Soil
           Consolidation by Vertical Drains'. PhD Thesis, Wollongong, NSW,
           Australia: University of Wollongong. http://ro.uow.edu.au/theses/501
    .. [2] Walker, R., and B. Indraratna. 2007. 'Vertical Drain Consolidation
           with Overlapping Smear Zones'. Geotechnique 57 (5): 463-67.
           doi:10.1680/geot.2007.57.5.463.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Append the influence radius (and matching kappa) so the piecewise
        # description extends all the way to the drain influence boundary.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                         len(s), len(kap)))
    if np.any(s < 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) < 0):
        raise ValueError('s must increase left to right. you have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    n = s[-1]
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # NOTE: message previously read '1 >= si >= s[-1]' which is impossible.
        raise ValueError('si must satisfy 1 <= si <= s[-1]')
    s_ = np.ones_like(s)
    s_[1:] = s[:-1]
    u = np.empty_like(si, dtype=float)
    segment = np.searchsorted(s, si)
    segment[segment == 0] = 1  # put si=1 in first segment
    mu = mu_piecewise_linear(s, kap)
    term1 = (uavg - uw) / (mu + muw)
    for ii, i in enumerate(segment):
        # phi: within-segment integral for the segment containing si.
        if np.isclose(kap[i-1] / kap[i], 1.0):
            # Uniform permeability across the segment.
            phi = log(si[ii] / s[i-1]) - (si[ii]**2 - s[i-1]**2) / (2 * n**2)
        elif np.isclose(kap[i-1] / kap[i], s[i] / s[i-1]):
            # Special case where the linear permeability passes through zero
            # at r=0 (A*r + B proportional to r).
            phi = (si[ii] - s[i-1]) * (n**2 - s[i-1] * si[ii]) / (si[ii] * n**2)
        else:
            A = (kap[i-1] / kap[i] - 1) / (s[i] - s[i-1])
            B = (s[i] - s[i-1] * kap[i-1] / kap[i]) / (s[i] - s[i-1])
            phi = (1/B * log(si[ii] / s[i-1])
                   + (B/A**2/n**2 - 1/B) * log(A * si[ii] + B)
                   - (si[ii] - s[i-1])/A/n**2)
        # psi: accumulated contribution of all complete inner segments.
        psi = 0
        for j in range(1, i):
            if np.isclose(s[j-1], s[j]):
                # Zero-width segment (permeability jump) contributes nothing.
                pass
            elif np.isclose(kap[j-1] / kap[j], 1.0):
                psi += kap[j-1] * (log(s[j] / s[j-1])
                                   - (s[j]**2 - s[j-1]**2) / (2 * n**2))
            elif np.isclose(kap[j-1] / kap[j], s[j] / s[j-1]):
                psi += kap[j-1] * ((s[j] - s[j-1]) * (n**2 - s[j-1] * s[j])
                                   / (s[j] * n**2))
            else:
                A = (kap[j-1] / kap[j] - 1) / (s[j] - s[j-1])
                B = (s[j] - s[j-1] * kap[j-1] / kap[j]) / (s[j] - s[j-1])
                psi += kap[j-1] * ((1/B * log(s[j] / s[j-1])
                                    + (B/A**2/n**2 - 1/B) * log(A * s[j] + B)
                                    - (s[j] - s[j-1])/A/n**2))
        u[ii] = kap[i-1] * phi + psi + muw
    u *= term1
    u += uw
    return u
def re_from_drain_spacing(sp, pattern='Triangle'):
    r"""Calculate drain influence radius from drain spacing

    Parameters
    ----------
    sp : float
        Distance between drain centers.
    pattern : ['Triangle', 'Square'], optional
        Drain installation pattern; only the first letter ('T' or 'S',
        case-insensitive) is significant. Default pattern='Triangle'.

    Returns
    -------
    re : float
        Drain influence radius.

    Raises
    ------
    ValueError
        If `sp` is not positive, or `pattern` does not start with 'T' or 'S'.

    Notes
    -----
    The influence radius, :math:`r_e`, is given by:

    .. math:: r_e =
                \left\{\begin{array}{lr}
                S_p \frac{1}{\sqrt{\pi}}=S_p\times 0.564189583
                & \textrm{square pattern}\\
                S_p \sqrt{\frac{\sqrt{3}}{2\pi}}=S_p\times 0.525037567
                & \textrm{triangular pattern}
                \end{array}\right.

    References
    ----------
    Eta method is described in [1]_.

    .. [1] Walker, Rohan T. 2011. 'Vertical Drain Consolidation Analysis in
           One, Two and Three Dimensions'. Computers and
           Geotechnics 38 (8): 1069-77. doi:10.1016/j.compgeo.2011.07.006.
    """
    if np.any(np.atleast_1d(sp) <= 0):
        raise ValueError('sp must be greater than zero. '
                         'You have sp={}'.format(sp))
    first_letter = pattern[0].upper()
    if first_letter == 'T':
        # factor = (3**0.5 / (2*pi))**0.5
        return 0.525037567904332 * sp
    if first_letter == 'S':
        # factor = 1 / pi**0.5
        return 0.5641895835477563 * sp
    raise ValueError("pattern must begin with 'T' for triangular "
                     " or 'S' for square. You have pattern="
                     "{}".format(pattern))
def drain_eta(re, mu_function, *args, **kwargs):
    """Calculate the vertical drain eta parameter for a specific smear zone

    eta = 2 / re**2 / (mu + muw)

    eta is used in radial consolidation equations u = u0 * exp(-eta*kh/gamw*t)

    Parameters
    ----------
    re : float
        Drain influence radius.
    mu_function : obj or string
        The mu_function to use. e.g. mu_ideal, mu_constant, mu_linear,
        mu_overlapping_linear, mu_parabolic, mu_piecewise_constant,
        mu_piecewise_linear. This can either be the function object itself
        or the name of the function e.g. 'mu_ideal'.
    muw : float, optional
        Well resistance mu term, default=0.
    *args, **kwargs : various
        The arguments to pass to the mu_function.

    Returns
    -------
    eta : float
        Value of eta parameter.

    Examples
    --------
    >>> drain_eta(1.5, mu_ideal, 10)
    0.563178340433...
    >>> drain_eta(1.5, 'mu_ideal', 10)
    0.5631783404334...
    >>> drain_eta(1.5, mu_constant, 5, 1.5, 1.6, muw=1)
    0.4115837724144...
    """
    # Resolve a string name to the module-level function; a callable (or any
    # non-string key absent from globals) falls through unchanged.
    mu_fn = mu_function
    try:
        mu_fn = globals()[mu_function]
    except KeyError:
        pass
    muw = kwargs.pop('muw', 0)
    mu_total = mu_fn(*args, **kwargs) + muw
    return 2 / re**2 / mu_total
def back_calc_drain_spacing_from_eta(eta, pattern, mu_function, rw, s, kap, muw=0):
    """Back calculate the required drain spacing to achieve a given eta

    eta = 2 / re**2 / (mu + muw)

    eta is used in radial consolidation equations u= u0 * exp(-eta*kh/gamw*t)

    Parameters
    ----------
    eta : float
        eta value.
    pattern : ['Triangle', 'Square']
        Drain installation pattern.
    mu_function : obj
        The mu_funtion to use. e.g. mu_ideal, mu_constant, mu_linear,
        mu_overlapping_linear, mu_parabolic, mu_piecewise_constant,
        mu_piecewise_linear.
    rw : float
        Drain/well radius.
    s : float or 1d array_like of float
        Ratio of smear zone radius to drain radius (rs/rw). s can only be
        a 1d array is using a mu_piecewise function
    kap : float or 1d array_like of float
        Ratio of undisturbed horizontal permeability to permeability at
        in smear zone (kh / ks) (often at the drain-soil interface). Be
        careful when defining s and kap for mu_piecewise_constant, and
        mu_piecewise_linear because the last value of kap will be used at
        the influence drain periphery. In general the last value of kap
        should be one, representing the start of the undisturbed zone.
    muw : float, optional
        Well resistance mu term, default=0.

    Returns
    -------
    sp : float
        Drain spacing to get the required eta value
    re : float
        Drain influence radius
    n : float
        Ratio of drain influence radius to drain radius, re/rw

    Notes
    -----
    When using mu_piecewise_linear or mu_piecewise_constant only define s and
    kap up to the start of the undisturbed zone. re will be varied.
    For anyting other than mu_overlapping_linear do not trust any returned
    spacing that gives an n value less than the extent of the smear zone.
    """
    def calc_eta(sp, eta, rw, s, kap, mu_function, pattern, muw=0):
        """Residual (eta at spacing `sp` minus target eta); used in
        root finding with fsolve.
        """
        re = re_from_drain_spacing(sp, pattern)
        n = re/rw
        # Guard against the iterate shrinking the influence radius inside the
        # smear zone (mu_overlapping_linear is the only mu that handles n<s).
        if mu_function != mu_ideal:
            if n < np.max(s):
                if mu_function != mu_overlapping_linear:
                    raise ValueError('In determining required drain '
                                     'spacing, n has fallen '
                                     'below s. s={}, n={}'.format(np.max(s), n))
        # The piecewise mu functions take (s, kap, n=...) while the others
        # take (n, s, kap), hence the two call signatures.
        if mu_function in [mu_piecewise_constant, mu_piecewise_linear]:
            eta_ = drain_eta(re, mu_function, s, kap, n = n, muw = muw)
        else:
            eta_ = drain_eta(re, mu_function, n, s, kap, muw=muw)
        return eta_ - eta
    from scipy.optimize import fsolve
    # Only the piecewise mu functions accept array-valued s and kap.
    if not mu_function in [mu_piecewise_constant, mu_piecewise_linear]:
        if len(np.atleast_1d(s))>1:
            raise ValueError('for mu_function={}, you cannot have multiple '
                             'values for s. s={}'.format(mu_function.__name__, s))
        if len(np.atleast_1d(kap))>1:
            raise ValueError('for mu_function={}, you cannot have multiple '
                             'values for kap. kap={}'.format(mu_function.__name__, kap))
    x0 = rw * np.max(s) / 0.5 * 2 # this ensures guess is beyond smear zone
    # Evaluate once up front so an invalid initial guess raises a clear
    # ValueError here rather than obscurely inside fsolve.
    calc_eta(x0, eta, rw, s, kap, mu_function, pattern, muw )
    sp = fsolve(calc_eta, x0,
                args=(eta, rw, s, kap, mu_function, pattern, muw))
    re = re_from_drain_spacing(sp[0], pattern)
    n = re/rw
    # Re-check the converged solution: an n inside the smear zone is invalid
    # for every mu function except mu_overlapping_linear.
    if mu_function != mu_ideal:
        if n < np.max(s):
            if mu_function != mu_overlapping_linear:
                raise ValueError('calculated spacing results in n<s. s={}, n={}'.format(np.max(s), n))
    return sp[0], re, n
def _g(r_rw, re_rw, nflow=1.0001, nterms=20):
"""Non-darcian equal strain radial consolidation term
Parameters
----------
r_rw : float
Ratio of radial coordinate to drain radius (r/rw).
re_rw : float
Ratio of drain influence radius to drain readius (re/rw).
You will often see this ratio expressed as re/re=n. However, this is
confusing with the non-darcian flow exponent.
nflow : float, optional
Non-Darcian flow exponent. Default nflow=1.0001 i.e. darcian flow.
Using nflow=1 will result in an error.
nterms : int, optional
Number of summation terms. Default nterms=20.
Returns
-------
g : float
Non-darcian equal strain radial consolidation term.
Notes
-----
The 'g' function arises in the derivation of equal strain radial
consolidation equations under non-Darcian flow.
We only concern ourselves with the exponential part of Hansbo's
Non-darcian flow relationship:
.. math::
v=k^{\\ast}i^{n}
where,
:math:`k^{\\ast}` is a peremability, :math:`i` is hydraulic gradient and
:math:`n` is the flow exponent.
The expression :math:`g\\left({y}\\right)` is given below. :math:`y` is
the ratio of radial coordinate :math:`r` to drain radius :math:`r_w`,
:math:`y=r/r_w`. :math:`N` is the ratio of influence radius :math:`r_e`
to drain radius :math:`r_w`, :math:`N=r_e/r_w`.
.. math::
g\\left({y}\\right)=
ny^{1-1/n}\sum\limits_{j=0}^\\infty
\\frac{\\left\\{{-1/n}\\right\\}_j}
{j!\\left({\\left({2j+1}\\right)n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2j}
:math:`\\left\\{x\\right\\}_m` is the Pochhammer symbol or rising
factorial given by:
.. math::
\\left\\{x\\right\\}_m = x
\\left({x+1}\\right)
\\left({x+2}\\right)
\\dots
\\left({x+m-1}\\right)
.. math::
\\left\\{x\\right\\}_0=1
Alterantely a recurrance relatoin can be formed:
.. math::
g\\left({y}\\right)=
\sum\limits_{j=0}^{\\infty} a_j
where,
.. math::
a_0=\\frac{n}{n-1}y^{1-1/n}
.. math::
a_j = a_{j-1}
\\frac{\\left({jn-n-1}\\right)\\left({2jn-n-1}\\right)}
{nj\\left({2jn+n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2}
Examples
--------
>>> _g(10.0, 50.0, nflow=1.2)
8.7841...
>>> _g(10.0, 20.0, nflow=1.2)
8.664...
>>> _g(2, 50.0, nflow=1.01)
101.694...
>>> _g(5, 5, nflow=1.01)
102.120...
>>> _g(10.0, np.array([50.0,20]), nflow=1.2)
array([8.7841..., 8.664...])
See Also
--------
_gbar : multiply _g by y and integrate w.r.t y
"""
r_rw = np.asarray(r_rw)
re_rw = np.asarray(re_rw)
nflow = np.asarray(nflow)
if np.any(r_rw < 1):
raise ValueError('r_rw must be greater or equal to 1. '
'You have r_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(r_rw)])))
if np.any(re_rw <= 1):
raise ValueError('re_rw must be greater than 1. '
'You have re_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(re_rw)])))
if np.any(nflow <= 1):
raise ValueError('nflow must be greater than 1. '
'You have nflow = {}'.format(
', '.join([str(v) for v in np.atleast_1d(nflow)])))
if np.any([len(np.asarray(v).shape)>0 for v in
[r_rw, re_rw, nflow, nterms]]):
#array inputs, use series loop
g = 0
term1 = nflow * r_rw**(1-1.0/nflow)
for j in range(nterms):
term2 = special.poch(-1.0 / nflow, j)
term3 = np.math.factorial(j)
term4 = (2 * j + 1) * nflow - 1
term5 = (r_rw / re_rw)**(2 * j)
g += term2 / term3 / term4 * term5
g *= term1
return g
else:
#scalar inputs, use recursion relationship
a = np.zeros(nterms)
a[0] = nflow / (nflow - 1.0) * r_rw**(1.0 - 1.0 / nflow)
j = np.arange(1, nterms)
a[1:] = (r_rw / re_rw)**2
a[1:] *= (j * nflow - nflow - 1)
a[1:] *= (2* j * nflow - nflow - 1)
a[1:] /= nflow * j * (2 * j * nflow + nflow - 1)
np.cumprod(a, out=a)
g=np.sum(a)
return g
def _gbar(r_rw, re_rw, nflow=1.0001, nterms=20):
"""Non-darcian equal strain radial consolidation term
_g expression multiplied by y and integrated w.r.t. y
Parameters
----------
r_rw : float
Ratio of radial coordinate to drain radius (r/rw).
re_rw : float
Ratio of drain influence radius to drain readius (re/rw).
You will often see this ratio expressed as re/re=n. However, this is
confusing with the non-darcian flow exponent.
nflow : int, optional
Non-Darcian flow exponent. Default nflow=1.0001 i.e. darcian flow.
Using nflow=1 will result in an error.
nterms : float, optional
Number of summation terms. Default nterms=20.
Returns
-------
gbar : float
Non-darcian equal strain radial consolidation term.
Notes
-----
The 'gbar' (bar stands for overbar) function arises in the derivation
of equal strain radial consolidation equations under non-Darcian flow.
We only concern ourselves with the exponential part of Hansbo's
Non-darcian flow relationship:
.. math::
v=k^{\\ast}i^{n}
where,
:math:`k^{\\ast}` is a peremability, :math:`i` is hydraulic gradient and
:math:`n` is the flow exponent.
The expression :math:`g\\left({y}\\right)` is given below. :math:`y` is
the ratio of radial coordinate :math:`r` to drain radius :math:`r_w`,
:math:`y=r/r_w`. :math:`N` is the ratio of influence radius :math:`r_e`
to drain radius :math:`r_w`, :math:`N=r_e/r_w`.
.. math::
\\overline{g}\\left({y}\\right)=
n^2y^{3-1/n}\sum\limits_{j=0}^\\infty
\\frac{\\left\\{{-1/n}\\right\\}_j}
{j!\\left({\\left({2j+1}\\right)n-1}\\right)
\\left({\\left({2j+3}\\right)n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2j}
:math:`\\left\\{x\\right\\}_m` is the Pochhammer symbol or rising
factorial given by:
.. math::
\\left\\{x\\right\\}_m = x
\\left({x+1}\\right)
\\left({x+2}\\right)
\\dots
\\left({x+m-1}\\right)
.. math::
\\left\\{x\\right\\}_0=1
Alterantely a recurrance relatoin can be formed:
.. math::
\\overline{g}\\left({y}\\right)=
\sum\limits_{j=0}^{\\infty} a_j
where,
.. math::
a_0=\\frac{n^2}{\\left({n-1}\\right)\\left({3n-1}\\right)}y^{3-1/n}
.. math::
a_j = a_{j-1}
\\frac{\\left({jn-n-1}\\right)\\left({2jn-n-1}\\right)}
{nj\\left({2jn+3n-1}\\right)}
\\left({\\frac{y}{N}}\\right)^{2}
Examples
--------
>>> _gbar(10.0, 50.0, nflow=1.2)
405.924...
>>> _gbar(10.0, 20.0, nflow=1.2)
403.0541...
>>> _gbar(2, 50.0, nflow=1.01)
202.3883...
>>> _gbar(5, 5, nflow=1.01)
1273.3329...
>>> _gbar(10.0, np.array([50.0,20]), nflow=1.2)
array([405.924..., 403.0541...])
See Also
--------
_g : earlier step in derivation of `_gbar`.
"""
r_rw = np.asarray(r_rw)
re_rw = np.asarray(re_rw)
nflow = np.asarray(nflow)
if np.any(r_rw < 1):
raise ValueError('r_rw must be greater or equal to 1. '
'You have r_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(r_rw)])))
if np.any(re_rw <= 1):
raise ValueError('re_rw must be greater than 1. '
'You have re_rw = {}'.format(
', '.join([str(v) for v in np.atleast_1d(re_rw)])))
if np.any(nflow <= 1):
raise ValueError('nflow must be greater than 1. '
'You have nflow = {}'.format(
', '.join([str(v) for v in np.atleast_1d(nflow)])))
if np.any([len(np.asarray(v).shape)>0 for v in
[r_rw, re_rw, nflow, nterms]]):
#array inputs, use series loop
gbar = 0
term1 = nflow**2 * r_rw**(3 - 1.0/nflow)
for j in range(nterms):
term2 = special.poch(-1.0 / nflow, j)
term3 = np.math.factorial(j)
term4 = (2 * j + 1) * nflow - 1
term4a = (2 * j + 3) * nflow - 1
term5 = (r_rw/re_rw)**(2*j)
gbar += term2/term3/term4/term4a*term5
gbar *= term1
return gbar
else:
a = np.zeros(nterms)
a[0] = nflow**2 / (nflow - 1.0)/(3 * nflow - 1.0) * r_rw**(3.0 - 1.0 / nflow)
j = np.arange(1, nterms)
a[1:] = (r_rw / re_rw)**2
a[1:] *= (j * nflow - nflow - 1)
a[1:] *= (2* j * nflow - nflow - 1)
a[1:] /= nflow * j * (2 * j * nflow + 3 * nflow - 1)
np.cumprod(a, out=a)
gbar=np.sum(a)
return gbar
def non_darcy_beta_ideal(n, nflow=1.0001, nterms=20, *args):
    r"""Non-darcian flow smear zone permeability/geometry parameter for
    ideal drain (no smear).

    beta parameter is in equal strain radial consolidation equations with
    non-Darcian flow.

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.
    args : anything
        `args` does not contribute to any calculations it is merely so you
        can have other arguments such as s and kappa which are used in other
        smear zone formulations.

    Returns
    -------
    beta : float
        Smear zone permeability/geometry parameter.

    Raises
    ------
    ValueError
        If any n<=1 or any nflow<=1.

    Notes
    -----
    .. math::
        \beta = \frac{1}{N^2-1}
            \left({
            2\overline{g}\left({N}\right)
            -2\overline{g}\left({1}\right)
            -g\left({1}\right) \left({N^2-1}\right)
            }\right)

    :math:`g\left({y}\right)` and :math:`\overline{g}\left({y}\right)`
    are described in the `_g` and `_gbar` functions respectively.

    .. math:: n = \frac{r_e}{r_w}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence radius.

    Examples
    --------
    >>> non_darcy_beta_ideal(20, 1.000001, nterms=20)
    2.2538...
    >>> non_darcy_beta_ideal(np.array([20, 10]), 1.000001, nterms=20)
    array([2.253..., 1.578...])
    >>> non_darcy_beta_ideal(15, 1.3)
    2.618...
    >>> non_darcy_beta_ideal(np.array([20, 15]), np.array([1.000001,1.3]), nterms=20)
    array([2.253..., 2.618...])

    See Also
    --------
    _g : used in this function.
    _gbar : used in this function.

    References
    ----------
    .. [1] Hansbo, S. 1981. "Consolidation of Fine-Grained Soils by
           Prefabricated Drains". In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    .. [2] Walker, R., B. Indraratna, and C. Rujikiatkamjorn.
           "Vertical Drain Consolidation with Non-Darcian Flow and
           Void-Ratio-Dependent Compressibility and Permeability."
           Geotechnique 62, no. 11 (November 1, 2012): 985-97.
           doi:10.1680/geot.10.P.084.
    """
    n = np.asarray(n)
    nflow = np.asarray(nflow)
    if np.any(n <= 1):
        raise ValueError('n must be greater than 1. '
                         'You have n = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    # beta = [2*gbar(N) - 2*gbar(1) - g(1)*(N^2 - 1)] / (N^2 - 1)
    beta = -_g(1, n, nflow, nterms)
    beta *= n**2 - 1
    beta += 2 * _gbar(n, n, nflow, nterms)
    beta -= 2 * _gbar(1, n, nflow, nterms)
    beta /= n**2 - 1
    return beta
def non_darcy_beta_constant(n, s, kap, nflow=1.0001, nterms=20, *args):
    r"""Non-darcian flow smear zone permeability/geometry parameter for
    smear zone with constant permeability.

    beta parameter is in equal strain radial consolidation equations with
    non-Darcian flow.

    Parameters
    ----------
    n : float or ndarray of float
        Ratio of drain influence radius to drain radius (re/rw).
    s : float or ndarray of float
        Ratio of smear zone radius to drain radius (rs/rw).
    kap : float or ndarray of float
        Ratio of undisturbed horizontal permeability to smear zone
        horizontal permeability (kh / ks).
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.
    args : anything
        `args` does not contribute to any calculations it is merely so you
        can have other arguments which are used in other smear zone
        formulations.

    Returns
    -------
    beta : float
        Smear zone permeability/geometry parameter.

    Raises
    ------
    ValueError
        If any n<=1, s<1, kap<1, or nflow<=1.

    Notes
    -----
    .. math::
        \beta = \frac{1}{N^2-1}
            \left({
            2\overline{g}\left({N}\right)
            -\kappa^{1/n}\left({
            2\overline{g}\left({1}\right)
            + g\left({1}\right) \left({N^2-1}\right)
            }\right)
            +\left({\kappa^{1/n}-1}\right)\left({
            2\overline{g}\left({s}\right)
            + g\left({s}\right) \left({N^2-s^2}\right)
            }\right)
            }\right)

    :math:`g\left({y}\right)` and :math:`\overline{g}\left({y}\right)`
    are described in the `_g` and `_gbar` functions respectively.

    .. math:: n = \frac{r_e}{r_w}

    .. math:: s = \frac{r_s}{r_w}

    .. math:: \kappa = \frac{k_h}{k_s}

    :math:`r_w` is the drain radius, :math:`r_e` is the drain influence
    radius, :math:`r_s` is the smear zone radius, :math:`k_h` is the
    undisturbed horizontal permeability, :math:`k_s` is the smear zone
    horizontal permeability.

    Examples
    --------
    >>> non_darcy_beta_constant(20,1,1, 1.000001, nterms=20)
    2.2538...
    >>> non_darcy_beta_constant(20,5,5, 1.000001, nterms=20)
    8.4710...
    >>> non_darcy_beta_constant(15, 5, 4, 1.3, nterms=20)
    6.1150...
    >>> non_darcy_beta_constant(np.array([20, 15]), 5,
    ... np.array([5,4]), np.array([1.000001, 1.3]), nterms=20)
    array([8.471..., 6.1150...])

    See Also
    --------
    _g : used in this function.
    _gbar : used in this function.

    References
    ----------
    .. [1] Hansbo, S. 1981. "Consolidation of Fine-Grained Soils by
           Prefabricated Drains". In 10th ICSMFE, 3:677-82.
           Rotterdam-Boston: A.A. Balkema.
    .. [2] Walker, R., B. Indraratna, and C. Rujikiatkamjorn.
           "Vertical Drain Consolidation with Non-Darcian Flow and
           Void-Ratio-Dependent Compressibility and Permeability."
           Geotechnique 62, no. 11 (November 1, 2012): 985-97.
           doi:10.1680/geot.10.P.084.
    """
    n = np.asarray(n)
    s = np.asarray(s)
    kap = np.asarray(kap)
    nflow = np.asarray(nflow)
    if np.any(n <= 1):
        raise ValueError('n must be greater than 1. '
                         'You have n = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(n)])))
    if np.any(s < 1):
        # NOTE: message previously said 'You have n = ' for an s error.
        raise ValueError('s must be greater or equal to 1. '
                         'You have s = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap < 1):
        raise ValueError('kap must be greater or equal to 1. '
                         'You have kap = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    # kappa**(1/n) weights the smear zone contribution under non-Darcian flow.
    kap_root = kap**(1 / nflow)
    beta = 2 * _gbar(n, n, nflow, nterms)
    beta -= kap_root * (
        2 * _gbar(1, n, nflow, nterms)
        + _g(1, n, nflow, nterms) * (n**2 - 1))
    beta += (kap_root - 1) * (
        2 * _gbar(s, n, nflow, nterms)
        + _g(s, n, nflow, nterms) * (n**2 - s**2))
    beta /= n**2 - 1
    return beta
def non_darcy_beta_piecewise_constant(s, kap, n=None, kap_m=None,
                                      nflow=1.0001, nterms=20, *args):
    r"""Non-darcian flow smear zone permeability/geometry parameter for
    smear zone with piecewise constant permeability.

    beta parameter is in equal strain radial consolidation equations with
    non-Darcian flow.

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first
        value of s should be greater than 1, i.e. the first value should be
        s_1; s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in each
        segment kh/khi.
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s`
        and `kap`. This allows the specification of a smear zone separate to
        the specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.
    nflow : float, optional
        Non-darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.

    Returns
    -------
    beta : float
        Smear zone permeability/geometry parameter.

    Notes
    -----
    The non-darcian smear zone parameter :math:`\beta` is given by:

    .. math:: \beta = \frac{1}{\left({n^2-1}\right)}
                \sum\limits_{i=1}^{m} \kappa^{1/n}_i
                \left[{
                2\overline{g}\left({s_i}\right)
                -2\overline{g}\left({s_{i-1}}\right)
                }\right]
                +\psi_i \left({s_i^2-s_{i-1}^2}\right)

    where,

    .. math:: \psi_{i} = \sum\limits_{j=1}^{i-1}\kappa^{1/n}_j
                \left[{
                g\left({s_j}\right)
                -g\left({s_{j-1}}\right)
                }\right]

    and:

    .. math:: n = \frac{r_m}{r_0}

    .. math:: s_i = \frac{r_i}{r_0}

    .. math:: \kappa_i = \frac{k_h}{k_{hi}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence
    radius, :math:`r_i` is the outer radius of the ith segment,
    :math:`k_h` is the undisturbed horizontal permeability,
    :math:`k_{hi}` is the horizontal permeability in the ith segment.

    Examples
    --------
    >>> mu_piecewise_constant([1.5, 3, 4],[2, 3, 1], n=5)
    2.2533...
    >>> non_darcy_beta_piecewise_constant(s=np.array([1.5, 3, 4]),
    ... kap=np.array([2, 3, 1]), n=5, nflow=1.000001)
    2.2533...

    References
    ----------
    None because it is new.
    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if not n is None:
        # Append the influence radius (and matching kappa) so the piecewise
        # description extends all the way to the drain influence boundary.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                         len(s), len(kap)))
    if np.any(s <= 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right you have s = '
                         '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                         ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    # Drain influence ratio is the outermost segment boundary.
    n = s[-1]
    # s_ holds the inner radius ratio of each segment: s_[i] = s[i-1], s_[0]=1.
    s_ = np.ones_like(s, dtype=float)
    s_[1:] = s[:-1]
    sumi = 0
    for i in range(len(s)):
        # psi: accumulated contribution of segments inside segment i,
        # normalised by kap[i]**(1/nflow) (it is scaled back below).
        psi = 0
        for j in range(i):
            psi += kap[j]**(1 / nflow) * (
                _g(s[j], n, nflow, nterms)
                - _g(s_[j], n, nflow, nterms)
                )
        psi /= kap[i]**(1 / nflow)
        sumi += kap[i]**(1 / nflow) * (
            2 * _gbar(s[i], n, nflow, nterms)
            - 2 * _gbar(s_[i], n, nflow, nterms)
            + (psi - _g(s_[i], n, nflow, nterms)) * (s[i]**2 - s_[i]**2)
            )
    beta = sumi / (n**2 - 1)
    return beta
def non_darcy_u_piecewise_constant(s, kap, si, uavg=1, uw=0, muw=0,
                                   n=None, kap_m=None,
                                   nflow=1.0001, nterms=20):
    """Pore pressure at radius for piecewise constant permeability distribution

    .. warning::
        `muw` must always be zero. i.e. no well resistance (It exists to have
        the same inputs as `u_piecewise_constant`.

    Parameters
    ----------
    s : list or 1d ndarray of float
        Ratio of segment outer radii to drain radius (r_i/r_0). The first value
        of s should be greater than 1, i.e. the first value should be s_1;
        s_0=1 at the drain soil interface is implied.
    kap : list or ndarray of float
        Ratio of undisturbed horizontal permeability to permeability in each
        segment kh/khi.
    si : float or ndarray of float
        Normalised radial coordinate(s) at which to calc the pore pressure
        i.e. si=ri/rw.
    uavg : float, optional
        Average pore pressure in soil. Default uavg=1.
    uw : float, optional
        Pore pressure in drain, default = 0.
    muw : float, optional
        Well resistance mu parameter. Default = 0
    n, kap_m : float, optional
        If `n` and `kap_m` are given then they will each be appended to `s` and
        `kap`. This allows the specification of a smear zone separate to the
        specification of the drain influence radius.
        Default n=kap_m=None, i.e. soil permeability is completely described
        by `s` and `kap`. If n is given but kap_m is None then the last
        kappa value in kap will be used.
    nflow : float, optional
        non_darcian flow exponent. Default nflow=1.0001.
    nterms : int, optional
        Number of terms to use in series. Default nterms=20.

    Returns
    -------
    u : float or ndarray of float
        Pore pressure at specified si.

    Raises
    ------
    ValueError
        If `s`/`kap` lengths differ, any s<=1, any kap<=0, s is not strictly
        increasing, nflow<=1, or any si lies outside [1, s[-1]].

    Notes
    -----
    The pore pressure in the ith segment is given by:

    .. math:: u_i(y) = \\frac{u_{avg}-u_w}{\\mu+\\mu_w}
                \\left[{
                \\kappa^{1/n}_i
                \\left({
                g\\left({y}\\right)
                -g\\left({s_{i-1}}\\right)
                }\\right)
                +\\psi_i
                }\\right]+u_w

    where,

    .. math:: \\psi_{i} = \\sum\\limits_{j=1}^{i-1}\\kappa^{1/n}_j
                \\left[{
                g\\left({s_j}\\right)
                -g\\left({s_{j-1}}\\right)
                }\\right]

    and :math:`g\\left({y}\\right)` is described in the `_g` function,

    .. math:: y = \\frac{r}{r_0}
    .. math:: n = \\frac{r_m}{r_0}
    .. math:: s_i = \\frac{r_i}{r_0}
    .. math:: \\kappa_i = \\frac{k_h}{k_{hi}}

    :math:`r_0` is the drain radius, :math:`r_m` is the drain influence
    radius, :math:`r_i` is the outer radius of the ith segment,
    :math:`k_h` is the undisturbed horizontal permeability in the ith segment,
    :math:`k_{hi}` is the horizontal permeability in the ith segment.

    Examples
    --------
    >>> u_piecewise_constant([1.5, 3,], [2, 3], 1.6, n=5, kap_m=1)
    array([0.4153...])
    >>> non_darcy_u_piecewise_constant([1.5, 3,], [2, 3], 1.6, n=5, kap_m=1,
    ... nflow=1.0000001)
    array([0.4153...])
    >>> non_darcy_u_piecewise_constant([1.5, 3,], [2, 3], 1.6, n=5, kap_m=1,
    ... nflow=1.3)
    array([0.3865...])

    References
    ----------
    None because it is new.

    """
    s = np.atleast_1d(s)
    kap = np.atleast_1d(kap)
    if n is not None:
        # Append the influence radius (and optional outer-zone kappa) so the
        # whole soil profile is described by a single pair of arrays.
        s_temp = np.empty(len(s) + 1, dtype=float)
        s_temp[:-1] = s
        s_temp[-1] = n
        kap_temp = np.empty(len(kap) + 1, dtype=float)
        kap_temp[:-1] = kap
        if kap_m is None:
            kap_temp[-1] = kap[-1]
        else:
            kap_temp[-1] = kap_m
        s = s_temp
        kap = kap_temp
    if len(s) != len(kap):
        raise ValueError('s and kap must have the same shape. You have '
                         'lengths for s, kap of {}, {}.'.format(
                             len(s), len(kap)))
    if np.any(s <= 1.0):
        raise ValueError('must have all s>=1. You have s = {}'.format(
            ', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(kap <= 0.0):
        raise ValueError('all kap must be greater than 0. You have kap = '
            '{}'.format(', '.join([str(v) for v in np.atleast_1d(kap)])))
    if np.any(np.diff(s) <= 0):
        raise ValueError('s must increase left to right you have s = '
            '{}'.format(', '.join([str(v) for v in np.atleast_1d(s)])))
    if np.any(nflow <= 1):
        raise ValueError('nflow must be greater than 1. '
                         'You have nflow = {}'.format(
                             ', '.join([str(v) for v in np.atleast_1d(nflow)])))
    n = s[-1]
    si = np.atleast_1d(si)
    if np.any((si < 1) | (si > n)):
        # Bug fix: the original message had the inequalities inverted
        # ('1 >= si >= s[-1]') and a stray closing parenthesis.
        raise ValueError('si must satisfy 1 <= si <= s[-1]')
    # s_[i] is the inner radius ratio of segment i (s_[0] = 1 at the drain);
    # dtype=float for consistency with non_darcy_beta_piecewise_constant.
    s_ = np.ones_like(s, dtype=float)
    s_[1:] = s[:-1]
    u = np.empty_like(si, dtype=float)
    # Index of the segment that contains each query point.
    segment = np.searchsorted(s, si)
    beta = non_darcy_beta_piecewise_constant(s, kap,
                                             nflow=nflow, nterms=nterms)
    term1 = (uavg - uw) / (beta + muw)
    for ii, i in enumerate(segment):
        # psi accumulates the contribution of all segments inside segment i.
        psi = 0
        for j in range(i):
            psi += kap[j]**(1 / nflow) * (
                _g(s[j], n, nflow, nterms)
                - _g(s_[j], n, nflow, nterms)
                )
        psi /= kap[i]**(1 / nflow)
        u[ii] = kap[i]**(1 / nflow) * (
            _g(si[ii], n, nflow, nterms)
            - _g(s_[i], n, nflow, nterms)
            + psi
            ) + muw
    u *= term1
    u += uw
    return u
def non_darcy_drain_eta(re, iL, gamw, beta_function, *args, **kwargs):
    """For non-Darcy flow calculate the vertical drain eta parameter

    eta = 2 / (re**2 * beta**nflow * (rw * gamw)**(nflow-1) * nflow
               * iL**(nflow-1))

    nflow will be obtained from the **kwargs. rw will be back calculated
    from the n parameter (n=re/rw) which is usually the first of the *args
    parameters or one of the **kwargs.

    Note that eta is used in radial consolidation equations:
        [strain rate] = (u - uw)**n * k / gamw * eta
    Compare with the Darcian case of (eta terms are calculated differently
    for Darcy and non-Darcy cases):
        [strain rate] = (u - uw) * k / gamw * eta

    Note that `non_darcy_drain_eta` only uses the exponential portion of the
    Non-Darcian flow relationship. If hydraulic gradients are greater than
    iL then the flow rates will be overestimated.

    Parameters
    ----------
    re : float
        Drain influence radius.
    iL : float
        Limiting hydraulic gradient beyond which flow follows Darcy's law.
    gamw : float
        Unit weight of water. Usually gamw=10 kN/m**3 or gamw=9.807 kN/m**3.
    beta_function : obj or string
        The non_darcy_beta function to use. e.g. non_darcy_beta_ideal,
        non_darcy_beta_constant, non_darcy_beta_piecewise_constant.
        This can either be the function object itself
        or the name of the function e.g. 'non_darcy_beta_ideal'.
    *args, **kwargs : various
        The arguments to pass to the beta_function. Must include 'nflow';
        'n' is read from kwargs or, failing that, from args[0].

    Returns
    -------
    eta : float
        Value of eta parameter for non-Darcian flow.

    Examples
    --------
    >>> non_darcy_drain_eta(re=1.5, iL=10, gamw=10,
    ... beta_function='non_darcy_beta_ideal', n=15, nflow=1.3, nterms=20)
    0.09807...
    >>> non_darcy_drain_eta(1.5, 10, 10,
    ... 'non_darcy_beta_ideal', 15, nflow=1.3, nterms=20)
    0.09807...
    >>> non_darcy_drain_eta(re=1.5, iL=10, gamw=10,
    ... beta_function='non_darcy_beta_ideal', n=np.array([20.0, 15.0]),
    ... nflow=np.array([1.000001, 1.3]), nterms=20)
    array([0.3943..., 0.0980...])

    """
    # beta_function may be the function object itself or its name in this
    # module; a missing (KeyError) or unhashable (TypeError) key means it was
    # passed as an object.
    try:
        beta_fn = globals()[beta_function]
    except (KeyError, TypeError):
        beta_fn = beta_function
    # n (= re/rw) is either the 'n' keyword or the first positional argument.
    # Bug fix: this used a bare 'except:' which swallowed every exception.
    try:
        n = kwargs['n']
    except KeyError:
        n = args[0]
    rw = re / n
    nflow = kwargs['nflow']
    beta = beta_fn(*args, **kwargs)
    eta = 2 / (re**2 * beta**nflow * (rw * gamw)**(nflow - 1)
               * nflow * iL**(nflow - 1))
    return eta
########################################################################
#scratch()
def scratch():
    """Scratch pad for testing LaTeX markup in docstrings.

    Intentionally a no-op; returns None.
    """
    return None
if __name__ == '__main__':
    # Run this module's doctests; ELLIPSIS lets the '...' in the docstring
    # examples match the trailing digits of computed values.
    import nose
    nose.runmodule(argv=['nose', '--verbosity=3', '--with-doctest',
                         '--doctest-options=+ELLIPSIS'])
    # Ad-hoc development driver. The large blocks of commented-out scratch
    # code and data that previously lived here have been removed; the
    # executable statements below are unchanged.
    eta = 5
    pattern = 't'
    mu_function = mu_overlapping_linear
    rw = 0.05
    s = 5
    kap = 2
    muw = 1
    print(back_calc_drain_spacing_from_eta(eta, pattern, mu_function,
                                           rw, s, kap, muw))
    scratch()
    mu_piecewise_constant([1.5, 5],
                          [1.6, 1])
    scratch()
    print(mu_parabolic(30, 5, 2))
    print(k_parabolic(30, 5, 2, [1, 1.13559322]))
    k_parabolic(20, 1, 2, [4, 6, 7])
|
gpl-3.0
|
AlexanderFabisch/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
28
|
11870
|
"""Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
    """Target function used by the GPR tests below: x * sin(x)."""
    return np.sin(x) * x
# Training inputs (column vector), held-out query points, and noise-free
# targets y = f(X) shared by every test in this module.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
# Kernel whose hyperparameters are not optimizable; several tests skip it.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
# Assortment of kernels (plain, bounded, scaled, with constant offsets)
# exercised by every test below.
kernels = [RBF(length_scale=1.0), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
           + C(1e-5, (1e-5, 1e2)),
           C(0.1, (1e-2, 1e2))
           * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
           + C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
    """Test the interpolating property for different kernels."""
    for kernel in kernels:
        model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        pred_mean, pred_cov = model.predict(X, return_cov=True)
        # A noise-free GP must reproduce the training targets exactly and
        # have zero posterior variance at the training points.
        assert_true(np.allclose(pred_mean, y))
        assert_true(np.allclose(np.diag(pred_cov), 0.))
def test_lml_improving():
    """ Test that hyperparameter-tuning improves log-marginal likelihood. """
    for kernel in kernels:
        # fixed_kernel has no free hyperparameters to optimize.
        if kernel == fixed_kernel: continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        # LML at the optimized theta must beat LML at the initial theta.
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
    """ Test that lml of optimized kernel is stored correctly. """
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        # Calling without theta must return the value cached during fit().
        assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                     gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
    """ Test that we are in local maximum after hyperparameter-optimization."""
    for kernel in kernels:
        if kernel == fixed_kernel: continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        lml, lml_gradient = \
            gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
        # At an interior optimum the gradient is ~0; otherwise the
        # parameter must sit exactly on one of its bounds.
        assert_true(np.all((np.abs(lml_gradient) < 1e-4)
                           | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0])
                           | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
    """ Test that hyperparameter-optimization remains in bounds"""
    for kernel in kernels:
        if kernel == fixed_kernel: continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        bounds = gpr.kernel_.bounds
        max_ = np.finfo(gpr.kernel_.theta.dtype).max
        tiny = 1e-10
        # Replace infinite upper bounds so the comparisons below are finite;
        # tiny allows for floating-point round-off at the boundary.
        bounds[~np.isfinite(bounds[:, 1]), 1] = max_
        assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
        assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
    """ Compare analytic and numeric gradient of log marginal likelihood. """
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
        # Finite-difference approximation with step 1e-10.
        lml_gradient_approx = \
            approx_fprime(kernel.theta,
                          lambda theta: gpr.log_marginal_likelihood(theta,
                                                                    False),
                          1e-10)
        assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
    """ Test that GP prior has mean 0 and identical variances."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel)
        # Predicting before fit() queries the GP prior.
        y_mean, y_cov = gpr.predict(X, return_cov=True)
        assert_almost_equal(y_mean, 0, 5)
        if len(gpr.kernel.theta) > 1:
            # XXX: quite hacky, works only for current kernels
            assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
        else:
            assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
    """ Test that statistics of samples drawn from GP are correct."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        y_mean, y_cov = gpr.predict(X2, return_cov=True)
        samples = gpr.sample_y(X2, 1000000)
        # More digits accuracy would require many more samples
        assert_almost_equal(y_mean, np.mean(samples, 1), 2)
        # Compare variances relative to the largest one for numerical
        # stability of the almost-equal check.
        assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
                            np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
    """ Test that kernel parameters are unmodified when optimizer is None."""
    gpr = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=None).fit(X, y)
    # theta is stored in log-space, so exp(theta) recovers the length scale.
    assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
    """ Test that predicted std.-dev. is consistent with cov's diagonal."""
    for kernel in kernels:
        model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        _, cov = model.predict(X2, return_cov=True)
        _, std = model.predict(X2, return_std=True)
        # The reported std must be the square root of the cov diagonal.
        assert_almost_equal(np.sqrt(np.diag(cov)), std)
def test_anisotropic_kernel():
    """ Test that GPR can identify meaningful anisotropic length-scales. """
    # We learn a function which varies in one dimension ten-times slower
    # than in the other. The corresponding length-scales should differ by at
    # least a factor 5
    rng = np.random.RandomState(0)
    # NOTE: local X, y deliberately shadow the module-level fixtures.
    X = rng.uniform(-1, 1, (50, 2))
    y = X[:, 0] + 0.1 * X[:, 1]
    # One length-scale per input dimension.
    kernel = RBF([1.0, 1.0])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert_greater(np.exp(gpr.kernel_.theta[1]),
                   np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the log marginal likelihood of the chosen theta.
    """
    n_samples, n_features = 25, 3
    np.random.seed(0)
    rng = np.random.RandomState(0)
    # Local X, y deliberately shadow the module-level fixtures.
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
        + rng.normal(scale=0.1, size=n_samples)
    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1.0] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(9):
        gp = GaussianProcessRegressor(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # More restarts may only match or improve the best LML found so far
        # (eps tolerance guards against float round-off).
        assert_greater(lml, last_lml - np.finfo(np.float32).eps)
        last_lml = lml
def test_y_normalization():
    """ Test normalization of the target values in GP
    Fitting non-normalizing GP on normalized y and fitting normalizing GP
    on unnormalized y should yield identical results
    """
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for kernel in kernels:
        # Fit non-normalizing GP on normalized y
        gpr = GaussianProcessRegressor(kernel=kernel)
        gpr.fit(X, y_norm)
        # Fit normalizing GP on unnormalized y
        gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_norm.fit(X, y)
        # Compare predicted mean, std-devs and covariances
        y_pred, y_pred_std = gpr.predict(X2, return_std=True)
        # Shift the manually-normalized model's predictions back to the
        # original scale before comparing.
        y_pred = y_mean + y_pred
        y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
        assert_almost_equal(y_pred, y_pred_norm)
        assert_almost_equal(y_pred_std, y_pred_std_norm)
        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
        assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
    """ Test that GPR can deal with multi-dimensional target values"""
    y_2d = np.vstack((y, y*2)).T
    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                   normalize_y=False)
    gpr.fit(X, y)
    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                      normalize_y=False)
    gpr_2d.fit(X, y_2d)
    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
    # Standard deviation and covariance do not depend on output
    assert_almost_equal(y_std_1d, y_std_2d)
    assert_almost_equal(y_cov_1d, y_cov_2d)
    # Samples drawn from the 2d model must match the 1d model on the first
    # output dimension.
    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
    # Test hyperparameter optimization
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)
        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)
        # Duplicated outputs must not change the optimized hyperparameters.
        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
    """ Test that GPR can use externally defined optimizers. """
    # Define a dummy optimizer that simply tests 1000 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        # Random search over the bounds box clipped to [-2, 1] in log-space.
        for _ in range(1000):
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min
    for kernel in kernels:
        if kernel == fixed_kernel: continue
        gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
        gpr.fit(X, y)
        # Checks that optimizer improved marginal likelihood
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
    """ Test GPR can handle two different output-values for the same input. """
    for kernel in kernels:
        # alpha adds observation noise, making conflicting targets well-posed.
        gpr_equal_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        gpr_similar_inputs = \
            GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        X_ = np.vstack((X, X[0]))
        y_ = np.hstack((y, y[0] + 1))
        gpr_equal_inputs.fit(X_, y_)
        # A numerically negligible shift of the duplicated input should give
        # the same predictions as the exact duplicate.
        X_ = np.vstack((X, X[0] + 1e-15))
        y_ = np.hstack((y, y[0] + 1))
        gpr_similar_inputs.fit(X_, y_)
        X_test = np.linspace(0, 10, 100)[:, None]
        y_pred_equal, y_std_equal = \
            gpr_equal_inputs.predict(X_test, return_std=True)
        y_pred_similar, y_std_similar = \
            gpr_similar_inputs.predict(X_test, return_std=True)
        assert_almost_equal(y_pred_equal, y_pred_similar)
        assert_almost_equal(y_std_equal, y_std_similar)
|
bsd-3-clause
|
dankolbman/NumericalAnalysis
|
Homeworks/HW2/Problem5ii.py
|
1
|
3007
|
import math
import scipy.interpolate as intrp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
font = {'family' : 'normal',
'size' : 24}
rc('font', **font)
### The function
def f(t):
    """Runge's function 1 / (1 + t**2), the classic interpolation test case."""
    return 1.0 / (1.0 + t ** 2)
# Spline
def spline(xpts, ypts):
    """Solve for the knot second derivatives of a natural cubic spline.

    Parameters
    ----------
    xpts, ypts : sequence of float
        Knot abscissae (strictly increasing) and ordinates.

    Returns
    -------
    ndarray, shape (n, 1)
        Second derivative of the spline at each knot. The natural boundary
        conditions force the first and last entries to zero.
    """
    n = len(xpts)
    mat = np.zeros((n, n))
    rhs = np.zeros((n, 1))
    for i in range(1, n - 1):
        # The jump in first divided differences drives the curvature.
        rhs[i] = 6 * ((ypts[i + 1] - ypts[i]) / (xpts[i + 1] - xpts[i])
                      - (ypts[i] - ypts[i - 1]) / (xpts[i] - xpts[i - 1]))
        # Tridiagonal row i: h_{i-1}, 2*(h_{i-1} + h_i), h_i. Direct
        # assignment replaces the original O(n) column scan, which also
        # skipped column n-1 (range(0, n-1)) and so never wrote the
        # superdiagonal entry of row n-2 -- harmless only because the
        # natural BC forces the last unknown to zero.
        mat[i][i - 1] = xpts[i] - xpts[i - 1]
        mat[i][i] = 2 * (xpts[i + 1] - xpts[i - 1])
        mat[i][i + 1] = xpts[i + 1] - xpts[i]
    # Natural boundary conditions: zero curvature at both end knots.
    mat[0][0] = 1
    mat[-1][-1] = 1
    rhs[0] = 0
    rhs[-1] = 0
    # Solve the (tridiagonal) linear system for the second derivatives.
    return np.linalg.solve(mat, rhs)
#######
def _plot_spline_fit(npts, fmt, label, **plot_kwargs):
    """Fit a natural cubic spline through `npts` samples of f on [-5, 5],
    plot it, and print the RMS error against the true function.

    This routine was previously triplicated inline for 5, 10 and 15 points.
    """
    xpts = np.linspace(-5, 5, npts)
    ypts = [f(t) for t in xpts]
    sol = spline(xpts, ypts)
    xs = []
    fs = []
    t = 1000  # evaluation points per spline segment
    for i in range(npts - 1):
        dx = xpts[i + 1] - xpts[i]
        for j in range(t):
            bb = 1 * j / t  # local coordinate in [0, 1) within the segment
            aa = 1 - bb
            xs.append(xpts[i] + bb * dx)
            # Hermite-style cubic correction terms from the knot curvatures.
            cc = dx ** 2 * aa * (aa ** 2 - 1) / 6
            dd = dx ** 2 * bb * (bb ** 2 - 1) / 6
            fs.append(aa * ypts[i] + bb * ypts[i + 1]
                      + cc * sol[i] + dd * sol[i + 1])
    plt.plot(xs, fs, fmt, label=label, **plot_kwargs)
    diffs = [f(xs[i]) - fs[i] for i in range(len(xs))]
    rmse = np.linalg.norm(diffs) / np.sqrt(len(fs))
    print('Error for {} Points:'.format(npts), rmse)

# Plot the true function as a dashed reference curve.
x = [i / 100 for i in range(-500, 500)]
fx = [f(i) for i in x]
plt.plot(x, fx, 'k--', label='f(t)', linewidth=5)

# Spline fits with increasing numbers of knots.
_plot_spline_fit(5, 'r', '5 Points')
_plot_spline_fit(10, 'b', '10 Points')
_plot_spline_fit(15, 'g', '15 Points', linewidth=3)

plt.legend(fontsize=16)
plt.ylim([-0.2, 1.1])
plt.title('Natural Cubic Splines for $f(t)$')
plt.savefig('Problem5ii.png')
plt.show()
|
mit
|
caseyclements/bokeh
|
examples/interactions/us_marriages_divorces/us_marriages_divorces_interactive.py
|
26
|
3437
|
# coding: utf-8
# Plotting U.S. marriage and divorce statistics
#
# Example code by Randal S. Olson (http://www.randalolson.com)
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.models import HoverTool, NumeralTickFormatter
from bokeh.models import SingleIntervalTicker, LinearAxis
import pandas as pd
# Since the data set is loaded in the bokeh data repository, we can do this:
from bokeh.sampledata.us_marriages_divorces import data
# Work on a copy so the shared bokeh sample dataset is left untouched.
md_data = data.copy()
# Fill in missing data with a simple linear interpolation
md_data = md_data.interpolate(method='linear', axis=0).ffill().bfill()
# Tell Bokeh where to save the interactive chart
output_file('us_marriages_divorces_per_capita.html',
            # Tell Bokeh to use its minified JavaScript hosted on a
            # cdn instead of putting the Bokeh JS in the output file
            # Warning: This makes it so people can only view the
            # chart with an internet connection
            mode='cdn',
            title='144 years of marriage and divorce in the U.S.A.')
# Set up the data sources for the lines we'll be plotting.
# We need separate data sources for each line because we're
# displaying different data in the hover tool.
source_marriages = ColumnDataSource(
    data=dict(
        # x-axis (Years) for the chart
        x=md_data.Year.values,
        # y-axis (Marriages per capita) for the chart
        y=md_data.Marriages_per_1000.values,
        # The string version of the y-value that is displayed in the hover box
        y_text=md_data.Marriages_per_1000.apply(
            lambda x: '{}'.format(round(x, 1))),
        # Extra descriptive text that is displayed in the hover box
        desc=['marriages per 1,000 people'] * len(md_data),
    )
)
source_divorces = ColumnDataSource(
    data=dict(
        # x-axis (Years) for the chart
        x=md_data.Year.values,
        # y-axis (Divorces per capita) for the chart
        y=md_data.Divorces_per_1000.values,
        # The string version of the y-value that is displayed in the hover box
        y_text=md_data.Divorces_per_1000.apply(
            lambda x: '{}'.format(round(x, 1))),
        # Extra descriptive text that is displayed in the hover box
        desc=['divorces and annulments per 1,000 people'] * len(md_data),
    )
)
# Use HTML to mark up the tooltip that displays over the chart
# Note that the variables in the data sources (above) are referenced with a @
hover = HoverTool(
    tooltips='<font face="Arial" size="3">@y_text @desc in @x</font>')
# Select the tools that will be available to the chart
# NOTE(review): the 'resize' tool and direct `figure.title` assignment were
# removed/changed in later Bokeh releases -- confirm the targeted version.
TOOLS = ['pan,wheel_zoom,box_zoom,reset,save,resize'] + [hover]
bplot = figure(tools=TOOLS, width=800, height=500, x_axis_type=None)
# Create a custom x-axis with 10-year intervals
ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
xaxis = LinearAxis(ticker=ticker)
bplot.add_layout(xaxis, 'below')
# Customize the y-axis
bplot.yaxis.formatter = NumeralTickFormatter(format='0.0a')
bplot.yaxis.axis_label = '# per 1,000 people'
# Provide a descriptive title for the chart
bplot.title = '144 years of marriage and divorce in the U.S.'
# Finally, plot the data!
# Note that the data source determines what is plotted and what shows in
# the tooltips
bplot.line('x', 'y', color='#1f77b4', line_width=3, source=source_marriages)
bplot.line('x', 'y', color='#ff7f0e', line_width=3, source=source_divorces)
show(bplot)
|
bsd-3-clause
|
swartn/sam-vs-jet-paper
|
analysis_plotting/discover_psl_trend_maps_hadslp2r_2004_vs_2011_ending.py
|
1
|
3395
|
"""
Compare maps of HadSLP2r trends over 1951-2004 and 1951-2011.
.. moduleauthor:: Neil Swart <neil.swart@ec.gc.ca>
"""
import h5py
import cmipdata as cd
import os
os.system('rm -f /tmp/cdo*')
import numpy as np
import scipy as sp
from mpl_toolkits.basemap import Basemap, addcyclic
import matplotlib.pyplot as plt
import matplotlib as mpl
import brewer2mpl
from discrete_cmap import discrete_cmap
from netCDF4 import Dataset,num2date,date2num
plt.ion()
plt.close('all')
pth = '/raid/ra40/data/ncs/cmip5/psl/'
plt.rc('font', size=10)
# to 2011
# HadSLP data is now with reanlyses.
h5f = h5py.File('/raid/ra40/data/ncs/cmip5/sam/reanalysis_trends.h5','r')
# x120 presumably converts a per-month slope to Pa per decade
# (cf. the 'Pa decade$^{-1}$' colorbar label below) -- TODO confirm.
slopes = h5f['psl/1951_2011/rean_psl_trend_1951_2011'][:]*120
#rean = h5f['psl/1951_2011/reanalysis_names'][:]
h5f.close()
# Index 1 selects the HadSLP2r product (per the variable name) -- verify
# against the reanalysis_names dataset.
psl_slope_hadslp_2011 = slopes[:,:,1]
# to 2004
# HadSLP data is now with reanlyses.
h5f = h5py.File('/raid/ra40/data/ncs/cmip5/sam/reanalysis_trends.h5','r')
slopes = h5f['psl/1951_2004/rean_psl_trend_1951_2004'][:]*120
rean = h5f['psl/1951_2004/reanalysis_names'][:]
h5f.close()
psl_slope_hadslp_2004 = slopes[:,:,1]
# 1-degree global grid matching the trend arrays.
dims = {'lat' : np.arange(-89.5,89.6,1),
        'lon' : np.arange(0,360,1)
        }
fig, axa = plt.subplots(3,2, sharex=True, figsize=(7,7))
fig.subplots_adjust(top=0.5, hspace=0.1, wspace=0.05)
# Shared color limits and a discrete diverging colormap.
vmin = -80
vmax = 80
ncols = 11
cmap_anom = brewer2mpl.get_map('RdBu', 'diverging', ncols,
                               reverse=True).mpl_colormap
cmap_anom = discrete_cmap(ncols, cmap_anom)
m =\
Basemap(llcrnrlon=0,llcrnrlat=-90,urcrnrlon=360,urcrnrlat=0,projection='mill'
        , fix_aspect=True)
lons, lats = np.meshgrid(dims['lon'], dims['lat'])
x, y = m(lons, lats)
# Anchor point for the in-axes text labels.
xpt, ypt = m(20,-86)
cot = m.pcolor(x, y, psl_slope_hadslp_2004,vmin=vmin, vmax=vmax,
               cmap=cmap_anom, ax=axa[0,0] )
cot.set_rasterized('True')
axa[0,0].text(xpt, ypt, 'HadSLP2r 2004')
com = m.pcolor(x, y, psl_slope_hadslp_2011,vmin=vmin, vmax=vmax,
               cmap=cmap_anom, ax=axa[1,0] )
com.set_rasterized('True')
# Difference map: effect of extending the trend period from 2004 to 2011.
anom = psl_slope_hadslp_2011 - psl_slope_hadslp_2004
com = m.pcolor(x, y, anom,vmin=vmin, vmax=vmax
               , cmap=cmap_anom,ax=axa[1,1] )
com.set_rasterized('True')
# RMS of the difference over rows 0:89, i.e. lats -89.5..-1.5 (the
# southern hemisphere of the 1-degree grid defined above).
rmse = np.sqrt( np.mean(anom[0:89,:]**2) )
# NOTE(review): 'HaSLP2r' label typo (missing 'd') -- left as-is here since a
# doc-only change must not alter runtime strings.
axa[1,0].text(xpt, ypt, 'HaSLP2r 2011')
axa[1,1].text(xpt, ypt, str(np.round(rmse,2)))
m.drawmeridians(np.arange(0,360,90),labels=[0,0,0,1], linewidth=0,yoffset=-0e6
                , ax=axa[1,1])
m.drawmeridians(np.arange(0,360,90),labels=[0,0,0,1], linewidth=0,yoffset=-0e6
                , ax=axa[1,0])
for i, ax in enumerate(axa.flatten()):
    ax.autoscale(enable=True, axis='both', tight=True)
    m.drawcoastlines(linewidth=1.25, ax=ax)
    m.fillcontinents(color='0.8',ax=ax, zorder=2)
    if i%2 ==0:
        # Parallels labelled only on the left-hand column of panels.
        m.drawparallels(np.arange(-80,81,20),labels=[1,0,0,0], linewidth=0,
                        ax=ax)
# Colorbar axes placed just right of the top-left panel.
box = axa[0,0].get_position()
tl = fig.add_axes([box.x0*1.1 + box.width * 1., box.y0, 0.02, box.height])
bounds = np.linspace(vmin, vmax, ncols)
plt.colorbar(cot, cax=tl, label='Pa decade$^{-1}$',spacing='proportional',
             boundaries=bounds)
# Only three of the six panels are used; delete the empty ones.
fig.delaxes(axa[0,1])
fig.delaxes(axa[2,0])
fig.delaxes(axa[2,1])
plt.savefig('psl_maps_HadSLP2r_1951_to_2004_vs_2011_ending.pdf'
            ,bbox_inches='tight' , dpi=300)
#os.system('eps2pdf psl_maps_HadSLP2r_1951_to_2004_vs_2011_ending.eps')
#os.system('rm -f psl_maps_HadSLP2r_1951_to_2004_vs_2011_ending.eps')
|
gpl-2.0
|
pypot/scikit-learn
|
sklearn/naive_bayes.py
|
128
|
28358
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators"""
    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X
        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_classes, n_samples].
        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """
    def predict(self, X):
        """
        Perform classification on an array of test vectors X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        jll = self._joint_log_likelihood(X)
        # For every sample pick the class with the highest joint
        # log-likelihood (priors are folded in by the subclass).
        return self.classes_[np.argmax(jll, axis=1)]
    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        jll = self._joint_log_likelihood(X)
        # normalize by P(x) = P(f_1, ..., f_n)
        # logsumexp gives log P(x) per sample in a numerically stable way.
        log_prob_x = logsumexp(jll, axis=1)
        return jll - np.atleast_2d(log_prob_x).T
    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        # Probabilities are computed in log space first for stability.
        return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)
    Can perform online updates to model parameters via `partial_fit` method.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
    http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
    Attributes
    ----------
    class_prior_ : array, shape (n_classes,)
        probability of each class.
    class_count_ : array, shape (n_classes,)
        number of training samples observed in each class.
    theta_ : array, shape (n_classes, n_features)
        mean of each feature per class
    sigma_ : array, shape (n_classes, n_features)
        variance of each feature per class
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB()
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB()
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """
    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # Fitting from scratch is a refitting partial_fit over the whole
        # dataset; _refit=True discards any previously accumulated state.
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)
    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.
        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).
        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.
        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.
        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.
        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points whose statistics are folded into (mu, var).
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.
        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        # An empty batch leaves the statistics unchanged.
        if X.shape[0] == 0:
            return mu, var
        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)
        # No prior data: the batch statistics are the running statistics.
        if n_past == 0:
            return new_mu, new_var
        n_total = float(n_past + n_new)
        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total
        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total
        return total_mu, total_var
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)
    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        classes : array-like, shape (n_classes,)
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        _refit: bool
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # Variance floor added to every stored variance so that constant
        # features never produce a zero variance (division by zero below).
        epsilon = 1e-9
        if _refit:
            self.classes_ = None
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.sigma_ = np.zeros((n_classes, n_features))
            self.class_prior_ = np.zeros(n_classes)
            self.class_count_ = np.zeros(n_classes)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Put epsilon back in each time
            self.sigma_[:, :] -= epsilon
        classes = self.classes_
        unique_y = np.unique(y)
        unique_y_in_classes = in1d(unique_y, classes)
        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (y[~unique_y_in_classes], classes))
        # Update the running per-class mean/variance for each class
        # actually present in this batch.
        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]
            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]
            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
                X_i, sw_i)
            self.theta_[i, :] = new_theta
            self.sigma_[i, :] = new_sigma
            self.class_count_[i] += N_i
        # Re-apply the variance floor after the update.
        self.sigma_[:, :] += epsilon
        self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
        return self
    def _joint_log_likelihood(self, X):
        # log P(c) + sum_j log N(x_j | theta_cj, sigma_cj) for every class c.
        check_is_fitted(self, "classes_")
        X = check_array(X)
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)
        # Transpose so rows are samples and columns are classes.
        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data
    Any estimator based on this class should provide:
    __init__
    _joint_log_likelihood(X) as per BaseNB
    """
    def _update_class_log_prior(self, class_prior=None):
        # Recompute class_log_prior_ from either a user-supplied prior,
        # the empirical class frequencies, or a uniform distribution.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_)
                                     - np.log(self.class_count_.sum()))
        else:
            # Uniform prior: log(1 / n_classes) for every class.
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.
        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.
        This is especially useful when the whole dataset is too big to fit in
        memory at once.
        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        classes : array-like, shape = [n_classes]
            List of all the classes that can possibly appear in the y vector.
            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape
        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            # Binary problems are stored with two explicit columns.
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))
        Y = label_binarize(y, classes=self.classes_)
        # label_binarize returns a single column for binary problems;
        # expand to a two-column indicator so per-class counting works.
        if Y.shape[1] == 1:
            Y = np.concatenate((1 - Y, Y), axis=1)
        n_samples, n_classes = Y.shape
        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))
        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)
        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.
        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape
        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        # Binary problems come back as a single column; expand to two.
        if Y.shape[1] == 1:
            Y = np.concatenate((1 - Y, Y), axis=1)
        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            Y *= check_array(sample_weight).T
        class_prior = self.class_prior
        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self
    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # For binary problems only the positive-class row is exposed,
        # mirroring linear-model conventions.
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)
    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """Naive Bayes classifier for multinomially distributed data.

    Suited to classification with discrete features such as word counts
    for text classification; the multinomial distribution normally
    requires integer counts, but fractional counts (e.g. tf-idf) also
    work in practice.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.
    intercept_ : property
        Mirrors ``class_log_prior_`` (linear-model interpretation).
    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.
    coef_ : property
        Mirrors ``feature_log_prob_`` (linear-model interpretation).
    class_count_ : array, shape (n_classes,)
        (Sample-weighted) number of samples per class seen during fitting.
    feature_count_ : array, shape (n_classes, n_features)
        (Sample-weighted) number of samples per (class, feature) seen
        during fitting.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    """
    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Accumulate raw (class, feature) event counts from a batch."""
        data = X.data if issparse(X) else X
        if np.any(data < 0):
            raise ValueError("Input X must be non-negative")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Recompute log P(x_i|y) from the Lidstone-smoothed raw counts."""
        smoothed_counts = self.feature_count_ + self.alpha
        smoothed_totals = smoothed_counts.sum(axis=1)
        self.feature_log_prob_ = (np.log(smoothed_counts)
                                  - np.log(smoothed_totals.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Return log P(c) + log P(x|c) for every sample/class pair."""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        weighted_features = safe_sparse_dot(X, self.feature_log_prob_.T)
        return weighted_features + self.class_log_prior_
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB this handles discrete data, but each feature is
    modelled as a per-class Bernoulli (binary) variable, so explicit
    non-occurrence of a feature is accounted for as well.

    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, optional
        Threshold used to map sample features to booleans. ``None`` means
        the input is presumed to already consist of binary vectors.
    fit_prior : boolean
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size=[n_classes,]
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).
    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).
    class_count_ : array, shape = [n_classes]
        (Sample-weighted) number of samples per class seen during fitting.
    feature_count_ : array, shape = [n_classes, n_features]
        (Sample-weighted) number of samples per (class, feature) seen
        during fitting.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification.
    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering
    with naive Bayes -- Which naive Bayes?
    """
    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior
    def _count(self, X, Y):
        """Accumulate raw (class, feature) occurrence counts from a batch."""
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
    def _update_feature_log_prob(self):
        """Recompute log P(x_i|y) from the smoothed raw counts."""
        # alpha per outcome; each Bernoulli feature has two outcomes,
        # hence 2 * alpha in the per-class denominator.
        numerator = self.feature_count_ + self.alpha
        denominator = self.class_count_ + self.alpha * 2
        self.feature_log_prob_ = (np.log(numerator)
                                  - np.log(denominator.reshape(-1, 1)))
    def _joint_log_likelihood(self, X):
        """Return log P(c) + log P(x|c) for every sample/class pair."""
        check_is_fitted(self, "classes_")
        X = check_array(X, accept_sparse='csr')
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape
        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))
        log_prob = self.feature_log_prob_
        # log P(x_i = 0 | y), derived from log P(x_i = 1 | y).
        log_neg_prob = np.log(1 - np.exp(log_prob))
        # Equivalent to log_prob . X^T + log_neg_prob . (1 - X)^T without
        # materialising the dense (1 - X) matrix.
        jll = safe_sparse_dot(X, (log_prob - log_neg_prob).T)
        jll += self.class_log_prior_ + log_neg_prob.sum(axis=1)
        return jll
|
bsd-3-clause
|
ustroetz/python-osrm
|
osrm/core.py
|
1
|
19119
|
# -*- coding: utf-8 -*-
import numpy as np
from polyline.codec import PolylineCodec
from polyline import encode as polyline_encode
from pandas import DataFrame
from . import RequestConfig
try:
from urllib.request import urlopen, Request
from urllib.parse import quote
except:
from urllib2 import urlopen, Request
from urllib2 import quote
try:
from osgeo.ogr import Geometry
except:
from ogr import Geometry
import json
def _chain(*lists):
for li in lists:
for elem in li:
yield elem
def check_host(host):
    """ Helper function to get the hostname in desired format """
    # A "scheme" is detected exactly as before: both 'http' and '//'
    # appearing anywhere in the string.
    has_scheme = 'http' in host and '//' in host
    # NB: raises IndexError on an empty host, as the original did.
    ends_with_slash = host[len(host) - 1] == '/'
    if ends_with_slash:
        host = host[:len(host) - 1]
    if not has_scheme:
        host = ''.join(['http://', host])
    return host
def match(points, steps=False, overview="simplified", geometry="polyline",
          timestamps=None, radius=None, annotations="false", gaps="split",
          tidy=False, waypoints=None, url_config=RequestConfig):
    """
    Function wrapping OSRM 'match' function, returning the response in JSON
    Parameters
    ----------
    points : list of tuple/list of point
        A sequence of points as (x ,y) where x is longitude and y is latitude.
    steps : bool, optional
        Default is False.
    overview : str, optional
        Query for the geometry overview, either "simplified", "full" or "false"
        (Default: "simplified")
    geometry : str, optional
        Format in which decode the geometry, either "polyline" (ie. not decoded),
        "geojson", "WKT" or "WKB" (default: "polyline").
    timestamps : list of timestamp, optional
        One timestamp per point, in the same order as `points`.
    radius : list of float, optional
        One radius per point (GPS precision, per the OSRM API).
    annotations : bool, optional
    gaps : str, optional
    tidy : bool, optional
    waypoints : list of tuple/list of point, optional
    url_config : osrm.RequestConfig, optional
        Parameters regarding the host, version and profile to use
    Returns
    -------
    dict
        The response from the osrm instance, parsed as a dict
    """
    host = check_host(url_config.host)
    # Base /match request: coordinates serialized as "lon,lat;lon,lat;..."
    # followed by the boolean/string options in the query string.
    url = [
        host, '/match/', url_config.version, '/', url_config.profile, '/',
        ';'.join(
            [','.join([str(coord[0]), str(coord[1])]) for coord in points]),
        "?overview={}&steps={}&geometries={}&annotations={}&gaps={}&tidy={}"
        .format(overview, str(steps).lower(), geometry, annotations, gaps, str(tidy).lower())
        ]
    if radius:
        url.append("&radiuses=")
        url.append(";".join([str(rad) for rad in radius]))
    if timestamps:
        # BUG FIX: this literal had been corrupted to "\xd7tamps=" (an HTML
        # "&times;" entity artifact); the OSRM API parameter is "timestamps".
        url.append("&timestamps=")
        url.append(";".join([str(timestamp) for timestamp in timestamps]))
    if waypoints:
        url.append("&waypoints=")
        url.append(";".join([str(waypoint) for waypoint in waypoints]))
    req = Request("".join(url))
    if url_config.auth:
        req.add_header("Authorization", url_config.auth)
    r = urlopen(req)
    r_json = json.loads(r.read().decode('utf-8'))
    # NOTE(review): this decodes geometries only when the response code is
    # NOT "Ok", which looks inverted -- confirm against the OSRM response
    # format before changing the behavior.
    if "code" not in r_json or "Ok" not in r_json["code"]:
        if 'matchings' in r_json.keys():
            for i, _ in enumerate(r_json['matchings']):
                geom_encoded = r_json["matchings"][i]["geometry"]
                # NOTE(review): the extra /10.0 suggests a precision-6
                # polyline decoded with a precision-5 codec -- TODO confirm.
                geom_decoded = [[point[1] / 10.0,
                                 point[0] / 10.0] for point
                                in PolylineCodec().decode(geom_encoded)]
                r_json["matchings"][i]["geometry"] = geom_decoded
        else:
            print('No matching geometry to decode')
    return r_json
def decode_geom(encoded_polyline):
    """
    Decode an 'encoded polyline algorithm' string into an ogr.Geometry line.

    Parameters
    ----------
    encoded_polyline : str
        The encoded string to decode.

    Returns
    -------
    line : ogr.Geometry
        The line geometry, as an ogr.Geometry instance.
    """
    line = Geometry(2)
    # The codec yields (lat, lon) pairs; ogr points are added as (x, y),
    # i.e. (lon, lat), hence the swap.
    for latitude, longitude in PolylineCodec().decode(encoded_polyline):
        line.AddPoint_2D(longitude, latitude)
    return line
def simple_route(coord_origin, coord_dest, coord_intermediate=None,
                 alternatives=False, steps=False, output="full",
                 geometry='polyline', overview="simplified",
                 annotations='true', continue_straight='default',
                 url_config=RequestConfig, send_as_polyline=True):
    """
    Function wrapping OSRM 'viaroute' function and returning the JSON reponse
    with the route_geometry decoded (in WKT or WKB) if needed.
    Parameters
    ----------
    coord_origin : list/tuple of two floats
        (x ,y) where x is longitude and y is latitude
    coord_dest : list/tuple of two floats
        (x ,y) where x is longitude and y is latitude
    coord_intermediate : list of 2-floats list/tuple
        [(x ,y), (x, y), ...] where x is longitude and y is latitude
    alternatives : bool, optional
        Query (and resolve geometry if asked) for alternatives routes
        (default: False)
    output : str, optional
        Define the type of output (full response or only route(s)), default : "full".
    geometry : str, optional
        Format in which decode the geometry, either "polyline" (ie. not decoded),
        "geojson", "WKT" or "WKB" (default: "polyline").
    annotations : str, optional
    continue_straight : str, optional
    overview : str, optional
        Query for the geometry overview, either "simplified", "full" or "false"
        (Default: "simplified")
    url_config : osrm.RequestConfig, optional
        Parameters regarding the host, version and profile to use
    Returns
    -------
    result : dict
        The result, parsed as a dict, with the geometry decoded in the format
        defined in `geometry`.
    """
    if geometry.lower() not in ('wkt', 'well-known-text', 'text', 'polyline',
                                'wkb', 'well-known-binary', 'geojson'):
        raise ValueError("Invalid output format")
    else:
        # Only geojson is requested from the server natively; every other
        # format is fetched as an encoded polyline and converted locally.
        geom_request = "geojson" if "geojson" in geometry.lower() \
                       else "polyline"
    host = check_host(url_config.host)
    if not send_as_polyline:
        # Coordinates sent verbatim as "lon,lat;lon,lat;...".
        url = [host, "/route/", url_config.version, "/", url_config.profile,
               "/", "{},{}".format(coord_origin[0], coord_origin[1]), ';']
        if coord_intermediate:
            url.append(";".join(
                [','.join([str(i), str(j)]) for i, j in coord_intermediate]))
            url.append(";")
        url.extend([
            '{},{}'.format(coord_dest[0], coord_dest[1]),
            "?overview={}&steps={}&alternatives={}&geometries={}&annotations={}&continue_straight={}".format(
                overview, str(steps).lower(),
                str(alternatives).lower(), geom_request, annotations,
                continue_straight)
            ])
    else:
        # Coordinates sent as one encoded polyline; pt[::-1] swaps the
        # (lon, lat) pairs to (lat, lon) for the polyline encoder.
        coords = [
            pt[::-1] for pt in _chain(
                [coord_origin],
                coord_intermediate if coord_intermediate else [],
                [coord_dest])
            ]
        url = [
            host, "/route/", url_config.version, "/", url_config.profile, "/",
            "polyline(", quote(polyline_encode(coords)), ")",
            "?overview={}&steps={}&alternatives={}&geometries={}&annotations={}&continue_straight={}".format(
                overview, str(steps).lower(),
                str(alternatives).lower(), geom_request, annotations,
                continue_straight)
            ]
    req = Request("".join(url))
    if url_config.auth:
        req.add_header("Authorization", url_config.auth)
    rep = urlopen(req)
    parsed_json = json.loads(rep.read().decode('utf-8'))
    if "Ok" in parsed_json['code']:
        if geometry in ("polyline", "geojson") and output == "full":
            return parsed_json
        elif geometry in ("polyline", "geojson") and output == "routes":
            return parsed_json["routes"]
        else:
            # NOTE(review): these comparisons are exact, so aliases accepted
            # above (e.g. 'well-known-text', 'text', or mixed case) fall
            # through without binding `func`, causing a NameError below --
            # confirm intended inputs are only lowercase 'wkb'/'wkt'.
            if geometry.lower() == "wkb":
                func = Geometry.ExportToWkb
            elif geometry.lower() == "wkt":
                func = Geometry.ExportToWkt
            for route in parsed_json["routes"]:
                route["geometry"] = func(decode_geom(route["geometry"]))
            return parsed_json if output == "full" else parsed_json["routes"]
    else:
        raise ValueError(
            'Error - OSRM status : {} \n Full json reponse : {}'.format(
                parsed_json['code'], parsed_json))
def table(coords_src, coords_dest=None,
          ids_origin=None, ids_dest=None,
          output='np', minutes=False, annotations='duration',
          url_config=RequestConfig, send_as_polyline=True):
    """
    Function wrapping OSRM 'table' function in order to get a matrix of
    time distance as a numpy array or as a DataFrame

    Parameters
    ----------
    coords_src : list
        A list of coord as (longitude, latitude) , like :
             list_coords = [(21.3224, 45.2358),
                            (21.3856, 42.0094),
                            (20.9574, 41.5286)] (coords have to be float)
    coords_dest : list, optional
        A list of coord as (longitude, latitude) , like :
             list_coords = [(21.3224, 45.2358),
                            (21.3856, 42.0094),
                            (20.9574, 41.5286)] (coords have to be float)
    ids_origin : list, optional
        A list of name/id to use to label the source axis of
        the result `DataFrame` (default: None).
    ids_dest : list, optional
        A list of name/id to use to label the destination axis of
        the result `DataFrame` (default: None).
    output : str, optional
        The type of annotated matrice to return (DataFrame or numpy array)
            'raw' for the (parsed) json response from OSRM
            'pandas', 'df' or 'DataFrame' for a DataFrame
            'numpy', 'array' or 'np' for a numpy array (default is "np")
    minutes : bool, optional
        If True (and annotations == 'duration'), convert the values from
        seconds to minutes rounded to 2 decimals (default: False).
    annotations : str, optional
        Either 'duration' (default) or 'distance'
    url_config: osrm.RequestConfig, optional
        Parameters regarding the host, version and profile to use
    send_as_polyline : bool, optional
        If True (default), encode the coordinates with the polyline
        algorithm instead of listing them verbatim in the URL.

    Returns
    -------
    - if output=='raw' : a dict, the parsed json response.
    - if output=='np' : a numpy.ndarray containing the time in minutes,
                        a list of snapped origin coordinates,
                        a list of snapped destination coordinates.
    - if output=='pandas' : a labeled DataFrame containing the time matrix in minutes,
                            a list of snapped origin coordinates,
                            a list of snapped destination coordinates.

    Raises
    ------
    ValueError
        If the OSRM instance does not return an "Ok" status code.
    """
    # Normalize the requested output format to an internal integer code.
    if output.lower() in ('numpy', 'array', 'np'):
        output = 1
    elif output.lower() in ('pandas', 'dataframe', 'df'):
        output = 2
    else:
        output = 3

    host = check_host(url_config.host)
    url = ''.join(
        [host, '/table/', url_config.version, '/', url_config.profile, '/'])

    if not send_as_polyline:
        # Coordinates listed verbatim as "lon,lat;lon,lat;...".
        if not coords_dest:
            # Square matrix: sources double as destinations.
            url = ''.join([
                url,
                ';'.join([','.join([str(coord[0]), str(coord[1])])
                          for coord in coords_src]),
                '?annotations={}'.format(annotations)
            ])
        else:
            # Rectangular matrix: all points are sent, then split into
            # sources/destinations by positional index ranges.
            src_end = len(coords_src)
            dest_end = src_end + len(coords_dest)
            url = ''.join([
                url,
                ';'.join([','.join([str(coord[0]), str(coord[1])])
                          for coord in _chain(coords_src, coords_dest)]),
                '?sources=',
                ';'.join([str(i) for i in range(src_end)]),
                '&destinations=',
                ';'.join([str(j) for j in range(src_end, dest_end)]),
                '&annotations={}'.format(annotations)
            ])
    else:
        # Coordinates sent polyline-encoded (note the (lat, lon) order
        # expected by the polyline algorithm).
        if not coords_dest:
            url = ''.join([
                url,
                "polyline(",
                quote(polyline_encode([(c[1], c[0]) for c in coords_src])),
                ")",
                '?annotations={}'.format(annotations)
            ])
        else:
            src_end = len(coords_src)
            dest_end = src_end + len(coords_dest)
            url = ''.join([
                url,
                "polyline(",
                quote(polyline_encode(
                    [(c[1], c[0]) for c in _chain(coords_src, coords_dest)])),
                ")",
                '?sources=',
                ';'.join([str(i) for i in range(src_end)]),
                '&destinations=',
                ';'.join([str(j) for j in range(src_end, dest_end)]),
                '&annotations={}'.format(annotations)
            ])

    req = Request(url)
    if url_config.auth:
        req.add_header("Authorization", url_config.auth)
    rep = urlopen(req)
    parsed_json = json.loads(rep.read().decode('utf-8'))

    if "code" not in parsed_json or "Ok" not in parsed_json["code"]:
        raise ValueError('No distance table return by OSRM instance')
    elif output == 3:
        # 'raw' output: hand back the parsed response untouched.
        return parsed_json
    else:
        # 'durations' or 'distances' key, depending on `annotations`.
        annoted = np.array(parsed_json['{}s'.format(annotations)], dtype=float)
        # Snapped coordinates actually used by the server.
        new_src_coords = [ft["location"] for ft in parsed_json["sources"]]
        new_dest_coords = None if not coords_dest \
            else [ft["location"] for ft in parsed_json["destinations"]]
        if minutes and annotations == 'duration':  # Conversion in minutes with 2 decimals:
            annoted = np.around((annoted / 60), 2)
        if output == 2:
            # Default axis labels are positional indexes.
            if not ids_origin:
                ids_origin = [i for i in range(len(coords_src))]
            if not ids_dest:
                ids_dest = ids_origin if not coords_dest \
                    else [i for i in range(len(coords_dest))]
            annoted = DataFrame(annoted,
                                index=ids_origin,
                                columns=ids_dest,
                                dtype=float)
        return annoted, new_src_coords, new_dest_coords
def nearest(coord, number=1, url_config=RequestConfig):
    """
    Query the OSRM 'nearest' service and return its parsed JSON response.

    Parameters
    ----------
    coord : list/tuple of two floats
        (x, y) where x is longitude and y is latitude.
    number : int, optional
        How many nearest candidates to request (default: 1).
    url_config : osrm.RequestConfig, optional
        Parameters regarding the host, version and profile to use.

    Returns
    -------
    result : dict
        The response from the osrm instance, parsed as a dict.
    """
    base = check_host(url_config.host)
    query_url = "{}/nearest/{}/{}/{}?number={}".format(
        base, url_config.version, url_config.profile,
        ",".join(str(c) for c in coord), number)
    request = Request(query_url)
    if url_config.auth:
        request.add_header("Authorization", url_config.auth)
    raw_response = urlopen(request).read()
    return json.loads(raw_response.decode('utf-8'))
def trip(coords, steps=False, output="full",
         geometry='polyline', overview="simplified",
         roundtrip=True, source="any", destination="any",
         annotations="false", url_config=RequestConfig, send_as_polyline=True):
    """
    Function wrapping OSRM 'trip' function and returning the JSON reponse
    with the trip geometries decoded (in WKT or WKB) if needed.

    Parameters
    ----------
    coords : list of 2-element sequences
        The (x, y) points to visit, where x is longitude and y is latitude.
    steps : bool, default False
    output : str, default 'full'
        Define the type of output: 'full' for the whole parsed response,
        'trip' for only the "trips" array, 'only_index' for the
        waypoint/trip index mapping.
    geometry : str, optional
        Format in which decode the geometry, either "polyline" (ie. not decoded),
        "geojson", "WKT" or "WKB" (default: "polyline").
    overview : str, optional
        Query for the geometry overview, either "simplified", "full" or "false"
        (Default: "simplified")
    roundtrip : bool, optional
    source : str, optional
    destination : str, optional
    annotations : str, optional
    url_config : osrm.RequestConfig, optional
        Parameters regarding the host, version and profile to use

    Returns
    -------
    - if 'only_index' : a dict containing respective indexes
                        of trips and waypoints
    - if 'raw' : the original json returned by OSRM
    - if 'WKT' : the json returned by OSRM with the trip geometries converted
                 in WKT format
    - if 'WKB' : the json returned by OSRM with the trip geometries converted
                 in WKB format

    Raises
    ------
    ValueError
        If `geometry` is not a recognized format, or if the OSRM instance
        returns a non-"Ok" status code.
    """
    if geometry.lower() not in ('wkt', 'well-known-text', 'text', 'polyline',
                                'wkb', 'well-known-binary', 'geojson'):
        raise ValueError("Invalid output format")
    # The server only understands "polyline" or "geojson"; WKT/WKB are
    # decoded locally from the polyline afterwards.
    geom_request = "geojson" if "geojson" in geometry.lower() \
        else "polyline"
    host = check_host(url_config.host)
    coords_request = \
        "".join(['polyline(',
                 quote(polyline_encode([(c[1], c[0]) for c in coords])),
                 ')']) \
        if send_as_polyline \
        else ';'.join([','.join([str(c[0]), str(c[1])]) for c in coords])
    url = ''.join([
        host, '/trip/', url_config.version, '/', url_config.profile, '/',
        coords_request,
        '?steps={}'.format(str(steps).lower()),
        '&geometries={}'.format(geom_request),
        '&overview={}'.format(overview),
        '&roundtrip={}'.format(str(roundtrip).lower()),
        '&source={}'.format(source),
        '&destination={}'.format(destination),
        '&annotations={}'.format(annotations)
    ])
    req = Request(url)
    if url_config.auth:
        req.add_header("Authorization", url_config.auth)
    rep = urlopen(req)
    parsed_json = json.loads(rep.read().decode('utf-8'))

    if "Ok" in parsed_json['code']:
        if "only_index" in output:
            return [
                {"waypoint": i["waypoint_index"], "trip": i["trips_index"]}
                for i in parsed_json['waypoints']
            ]
        if geometry in ("polyline", "geojson") and output == "full":
            return parsed_json
        elif geometry in ("polyline", "geojson") and output == "trip":
            return parsed_json["trips"]
        else:
            func = Geometry.ExportToWkb if geometry.lower() == "wkb" \
                else Geometry.ExportToWkt
            for trip_route in parsed_json["trips"]:
                trip_route["geometry"] = func(decode_geom(
                    trip_route["geometry"]))
            # BUG FIX: a 'trip' response carries its routes under the
            # "trips" key — there is no "routes" key, so the previous
            # `parsed_json["routes"]` raised KeyError whenever WKT/WKB
            # geometries were requested with output == "trip".
            return parsed_json if output == "full" else parsed_json["trips"]
    else:
        raise ValueError(
            'Error - OSRM status : {} \n Full json reponse : {}'
            .format(parsed_json['code'], parsed_json))
|
mit
|
Trigition/MTG-DataScraper
|
scripts/card_types.py
|
1
|
1497
|
import pandas as pd
from collections import OrderedDict
def split_type(subtype_string):
    """Decode a UTF-8 byte string and split it on single spaces."""
    decoded = subtype_string.decode('utf8')
    return decoded.split(' ')
def get_all_supertypes(data):
    """Return the sorted, UTF-8 encoded unique supertype strings."""
    unique_values = data['supertypes'].unique()
    return sorted(value.encode('utf8') for value in unique_values)
def get_subtypes(supertype, data):
    """Collect the sorted set of subtypes among cards of `supertype`.

    Rows with a missing 'subtypes' cell are ignored.
    """
    matching = data[data['supertypes'] == supertype]
    matching = matching.dropna(subset=['subtypes'])
    collected = set()
    for _, record in matching.iterrows():
        collected.update(split_type(record['subtypes']))
    return sorted(collected)
def get_all_types(data, column):
    """Return the sorted set of all UTF-8 encoded type tokens in `column`.

    Rows with a missing cell in `column` are ignored.
    """
    non_null = data.dropna(subset=[column])
    found = set()
    for _, record in non_null.iterrows():
        for token in split_type(record[column]):
            found.add(token.encode('utf8'))
    return sorted(found)
def get_join_table(cards, column):
    """Build card<->type join rows for every card's `column` tokens.

    Each row maps the card name to one type token together with its
    1-based position within the card's type list.
    """
    rows = []
    for _, card in cards.iterrows():
        cell = card[column]
        # pandas represents a missing cell as a float NaN; skip those.
        if type(cell) is float:
            continue
        for order, subtype in enumerate(split_type(cell), start=1):
            rows.append({'card_id': card['name'],
                         'type_id': subtype,
                         'type_order': str(order)})
    return rows
|
mit
|
linan7788626/brut
|
figures/cluster_confusion.py
|
2
|
2014
|
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
matplotlib.rcParams['axes.grid'] = False
matplotlib.rcParams['axes.facecolor'] = '#ffffff'
from scipy.ndimage import label
from bubbly.cluster import merge
from bubbly.field import get_field
from bubbly.util import scale
from bubbly.dr1 import bubble_params
def plot_stamps(stamps, **kwargs):
    """Draw each stamp as a square patch on the current axes.

    A stamp is indexed as (?, l, b, ..., radius); a square of side
    2*radius centred on (l, b) is added.  When a 'label' kwarg is given,
    an invisible line is plotted so the patches appear in the legend.
    """
    for key, default in (('facecolor', 'none'),
                         ('edgecolor', 'b'),
                         ('alpha', .1)):
        kwargs.setdefault(key, default)
    legend_label = kwargs.pop('label', None)

    axes = plt.gca()
    for stamp in stamps:
        radius = stamp[-1]
        patch = Rectangle((stamp[1] - radius, stamp[2] - radius),
                          width=2 * radius, height=2 * radius, **kwargs)
        axes.add_patch(patch)

    if legend_label is not None:
        plt.plot([np.nan], [np.nan], '-', color=kwargs['edgecolor'],
                 label=legend_label)
def main():
    """Render the Brut-vs-MWP bubble comparison panel around l=35."""
    data = json.load(open('../models/l035_scores.json'))
    stamps = np.array(data['stamps'])
    scores = np.array(data['scores'])

    # Keep only confident detections inside the longitude/latitude window.
    l = stamps[:, 1]
    b = stamps[:, 2]
    good = (scores > .1) & (l < 35.17) & (l > 34.9) & (b > -.9) & (b < -0.6)
    stamps = stamps[good]
    scores = scores[good]
    # Merge overlapping detections into single cluster stamps.
    merged = merge(stamps, scores)

    # Milky Way Project catalog bubbles restricted to the same window.
    mwp = np.array(bubble_params())
    mwp = mwp[(mwp[:, 1] < 35.3) & (mwp[:, 1] > 35)]

    f = get_field(35)
    # Pixels with no MIPS coverage are painted white below.
    bad = f.mips == 0
    # Two-color composite: green = IRAC band 4, red = MIPS.
    g = scale(f.i4, limits=[30, 99.8])
    r = scale(f.mips, limits=[30, 99.7])
    r[bad] = 255
    b = r * 0
    im = np.dstack((r, g, b))

    plt.figure(dpi=200, tight_layout=True)
    # Galactic longitude increases to the left, hence the reversed extent.
    plt.imshow(im, extent=[36, 34, -1, 1], interpolation="bicubic")
    plot_stamps(merged, edgecolor='#7570b3', linewidth=2, label='Brut')
    plot_stamps(mwp, edgecolor='#e7298a', linewidth=2, label='MWP')
    plt.xlim(35.2, 35)
    plt.ylim(-.825, -.625)
    plt.legend(loc='upper right')
    plt.xlabel("$\ell$ ($^\circ$)")
    plt.ylabel("b ($^\circ$)")
    plt.savefig('cluster_confusion.eps')
# Entry point: regenerate the cluster-confusion figure when run directly.
if __name__ == "__main__":
    main()
|
mit
|
h2educ/scikit-learn
|
sklearn/preprocessing/tests/test_imputation.py
|
213
|
11911
|
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
                      strategy, statistics, missing_values):
    """Utility function for testing imputation for a given strategy.

    Test:
        - along the two axes
        - with dense and sparse arrays

    Check that:
        - the statistics (mean, median, mode) are correct
        - the missing values are imputed correctly"""
    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "axis = {0}, sparse = {1}" % (strategy, missing_values)

    # Normal matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, False))
    assert_array_equal(X_trans, X_true, err_msg.format(0, False))

    # Normal matrix, axis = 1
    # (fit on the transpose so rows play the role of features)
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(X.transpose())
    if np.isnan(statistics).any():
        # A NaN statistic means a row cannot be imputed along axis=1,
        # which must raise instead of silently producing NaNs.
        assert_raises(ValueError, imputer.transform, X.copy().transpose())
    else:
        X_trans = imputer.transform(X.copy().transpose())
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, False))

    # Sparse matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, True))
    assert_array_equal(X_trans, X_true, err_msg.format(0, True))

    # Sparse matrix, axis = 1
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(sparse.csc_matrix(X.transpose()))
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform,
                      sparse.csc_matrix(X.copy().transpose()))
    else:
        X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
        if sparse.issparse(X_trans):
            X_trans = X_trans.toarray()
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, True))
def test_imputation_shape():
    """Imputation must preserve the (n_samples, n_features) shape."""
    data = np.random.randn(10, 2)
    data[::2] = np.nan  # every other row is entirely missing
    for strategy in ('mean', 'median', 'most_frequent'):
        imp = Imputer(strategy=strategy)
        for matrix in (data, sparse.csr_matrix(data)):
            assert_equal(imp.fit_transform(matrix).shape, (10, 2))
def test_imputation_mean_median_only_zero():
    """Mean/median imputation when 0 is the missing-value marker."""
    X = np.array([
        [np.nan, 0, 0, 0, 5],
        [np.nan, 1, 0, np.nan, 3],
        [np.nan, 2, 0, 0, 0],
        [np.nan, 6, 0, 5, 13],
    ])
    # Columns that are all-missing (NaN statistic) get dropped, so the
    # expected output keeps only the two informative columns.
    mean_expected = np.array([
        [3, 5],
        [1, 3],
        [2, 7],
        [6, 13],
    ])
    mean_statistics = [np.nan, 3, np.nan, np.nan, 7]
    _check_statistics(X, mean_expected, "mean", mean_statistics, 0)

    # Behaviour of median with NaN is undefined, e.g. different results in
    # np.median and np.ma.median
    X_for_median = X[:, [0, 1, 2, 4]]
    median_expected = np.array([
        [2, 5],
        [1, 3],
        [2, 5],
        [6, 13],
    ])
    median_statistics = [np.nan, 2, np.nan, 5]
    _check_statistics(X_for_median, median_expected, "median",
                      median_statistics, 0)
def test_imputation_mean_median():
    # Test imputation using the mean and median strategies, when
    # missing_values != 0.
    rng = np.random.RandomState(0)

    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)

    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0] + 1)
    # Make roughly half of the values negative so means/medians are
    # non-trivial.
    values[4::2] = - values[4::2]

    # (strategy, missing marker, expected-statistic function) triples.
    # The functions receive the zeros, the real values and the missing
    # placeholders of a column.
    tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
             ("mean", 0, lambda z, v, p: np.mean(v)),
             ("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
             ("median", 0, lambda z, v, p: np.median(v))]

    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])

        # Create a matrix X with columns
        #    - with only zeros,
        #    - with only missing values
        #    - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            # Cubic/quadratic ramps so the zero/missing/value mix varies
            # from column to column.
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec
                                    - (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values

            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]

            true_statistics[j] = true_value_fun(z, v, p)

            # Create the columns
            X[:, j] = np.hstack((v, z, p))

            if 0 == test_missing_values:
                # Zeros count as missing too, so they are also replaced
                # by the column statistic in the expected output.
                X_true[:, j] = np.hstack((v,
                                          np.repeat(
                                              true_statistics[j],
                                              nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack((v,
                                          z,
                                          np.repeat(true_statistics[j],
                                                    nb_missing_values)))

            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])

        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)

        X_true = X_true[:, cols_to_keep]

        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)
def test_imputation_median_special_cases():
    """Median imputation boundary cases (odd/even counts, signs, zeros)."""
    X = np.array([
        [0, np.nan, np.nan],   # odd: implicit zero
        [5, np.nan, np.nan],   # odd: explicit nonzero
        [0, 0, np.nan],        # even: average two zeros
        [-5, 0, np.nan],       # even: avg zero and neg
        [0, 5, np.nan],        # even: avg zero and pos
        [4, 5, np.nan],        # even: avg nonzeros
        [-4, -5, np.nan],      # even: avg negatives
        [-1, 2, np.nan],       # even: crossing neg and pos
    ]).transpose()

    expected = np.array([
        [0, 0, 0],
        [5, 5, 5],
        [0, 0, 0],
        [-5, 0, -2.5],
        [0, 5, 2.5],
        [4, 5, 4.5],
        [-4, -5, -4.5],
        [-1, 2, .5],
    ]).transpose()

    medians = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
    _check_statistics(X, expected, "median", medians, 'NaN')
def test_imputation_most_frequent():
    """Imputation with the most-frequent strategy (missing value: -1)."""
    with_missing = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])
    # The all-missing first column is dropped from the expected output.
    expected = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])

    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, Imputer will need to be updated
    # to be consistent with the new (correct) behaviour
    _check_statistics(with_missing, expected, "most_frequent",
                      [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
    """Imputer must be usable as a pipeline step under grid search."""
    estimator = Pipeline([
        ('imputer', Imputer(missing_values=0)),
        ('tree', tree.DecisionTreeRegressor(random_state=0)),
    ])
    param_grid = {
        'imputer__strategy': ["mean", "median", "most_frequent"],
        'imputer__axis': [0, 1],
    }

    size = 100
    features = sparse_random_matrix(size, size, density=0.10)
    target = sparse_random_matrix(size, 1, density=0.10).toarray()
    grid_search.GridSearchCV(estimator, param_grid).fit(features, target)
def test_imputation_pickle():
    """A fitted imputer must survive a pickle round-trip unchanged."""
    import pickle

    size = 100
    X = sparse_random_matrix(size, size, density=0.10)

    for strategy in ("mean", "median", "most_frequent"):
        fitted = Imputer(missing_values=0, strategy=strategy)
        fitted.fit(X)

        restored = pickle.loads(pickle.dumps(fitted))

        assert_array_equal(fitted.transform(X.copy()),
                           restored.transform(X.copy()),
                           "Fail to transform the data after pickling "
                           "(strategy = %s)" % (strategy))
def test_imputation_copy():
    """Check when imputation copies its input and when it works in place.

    The strategy: transform, mutate the result, and check whether the
    mutation is (or is not) visible in the original input.
    """
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)

    # copy=True, dense => copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_false(np.all(X == Xt))

    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))

    # copy=False, dense => no copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_true(np.all(X == Xt))

    # copy=False, sparse csr, axis=1 => no copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))

    # copy=False, sparse csc, axis=0 => no copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))

    # copy=False, sparse csr, axis=0 => copy
    # (the format conversion forces a copy despite copy=False)
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))

    # copy=False, sparse csc, axis=1 => copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))

    # copy=False, sparse csr, axis=1, missing_values=0 => copy
    # (imputing zeros densifies the matrix, which implies a copy)
    X = X_orig.copy()
    imputer = Imputer(missing_values=0, strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    assert_false(sparse.issparse(Xt))

    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
    # made, even if copy=False.
|
bsd-3-clause
|
rerthal/mc886
|
proj03/proj03.py
|
1
|
4044
|
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.cross_validation import StratifiedKFold
from sklearn.neighbors import NeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
def print_result(rates):
    # Print "mean +/- std" of the accuracy rates, truncated to whole percent.
    # NOTE(review): Python 2 print statement — this module targets Python 2.
    print int(np.mean(rates) * 100), '+/-', int(np.std(rates) * 100)
def read_data(file_name):
    # Read comma-separated records: all columns but the last are float
    # features; the last column is the class label ('M' -> 1, else 0).
    cast_int = lambda x: 1 if x == 'M' else 0
    # NOTE(review): the file handle is never closed, and `map` returns a
    # lazy iterator on Python 3 — this code assumes Python 2 list-`map`.
    lines = open(file_name,'r').readlines()
    # line[0:-1] drops the trailing newline before splitting.
    records = [line[0:-1].split(',') for line in lines]
    data = [map(float, record[0:-1]) for record in records]
    labels = [cast_int(record[-1]) for record in records]
    return (data, labels)
def correct_predictions(classifier, test):
    """Count test samples whose predicted label equals the true label.

    `test` is a (samples, labels) pair of equal-length sequences;
    `classifier.predict` is expected to return a sequence whose first
    element is the predicted label.
    """
    hits = 0
    for position, sample in enumerate(test[0]):
        if classifier.predict(sample)[0] == test[1][position]:
            hits += 1
    return hits
def best_grid(data, grid, Classifier):
    """Pick the hyperparameter in `grid` with the most correct
    cross-validated predictions.

    Parameters
    ----------
    data : (samples, labels) pair of equal-length sequences
    grid : iterable of candidate hyperparameter values
    Classifier : callable mapping a hyperparameter to an unfitted estimator

    Returns
    -------
    The grid value with the highest total number of correct predictions
    over a stratified 5-fold split (0 if the grid is empty).
    """
    best_k = 0
    highest_matches = 0
    for K in grid:
        matches = 0
        for train_index, test_index in StratifiedKFold(data[1], 5):
            # BUG FIX: the original built the folds with
            # `if j not in train_index` / `if j not in test_index`, i.e. it
            # took the *complement* of each index set — training on the
            # held-out fold and testing on the training fold.  Use the
            # indices StratifiedKFold actually yields.
            train = (
                [data[0][j] for j in train_index],
                [data[1][j] for j in train_index]
            )
            test = (
                [data[0][j] for j in test_index],
                [data[1][j] for j in test_index]
            )
            classifier = Classifier(K)
            classifier.fit(train[0], train[1])
            matches += correct_predictions(classifier, test)
        if matches > highest_matches:
            best_k = K
            highest_matches = matches
    return best_k
def rates(data, grid, Classifier):
    """Nested cross-validation: per outer fold, pick the best grid value
    on the training fold (via `best_grid`), then report accuracy on the
    held-out fold.

    Returns
    -------
    list of float
        One accuracy rate in [0, 1] per outer fold.
    """
    rates = []
    for train_index, test_index in StratifiedKFold(data[1], 5):
        # BUG FIX: same inverted-fold defect as in `best_grid` — the
        # original selected the complement of each index set.  Use the
        # yielded indices directly.
        train = (
            [data[0][j] for j in train_index],
            [data[1][j] for j in train_index]
        )
        test = (
            [data[0][j] for j in test_index],
            [data[1][j] for j in test_index]
        )
        hiperparameter = best_grid(train, grid, Classifier)
        classifier = Classifier(hiperparameter)
        classifier.fit(train[0], train[1])
        rate = correct_predictions(classifier, test) / float(len(test[0]))
        rates.append(rate)
    return rates
# --- Experiment driver -------------------------------------------------
# Load the dataset twice: once normalized as-is, once normalized and
# PCA-rotated, so each model family can be evaluated on both.
data = read_data("./data.csv")
data = (normalize(data[0]).tolist(), data[1])
data_pca = read_data("./data.csv")
data_pca = (normalize(data_pca[0]).tolist(), data_pca[1])
pca = PCA()
pca.fit(data_pca[0])
data_pca = (pca.transform(data_pca[0]).tolist(), data_pca[1])

# Hyperparameter grids for each model family.
kneighbors_grid = [1, 3, 5, 11, 21, 31]
svm_linear_grid = [0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]
# RBF grid: every (C, gamma) pair drawn from the linear grid values.
svm_rbf_grid = []
for i in svm_linear_grid:
    for j in svm_linear_grid:
        svm_rbf_grid.append([i, j])
randforest_grid = [2, 3, 5, 10, 20, 40, 60]

kneighbors_rates = rates(data, kneighbors_grid, lambda K: NeighborsClassifier(n_neighbors=K))
print_result(kneighbors_rates)
kneighbors_pca_rates = rates(data_pca, kneighbors_grid, lambda K: NeighborsClassifier(n_neighbors=K))
print_result(kneighbors_pca_rates)
svm_linear_rates = rates(data, svm_linear_grid, lambda C: SVC(C=C, kernel='linear'))
print_result(svm_linear_rates)
svm_linear_pca_rates = rates(data_pca, svm_linear_grid, lambda C: SVC(C=C, kernel='linear'))
print_result(svm_linear_pca_rates)
svm_rbf_rates = rates(data, svm_rbf_grid, lambda C: SVC(C=C[0], gamma=C[1], kernel='rbf'))
print_result(svm_rbf_rates)
svm_rbf_pca_rates = rates(data_pca, svm_rbf_grid, lambda C: SVC(C=C[0], gamma=C[1], kernel='rbf'))
print_result(svm_rbf_pca_rates)
randforest_rates = rates(data, randforest_grid, lambda C: RandomForestClassifier(max_depth=C))
# BUG FIX: the two random-forest results previously re-printed the k-NN
# rates (copy-paste error), so the forest scores were never reported.
print_result(randforest_rates)
randforest_pca_rates = rates(data_pca, randforest_grid, lambda C: RandomForestClassifier(max_depth=C))
print_result(randforest_pca_rates)
|
gpl-3.0
|
vega/ipython-vega
|
vega/tests/test_outputs.py
|
4
|
1203
|
import pytest
import pandas as pd
from .. import Vega, VegaLite
# Shared fixtures: the same three (x, y) points, once as a DataFrame and
# once in Vega-Lite inline-JSON form.
PANDAS_DATA = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
JSON_DATA = {
    "values": [
        {"x": 1, "y": 4},
        {"x": 2, "y": 5},
        {"x": 3, "y": 6}
    ]
}

# Minimal Vega-Lite scatter-plot spec over the fixture columns.
VEGALITE_SPEC = {
    "mark": "circle",
    "encoding": {
        "x": {"field": "x", "type": "quantitative"},
        "y": {"field": "y", "type": "quantitative"}
    }
}

# TODO: use an actual Vega spec
VEGA_SPEC = VEGALITE_SPEC
def test_vegalite_output():
    """Identical JS must be produced however the data is supplied."""
    # TODO: somehow test that output is valid HTML/JS?
    embedded_spec = VEGALITE_SPEC.copy()
    embedded_spec['data'] = JSON_DATA

    # Three equivalent ways of specifying the data.
    variants = (
        VegaLite(VEGALITE_SPEC, PANDAS_DATA),          # as a DataFrame
        VegaLite(VEGALITE_SPEC, JSON_DATA['values']),  # as a list of records
        VegaLite(embedded_spec),                       # embedded in the spec
    )
    rendered = [v._generate_js(id='ABC', sort_keys=True) for v in variants]
    assert rendered[0] == rendered[1] == rendered[2]
def test_vega_output():
    """Smoke test: generating JS for a Vega spec must not raise."""
    # TODO: use an actual vega spec
    frame = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    Vega(VEGA_SPEC, frame)._generate_js(id='ABC')
|
bsd-3-clause
|
eriklindernoren/ML-From-Scratch
|
mlfromscratch/unsupervised_learning/generative_adversarial_network.py
|
1
|
5842
|
from __future__ import print_function, division
from sklearn import datasets
import math
import matplotlib.pyplot as plt
import numpy as np
import progressbar
from sklearn.datasets import fetch_mldata
from mlfromscratch.deep_learning.optimizers import Adam
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
from mlfromscratch.deep_learning.layers import Dense, Dropout, Flatten, Activation, Reshape, BatchNormalization
from mlfromscratch.deep_learning import NeuralNetwork
class GAN():
    """A Generative Adversarial Network with deep fully-connected neural nets as
    Generator and Discriminator.

    Training Data: MNIST Handwritten Digits (28x28 images)
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        # Images are handled flattened (784-dim vectors).
        self.img_dim = self.img_rows * self.img_cols
        # Dimensionality of the generator's input noise vector.
        self.latent_dim = 100

        optimizer = Adam(learning_rate=0.0002, b1=0.5)
        loss_function = CrossEntropy

        # Build the discriminator
        self.discriminator = self.build_discriminator(optimizer, loss_function)

        # Build the generator
        self.generator = self.build_generator(optimizer, loss_function)

        # Build the combined model: generator layers followed by the
        # discriminator layers, used to train the generator against the
        # (frozen) discriminator.
        self.combined = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        self.combined.layers.extend(self.generator.layers)
        self.combined.layers.extend(self.discriminator.layers)

        print ()
        self.generator.summary(name="Generator")
        self.discriminator.summary(name="Discriminator")

    def build_generator(self, optimizer, loss_function):
        """Fully-connected generator: latent noise -> 784-dim image."""
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

        model.add(Dense(256, input_shape=(self.latent_dim,)))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        # tanh output matches the [-1, 1] rescaling applied in train().
        model.add(Dense(self.img_dim))
        model.add(Activation('tanh'))

        return model

    def build_discriminator(self, optimizer, loss_function):
        """Fully-connected discriminator: image -> softmax over {real, fake}."""
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)

        model.add(Dense(512, input_shape=(self.img_dim,)))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.5))
        model.add(Dense(256))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.5))
        # Two-class softmax output: [1, 0] = real, [0, 1] = generated.
        model.add(Dense(2))
        model.add(Activation('softmax'))

        return model

    def train(self, n_epochs, batch_size=128, save_interval=50):
        """Alternate discriminator and generator updates on MNIST batches.

        Saves a grid of generated samples every `save_interval` epochs.
        """
        # NOTE(review): fetch_mldata was removed in scikit-learn 0.22;
        # fetch_openml('mnist_784') is the modern equivalent — confirm the
        # targeted scikit-learn version.
        mnist = fetch_mldata('MNIST original')

        X = mnist.data
        y = mnist.target

        # Rescale [-1, 1]
        X = (X.astype(np.float32) - 127.5) / 127.5

        half_batch = int(batch_size / 2)

        for epoch in range(n_epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            self.discriminator.set_trainable(True)

            # Select a random half batch of images
            idx = np.random.randint(0, X.shape[0], half_batch)
            imgs = X[idx]

            # Sample noise to use as generator input
            noise = np.random.normal(0, 1, (half_batch, self.latent_dim))

            # Generate a half batch of images
            gen_imgs = self.generator.predict(noise)

            # Valid = [1, 0], Fake = [0, 1]
            valid = np.concatenate((np.ones((half_batch, 1)), np.zeros((half_batch, 1))), axis=1)
            fake = np.concatenate((np.zeros((half_batch, 1)), np.ones((half_batch, 1))), axis=1)

            # Train the discriminator
            d_loss_real, d_acc_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * (d_loss_real + d_loss_fake)
            d_acc = 0.5 * (d_acc_real + d_acc_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # We only want to train the generator for the combined model
            self.discriminator.set_trainable(False)

            # Sample noise and use as generator input
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # The generator wants the discriminator to label the generated samples as valid
            valid = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=1)

            # Train the generator
            g_loss, g_acc = self.combined.train_on_batch(noise, valid)

            # Display the progress
            print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, acc: %.2f%%]" % (epoch, d_loss, 100*d_acc, g_loss, 100*g_acc))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)

    def save_imgs(self, epoch):
        """Save an r x c grid of generated digits to mnist_<epoch>.png."""
        r, c = 5, 5 # Grid size
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        # Generate images and reshape to image shape
        gen_imgs = self.generator.predict(noise).reshape((-1, self.img_rows, self.img_cols))

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Generative Adversarial Network")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i,j].imshow(gen_imgs[cnt,:,:], cmap='gray')
                axs[i,j].axis('off')
                cnt += 1
        fig.savefig("mnist_%d.png" % epoch)
        plt.close()
# Entry point: train the GAN, saving sample grids every 400 epochs.
if __name__ == '__main__':
    gan = GAN()
    gan.train(n_epochs=200000, batch_size=64, save_interval=400)
|
mit
|
MJuddBooth/pandas
|
pandas/util/_decorators.py
|
1
|
12592
|
from functools import wraps
import inspect
from textwrap import dedent
import warnings
from pandas._libs.properties import cache_readonly # noqa
from pandas.compat import PY2, signature
def deprecate(name, alternative, version, alt_name=None,
              klass=None, stacklevel=2, msg=None):
    """
    Return a new function that emits a deprecation warning on use.

    To use this method for a deprecated function, another function
    `alternative` with the same signature must exist. The deprecated
    function will emit a deprecation warning, and in the docstring
    it will contain the deprecation directive with the provided version
    so it can be detected for future removal.

    Parameters
    ----------
    name : str
        Name of function to deprecate.
    alternative : func
        Function to use instead.
    version : str
        Version of pandas in which the method has been deprecated.
    alt_name : str, optional
        Name to use in preference of alternative.__name__.
    klass : Warning, default FutureWarning
    stacklevel : int, default 2
    msg : str
        The message to display in the warning.
        Default is '{name} is deprecated. Use {alt_name} instead.'
    """
    alt_name = alt_name or alternative.__name__
    klass = klass or FutureWarning
    warning_msg = msg or '{} is deprecated, use {} instead'.format(name,
                                                                   alt_name)

    # The wrapper keeps `alternative`'s signature/metadata via @wraps and
    # only adds the warning before delegating.
    @wraps(alternative)
    def wrapper(*args, **kwargs):
        warnings.warn(warning_msg, klass, stacklevel=stacklevel)
        return alternative(*args, **kwargs)

    # adding deprecated directive to the docstring
    msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
    doc_error_msg = ('deprecate needs a correctly formatted docstring in '
                     'the target function (should have a one liner short '
                     'summary, and opening quotes should be in their own '
                     'line). Found:\n{}'.format(alternative.__doc__))

    # when python is running in optimized mode (i.e. `-OO`), docstrings are
    # removed, so we check that a docstring with correct formatting is used
    # but we allow empty docstrings
    if alternative.__doc__:
        if alternative.__doc__.count('\n') < 3:
            # Need at least a summary line, a blank line and a body.
            raise AssertionError(doc_error_msg)
        # Split into: leading blank, one-line summary, blank, rest-of-doc.
        empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3)
        if empty1 or empty2 and not summary:
            raise AssertionError(doc_error_msg)
        # Rebuild the docstring with a Sphinx ``.. deprecated::`` directive
        # inserted right after the summary.
        wrapper.__doc__ = dedent("""
        {summary}

        .. deprecated:: {depr_version}
            {depr_msg}

        {rest_of_docstring}""").format(summary=summary.strip(),
                                       depr_version=version,
                                       depr_msg=msg,
                                       rest_of_docstring=dedent(doc))

    return wrapper
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
    """
    Decorator to deprecate a keyword argument of a function.

    Parameters
    ----------
    old_arg_name : str
        Name of the argument being deprecated.
    new_arg_name : str or None
        Name of the preferred replacement argument. Pass ``None`` to warn
        that ``old_arg_name`` is deprecated with no replacement.
    mapping : dict or callable, optional
        Translates old argument values to new ones. A callable must do
        its own value checking; values not found in a dict are forwarded
        unchanged.
    stacklevel : int, default 2
        Stack level forwarded to ``warnings.warn``.

    Raises
    ------
    TypeError
        At decoration time if ``mapping`` is neither dict-like nor
        callable, or at call time if both the old and the new keyword
        are supplied.

    Examples
    --------
    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    ... def f(columns=''):
    ...     print(columns)
    ...
    >>> f(columns='should work ok')
    should work ok
    """
    if mapping is not None and not hasattr(mapping, 'get') and \
            not callable(mapping):
        raise TypeError("mapping from old to new argument values "
                        "must be dict or callable!")

    def _deprecate_kwarg(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # NOTE: an explicit old_arg_name=None is indistinguishable from
            # "not passed" here and is silently dropped.
            old_value = kwargs.pop(old_arg_name, None)

            if new_arg_name is None and old_value is not None:
                # Keyword is going away entirely: warn, then forward as-is.
                warning = (
                    "the '{old_name}' keyword is deprecated and will be "
                    "removed in a future version. "
                    "Please take steps to stop the use of '{old_name}'"
                ).format(old_name=old_arg_name)
                warnings.warn(warning, FutureWarning, stacklevel=stacklevel)
                kwargs[old_arg_name] = old_value
                return func(*args, **kwargs)

            if old_value is not None:
                if mapping is None:
                    new_value = old_value
                    warning = ("the '{old_name}' keyword is deprecated, "
                               "use '{new_name}' instead"
                               ).format(old_name=old_arg_name,
                                        new_name=new_arg_name)
                else:
                    # Translate the old value through the mapping.
                    if hasattr(mapping, 'get'):
                        new_value = mapping.get(old_value, old_value)
                    else:
                        new_value = mapping(old_value)
                    warning = ("the {old_name}={old_val!r} keyword is "
                               "deprecated, use {new_name}={new_val!r} "
                               "instead"
                               ).format(old_name=old_arg_name,
                                        old_val=old_value,
                                        new_name=new_arg_name,
                                        new_val=new_value)
                warnings.warn(warning, FutureWarning, stacklevel=stacklevel)
                # Supplying both keywords is ambiguous -- refuse it.
                if kwargs.get(new_arg_name, None) is not None:
                    raise TypeError(
                        "Can only specify '{old_name}' or '{new_name}', "
                        "not both".format(old_name=old_arg_name,
                                          new_name=new_arg_name))
                kwargs[new_arg_name] = new_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate_kwarg
def rewrite_axis_style_signature(name, extra_params):
    """
    Decorator rewriting the decorated method's *reported* signature.

    The advertised signature becomes ``(self, <name>=None, index=None,
    columns=None, axis=None, *extra_params)``; the runtime call is
    forwarded untouched, only introspection changes.

    Parameters
    ----------
    name : str
        Name of the leading data parameter after ``self``.
    extra_params : sequence of (str, object)
        ``(parameter_name, default)`` pairs appended at the end.
    """
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        # inspect.Signature objects only exist on Python 3.
        if not PY2:
            pos_or_kw = inspect.Parameter.POSITIONAL_OR_KEYWORD
            new_params = [inspect.Parameter('self', pos_or_kw)]
            for fixed_name in (name, 'index', 'columns', 'axis'):
                new_params.append(
                    inspect.Parameter(fixed_name, pos_or_kw, default=None))
            for extra_name, extra_default in extra_params:
                new_params.append(
                    inspect.Parameter(extra_name, pos_or_kw,
                                      default=extra_default))
            # NOTE: assigned on `func`, not `wrapper`;
            # inspect.signature(wrapper) still finds it through the
            # __wrapped__ link that @wraps installs.
            func.__signature__ = inspect.Signature(new_params)
        return wrapper
    return decorate
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module http://matplotlib.org/users/license.html
class Substitution(object):
    """
    Decorator performing ``%``-substitution on a function's docstring.

    Robust to ``func.__doc__`` being ``None`` (for example when the
    interpreter runs with ``-OO``).

    Construct with either positional or keyword substitution values and
    decorate a function whose docstring contains matching ``%``
    placeholders::

        sub_author_name = Substitution(author='Jason')

        @sub_author_name
        def some_function(x):
            "%(author)s wrote this function"

        # some_function.__doc__ is now "Jason wrote this function"

    Positional placeholders work the same way::

        sub_first_last_names = Substitution('Edgar Allen', 'Poe')

        @sub_first_last_names
        def some_function(x):
            "%s %s wrote the Raven"
    """
    def __init__(self, *args, **kwargs):
        # Mixing both would make the `%` operand ambiguous.
        if args and kwargs:
            raise AssertionError("Only positional or keyword args are allowed")
        self.params = args or kwargs

    def __call__(self, func):
        # Leave a missing/empty docstring untouched.
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func

    def update(self, *args, **kwargs):
        """
        Update ``self.params`` with the supplied args.

        If called, we assume ``self.params`` is a dict.
        """
        self.params.update(*args, **kwargs)

    @classmethod
    def from_params(cls, params):
        """
        Build a Substitution holding a *live reference* to ``params``.

        Useful when the mapping may mutate after construction: unlike
        ``cls(**params)``, no copy of the values is made.
        """
        result = cls()
        result.params = params
        return result
class Appender(object):
    """
    Decorator appending an addendum to the decorated function's docstring.

    Robust to ``func.__doc__`` being ``None`` (for example when the
    interpreter runs with ``-OO``).

    Construct with the string to append and optionally a ``join`` string
    placed between the original docstring and the addendum::

        add_copyright = Appender("Copyright (c) 2009", join='\\n')

        @add_copyright
        def my_dog(has='fleas'):
            "This docstring will have a copyright below"
            pass
    """
    def __init__(self, addendum, join='', indents=0):
        # `indent` (module-level helper) re-indents each addendum line.
        if indents > 0:
            self.addendum = indent(addendum, indents=indents)
        else:
            self.addendum = addendum
        self.join = join

    def __call__(self, func):
        # Normalize None to '' on both sides, then join and dedent.
        func.__doc__ = func.__doc__ or ''
        self.addendum = self.addendum or ''
        func.__doc__ = dedent(self.join.join([func.__doc__, self.addendum]))
        return func
def indent(text, indents=1):
    """
    Indent every line of ``text`` after the first by ``indents`` levels.

    Returns '' for empty or non-string input.
    """
    if not text or not isinstance(text, str):
        return ''
    # One level of indent per requested level, prefixed to each newline.
    separator = ''.join(['\n'] + ['    '] * indents)
    return separator.join(text.split('\n'))
def make_signature(func):
    """
    Return a tuple of the parameter list with defaults rendered and the
    bare parameter list.

    Examples
    --------
    >>> def f(a, b, c=2):
    ...     return a * b * c
    >>> print(make_signature(f))
    (['a', 'b', 'c=2'], ['a', 'b', 'c'])
    """
    # `signature` is pandas.compat's getargspec-style helper; assumed to
    # expose .args/.defaults/.varargs/.keywords -- TODO confirm.
    spec = signature(func)
    # Pad defaults with '' sentinels so it aligns 1:1 with spec.args.
    if spec.defaults is None:
        n_wo_defaults = len(spec.args)
        defaults = ('',) * n_wo_defaults
    else:
        n_wo_defaults = len(spec.args) - len(spec.defaults)
        defaults = ('',) * n_wo_defaults + tuple(spec.defaults)
    args = []
    for var, default in zip(spec.args, defaults):
        # '' sentinel means "no default"; otherwise render name=repr(default).
        args.append(var if default == '' else var + '=' + repr(default))
    if spec.varargs:
        args.append('*' + spec.varargs)
    if spec.keywords:
        args.append('**' + spec.keywords)
    return args, spec.args
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.