| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
yuanming-hu/taichi
|
examples/tree_gravity.py
|
1
|
11571
|
# N-body gravity simulation in 300 lines of Taichi, tree method, no multipole, O(N log N)
# Author: archibate <1931127624@qq.com>, all left reserved
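# Roughly, the scheme below works as follows: every particle is inserted into a
# quad/octree, and get_tree_gravity_at() lumps a child node into a single
# pseudo-particle at its mass-weighted centre whenever its squared distance from the
# query point exceeds kShapeFactor**2 times the squared size of the parent cell;
# otherwise the traversal descends into that child. This is a Barnes-Hut style
# acceptance test with no multipole correction terms.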
import taichi_glsl as tl
import taichi as ti
ti.init()
if not hasattr(ti, 'jkl'):
ti.jkl = ti.indices(1, 2, 3)
kUseTree = True
#kDisplay = 'tree mouse pixels cmap save_result'
kDisplay = 'pixels'
kResolution = 512
kShapeFactor = 1
kMaxParticles = 8192
kMaxDepth = kMaxParticles * 1
kMaxNodes = kMaxParticles * 4
kDim = 2
dt = 0.00005
LEAF = -1
TREE = -2
particle_mass = ti.field(ti.f32)
particle_pos = ti.Vector.field(kDim, ti.f32)
particle_vel = ti.Vector.field(kDim, ti.f32)
particle_table = ti.root.dense(ti.i, kMaxParticles)
particle_table.place(particle_pos).place(particle_vel).place(particle_mass)
particle_table_len = ti.field(ti.i32, ())
if kUseTree:
trash_particle_id = ti.field(ti.i32)
trash_base_parent = ti.field(ti.i32)
trash_base_geo_center = ti.Vector.field(kDim, ti.f32)
trash_base_geo_size = ti.field(ti.f32)
trash_table = ti.root.dense(ti.i, kMaxDepth)
trash_table.place(trash_particle_id)
trash_table.place(trash_base_parent, trash_base_geo_size)
trash_table.place(trash_base_geo_center)
trash_table_len = ti.field(ti.i32, ())
node_mass = ti.field(ti.f32)
node_weighted_pos = ti.Vector.field(kDim, ti.f32)
node_particle_id = ti.field(ti.i32)
node_children = ti.field(ti.i32)
node_table = ti.root.dense(ti.i, kMaxNodes)
node_table.place(node_mass, node_particle_id, node_weighted_pos)
node_table.dense({2: ti.jk, 3: ti.jkl}[kDim], 2).place(node_children)
node_table_len = ti.field(ti.i32, ())
if 'mouse' in kDisplay:
display_image = ti.Vector.field(3, ti.f32, (kResolution, kResolution))
elif len(kDisplay):
display_image = ti.field(ti.f32, (kResolution, kResolution))
@ti.func
def alloc_node():
ret = ti.atomic_add(node_table_len[None], 1)
assert ret < kMaxNodes
node_mass[ret] = 0
node_weighted_pos[ret] = particle_pos[0] * 0
node_particle_id[ret] = LEAF
for which in ti.grouped(ti.ndrange(*([2] * kDim))):
node_children[ret, which] = LEAF
return ret
@ti.func
def alloc_particle():
ret = ti.atomic_add(particle_table_len[None], 1)
assert ret < kMaxParticles
particle_mass[ret] = 0
particle_pos[ret] = particle_pos[0] * 0
particle_vel[ret] = particle_pos[0] * 0
return ret
@ti.func
def alloc_trash():
ret = ti.atomic_add(trash_table_len[None], 1)
assert ret < kMaxDepth
return ret
@ti.func
def alloc_a_node_for_particle(particle_id, parent, parent_geo_center,
parent_geo_size):
position = particle_pos[particle_id]
mass = particle_mass[particle_id]
depth = 0
while depth < kMaxDepth:
already_particle_id = node_particle_id[parent]
if already_particle_id == LEAF:
break
if already_particle_id != TREE:
node_particle_id[parent] = TREE
trash_id = alloc_trash()
trash_particle_id[trash_id] = already_particle_id
trash_base_parent[trash_id] = parent
trash_base_geo_center[trash_id] = parent_geo_center
trash_base_geo_size[trash_id] = parent_geo_size
already_pos = particle_pos[already_particle_id]
already_mass = particle_mass[already_particle_id]
node_weighted_pos[parent] -= already_pos * already_mass
node_mass[parent] -= already_mass
node_weighted_pos[parent] += position * mass
node_mass[parent] += mass
which = abs(position > parent_geo_center)
child = node_children[parent, which]
if child == LEAF:
child = alloc_node()
node_children[parent, which] = child
child_geo_size = parent_geo_size * 0.5
child_geo_center = parent_geo_center + (which - 0.5) * child_geo_size
parent_geo_center = child_geo_center
parent_geo_size = child_geo_size
parent = child
depth = depth + 1
node_particle_id[parent] = particle_id
node_weighted_pos[parent] = position * mass
node_mass[parent] = mass
@ti.kernel
def add_particle_at(mx: ti.f32, my: ti.f32, mass: ti.f32):
mouse_pos = tl.vec(mx, my) + tl.randND(2) * (0.05 / kResolution)
particle_id = alloc_particle()
if ti.static(kDim == 2):
particle_pos[particle_id] = mouse_pos
else:
particle_pos[particle_id] = tl.vec(mouse_pos, 0.0)
particle_mass[particle_id] = mass
@ti.kernel
def add_random_particles(angular_velocity: ti.f32):
num = ti.static(1)
particle_id = alloc_particle()
if ti.static(kDim == 2):
particle_pos[particle_id] = tl.randSolid2D() * 0.2 + 0.5
velocity = (particle_pos[particle_id] - 0.5) * angular_velocity * 250
particle_vel[particle_id] = tl.vec(-velocity.y, velocity.x)
else:
particle_pos[particle_id] = tl.randUnit3D() * 0.2 + 0.5
velocity = (particle_pos[particle_id].xy -
0.5) * angular_velocity * 180
particle_vel[particle_id] = tl.vec(-velocity.y, velocity.x, 0.0)
particle_mass[particle_id] = tl.randRange(0.0, 1.5)
@ti.kernel
def build_tree():
node_table_len[None] = 0
trash_table_len[None] = 0
alloc_node()
particle_id = 0
while particle_id < particle_table_len[None]:
alloc_a_node_for_particle(particle_id, 0, particle_pos[0] * 0 + 0.5,
1.0)
trash_id = 0
while trash_id < trash_table_len[None]:
alloc_a_node_for_particle(trash_particle_id[trash_id],
trash_base_parent[trash_id],
trash_base_geo_center[trash_id],
trash_base_geo_size[trash_id])
trash_id = trash_id + 1
trash_table_len[None] = 0
particle_id = particle_id + 1
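# gravity_func below is the pairwise attraction kernel shared by the brute-force and
# tree solvers; the 1e-3 passed to tl.normalizePow presumably acts as a small
# softening term that keeps the roughly |r|**-2 magnitude finite at very small
# separations (an assumption about taichi_glsl.normalizePow, not documented here).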
@ti.func
def gravity_func(distance):
return tl.normalizePow(distance, -2, 1e-3)
@ti.func
def get_tree_gravity_at(position):
acc = particle_pos[0] * 0
trash_table_len[None] = 0
trash_id = alloc_trash()
assert trash_id == 0
trash_base_parent[trash_id] = 0
trash_base_geo_size[trash_id] = 1.0
trash_id = 0
while trash_id < trash_table_len[None]:
parent = trash_base_parent[trash_id]
parent_geo_size = trash_base_geo_size[trash_id]
particle_id = node_particle_id[parent]
if particle_id >= 0:
distance = particle_pos[particle_id] - position
acc += particle_mass[particle_id] * gravity_func(distance)
else: # TREE or LEAF
for which in ti.grouped(ti.ndrange(*([2] * kDim))):
child = node_children[parent, which]
if child == LEAF:
continue
node_center = node_weighted_pos[child] / node_mass[child]
distance = node_center - position
if distance.norm_sqr() > kShapeFactor**2 * parent_geo_size**2:
acc += node_mass[child] * gravity_func(distance)
else:
new_trash_id = alloc_trash()
child_geo_size = parent_geo_size * 0.5
trash_base_parent[new_trash_id] = child
trash_base_geo_size[new_trash_id] = child_geo_size
trash_id = trash_id + 1
return acc
@ti.func
def get_raw_gravity_at(pos):
acc = particle_pos[0] * 0
for i in range(particle_table_len[None]):
acc += particle_mass[i] * gravity_func(particle_pos[i] - pos)
return acc
@ti.kernel
def substep_raw():
for i in range(particle_table_len[None]):
acceleration = get_raw_gravity_at(particle_pos[i])
particle_vel[i] += acceleration * dt
for i in range(particle_table_len[None]):
particle_pos[i] += particle_vel[i] * dt
@ti.kernel
def substep_tree():
particle_id = 0
while particle_id < particle_table_len[None]:
acceleration = get_tree_gravity_at(particle_pos[particle_id])
particle_vel[particle_id] += acceleration * dt
# well... it seems our tree inserter will break if a particle goes out of bounds:
particle_vel[particle_id] = tl.boundReflect(particle_pos[particle_id],
particle_vel[particle_id],
0, 1)
particle_id = particle_id + 1
for i in range(particle_table_len[None]):
particle_pos[i] += particle_vel[i] * dt
@ti.kernel
def render_arrows(mx: ti.f32, my: ti.f32):
pos = tl.vec(mx, my)
acc = get_raw_gravity_at(pos) * 0.001
tl.paintArrow(display_image, pos, acc, tl.D.yyx)
acc_tree = get_tree_gravity_at(pos) * 0.001
tl.paintArrow(display_image, pos, acc_tree, tl.D.yxy)
@ti.kernel
def render_pixels():
for i in range(particle_table_len[None]):
position = particle_pos[i].xy
pix = int(position * kResolution)
display_image[tl.clamp(pix, 0, kResolution - 1)] += 0.3
def render_tree(gui,
parent=0,
parent_geo_center=tl.vec(0.5, 0.5),
parent_geo_size=1.0):
child_geo_size = parent_geo_size * 0.5
if node_particle_id[parent] >= 0:
tl = parent_geo_center - child_geo_size
br = parent_geo_center + child_geo_size
gui.rect(tl, br, radius=1, color=0xff0000)
for which in map(ti.Vector, [[0, 0], [0, 1], [1, 0], [1, 1]]):
child = node_children[(parent, which[0], which[1])]
if child < 0:
continue
a = parent_geo_center + (which - 1) * child_geo_size
b = parent_geo_center + which * child_geo_size
child_geo_center = parent_geo_center + (which - 0.5) * child_geo_size
gui.rect(a, b, radius=1, color=0xff0000)
render_tree(gui, child, child_geo_center, child_geo_size)
if 'cmap' in kDisplay:
import matplotlib.cm as cm
cmap = cm.get_cmap('magma')
print('[Hint] Press `r` to add 512 random particles')
print('[Hint] Press `t` to add 512 random particles with angular velocity')
print('[Hint] Drag with mouse left button to add a series of particles')
print('[Hint] Drag with mouse middle button to add zero-mass particles')
print('[Hint] Click mouse right button to add a single particle')
gui = ti.GUI('Tree-code', kResolution)
while gui.running:
for e in gui.get_events(gui.PRESS):
if e.key == gui.ESCAPE:
gui.running = False
elif e.key == gui.RMB:
add_particle_at(*gui.get_cursor_pos(), 1.0)
elif e.key in 'rt':
if particle_table_len[None] + 512 < kMaxParticles:
for i in range(512):
add_random_particles(e.key == 't')
if gui.is_pressed(gui.MMB, gui.LMB):
add_particle_at(*gui.get_cursor_pos(), gui.is_pressed(gui.LMB))
if kUseTree:
build_tree()
substep_tree()
else:
substep_raw()
if len(kDisplay) and 'trace' not in kDisplay:
display_image.fill(0)
if 'mouse' in kDisplay:
render_arrows(*gui.get_cursor_pos())
if 'pixels' in kDisplay:
render_pixels()
if 'cmap' in kDisplay:
gui.set_image(cmap(display_image.to_numpy()))
elif len(kDisplay):
gui.set_image(display_image)
if 'tree' in kDisplay:
render_tree(gui)
if 'pixels' not in kDisplay:
gui.circles(particle_pos.to_numpy()[:particle_table_len[None]])
if 'save_result' in kDisplay:
gui.show(f'{gui.frame:06d}.png')
else:
gui.show()
|
mit
|
ahoyosid/scikit-learn
|
examples/cluster/plot_affinity_propagation.py
|
349
|
2304
|
"""
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
aewhatley/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
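# Note: compute_time converts the measured timedelta into fractional seconds; its
# t_start argument is currently unused by the function body.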
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
droundy/deft
|
papers/fuzzy-fmt/plot-FE_vs_gw.py
|
1
|
2561
|
#!/usr/bin/python2
#This program creates a plot of Free Energy difference vs gw at a specified
#temperature and density from data in kT*n*alldat.dat (or kT*n*alldat_tensor.dat) files
#which are generated as output data files by figs/new-melting.cpp
#NOTE: Run this plot script from directory deft/papers/fuzzy-fmt
#with command ./plot-FE_vs_gw.py --kT [temp] --n [density] --fv [fraction of vacancies] directory [OPTIONAL: --tensor]
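#Example invocation (hypothetical values; assumes the data files live in ./crystallization):
#  ./plot-FE_vs_gw.py --kT 0.5 --n 1.0 --fv 0.01 crystallization --tensor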
import os, glob
import argparse
import numpy as np
import matplotlib.pyplot as plt
import sys
parser = argparse.ArgumentParser(description='Creates a plot of FEdiff vs gw at a specified temperature, density, and fraction of vacancies.')
parser.add_argument('--kT', metavar='temperature', type=float,
help='reduced temperature - REQUIRED')
parser.add_argument('--n', metavar='density', type=float,
help='reduced density - REQUIRED')
parser.add_argument('--fv', metavar='fraction of vacancies', type=float,
help='fraction of vacancies - REQUIRED')
parser.add_argument('--tensor', action='store_true',
help='use tensor weight')
parser.add_argument('directory', metavar='directory', type=str,
help='directory with data to plot')
args=parser.parse_args()
kT=args.kT
n=args.n
fv=args.fv
gw = []
fe_difference = []
if args.tensor :
#files = sorted(list(glob.glob('crystallization/kT%.3f_n%.3f_*alldat_tensor.dat' % (kT, n))))
#files = sorted(list(glob.glob('newdata_tensor/phase-diagram/kT%.3f_n%.3f_fv%.2f*alldat_tensor.dat' % (kT, n, fv))))
files = sorted(list(glob.glob('%s/kT%.3f_n%.3f_fv%.2f*alldat_tensor.dat' % (args.directory, kT, n, fv))))
else :
#files = sorted(list(glob.glob('crystallization/kT%.3f_n%.3f_*alldat.dat' % (kT, n))))
#files = sorted(list(glob.glob('newdata/phase-diagram/kT%.3f_n%.3f_fv%.2f*alldat.dat' % (kT, n, fv))))
files = sorted(list(glob.glob('%s/kT%.3f_n%.3f_fv%.2f*alldat.dat' % (args.directory, kT, n, fv))))
for f in files:
data = np.loadtxt(f)
gw.append(data[3])
fe_difference.append(data[6])
plt.axhspan(0.2, -0.2, color='black', alpha=0.15, lw=0)
plt.axhspan(0.02, -0.02, color='green', alpha=0.15, lw=0)
plt.axhline(0, color='black')
if args.tensor :
plt.title('Free Energy difference vs gw for kT=%g, n=%g, fv=%g Tensor' % (kT, n, fv))
else:
plt.title('Free Energy difference vs gw for kT=%g, n=%g, fv=%g Nontensor' % (kT, n, fv))
plt.ylabel('FEdiff')
plt.xlabel('gw')
plt.plot(gw, fe_difference, '.-')
plt.show()
|
gpl-2.0
|
cybernet14/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
342
|
5603
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that passing an AgglomerativeClustering instance as n_clusters gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
|
bsd-3-clause
|
rohangoel96/IRCLogParser
|
IRCLogParser/lib/deprecated/scripts/parser-time_series.py
|
2
|
6235
|
#This code generates a time-series graph. Such a graph has users on the y axis and message transmission time on the x axis. This means that if there exist 4 users - A, B, C, D.
#Then if any of these users sends a message at time t, we put a dot in front of that user at time t in the graph.
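#For example (hypothetical values): if user B is mapped to UID 100 and sends a message at 12:34,
#then a point is drawn at x = 12:34, y = 100, so more talkative users show denser rows of dots.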
import os.path
import re
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pylab
import pygraphviz as pygraphviz
import numpy
import datetime
import time
import pandas as pd
#yofel shadeslayer, yofel pheonixbrd
for iterator in range(2,3):
for fileiterator in range(1,2):
if(fileiterator<10):
sttring="/home/dhruvie/LOP/2013/"+str(iterator)+"/0"
sttring=sttring+str(fileiterator)+"/#kubuntu-devel.txt"
else:
sttring="/home/dhruvie/LOP/2013/"+str(iterator)+"/"
sttring=sttring+str(fileiterator)+"/#kubuntu-devel.txt"
if not os.path.exists(sttring):
continue
with open(sttring) as f:
content = f.readlines() #contents stores all the lines of the file kubunutu-devel
nicks = [] #list of all the nicknames
send_time = [] #list of all the times a user sends a message to another user
conv_time = []
Numofmsg = []
channel= "#kubuntu-devel" #channel name
groups = ['yofel_','phoenix_firebrd', 'shadeslayer'] #first will be assigned UID 50, second 100, third 150 and so on
groupsnum = []
groupsnum.append(50)
for i in range(0, len(groups)-1):
groupsnum.append(50+groupsnum[i])
#code for getting all the nicknames in a list
for i in content:
if(i[0] != '=' and "] <" in i and "> " in i):
m = re.search(r"\<(.*?)\>", i)
if m.group(0) not in nicks:
nicks.append(m.group(0)) #used regex to get the string between <> and appended it to the nicks list
for i in xrange(0,len(nicks)):
nicks[i] = nicks[i][1:-1] #removed <> from the nicknames
for i in xrange(0,len(nicks)):
if(nicks[i][len(nicks[i])-1]=='\\'):
nicks[i]=nicks[i][:-1]
nicks[i]=nicks[i]+'CR'
for j in content:
if(j[0]=='=' and "changed the topic of" not in j):
line1=j[j.find("=")+1:j.find(" is")]
line2=j[j.find("wn as")+1:j.find("\n")]
line1=line1[3:]
line2=line2[5:]
if(line1[len(line1)-1]=='\\'):
line1=line1[:-1]
line1=line1 + 'CR'
if(line2[len(line2)-1]=='\\'):
line2=line2[:-1]
line2=line2 + 'CR'
if line1 not in nicks:
nicks.append(line1)
if line2 not in nicks:
nicks.append(line2)
#code for forming list of lists to avoid nickname duplication
x=[[] for i in range(len(nicks))]
for line in content:
if(line[0]=='=' and "changed the topic of" not in line):
line1=line[line.find("=")+1:line.find(" is")]
line2=line[line.find("wn as")+1:line.find("\n")]
line1=line1[3:]
line2=line2[5:]
if(line1[len(line1)-1]=='\\'):
line1=line1[:-1]
line1=line1 + 'CR'
if(line2[len(line2)-1]=='\\'):
line2=line2[:-1]
line2=line2 + 'CR'
for i in range(len(nicks)):
if line1 in x[i]:
x[i].append(line1)
x[i].append(line2)
break
if not x[i]:
x[i].append(line1)
x[i].append(line2)
break
#code for making relation map between clients
for line in content:
flag_comma = 0
if(line[0] != '=' and "] <" in line and "> " in line):
m = re.search(r"\<(.*?)\>", line)
var = m.group(0)[1:-1]
if(var[len(var)-1]=='\\'):
var=var[:-1]
var=var + 'CR'
for d in range(len(nicks)):
if var in x[d]:
pehla = x[d][0]
break
else:
pehla=var
for i in nicks:
data=[e.strip() for e in line.split(':')]
data[1]=data[1][data[1].find(">")+1:len(data[1])]
data[1]=data[1][1:]
if not data[1]:
break
for ik in xrange(0,len(data)):
if(data[ik] and data[ik][len(data[ik])-1]=='\\'):
data[ik]=data[ik][:-1]
data[ik]=data[ik] + 'CR'
for z in data:
if(z==i):
send_time.append(line[1:6])
if(var != i):
for d in range(len(nicks)):
if i in x[d]:
second=x[d][0]
break
else:
second=i
if pehla in groups and second in groups:
conv_time.append(line[1:6]) #We store time and index of sender, so that in our graph we can put a mark on that index at that time.
Numofmsg.append(groupsnum[groups.index(pehla)])
if "," in data[1]:
flag_comma = 1
data1=[e.strip() for e in data[1].split(',')]
for ij in xrange(0,len(data1)):
if(data1[ij] and data1[ij][len(data1[ij])-1]=='\\'):
data1[ij]=data1[ij][:-1]
data1[ij]=data1[ij] + 'CR'
for j in data1:
if(j==i):
send_time.append(line[1:6])
if(var != i):
for d in range(len(nicks)):
if i in x[d]:
second=x[d][0]
break
else:
second=i
if pehla in groups and second in groups:
conv_time.append(line[1:6])
Numofmsg.append(groupsnum[groups.index(pehla)])
if(flag_comma == 0):
search2=line[line.find(">")+1:line.find(", ")]
search2=search2[1:]
if(search2[len(search2)-1]=='\\'):
search2=search2[:-1]
search2=search2 + 'CR'
if(search2==i):
send_time.append(line[1:6])
if(var != i):
for d in range(len(nicks)):
if i in x[d]:
second=x[d][0]
break
else:
second=i
if pehla in groups and second in groups:
conv_time.append(line[1:6])
Numofmsg.append(groupsnum[groups.index(pehla)])
print(conv_time)
print(Numofmsg)
data = {'Time': conv_time,
'Message_Sent': Numofmsg}
df = pd.DataFrame(data, columns = ['Time', 'Message_Sent'])
df.index = df['Time']
del df['Time']
df
print(df)
axes = plt.gca()
axes.set_ylim([0,200])
df.plot(ax=axes ,style=['o','rx'])
plt.savefig('time-series.png')
plt.close()
#Here we have plotted the graph with message transmission time on the x axis and users (A(50), B(100), C(150), ...) on the y axis.
#A user who sends more messages will have a higher density of dots in front of their index.
|
mit
|
ZENGXH/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
198
|
29735
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
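# In outline, each C-step (_c_step below) computes the Mahalanobis distances of all
# samples under the current (location, covariance) estimates, keeps the n_support
# points with the smallest distances as the new support, and re-estimates location
# and covariance from that support; the covariance determinant is non-increasing
# across steps, so the loop stops once it no longer improves.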
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into a larger subset, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is almost, but not exactly,
zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
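# A minimal usage sketch (synthetic data; shapes shown for illustration only):
#
#   >>> import numpy as np
#   >>> from sklearn.covariance import MinCovDet
#   >>> X = np.random.RandomState(0).randn(100, 2)
#   >>> mcd = MinCovDet(random_state=0).fit(X)
#   >>> mcd.location_.shape, mcd.covariance_.shape
#   ((2,), (2, 2))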
|
bsd-3-clause
|
Ziqi-Li/bknqgis
|
bokeh/bokeh/plotting/tests/test_helpers.py
|
1
|
5233
|
import pytest
from bokeh.models import ColumnDataSource
from bokeh.models.ranges import Range1d, DataRange1d, FactorRange
from bokeh.models.scales import LinearScale, LogScale, CategoricalScale
from bokeh.plotting.helpers import _get_legend_item_label, _get_scale, _get_range, _stack
def test__stack_raises_when_spec_in_kwargs():
with pytest.raises(ValueError) as e:
_stack(['a', 'b'], 'foo', 'bar', foo=10)
assert str(e).endswith("Stack property 'foo' cannot appear in keyword args")
with pytest.raises(ValueError) as e:
_stack(['a', 'b'], 'foo', 'bar', bar=10)
assert str(e).endswith("Stack property 'bar' cannot appear in keyword args")
def test__stack_raises_when_kwargs_list_lengths_differ():
with pytest.raises(ValueError) as e:
_stack(['a', 'b'], 'foo', 'bar', baz=[1, 2], quux=[3,4,5])
assert str(e).endswith("Keyword argument sequences for broadcasting must all be the same lengths. Got lengths: [2, 3]")
def test__stack_raises_when_kwargs_list_lengths_and_stackers_lengths_differ():
with pytest.raises(ValueError) as e:
_stack(['a', 'b', 'c'], 'foo', 'bar', baz=[1, 2], quux=[3,4])
assert str(e).endswith("Keyword argument sequences for broadcasting must be the same length as stackers")
def test__stack_broadcast_with_no_kwargs():
stackers = ['a', 'b', 'c', 'd']
kws = _stack(stackers, 'start', 'end')
assert len(kws) == len(stackers)
for i, kw in enumerate(kws):
assert set(['start', 'end']) == set(kw.keys())
assert list(kw['start']['expr'].fields) == stackers[:i]
assert list(kw['end']['expr'].fields) == stackers[:(i+1)]
def test__stack_broadcast_with_scalar_kwargs():
stackers = ['a', 'b', 'c', 'd']
kws = _stack(stackers, 'start', 'end', foo=10, bar="baz")
assert len(kws) == len(stackers)
for i, kw in enumerate(kws):
assert set(['start', 'end', 'foo', 'bar']) == set(kw.keys())
assert list(kw['start']['expr'].fields) == stackers[:i]
assert list(kw['end']['expr'].fields) == stackers[:(i+1)]
assert kw['foo'] == 10
assert kw['bar'] == "baz"
def test__stack_broadcast_with_list_kwargs():
stackers = ['a', 'b', 'c', 'd']
kws = _stack(stackers, 'start', 'end', foo=[10, 20, 30, 40], bar="baz")
assert len(kws) == len(stackers)
for i, kw in enumerate(kws):
assert set(['start', 'end', 'foo', 'bar']) == set(kw.keys())
assert list(kw['start']['expr'].fields) == stackers[:i]
assert list(kw['end']['expr'].fields) == stackers[:(i+1)]
assert kw['foo'] == [10, 20, 30, 40][i]
assert kw['bar'] == "baz"
# _get_legend_item_label
def test_if_legend_is_something_exotic_that_it_is_passed_directly_to_label():
kwargs = {
'legend': {'field': 'milk'}
}
label = _get_legend_item_label(kwargs)
assert label == {'field': 'milk'}
def test_if_legend_is_a_string_but_no_source_then_label_is_set_as_value():
kwargs = {
'legend': 'label'
}
label = _get_legend_item_label(kwargs)
assert label == {'value': 'label'}
def test_if_legend_is_a_string_and_source_with_that_column_then_field():
kwargs = {
'legend': 'label',
'source': ColumnDataSource(dict(label=[1, 2]))
}
label = _get_legend_item_label(kwargs)
assert label == {'field': 'label'}
def test_if_legend_is_a_string_and_source_without_column_name_then_value():
kwargs = {
'legend': 'not_a_column_label',
'source': ColumnDataSource(dict(label=[1, 2]))
}
label = _get_legend_item_label(kwargs)
assert label == {'value': 'not_a_column_label'}
def test__get_scale_numeric_range_linear_axis():
s = _get_scale(Range1d(), "linear")
assert isinstance(s, LinearScale)
s = _get_scale(Range1d(), "datetime")
assert isinstance(s, LinearScale)
s = _get_scale(Range1d(), "auto")
assert isinstance(s, LinearScale)
def test__get_scale_numeric_range_log_axis():
s = _get_scale(DataRange1d(), "log")
assert isinstance(s, LogScale)
def test__get_scale_factor_range():
s = _get_scale(FactorRange(), "auto")
assert isinstance(s, CategoricalScale)
def test__get_range_with_None():
r = _get_range(None)
assert isinstance(r, DataRange1d)
def test__get_range_with_Range():
for t in [Range1d, DataRange1d, FactorRange]:
rng = t()
r = _get_range(rng)
assert r is rng
def test__get_range_with_string_seq():
f = ["foo" ,"end", "baz"]
for t in [list, tuple]:
r = _get_range(t(f))
assert isinstance(r, FactorRange)
# FactorRange accepts Seq, but _get_range always sets a list copy
assert r.factors == f
def test__get_range_with_float_bounds():
r = _get_range((1.2, 10))
assert isinstance(r, Range1d)
assert r.start == 1.2
assert r.end == 10
r = _get_range([1.2, 10])
assert isinstance(r, Range1d)
assert r.start == 1.2
assert r.end == 10
def test_get_range_with_pandas_group():
from bokeh.sampledata.iris import flowers
g = flowers.groupby('species')
r = _get_range(g)
assert isinstance(r, FactorRange)
assert r.factors == ['setosa', 'versicolor', 'virginica'] # should always be sorted
|
gpl-2.0
|
rhyolight/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py
|
69
|
20839
|
"""
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of the interactive Python session
    Warning: does not work under Windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    #mainloop: if an fltk program already exists, there is no need to call that
    #threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
|
agpl-3.0
|
plissonf/scikit-learn
|
examples/classification/plot_lda.py
|
70
|
2413
|
"""
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
    Only one feature contains discriminative information; the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
|
bsd-3-clause
|
justincassidy/scikit-learn
|
examples/svm/plot_rbf_parameters.py
|
132
|
8096
|
'''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
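# Hedged aside (not part of the original example): the docstring above notes
# that a logarithmic grid from 1e-3 to 1e3 is usually sufficient in practice.
# The helper below sketches such a grid; it is illustrative only and is not
# used by the rest of the script.
def _practical_param_grid(n_steps=7):
    import numpy as np
    # 10**-3 ... 10**3 for both C and gamma, as suggested in the text above
    return dict(C=np.logspace(-3, 3, n_steps),
                gamma=np.logspace(-3, 3, n_steps))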
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap, which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
|
bsd-3-clause
|
evgchz/scikit-learn
|
benchmarks/bench_glmnet.py
|
297
|
3848
|
"""
To run this, you'll need to have installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix the number of features and increase the number of
training samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we fix the number of samples and increase the
number of features in the training set. Then we plot the computation
time as a function of the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
surgebiswas/poker
|
PokerBots_2017/Johnny/scipy/special/add_newdocs.py
|
8
|
137472
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
           http://netlib.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
    Computation of `k` involves a search for a value that produces the desired
    value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
    Computation of `n` involves a search for a value that produces the desired
    value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `a` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
    btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
    chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
    .. math:: E(m) = E(m/(m - 1)) \\sqrt{1-m}
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
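Examples
--------
An illustrative consistency check of the identity ``sn**2 + cn**2 == 1``,
verified numerically with ``np.allclose``:
>>> import numpy as np
>>> from scipy.special import ellipj
>>> sn, cn, dn, ph = ellipj(0.3, 0.8)
>>> np.allclose(sn**2 + cn**2, 1.0)
True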
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
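Examples
--------
A small sketch of the defining cases given above: ``entr(1) == 0`` (since
``log(1) == 0``), ``entr(0) == 0`` by convention, and negative inputs give
``-inf``.
>>> import numpy as np
>>> from scipy.special import entr
>>> np.allclose(entr([1.0, 0.0]), [0.0, 0.0])
True
>>> bool(np.isneginf(entr(-1.0)))
True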
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative distribution function of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
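Examples
--------
A brief illustration: ``expit(0) == 0.5``, and `expit` undoes `logit`
(checked with ``np.allclose`` to sidestep exact float formatting).
>>> import numpy as np
>>> from scipy.special import expit, logit
>>> np.allclose(expit(0.0), 0.5)
True
>>> np.allclose(expit(logit([0.1, 0.5, 0.9])), [0.1, 0.5, 0.9])
True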
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer `n` and non-negative `x` and
`n`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
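Examples
--------
An illustrative check against the equivalent ``expm1``-based form; the
point of `exprel` is that it stays accurate as `x` approaches zero, where
the naive quotient ``(exp(x) - 1) / x`` loses precision.
>>> import numpy as np
>>> from scipy.special import exprel
>>> x = 0.5
>>> np.allclose(exprel(x), np.expm1(x) / x)
True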
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
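Examples
--------
A round-trip sketch with `fdtr`: compute a cumulative probability and
invert it back to the original quantile (compared with ``np.allclose``).
>>> import numpy as np
>>> from scipy.special import fdtr, fdtri
>>> p = fdtr(5, 10, 2.0)        # CDF at x = 2.0 with dfn = 5, dfd = 10
>>> np.allclose(fdtri(5, 10, p), 2.0)
True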
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function.
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a, x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a, x)
`a` must be positive and `x` must be >= 0.
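Examples
--------
An illustrative check of the complement relation stated above,
``gammainc(a, x) + gammaincc(a, x) == 1``:
>>> import numpy as np
>>> from scipy.special import gammainc, gammaincc
>>> np.allclose(gammainc(2.5, 1.0) + gammaincc(2.5, 1.0), 1.0)
True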
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
""")
add_newdoc("scipy.special", "_gammaln",
"""
Internal function, use ``gammaln`` instead.
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative density function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
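Examples
--------
A quick consistency sketch with `gdtr`: the CDF and the survival function
sum to one (verified with ``np.allclose``).
>>> import numpy as np
>>> from scipy.special import gdtr, gdtrc
>>> np.allclose(gdtr(1.2, 3.4, 5.6) + gdtrc(1.2, 3.4, 5.6), 1.0)
True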
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z \exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
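Examples
--------
A small worked example of the two finite branches of the definition: for
``delta = 1``, ``huber(1, 0.5) = 0.5 * 0.5**2 = 0.125`` (quadratic branch)
and ``huber(1, 2) = 1 * (2 - 0.5) = 1.5`` (linear branch).
>>> import numpy as np
>>> from scipy.special import huber
>>> np.allclose(huber(1, [0.5, 2.0]), [0.125, 1.5])
True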
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`z f''(z) + v f'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
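Examples
--------
An illustrative spot check against the closed form
``hyp2f1(1, 1, 2, z) == -log(1 - z) / z`` (a standard identity), compared
with ``np.allclose``:
>>> import numpy as np
>>> from scipy.special import hyp2f1
>>> z = 0.5
>>> np.allclose(hyp2f1(1, 1, 2, z), -np.log(1 - z) / z)
True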
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``integral(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
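Examples
--------
An illustrative check of the scaling relation stated above,
``ive(v, z) == iv(v, z) * exp(-abs(z.real))``, for a real argument:
>>> import numpy as np
>>> from scipy.special import iv, ive
>>> v, z = 2, 3.0
>>> np.allclose(ive(v, z), iv(v, z) * np.exp(-abs(z)))
True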
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
See also
--------
jv : Bessel function of real order and complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
See also
--------
jv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
See also
--------
jv
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
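Examples
--------
A minimal sketch of two defining properties: the divergence of a value
from itself is zero, and the function is non-negative elsewhere.
>>> import numpy as np
>>> from scipy.special import kl_div
>>> np.allclose(kl_div(0.5, 0.5), 0.0)
True
>>> bool(kl_div(0.5, 0.25) >= 0)
True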
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
z : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
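Examples
--------
A round-trip sketch with `kolmogi` (the inverse function), checked with
``np.allclose``:
>>> import numpy as np
>>> from scipy.special import kolmogorov, kolmogi
>>> p = kolmogorov(1.0)
>>> np.allclose(kolmogi(p), 1.0)
True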
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : float
Degree.
x : float
Argument. Must be ``|x| <= 1``.
Returns
-------
res : float
The value of the function.
See Also
--------
lpmn : Similar, but computes values for all orders 0..m and degrees 0..n.
clpmn : Similar to `lpmn` but allows a complex argument.
Notes
-----
It is possible to extend the domain of this function to all
complex m, v, x, but this is not yet implemented.
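Examples
--------
Illustrative spot checks for order ``m = 0``, where `lpmv` reduces to the
Legendre polynomials ``P_1(x) = x`` and ``P_2(x) = (3*x**2 - 1) / 2``:
>>> import numpy as np
>>> from scipy.special import lpmv
>>> x = 0.5
>>> np.allclose(lpmv(0, 1, x), x)
True
>>> np.allclose(lpmv(0, 2, x), (3 * x**2 - 1) / 2)
True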
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
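Examples
--------
An illustrative check (added here, not part of the original Cephes
documentation) of the incomplete beta identity stated above, assuming
`scipy.special.betainc` is the regularized incomplete beta function
``I_x(a, b)``:
>>> import numpy as np
>>> from scipy import special
>>> k, n, p = 5, 10, 0.5
>>> np.allclose(special.nbdtr(k, n, p), special.betainc(n, k + 1, p))
True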
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
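Examples
--------
An illustrative sketch (added here, not from the original documentation)
showing that `nbdtrc` matches the incomplete beta identity above and is
complementary to `nbdtr`:
>>> import numpy as np
>>> from scipy import special
>>> k, n, p = 5, 10, 0.5
>>> np.allclose(special.nbdtrc(k, n, p), special.betainc(k + 1, n, 1 - p))
True
>>> np.allclose(special.nbdtr(k, n, p) + special.nbdtrc(k, n, p), 1.0)
True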
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
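Examples
--------
A round-trip sketch (illustrative, not part of the original documentation):
inverting the CDF with respect to `p` recovers the probability used to
produce it.
>>> import numpy as np
>>> from scipy import special
>>> k, n, p = 5, 10, 0.3
>>> y = special.nbdtr(k, n, p)
>>> np.allclose(special.nbdtri(k, n, y), p)
True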
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
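Examples
--------
An illustrative check (added here, not from the original documentation) of
the standard relation to the complementary error function,
``ndtr(x) = 0.5 * erfc(-x / sqrt(2))``:
>>> import numpy as np
>>> from scipy import special
>>> x = np.linspace(-3, 3, 7)
>>> np.allclose(special.ndtr(x), 0.5 * special.erfc(-x / np.sqrt(2)))
True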
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
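Examples
--------
For moderate arguments `log_ndtr` agrees with the naive ``log(ndtr(x))``;
its advantage is accuracy when ``ndtr(x)`` is tiny. An illustrative check
(added here, not from the original documentation):
>>> import numpy as np
>>> from scipy import special
>>> x = np.array([-3.0, 0.0, 3.0])
>>> np.allclose(special.log_ndtr(x), np.log(special.ndtr(x)))
True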
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
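Examples
--------
An illustrative round trip through `ndtr` (added here, not from the
original documentation):
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.ndtri(special.ndtr(1.5)), 1.5)
True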
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial) is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
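Examples
--------
An illustrative check (added here, not from the original documentation) of
the integer-`m` product form given above, ``(3)_4 = 3 * 4 * 5 * 6``:
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.poch(3, 4), 3 * 4 * 5 * 6)
True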
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
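Examples
--------
An illustrative check (added here, not from the original documentation) of
the defining formula against a direct NumPy evaluation:
>>> import numpy as np
>>> from scipy import special
>>> delta, r = 2.0, np.array([-1.0, 0.5, 3.0])
>>> direct = delta**2 * (np.sqrt(1 + (r / delta)**2) - 1)
>>> np.allclose(special.pseudo_huber(delta, r), direct)
True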
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
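Examples
--------
An illustrative check (added here, not from the original documentation) of
the piecewise definition for strictly positive inputs and for ``x = 0``:
>>> import numpy as np
>>> from scipy import special
>>> x, y = np.array([0.5, 2.0]), np.array([1.0, 0.5])
>>> np.allclose(special.rel_entr(x, y), x * np.log(x / y))
True
>>> special.rel_entr(0.0, 1.0)
0.0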
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Reciprocal of the Gamma function
Returns ``1/gamma(z)``
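Examples
--------
An illustrative check (added here, not from the original documentation)
against the ordinary Gamma function:
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.rgamma(4.5), 1.0 / special.gamma(4.5))
True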
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
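Examples
--------
An illustrative round trip through `smirnov` (added here, not from the
original documentation):
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.smirnov(10, special.smirnovi(10, 0.05)), 0.05)
True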
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z)
Spence's function, also known as the dilogarithm. It is defined to
be
.. math::
\int_1^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Note that there is a different convention which defines Spence's
function by the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
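Examples
--------
An illustrative check (added here, not from the original documentation) of
two special values that follow from the definition, ``spence(1) = 0`` and
``spence(0) = pi**2 / 6``:
>>> import numpy as np
>>> from scipy import special
>>> np.allclose([special.spence(1.0), special.spence(0.0)], [0.0, np.pi**2 / 6])
True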
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
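Examples
--------
An illustrative check (added here, not from the original documentation): the
``x = 0`` case is handled without producing ``nan``:
>>> import numpy as np
>>> from scipy import special
>>> special.xlogy(0.0, 0.0)
0.0
>>> np.allclose(special.xlogy(2.0, 3.0), 2.0 * np.log(3.0))
True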
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
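Examples
--------
An illustrative check (added here, not from the original documentation) of
the standard three-term recurrence ``Y_2(x) = (2/x) Y_1(x) - Y_0(x)`` that
the forward-recurrence evaluation relies on:
>>> import numpy as np
>>> from scipy import special
>>> x = 2.5
>>> np.allclose(special.yn(2, x), (2.0 / x) * special.y1(x) - special.y0(x))
True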
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
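Examples
--------
An illustrative check (added here, not from the original documentation)
using the classical value ``zeta(2) = pi**2 / 6``:
>>> import numpy as np
>>> from scipy import special
>>> np.allclose(special.zetac(2.0), np.pi**2 / 6 - 1)
True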
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining ``loggamma`` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggamma`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
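Examples
--------
An illustrative check (added here, not from the original documentation) of
the recurrence and exponential identities quoted in the Notes:
>>> import numpy as np
>>> from scipy import special
>>> z = 1.0 + 2.0j
>>> np.allclose(special.loggamma(z + 1), np.log(z) + special.loggamma(z))
True
>>> np.allclose(np.exp(special.loggamma(5.0)), special.gamma(5.0))
True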
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
|
mit
|
WhatDo/FlowFairy
|
examples/denoise_reg_mult/stages.py
|
1
|
3899
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
import io
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from flowfairy.core.stage import register, Stage, stage
from flowfairy.conf import settings
def get_log_dir():
return os.path.join(settings.LOG_DIR, settings.LOGNAME)
def norm(tensor):
return tf.div((tensor - tf.reduce_min(tensor)), (tf.reduce_max(tensor) - tf.reduce_min(tensor)))
def normnp(val):
return (val - np.min(val))/(np.max(val)-np.min(val))
@register(250)
class SummaryStage(Stage):
def fig2rgb_array(self, expand=True):
self.figure.canvas.draw()
buf = self.figure.canvas.tostring_rgb()
ncols, nrows = self.figure.canvas.get_width_height()
shape = (nrows, ncols, 3) if not expand else (1, nrows, ncols, 3)
return np.fromstring(buf, dtype=np.uint8).reshape(shape)
def reset_fig(self):
self.figure = plt.figure(num=0, figsize=(6,4), dpi=300)
self.figure.clf()
def before(self, sess, net):
tf.summary.scalar('acc', net.train_acc)
tf.summary.scalar('cost', net.train_cost)
tf.summary.scalar('val_acc', net.val_acc)
tf.summary.scalar('val_cost', net.val_cost)
# make histogram
tf.contrib.layers.summarize_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.net = net
tf.summary.audio('input',norm(tf.cast(self.net.train_x, tf.float32)), settings.SAMPLERATE)
tf.summary.audio('target', norm(tf.cast(self.net.train_y, tf.float32)), settings.SAMPLERATE)
tf.summary.audio('pred', norm(tf.cast(self.net.train_pred, tf.float32)), settings.SAMPLERATE)
self.reset_fig()
img = self.fig2rgb_array()
self.train_image_in = tf.placeholder(np.uint8, shape=img.shape)
self.train_image = tf.Variable(np.zeros(img.shape, dtype=np.uint8), trainable=False, name='train_graph_image')
self.train_image_assign = self.train_image.assign(self.train_image_in)
tf.summary.image('train_graph', self.train_image)
self.val_image_in = tf.placeholder(np.uint8, shape=img.shape)
self.val_image = tf.Variable(np.zeros(img.shape, dtype=np.uint8), trainable=False, name='val_graph_image')
self.val_image_assign = self.val_image.assign(self.val_image_in)
tf.summary.image('val_graph', self.val_image)
self.merged = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(get_log_dir(), sess.graph)
def plot(self, sess, pred, x, y, chunk):
self.reset_fig()
res, x, y, c = sess.run([pred, x, y, chunk])
start = c[0] - settings.CHUNK
end = start + settings.CHUNK *3
plt.subplot('111').plot(normnp(res[0,start:end]),'r')
plt.subplot('111').plot(normnp(y[0,start:end]),'b', alpha=0.5)
plt.subplot('111').plot(normnp(x[0,start:end]),'g', alpha=0.5)
def draw_img(self, sess):
self.plot(sess, self.net.train_pred, self.net.train_x, self.net.train_y, self.net.train_chunk)
sess.run(self.train_image_assign, feed_dict={self.train_image_in: self.fig2rgb_array()})
self.plot(sess, self.net.val_pred, self.net.val_x, self.net.val_y, self.net.val_chunk)
sess.run(self.val_image_assign, feed_dict={self.val_image_in: self.fig2rgb_array()})
def run(self, sess, i):
self.draw_img(sess)
summary = sess.run(self.merged)
self.writer.add_summary(summary, i)
@register()
class TrainingStage(Stage):
def before(self, sess, net):
self.optimizer = net.optimizer
def run(self, sess, i):
sess.run(self.optimizer)
@register(10000)
class SavingStage(Stage):
def before(self, sess, net):
self.saver = tf.train.Saver()
def run(self, sess, i):
self.saver.save(sess, get_log_dir(), global_step=i)
|
mit
|
mojoboss/scikit-learn
|
sklearn/neighbors/classification.py
|
106
|
13987
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array of shape [n_samples, n_features]
A 2-D array representing the test points.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
|
bsd-3-clause
|
joonaslomps/hiragana-ocr
|
code/test.py
|
1
|
20424
|
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
from random import shuffle
from matplotlib import pyplot as plt
from os import listdir
from os.path import isfile, join
letters = ["a","i","u","e","o","ka","ki","ku","ke","ko","sa","shi","su","se","so","fu","ha","hi","ho","he","ma","mi","mu","me","mo","n","na","ni","no","nu","ne","ra","ri","ru","re","ro","ta","chi","to","te","tsu","wa","wo","ya","yo","yu"]
lettersN = range(46)
filePrefixes = ["a","i","u","e","o","ka","ki","ku","ke","ko","sa","shi","su","se","so","fu","ha","hi","ho","he","ma","mi","mu","me","mo","n_","na","ni","no","nu","ne","ra","ri","ru","re","ro","ta","chi","to","te","tsu","wa","wo","ya","yo","yu", "da", "ji_", "du", "de", "do","zo","ji(shi)","zu","ze","zo","ba","bi","bu","be","bo","pa","pi","pu","pe","po", "ga","gi","gu","ge","go"]
SZ=50
bin_n = 16 # Number of bins
affine_flags = cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR
SVM_GAMMA = 5.383
SVM_C = 2.67
def deskew(img):
m = cv2.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv2.warpAffine(img,M,(SZ, SZ),flags=affine_flags)
return img
def hog(img):
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
# quantizing binvalues in (0...16)
bins = np.int32(bin_n*ang/(2*np.pi))
x = 25
# Divide to 4 sub-squares
bin_cells = bins[:x,:x], bins[x:,:x], bins[:x,x:], bins[x:,x:]
mag_cells = mag[:x,:x], mag[x:,:x], mag[:x,x:], mag[x:,x:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
return hist
def printNthInList(list, n):
text = ""
for i in range(2500):
if i % 50 > 0:
if list[n][i] != 0:
text += "+"
else:
text += " "
else:
text += "\n"
print text
# CREATE NxNpx data from picture
def make_unified_data(N):
for letter in letters:
for i in range(10):
image = cv2.imread("../data/"+letter+"/"+str(i)+".png")
image = cv2.resize(image, (N,N))
cv2.imwrite("../data/"+letter+"/"+str(i)+"_"+str(N)+"x"+str(N)+".png", image)
def make_usable_data(x, dataN, offset):
onePicPx = len(x[0]) * len(x[0][0])
# Make each pictures to 1-dim array (train data) 8 picture per letter
offset = offset
data = []
for i in range(len(x)/10):
for i in range(dataN):
data.append(x[offset+i])
offset += 10
data = np.array(data)
data = data.reshape(-1,onePicPx).astype(np.float32)
return data
# Load in the letters
def generate_image_data():
cells = []
for letter in letters:
for i in range(10):
if letter == "sa" and i == 6:
image = cv2.imread("../data/"+letter+"/"+str(i)+"_50x50.png",0)
thresh = cv2.threshold(image,100,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
cells.append(thresh)
else:
image = cv2.imread("../data/"+letter+"/"+str(i)+"_50x50.png",0)
thresh = cv2.threshold(image,100,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
cells.append(thresh)
# Make images to np array
x = np.array(cells)
deskewed = [map(deskew,row) for row in x]
hogdata = [map(hog,row) for row in deskewed]
return x,hogdata
######################################################
# SVM
######################################################
def test_SVM_accuracy(x, trainN, testN, name):
## TRAINING ###
# Make each pictures to 1-dim array (train data) trainN picture per letter
train = make_usable_data(x, trainN, 0)
# Generate integer values for letters
train_labels = np.repeat(lettersN, trainN)[:,np.newaxis]
# Make svm
svm = cv2.ml.SVM_create()
svm.setGamma(SVM_GAMMA)
svm.setC(SVM_C)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
ok = svm.train(train,cv2.ml.ROW_SAMPLE,train_labels)
### TESTING ###
# Make each pictures to 1-dim array (test data) testN pictures per letter
test = make_usable_data(x, testN, trainN)
# Generate integer values for letters
test_labels = np.repeat(lettersN, testN)[:,np.newaxis]
result = svm.predict(test)
### CHECK ACCURACY ###
mask = result[1]==test_labels
correct = np.count_nonzero(mask)
accuracy = correct*100.0/result[1].size
print name + str(accuracy)
######################################################
# SVM
######################################################
######################################################
# k-Nearest Neighbour - with picture
# x = array of characters, each given as a 2-D pixel array, e.g. [[px,px,px],[px,px,px],[px,px,px]] for a 3x3 px image.
#
######################################################
def test_kNN_accuracy(x, trainN, testN, name):
## TRAINING ###
    # Flatten trainN pictures per letter into 1-dim rows (training data)
train = make_usable_data(x, trainN, 0)
print len(train)
print len(train[0])
# Generate integer values for letters
train_labels = np.repeat(lettersN, trainN)[:,np.newaxis]
# Do the real k-nearest neighbour search
knn = cv2.ml.KNearest_create()
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
### TESTING ###
    # Flatten testN pictures per letter into 1-dim rows (test data)
test = make_usable_data(x, testN, trainN)
ret,result,neighbours,dist = knn.findNearest(test,k=4)
test_labels = np.repeat(lettersN, testN)[:,np.newaxis]
### CHECK ACCURACY ###
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print name + str(accuracy)
######################################################
# k-Nearest Neighbour - with picture
######################################################
# Merges each pair of rectangles into one bounding box
def pair_pairs(pairs, rects):
for pair in pairs:
upper = None
lower = None
if pair[0][1] > pair[1][1]:
upper = pair[1]
lower = pair[0]
else:
upper = pair[0]
lower = pair[1]
x = min(upper[0], lower[0])
y = upper[1]
w = abs(lower[0] - upper[0]) + max(upper[2], lower[2])
h = lower[1] - upper[1] + lower[3]
rects.append((x,y,w,h))
return rects
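# --- Illustration (not part of the original pipeline): merging stacked boxes ---
# A worked example with two hypothetical bounding boxes (x, y, w, h): an upper
# box at (10, 5) sized 20x10 and a lower box at (12, 20) sized 18x15 merge into
# a single box spanning both, which is how split character contours are rejoined.
_demo_upper = (10, 5, 20, 10)
_demo_lower = (12, 20, 18, 15)
_demo_merged = pair_pairs([[_demo_upper, _demo_lower]], [])
# _demo_merged == [(10, 5, 22, 30)]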
def find_pairs(rects, offset):
pairs = []
changed = []
# Fix contours for side by side
for i in range(len(rects)):
for j in range(len(rects)):
if j <= i:
continue
c_1 = rects[i]
c_2 = rects[j]
if abs(c_1[0]-c_2[0]) <= offset:
pairs.append([c_1,c_2])
changed.append(c_1)
changed.append(c_2)
return pairs, changed
def rec_from_image(fileLocation, rawdata, hogdata):
image = cv2.imread(fileLocation,0)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
white = np.copy(image)
for i in range(len(white)):
for j in range(len(white[0])):
white[i][j] = 255
blur = cv2.GaussianBlur(image, (3, 3), 0)
frameDelta = cv2.absdiff(white, blur)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
rects = []
for c in cnts:
rects.append(cv2.boundingRect(c))
# Fix contours for up and down
pairs, changed = find_pairs(rects, 10)
rects = pair_pairs(pairs, rects)
for c in changed:
if c in rects:
rects.remove(c)
pairs, changed = find_pairs(rects, 50)
rects = pair_pairs(pairs, rects)
for c in changed:
if c in rects:
rects.remove(c)
knnRawImage = np.copy(image)
knnHOGImage = np.copy(image)
SVMRawImage = np.copy(image)
SVMHOGImage = np.copy(image)
train_labels = np.repeat(lettersN, 10)[:,np.newaxis]
trainRaw = make_usable_data(rawdata, 10, 0)
trainHOG = make_usable_data(hogdata, 10, 0)
# ### Train kNN-raw
knnRaw = cv2.ml.KNearest_create()
knnRaw.train(trainRaw, cv2.ml.ROW_SAMPLE, train_labels)
print "kNN-Raw trained"
# ### Train kNN-HOG
knnHOG = cv2.ml.KNearest_create()
knnHOG.train(trainHOG, cv2.ml.ROW_SAMPLE, train_labels)
print "kNN-HOG trained"
# ### Train SVM-raw
svmRAW = cv2.ml.SVM_create()
svmRAW.setGamma(SVM_GAMMA)
svmRAW.setC(SVM_C)
svmRAW.setKernel(cv2.ml.SVM_LINEAR)
svmRAW.setType(cv2.ml.SVM_C_SVC)
ok = svmRAW.train(trainRaw,cv2.ml.ROW_SAMPLE,train_labels)
print "SVM-HOG trained"
# ### Train SVM-raw
svmHOG = cv2.ml.SVM_create()
svmHOG.setGamma(SVM_GAMMA)
svmHOG.setC(SVM_C)
svmHOG.setKernel(cv2.ml.SVM_LINEAR)
svmHOG.setType(cv2.ml.SVM_C_SVC)
ok = svmHOG.train(trainHOG,cv2.ml.ROW_SAMPLE,train_labels)
print "SVM-HOG trained"
for rect in rects:
        # crop the bounding box from the image, classify it with each trained
        # model, and draw the predicted letter next to the box
(x, y, w, h) = rect
rectImage = image[y:h+y,x:w+x]
rectImage = cv2.resize(rectImage, (50,50))
thresh = cv2.threshold(rectImage,100,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
test_raw = np.array([thresh])
deskewed = [map(deskew,row) for row in test_raw]
test_hogdata = [map(hog,row) for row in deskewed]
test_hogdata = np.array(test_hogdata)
test_raw = test_raw.reshape(-1,2500).astype(np.float32)
test_hogdata = test_hogdata.reshape(-1,3200).astype(np.float32)
ret,result,neighbours,dist = knnRaw.findNearest(test_raw, k=4)
cv2.putText(knnRawImage,letters[int(result[0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
ret,result,neighbours,dist = knnHOG.findNearest(test_hogdata, k=4)
cv2.putText(knnHOGImage,letters[int(result[0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
result = svmRAW.predict(test_raw)
cv2.putText(SVMRawImage,letters[int(result[1][0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
result = svmHOG.predict(test_hogdata)
cv2.putText(SVMHOGImage,letters[int(result[1][0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
cv2.imshow("image", image)
cv2.imshow("knnRawImage", knnRawImage)
cv2.imshow("knnHOGImage", knnHOGImage)
cv2.imshow("SVMRawImage", SVMRawImage)
cv2.imshow("SVMHOGImage", SVMHOGImage)
# TEST
def test_kNN_HOG_accuracy_full(test_amount):
knnRaw = cv2.ml.KNearest_create()
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
thresh = np.array([thresh])
deskewed = [map(deskew,row) for row in thresh]
hogData = [map(hog,row) for row in deskewed]
if(len(nameFiles) - test_amount < j):
test.append(hogData)
test_labels.append(filePrefixes.index(name))
else:
train.append(hogData)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,3200).astype(np.float32)
knnRaw.train(x, cv2.ml.ROW_SAMPLE, np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,3200).astype(np.float32)
ret,result,neighbours,dist = knnRaw.findNearest(y,k=4)
correct = 0
for i in range(len(neighbours)):
# print str(neighbours[i]) + " - " + str(test_labels[i]) + " - " + str(result[i])
if test_labels[i] == result[i][0]:
correct = correct + 1
accuracy = correct*100.0/result.size
print "kNN - HOG: " + str(accuracy) + "%"
def test_kNN_RAW_accuracy_full(test_amount):
knnRaw = cv2.ml.KNearest_create()
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
if(len(nameFiles) - test_amount <= j):
test.append(thresh)
test_labels.append(filePrefixes.index(name))
else:
train.append(thresh)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,2500).astype(np.float32)
knnRaw.train(x, cv2.ml.ROW_SAMPLE, np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,2500).astype(np.float32)
ret,result,neighbours,dist = knnRaw.findNearest(y,k=4)
correct = 0
for i in range(len(neighbours)):
# print str(neighbours[i]) + " - " + str(test_labels[i]) + " - " + str(result[i])
if test_labels[i] == result[i][0]:
correct = correct + 1
accuracy = correct*100.0/result.size
print "kNN - RAW: " + str(accuracy) + "%"
def test_SVM_RAW_accuracy_full(test_amount):
svm = cv2.ml.SVM_create()
svm.setGamma(SVM_GAMMA)
svm.setC(SVM_C)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
if(len(nameFiles) - test_amount <= j):
test.append(thresh)
test_labels.append(filePrefixes.index(name))
else:
train.append(thresh)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,2500).astype(np.float32)
ok = svm.train(x,cv2.ml.ROW_SAMPLE,np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,2500).astype(np.float32)
result = svm.predict(y)
correct = 0
for i in range(len(result[1])):
# print str(test_labels[i]) + " - " + str(result[1][i][0])
if test_labels[i] == result[1][i][0]:
correct = correct + 1
accuracy = correct*100.0/result[1].size
print "SVM - RAW: " + str(accuracy) + "%"
def test_SVM_HOG_accuracy_full(test_amount):
svm = cv2.ml.SVM_create()
svm.setGamma(SVM_GAMMA)
svm.setC(SVM_C)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
thresh = np.array([thresh])
deskewed = [map(deskew,row) for row in thresh]
hogData = [map(hog,row) for row in deskewed]
if(len(nameFiles) - test_amount < j):
test.append(hogData)
test_labels.append(filePrefixes.index(name))
else:
train.append(hogData)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,3200).astype(np.float32)
ok = svm.train(x,cv2.ml.ROW_SAMPLE,np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,3200).astype(np.float32)
result = svm.predict(y)
correct = 0
for i in range(len(result[1])):
# print str(test_labels[i]) + " - " + str(result[1][i][0])
if test_labels[i] == result[1][i][0]:
correct = correct + 1
accuracy = correct*100.0/result[1].size
print "SVM - HOG: " + str(accuracy) + "%"
####################################################################
# From https://gist.github.com/moshekaplan/5106221#file-test_surf-py
def filter_matches(kp1, kp2, matches, ratio = 0.75):
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
kp_pairs = zip(mkp1, mkp2)
return kp_pairs
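# --- Illustration (not part of the matching code): Lowe's ratio test in numbers ---
# filter_matches() keeps a match only when its best distance is clearly smaller
# than the second-best one. With hypothetical distances 0.30 and 0.50 and the
# default ratio of 0.75, the match survives because 0.30 < 0.75 * 0.50 = 0.375.
_demo_best_dist, _demo_second_dist = 0.30, 0.50
_demo_keep_match = _demo_best_dist < _demo_second_dist * 0.75  # True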
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
vis[:h1, :w1] = img1
vis[:h2, w1:w1+w2] = img2
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
if H is not None:
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
cv2.polylines(vis, [corners], True, (255, 255, 255))
if status is None:
status = np.ones(len(kp_pairs), np.bool_)
p1 = np.int32([kpp[0].pt for kpp in kp_pairs])
p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
kp_color = (51, 103, 236)
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
col = green
cv2.circle(vis, (x1, y1), 2, col, -1)
cv2.circle(vis, (x2, y2), 2, col, -1)
else:
col = red
r = 2
thickness = 3
cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
vis0 = vis.copy()
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
cv2.line(vis, (x1, y1), (x2, y2), green)
cv2.imshow(win, vis)
def draw_matches(window_name, kp_pairs, img1, img2):
"""Draws the matches for """
mkp1, mkp2 = zip(*kp_pairs)
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
if len(kp_pairs) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
#print '%d / %d inliers/matched' % (np.sum(status), len(status))
else:
H, status = None, None
#print '%d matches found, not enough for homography estimation' % len(p1)
if len(p1):
explore_match(window_name, img1, img2, kp_pairs, status, H)
####################################################################
# x, hogdata = generate_image_data()
# test_kNN_accuracy(x,8,2, "kNN: Raw-Data accuracy: ") # kNN with raw pixel data
# test_kNN_accuracy(hogdata,8,2, "kNN: HOG data accuracy: ") # kNN with HOG data
# test_SVM_accuracy(x,8,2, "SVM: Raw-Data data accuracy: ") # SVM with raw pixel data
# test_SVM_accuracy(hogdata,8,2, "SVM: HOG data accuracy: ") # SVM with HOG data
# testFile = "../data/long/nihon.png"
# rec_from_image(testFile, x, hogdata)
# Test with whole dataset.
# test_kNN_HOG_accuracy_full(80)
# test_kNN_RAW_accuracy_full(80)
# test_SVM_RAW_accuracy_full(80)
# test_SVM_HOG_accuracy_full(80)
folder = "../data/templates/singles_50x50/"
surf = cv2.SURF(100)  # OpenCV 2.4 API; on OpenCV 3.x this is cv2.xfeatures2d.SURF_create(100)
surf.extended = True
files = ["ba_86.png","go_172.png","po_64.png","hi_157.png","de_28.png","go_111.png","ho_91.png","ya_134.png","to_169.png","ki_166.png"]
matcher = cv2.BFMatcher(cv2.NORM_L2)
# for file in files:
# image = cv2.imread(folder + file, 0)
# kp, des = surf.detectAndCompute(image, None)
# print len(kp)
# img2 = cv2.drawKeypoints(image,kp,None,(255,0,0),4)
image1 = cv2.imread(folder + files[1], 0)
kp1, des1 = surf.detectAndCompute(image1, None)
# image1keypoints = cv2.drawKeypoints(image1,kp1,None,(255,0,0),4)
image2 = cv2.imread(folder + files[3], 0)
kp2, des2 = surf.detectAndCompute(image2, None)
# image2keypoints = cv2.drawKeypoints(image2,kp2,None,(255,0,0),4)
print len(kp1)
print len(kp2)
print len(des1)
print len(des2)
raw_matches = matcher.knnMatch(des1, trainDescriptors = des2, k = 2) #2
kp_pairs = filter_matches(kp1, kp2, raw_matches)
draw_matches("test", kp_pairs, image1, image2)
cv2.waitKey(0)
|
mit
|
thientu/scikit-learn
|
sklearn/cluster/setup.py
|
263
|
1449
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
thientu/scikit-learn
|
sklearn/datasets/lfw.py
|
141
|
19372
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
    # iterate over the collected file paths to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
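# Illustrative note (not part of scikit-learn): with the default fetch_lfw_people
# slice_=(slice(70, 195), slice(78, 172)) the crop computed above is
# (195 - 70) x (172 - 78) = 125 x 94 pixels, and resize=0.5 then gives
# int(0.5 * 125) x int(0.5 * 94) = 62 x 47, matching the shapes documented below.
_EXAMPLE_LFW_CROP = (195 - 70, 172 - 78)                 # (125, 94)
_EXAMPLE_LFW_RESIZED = (int(0.5 * 125), int(0.5 * 94))   # (62, 47)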
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filenames to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
|
bsd-3-clause
|
btabibian/scikit-learn
|
examples/linear_model/plot_sgd_loss_functions.py
|
86
|
1234
|
"""
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
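# Quick sanity check (illustrative addition, not part of the original example):
# for y_true = [-2, 0, 2] and y_pred = 1 the modified Huber loss is -4*z = 8 in
# the linear region, (1 - z)**2 = 1 at the margin, and 0 once z >= 1.
_mh_check = modified_huber_loss(np.array([-2.0, 0.0, 2.0]), 1.0)
# _mh_check == array([8., 1., 0.])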
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y=1, f(x))$")
plt.show()
|
bsd-3-clause
|
meduz/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
24
|
14430
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
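# Illustrative note (not part of the scikit-learn test suite): the fixture above
# builds a 9x9 block-diagonal document-term matrix with 3 documents per topic and
# 3 words per topic, so topic k only ever uses words 3k..3k+2. The expected
# top-word groups asserted below, (0, 1, 2), (3, 4, 5), (6, 7, 8), follow directly.
_n_topics_demo, _X_demo = _build_sparse_mtx()
# _X_demo.shape == (9, 9) and _X_demo.nnz == 27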
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative values
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples',
lda._perplexity_precomp_distr, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics',
lda._perplexity_precomp_distr, X, invalid_n_topics)
def test_lda_perplexity():
    # Test LDA perplexity for both batch and online training
    # perplexity should be lower with more iterations
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
    # Test LDA score for both batch and online training
    # score should be higher with more iterations
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_doc_topic_distr_deprecation():
# Test that the appropriate warning message is displayed when a user
# attempts to pass the doc_topic_distr argument to the perplexity method
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr1 = lda.fit_transform(X)
distr2 = None
assert_warns(DeprecationWarning, lda.perplexity, X, distr1)
assert_warns(DeprecationWarning, lda.perplexity, X, distr2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
|
bsd-3-clause
|
samnashi/howdoflawsgetlonger
|
generator_columns_tester.py
|
1
|
10893
|
from __future__ import print_function
import numpy as np
from random import shuffle
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
from keras.utils import plot_model
from keras.layers import Dense, LSTM, GRU, Flatten, Input, Reshape, TimeDistributed, Bidirectional, Dense, Dropout, \
Activation, Flatten, Conv1D, MaxPooling1D, GlobalAveragePooling1D, AveragePooling1D, concatenate
from keras import metrics
import pandas as pd
import scipy.io as sio
import os
import json
import scattergro_utils as sg_utils
import sklearn.preprocessing
#def batch_size_verifier
# The number of times Keras calls the generator is limited OUTSIDE the generator:
# on each fit, steps_per_epoch = dataset length // batch size (rounded down).
def np_array_pair_generator(data,labels,start_at=0,generator_batch_size=64,scaled=True,scaler_type = 'standard',scale_what = 'data'): #shape is something like 1, 11520, 11
    '''Custom batch-yielding generator for Scattergro output. Feed it the numpy arrays produced by the "Parse_Individual_Arrays" script.
    data and labels are self-explanatory.
    Parameters:
    start_at: configures where in the arrays the generator starts yielding (so an LSTM doesn't always start at the same place)
    generator_batch_size: how many "rows" of the numpy array the generator yields each time
    scaled: whether the output is scaled or not.
    scaler_type: which sklearn scaler to call
    scale_what: either the data/label (the whole array), or the yield.'''
if scaled == True:
if scaler_type == 'standard':
scaler = sklearn.preprocessing.StandardScaler()
print('standard scaler initialized: {}'.format(scaler))
elif scaler_type == 'minmax':
scaler = sklearn.preprocessing.MinMaxScaler()
elif scaler_type == 'robust':
scaler = sklearn.preprocessing.RobustScaler()
else:
scaler = sklearn.preprocessing.StandardScaler()
print("scaled: {}, scaler_type: {}".format(scaled,scaler_type))
data_scaled = scaler.fit_transform(X=data, y=None)
#labels_scaled = scaler.fit_transform(X=labels, y=None) #i don't think you should scale the labels..
labels_scaled = labels #don't scale the labels..
#--------i think expand dims is a lot less implicit, that's why i commented these out-------
# data_scaled = np.reshape(data_scaled,(1,data_scaled.shape[0],data_scaled.shape[1]))
# labels_scaled = np.reshape(labels_scaled, (1, labels_scaled.shape[0],labels_scaled.shape[1]))
#----------------------------------------------------------------------------------------------
data_scaled = np.expand_dims(data_scaled, axis=0) # add 1 dimension in the
labels_scaled = np.expand_dims(labels_scaled, axis=0)
index = start_at
while 1: #for index in range(start_at,generator_batch_size*(data.shape[1]//generator_batch_size)):
x1 = (data_scaled[:, index:index + generator_batch_size, 0]) # first dim = 0 doesn't work.
x2 = (data_scaled[:, index:index + generator_batch_size, 1])
y1 = (labels_scaled[:, index:index + generator_batch_size, 0])
#if generator won't yield the full batch in 3 iterations, then..
if index + 3 * generator_batch_size < data_scaled.shape[1]:
index = index + generator_batch_size
else: #reset. anywhere between 0 and length of dataset - 3*batch size.
index = np.random.randint(low=0, high=(
generator_batch_size * ((data_scaled.shape[1] - start_at) // generator_batch_size - 3)))
# ----------------ENABLE THIS FOR DIAGNOSTICS----------------------
# print("x_shape at reset: {}".format(x.shape))
# print("data shape: {}, x type: {}, y type:{}".format(data_scaled.shape,type(x),type(y)))
# x = np.reshape(x,(1,x.shape[0],x.shape[1]))
# y = np.reshape(y, (1, y.shape[0],y.shape[1]))
# print("after reshaping: index: {}, x shape: {}, y shape:{}".format(index, x.shape, y.shape))
# if (index == data_scaled.shape[1] - 512): print("index reached: {}".format(index))
# print("x: {}, y: {}".format(x,y))
# -------------------ENABLE THIS FOR DIAGNOSTICS-----------------------
# print("index: {}".format(index))
# if (x.shape[1] != generator_batch_size and y.shape[1] != generator_batch_size): return
# if (x.shape[1] != generator_batch_size and y.shape[1] != generator_batch_size): raise StopIteration
assert (x1.shape[1] == generator_batch_size) #if it's not yielding properly, stop.
# assert(y.shape[1]==generator_batch_size)
yield ([x1, x2], y1)
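# --- Illustration (not part of the training script): steps_per_epoch arithmetic ---
# A minimal sketch, assuming a sequence of 11520 timesteps (the shape hinted at in
# the generator's comment) and the generator's default batch size of 64: as noted
# above, fit_generator must be told how many draws make up an epoch, i.e.
# 11520 // 64 = 180 steps, rounding down.
_demo_sequence_length = 11520
_demo_steps_per_epoch = _demo_sequence_length // 64  # 180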
#!!!!!!!!!!!!!!!!!!!!!TRAINING SCHEME PARAMETERS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
shortest_length = sg_utils.get_shortest_length() #a suggestion. will also print the remainders.
num_epochs = 1  # per sequence draw: how many times the net is trained on that sequence consecutively
num_sequence_draws = 1 #how many times the training corpus is sampled.
generator_batch_size = 4
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
identifier = "_run_2b_new_gen_short_"
Base_Path = "./"
train_path = "/home/ihsan/Documents/thesis_models/train/"
test_path = "/home/ihsan/Documents/thesis_models/test/"
#seq_length_dict_filename = train_path + "/data/seq_length_dict.json"
#11 input columns
#4 output columns.
np.random.seed(1337)
#load data multiple times.
data_filenames = os.listdir(train_path + "data")
#print("before sorting, data_filenames: {}".format(data_filenames))
data_filenames.sort()
#print("after sorting, data_filenames: {}".format(data_filenames))
label_filenames = os.listdir(train_path + "label")
label_filenames.sort() #sorting makes sure the label and the data are lined up.
#print("label_filenames: {}".format(data_filenames))
assert len(data_filenames) == len(label_filenames)
combined_filenames = zip(data_filenames,label_filenames)
#print("before shuffling: {}".format(combined_filenames))
shuffle(combined_filenames)
print("after shuffling: {}".format(combined_filenames)) #shuffling works ok.
#
# #define the model first
# a = Input(shape=(None,11))
# b = Bidirectional(LSTM(350,return_sequences=True))(a)
# c = Bidirectional(LSTM(350,return_sequences=True))(b)
# d = TimeDistributed(Dense(64, activation='selu'))(c) #timedistributed wrapper gives None,64
# e = TimeDistributed(Dense(32, activation='selu'))(d)
# out = TimeDistributed(Dense(4))(e)
#
# model = Model(inputs=a,outputs=out)
# print("Model summary: {}".format(model.summary()))
# model.compile(loss='mse', optimizer='rmsprop',metrics=['accuracy','mae','mape','mse'])
#
# print("Inputs: {}".format(model.input_shape))
# print ("Outputs: {}".format(model.output_shape))
# print ("Metrics: {}".format(model.metrics_names))
#
# plot_model(model, to_file='model_' + identifier + '.png',show_shapes=True)
# #print ("Actual input: {}".format(data.shape))
# #print ("Actual output: {}".format(target.shape))
print('loading data...')
if os.path.isfile('Weights_' + str(num_sequence_draws) + identifier + '.h5') == False:
print("TRAINING PHASE")
for i in range(0,num_sequence_draws):
index_to_load = np.random.randint(0, len(combined_filenames)) # switch to iterations
files = combined_filenames[index_to_load]
data_load_path = train_path + '/data/' + files[0]
label_load_path = train_path + '/label/' + files[1]
#print("data/label load path: {} \n {}".format(data_load_path,label_load_path))
train_array = np.load(data_load_path)
label_array = np.load(label_load_path)[:,1:]
#train_array = np.reshape(train_array,(1,train_array.shape[0],train_array.shape[1]))
#label_array = np.reshape(label_array,(1,label_array.shape[0],label_array.shape[1])) #label needs to be 3D for TD!
print("data/label shape: {}, {}, draw #: {}".format(train_array.shape,label_array.shape, i))
train_generator = np_array_pair_generator(train_array,label_array,start_at=0,generator_batch_size=generator_batch_size)
print((train_generator.next())[0][0].shape)
print((train_generator.next())[0][1].shape)
print((train_generator.next())[1].shape)
#print((train_generator.next())[2].shape)
# print(train_generator.next())
# print(train_generator.next())
# print(train_generator.next())
# define the model first
a1 = Input(shape=(None, 1))
a2 = Input(shape=(None, 1))
b1 = Conv1D(2, (2), padding='valid', activation='relu')(a1)
b2 = Conv1D(2, (2), padding='valid', activation='relu')(a2)
c1 = MaxPooling1D((2))(b1)
c2 = MaxPooling1D((2))(b2)
d1 = Conv1D(2, (2), padding='valid', activation='relu')(c1)
d2 = Conv1D(2, (2), padding='valid', activation='relu')(c2)
# e1 = MaxPooling1D((2))(d1)
# e2 = MaxPooling1D((2))(d2)
f1 = Dense(8, activation='relu')(d1)
f2 = Dense(8, activation='relu')(d2)
g = concatenate([f1,f2])
out = Dense(1)(g)
model = Model(inputs=[a1,a2], outputs=out)
model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy', 'mae', 'mape', 'mse'])
print("Model summary: {}".format(model.summary()))
training_hist = model.fit_generator(np_array_pair_generator(train_array,label_array,start_at=0,generator_batch_size=generator_batch_size),epochs=num_epochs,steps_per_epoch=train_array.shape[1]//generator_batch_size,verbose=2)
|
gpl-3.0
|
achim1/HErmes
|
HErmes/selection/dataset.py
|
2
|
29071
|
"""
Datasets group categories together. Method calls on datasets invoke the individual methods
on the individual categories. Cuts applied to datasets will act on each individual category.
"""
import pandas as pd
import numpy as np
from collections import OrderedDict
from copy import deepcopy as copy
from ..visual import VariableDistributionPlot
from ..utils import isnotebook
from ..utils import Logger
from dashi.tinytable import TinyTable
from . import categories
def get_label(category):
"""
Get the label for labeling plots from a datasets plot_options dictionary.
Args:
category (HErmes.selection.categories.category): Query the category's plot_options dict; if no label is defined there, fall back to category.name
Returns:
string
"""
if category.plot_options:
if "label" in category.plot_options:
return category.plot_options["label"]
else:
return category.name
else:
return category.name
class Dataset(object):
"""
Holds different categories, relays calls to each
of them.
"""
def __init__(self, *args, **kwargs):
"""
Args:
*args: HErmes.selection.variables.categories.Category list
Keyword Args:
combined_categories:
"""
self.categories = []
self.combined_categories = []
# sort categories, do reweighted simulation last
# FIXME: if not, there will be problems
# FIXME: investigate!
reweighted_categories = []
self.default_plotstyles = {}
for cat in args:
self.__dict__[cat.name] = cat
if isinstance(cat,categories.ReweightedSimulation):
reweighted_categories.append(cat)
continue
self.categories.append(cat)
self.categories = self.categories + reweighted_categories
if 'combined_categories' in kwargs:
for name in list(kwargs['combined_categories'].keys()):
self.combined_categories.append(categories.CombinedCategory(name,kwargs['combined_categories'][name]))
def set_default_plotstyles(self, styledict):
"""
Define a default style for each category,
describing how it should appear in plots
Args:
styledict (dict)
"""
self.default_plotstyles = styledict
for cat in self.categorynames:
self[cat].add_plotoptions(styledict[cat])
def add_variable(self, variable):
"""
Add a variable to each category in this dataset
Args:
variable (HErmes.selection.variables.variables.Variable): A Variable instance
"""
for cat in self.categories:
cat.add_variable(variable)
def delete_variable(self, varname):
"""
Delete a variable entirely from the dataset
Args:
varname (str): the name of the variable
Returns:
None
"""
for cat in self.categories:
cat.delete_variable(varname)
def load_vardefs(self, vardefs):
"""
Load the variable definitions from a module
Args:
vardefs (python module/dict): A module needs to contain variable definitions.
It can also be a dictionary of categoryname->module
"""
if isinstance(vardefs, dict):
for k in vardefs:
# FIXME: the way over self.__dict__ does not work
# maybe there is something more fishy...
if str(k) == "all":
for cat in self.categories:
cat.load_vardefs(vardefs[k])
for cat in self.categories:
if cat.name == k:
cat.load_vardefs(vardefs[k])
#self.__dict__[k].load_vardefs(vardefs)
else:
for cat in self.categories:
cat.load_vardefs(vardefs)
@property
def variablenames(self):
return {cat.name : cat.variablenames for cat in self.categories}
@property
def files(self):
return {cat.name : cat.files for cat in self.categories}
#@GetTiming
def read_variables(self, names=None,
max_cpu_cores=categories.MAX_CORES,
dtype=np.float64):
"""
Read out the variable for all categories
Keyword Args:
names (str): Read out only these variables if given
max_cpu_cores (int): Maximum number of cpu cores which will be used
dtype (np.dtype) : Cast to the given datatype (default is np.float64)
Returns:
None
"""
progbar = False
try:
import tqdm
n_it = len(self.categories)
loader_string = "Loading dataset"
if isnotebook():
bar = tqdm.tqdm_notebook(total=n_it, desc=loader_string, leave=True)
else:
bar = tqdm.tqdm(total=n_it, desc=loader_string, leave=True)
progbar = True
except ImportError:
pass
for cat in self.categories:
Logger.debug("Reading variables for {}".format(cat))
cat.read_variables(names=names, max_cpu_cores=max_cpu_cores, dtype=dtype)
if progbar: bar.update()
def drop_empty_variables(self):
"""
Delete variables which have no entries (zero length)
Returns:
None
"""
for cat in self.categories:
cat.drop_empty_variables()
def set_weightfunction(self, weightfunction=lambda x:x):
"""
Defines a function which is used for weighting
Args:
weightfunction (func or dict): if a func is provided, it is set for all categories;
if needed, provide a dict of cat.name -> func for individual settings
Returns:
None
"""
if isinstance(weightfunction, dict):
for cat in self.categories:
cat.set_weightfunction(weightfunction[cat.name])
else:
for cat in self.categories:
cat.set_weightfunction(weightfunction)
def calculate_weights(self, model=None, model_args=None):
"""
Calculate the weights for all categories
Keyword Args:
model (dict/func) : Either a dict catname -> func or a single func
If it is a single func it will be applied to all categories
model_args (dict/list): variable names as arguments for the function
"""
if isinstance(model, dict):
if not isinstance(model_args, dict):
raise ValueError("if model is a dict, model_args has to be a dict too!")
for catname in model:
self.get_category(catname).calculate_weights(model=model[catname], model_args=model_args[catname])
else:
for cat in self.categories:
cat.calculate_weights(model=model, model_args=model_args)
# def get_weights(self, models):
# """
# Calculate the weights for all categories
#
# Args:
# models (dict or callable): A dictionary of categoryname -> model or a single clbl
# """
# if isinstance(models, dict):
# for catname in models:
# self.get_category(catname).get_weights(models[catname])
# if callable(models):
# for cat in self.categories:
# cat.get_weights(models)
def add_category(self,category):
"""
Add another category to the dataset
Args:
category (HErmes.selection.categories.Category): add this category
"""
self.categories.append(category)
def __getitem__(self, item):
"""
Shortcut for self.get_category/get_variable
Args:
item:
Returns:
HErmes.selection.variables.Variable/HErmes.selection.categories.Category
"""
try:
return self.get_category(item)
except KeyError:
pass
try:
return self.get_variable(item)
except KeyError:
pass
if ":" in item:
cat, var = item.split(":")
return self.get_category(cat).get(var)
else:
raise KeyError("{} can not be found".format(item))
def get_category(self, categoryname):
"""
Get a reference to a category.
Args:
categoryname (str): The name of the category to retrieve
Returns:
HErmes.selection.categories.Category
"""
for cat in self.categories:
if cat.name == categoryname:
return cat
raise KeyError("Can not find category {}.".format(categoryname))
def get_variable(self, varname):
"""
Get a pandas dataframe for all categories
Args:
varname (str): A name of a variable
Returns:
pandas.DataFrame: A 2d dataframe category -> variable
"""
var = dict()
for cat in self.categories:
var[cat.name] = cat.get(varname)
return pd.DataFrame.from_dict(var, orient="index")
def set_livetime(self, livetime):
"""
Define a livetime for this dataset.
Args:
livetime (float): Time interval the data was taken in. (Used for rate calculation)
Returns:
None
"""
for cat in self.categories:
if hasattr(cat, "set_livetime"):
cat.set_livetime(livetime)
@property
def weights(self):
"""
Get the weights for all categories in this dataset
"""
w = dict()
for cat in self.categories:
w[cat.name] = pd.Series(cat.weights, dtype=np.float64)
return pd.DataFrame.from_dict(w,orient='index')
def __repr__(self):
"""
String representation
"""
rep = """ <Dataset: """
for cat in self.categories:
rep += "{} ".format(cat.name)
rep += ">"
return rep
def add_cut(self,cut):
"""
Add a cut without applying it yet
Args:
cut (HErmes.selection.variables.cut.Cut): Append this cut to the internal cutlist
"""
for cat in self.categories:
cat.add_cut(cut)
def apply_cuts(self,inplace=False):
"""
Apply them all!
"""
for cat in self.categories:
cat.apply_cuts(inplace=inplace)
def undo_cuts(self):
"""
Undo previously done cuts, but keep them so that
they can be re-applied
"""
for cat in self.categories:
cat.undo_cuts()
def delete_cuts(self):
"""
Completely purge all cuts from this
dataset
"""
for cat in self.categories:
cat.delete_cuts()
@property
def categorynames(self):
return [cat.name for cat in self.categories]
@property
def combined_categorynames(self):
return [cat.name for cat in self.combined_categories]
def get_sparsest_category(self, omit_empty_cat=True):
"""
Find out which category of the dataset has the least statistical power
Keyword Args:
omit_empty_cat (bool): if a category has no entries at all, omit it
Returns:
str: category name
"""
name = self.categories[0].name
count = self.categories[0].raw_count
for cat in self.categories:
if cat.raw_count < count:
if (cat.raw_count == 0) and omit_empty_cat:
continue
count = cat.raw_count
name = cat.name
return name
def distribution(self,name,\
ratio=([],[]),
cumulative=True,
log=False,
transform=None,
disable_weights=False,
color_palette='dark',
normalized = False,
styles = dict(),
style="classic",
ylabel="rate/bin [1/s]",
axis_properties=None,
ratiolabel="data/$\Sigma$ bg",
bins=None,
external_weights=None,
savepath=None,
figure_factory=None,
zoomin=False,
adjust_ticks = lambda x : x):
"""
One-shot shortcut for one of the most commonly used
plots in event selections.
Args:
name (string) : The name of the variable to plot
Keyword Args:
path (str) : The path under which the plot will be saved.
ratio (list) : A ratio plot of these categories will be created
color_palette (str) : A predefined color palette (from seaborn or HErmes.plotting.colors)
normalized (bool) : Normalize the histogram by number of events
transform (callable) : Apply this transformation before plotting
disable_weights (bool) : Disable all weighting to avoid problems with unevenly sized arrays
styles (dict) : plot styling options
ylabel (str) : general label for y-axis
ratiolabel (str) : different label for the ratio part of the plot
bins (np.ndarray) : binning, if None binning will be deduced from the variable definition
figure_factory (func) : factory function which return a matplotlib.Figure
style (string) : TODO "modern" || "classic" || "modern-cumul" || "classic-cumul"
savepath (string) : Save the canvas at given path. None means it will not be saved.
external_weights (dict) : supply external weights - these will OVERRIDE ANY INTERNALLY CALCULATED WEIGHTS
and use the supplied weights instead.
Must be in the form { "categoryname" : weights}
axis_properties (dict) : Manually define a plot layout with up to three axes.
For example, it can look like this:
{
"top": {"type": "h", # histogram
"height": 0.4, # height in percent
"index": 2}, # used internally
"center": {"type": "r", # ratio plot
"height": 0.2,
"index": 1},
"bottom": { "type": "c", # cumulative histogram
"height": 0.2,
"index": 0}
}
zoomin (bool) : If True, select the yrange in a way that the interesting part of the
histogram is shown. Caution is needed, since this might lead to an
overinterpretation of fluctuations.
adjust_ticks (fcn) : A function, applied on a matplotlib axes
which will set the proper axis ticks
Returns:
HErmes.selection.variables.VariableDistributionPlot
"""
# if (not cumulative) or ratio == ([],[]):
#
# # assuming a single cumulative axis
# tmp_axis_properties = dict()
# unassigned_height = 0
#
# for key in axis_properties:
# if ("c" == axis_properties[key]["type"]) and (not cumulative):
# unassigned_height += axis_properties[key]["height"]
# continue
# if ("r" == axis_properties[key]["type"]) and (ratio == ([],[])):
# unassigned_height += axis_properties[key]["height"]
# continue
#
# tmpdict = copy(axis_properties[key])
# tmpdict["index"] = tmpdict["index"] -1 - bool(ratio == ([],[]))
# tmp_axis_properties.update({key : tmpdict})
#
# n_plots = len(tmp_axis_properties.keys())
# extra_height = unassigned_height/float(n_plots)
# for key in tmp_axis_properties:
# tmp_axis_properties[key]["height"] += extra_height
#
# else:
# tmp_axis_properties = copy(axis_properties)
if axis_properties is not None:
tmp_axis_properties = copy(axis_properties)
else:
# always have the histogram, but add
# cumulative or ratio plot
if cumulative and ratio != ([],[]):
tmp_axis_properties = {\
"top": {"type": "h", \
"height": 0.4, \
"index": 2},\
"center": {"type": "r",\
"height": 0.2,\
"index": 1},\
"bottom": {"type": "c", \
"height": 0.2,\
"index": 0}\
}
elif cumulative:
tmp_axis_properties = { \
"top": {"type": "h", \
"height": 0.6, \
"index": 1}, \
"bottom": {"type": "c", \
"height": 0.4, \
"index": 0} \
}
elif ratio != ([],[]):
tmp_axis_properties = { \
"top": {"type": "h", \
"height": 0.6, \
"index": 1}, \
"bottom": {"type": "r", \
"height": 0.4, \
"index": 0} \
}
else:
tmp_axis_properties = { \
"top": {"type": "h", \
"height": 0.95, \
"index": 0}, \
}
axes_locator = [(tmp_axis_properties[k]["index"], tmp_axis_properties[k]["type"], tmp_axis_properties[k]["height"])\
for k in tmp_axis_properties]
#print (axes_locator)
#heights = [axis_properties[k]["height"] for k in axis_properties]
cuts = self.categories[0].cuts
sparsest = self.get_sparsest_category()
# check if there are user-defined bins for that variable
if bins is None:
bins = self.get_category(sparsest).vardict[name].bins
# calculate the best possible binning
if bins is None:
bins = self.get_category(sparsest).vardict[name].calculate_fd_bins()
label = self.get_category(sparsest).vardict[name].label
plot = VariableDistributionPlot(cuts=cuts, bins=bins,\
xlabel=label,\
color_palette=color_palette)
if styles:
plot.plot_options = styles
else:
plot.plot_options = self.default_plotstyles
plotcategories = self.categories + self.combined_categories
Logger.warning("For variables with different lengths the weighting is broken. If weights, it will fail")
for cat in [x for x in plotcategories if x.plot]:
if external_weights is not None:
weights = external_weights[cat.name]
elif ((cat.weights is not None) and (not disable_weights)):
weights = cat.weights
Logger.debug(f"Found {len(weights)} weights")
if not len(weights):
weights = None
else:
weights = None
Logger.debug(f"Adding variable data {name}")
plot.add_variable(cat, name, transform=transform, external_weights=weights)
if cumulative:
Logger.debug("Adding variable data {} for cumulative plot".format(name))
plot.add_cumul(cat.name)
if len(ratio[0]) and len(ratio[1]):
Logger.debug("Requested to plot ratio {} {}".format(ratio[0], ratio[1]))
tratio,tratio_err = self.calc_ratio(nominator=ratio[0],\
denominator=ratio[1])
plot.add_ratio(ratio[0],ratio[1],\
total_ratio=tratio,\
label=ratiolabel,
total_ratio_errors=tratio_err)
plot.plot(axes_locator=axes_locator,\
normalized=normalized,\
figure_factory=figure_factory,\
log=log,\
style=style,\
ylabel=ylabel,\
zoomin=zoomin,\
adjust_ticks=adjust_ticks)
#plot.add_legend()
#plot.canvas.save(savepath,savename,dpi=350)
if savepath is not None:
plot.canvas.save(savepath, name)
return plot
@property
def integrated_rate(self):
"""
Integrated rate for each category
Returns:
tuple (pandas.Series, pandas.Series): rate with error
"""
rdata,edata,index = [],[],[]
for cat in self.categories + self.combined_categories:
rate,error = cat.integrated_rate
rdata.append(rate)
index.append(cat.name)
edata.append(error)
rate = pd.Series(rdata,index)
err = pd.Series(edata,index)
return (rate,err)
#FIXME static method!
def sum_rate(self,categories=None):
"""
Sum up the integrated rates for categories
Args:
categories: categories considered background
Returns:
tuple: rate with error
"""
if categories is None:
return 0,0
categories = [self.get_category(i) if isinstance(i, str) else i for i in categories]
rate,error = categories[0].integrated_rate
error = error**2
for cat in categories[1:]:
tmprate,tmperror = cat.integrated_rate
rate += tmprate # categories should be independent
error += tmperror**2
return (rate,np.sqrt(error))
def calc_ratio(self,nominator=None,denominator=None):
"""
Calculate a ratio of the given categories
Args:
nominator (list):
denominator (list):
Returns:
tuple
"""
nominator = [self.get_category(i) if isinstance(i, str) else i for i in nominator]
denominator = [self.get_category(i) if isinstance(i, str) else i for i in denominator]
a,a_err = self.sum_rate(categories=nominator)
b,b_err = self.sum_rate(categories=denominator)
if b == 0:
return np.nan, np.nan
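# Gaussian error propagation for the ratio r = a/b:
# sigma_r = sqrt((sigma_a / b)**2 + (a * sigma_b / b**2)**2)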
sum_err = np.sqrt((a_err/ b) ** 2 + ((-a * b_err)/ (b ** 2)) ** 2)
return a/b, sum_err
def _setup_table_data(self,signal=None,background=None):
"""
Setup data for a table
If signal and background are given, also summed values
will be in the list
Keyword Args:
signal (list): category names which are considered signal
background (list): category names which are considered background
Returns
dict: table dictionary
"""
rates, errors = self.integrated_rate
sgrate, sgerrors = self.sum_rate(signal)
bgrate, bgerrors = self.sum_rate(background)
allrate, allerrors = self.sum_rate(self.categories)
tmprates = pd.Series([sgrate,bgrate,allrate],index=["signal","background","all"])
tmperrors = pd.Series([sgerrors,bgerrors,allerrors],index=["signal","background","all"])
rates = rates.append(tmprates)
errors = errors.append(tmperrors)
datacats = []
for cat in self.categories + self.combined_categories:
if isinstance(cat,categories.Data):
datacats.append(cat)
if datacats:
simcats = [cat for cat in self.categories if cat.name not in [kitty.name for kitty in datacats]]
simrate, simerror = self.sum_rate(simcats)
fudges = dict()
for cat in datacats:
rate,error = cat.integrated_rate
try:
fudges[cat.name] = (rate/simrate),(error/simerror)
except ZeroDivisionError:
fudges[cat.name] = np.NaN
rate_dict = OrderedDict()
all_fudge_dict = OrderedDict()
#for catname in sorted(self.categorynames) + sorted(self.combined_categorynames):
for cat in datacats:
label = get_label(cat)
#cfg = GetCategoryConfig(cat.name)
#label = cfg["label"]
rate_dict[label] = (rates[cat.name], errors[cat.name])
if cat.name in fudges:
all_fudge_dict[label] = fudges[cat.name]
else:
all_fudge_dict[label] = None
rate_dict["Sig."] = (rates["signal"],errors["signal"] )
rate_dict["Bg."] = (rates["background"],errors["background"])
rate_dict["Gr. Tot."] = (rates["all"],errors["all"])
all_fudge_dict["Sig."] = None
all_fudge_dict["Bg."] = None
all_fudge_dict["Gr. Tot."] = None
return rate_dict,all_fudge_dict
def tinytable(self,signal=None,\
background=None,\
layout="v",\
format="html",\
order_by=lambda x:x,
livetime=1.):
"""
Use dashi.tinytable.TinyTable to render a nice
html representation of a rate table
Args:
signal (list) : summing up signal categories to calculate total signal rate
background (list): summing up background categories to calculate total background rate
layout (str) : "v" for vertical, "h" for horizontal
format (str) : "html","latex","wiki"
Returns:
str: formatted table in desired markup
"""
def cellformatter(input):
#print input
if input is None:
return "-"
if isinstance(input[0],pd.Series):
input = (input[1][0],input[1][0])
return "{:4.2e} +- {:4.2e}".format(input[0],input[1])
#FIXME: sort the table columns
rates,fudges = self._setup_table_data(signal=signal,background=background)
events = dict()
for k in rates:
events[k] = rates[k][0] * livetime, rates[k][1] * livetime
showcats = [get_label(cat) for cat in self.categories if cat.show_in_table]
showcats += [get_label(cat) for cat in self.combined_categories if cat.show_in_table]
showcats.extend(['Sig.',"Bg.","Gr. Tot."])
orates = OrderedDict()
ofudges = OrderedDict()
oevents = OrderedDict()
for k in list(rates.keys()):
if k in showcats:
orates[k] = rates[k]
ofudges[k] = fudges[k]
oevents[k] = events[k]
#rates = {k : rates[k] for k in rates if k in showcats}
#fudges = {k : fudges[k] for k in fudges if k in showcats}
#events = {k : events[k] for k in events if k in showcats}
tt = TinyTable()
#bypass the add function to add an ordered dict
for label,data in [('Rate (1/s)', orates),("Ratio", ofudges),("Events",oevents)]:
tt.x_labels.append(label)
tt.label_data[label] = data
#tt.add("Rate (1/s)", **rates)
#tt.add("Ratio",**fudges)
#tt.add("Events",**events)
return tt.render(layout=layout,format=format,\
format_cell=cellformatter,\
order_by=order_by)
#def cut_progression_table(self,cuts,\
# signal=None,\
# background=None,\
# layout="v",\
# format="html",\
# order_by=lambda x:x,
# livetime=1.):
# self.delete_cuts()
# self.undo_cuts()
# for cut in cuts:
# self.add_cut(cut)
# self.apply_cuts()
def __len__(self):
#FIXME: to be implemented
raise NotImplementedError
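# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Assuming `ds` is a Dataset built from HErmes categories, the access patterns
# supported by the methods above look roughly like this (the category name
# "signal" and variable name "energy" are placeholders):
#
#   ds.read_variables(names=["energy"])    # read one variable for all categories
#   frame = ds.get_variable("energy")      # DataFrame with one row per category
#   sig = ds["signal"]                     # category lookup via __getitem__
#   sig_energy = ds["signal:energy"]       # "category:variable" shortcut
#   plot = ds.distribution("energy", cumulative=True)   # quick distribution plot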
|
gpl-2.0
|
alemottura/PyCAPI
|
uob_scripts/timeline.py
|
1
|
6159
|
#
# timeline.py
#
# This code will create a timeline plot for a university year of all
# assignment deadlines for all courses against key dates such as holidays
#
#
# Things that need to be set:
#
# year - the university year the timeline is plotted for
year = 2016
import uob_utils
import datetime
start_date = datetime.datetime.combine(uob_utils.WeekOne(year), datetime.time(0,0,0))
end_date = datetime.datetime.combine(uob_utils.DateFromUniversityWeek(year,53,0), datetime.time(0,0,0))
year_length = (end_date-start_date).days
#
#
# bank_hol_dates - the university week number (-1) of bank holidays during
# the academic year (I think these are fixed each year but can't find
# confirmation of this, I am using this document as my guide:
# https://intranet.birmingham.ac.uk/as/cladls/timetabling/documents/public/Key-to-Weeks-2017-18.pdf
bank_hol_dates = [1, 19, 32, 37, 40]
#
#
#
#
import sys
sys.path.append("../module/") # First two lines are needed for import of PyCAPI
import PyCAPI
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
capi = PyCAPI.CanvasAPI()
###############################################################################
# Retrieve data of all courses from Canvas account
#
allcourses = capi.get_courses()
accounts = [115,116,117,121] # Needed to exclude my courses from other departments
courses = []
for course in allcourses:
if course['account_id'] in accounts:
courses.append(course)
###############################################################################
# Retrieve data of all assignments from Canvas account
#
assignments = []
for course in courses:
course_assignments = capi.get_assignments(course['id'])
for assignment in course_assignments:
if assignment['due_at'] is not None:
assignments.append({'id':assignment['id'], 'name':assignment['name'], 'course_id':assignment['course_id'], 'due_date':datetime.datetime.strptime(assignment['due_at'], '%Y-%m-%dT%H:%M:%SZ')})
###############################################################################
# Generate deadline plots for timeline
#
deadline_plot = []
for assignment in assignments:
assignment['day_delta'] = (assignment['due_date']-start_date).days
assignments = sorted(assignments, key=lambda k: k['day_delta'])  # sort numerically by days since start of year
###############################################################################
# Initiate plot
#
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
###############################################################################
# Plot timeline
#
ax.hlines(0,0,year_length)
###############################################################################
# Plot terms
#
term1 = [35]
term2 = [140]
term3 = [245]
term12_length = [77]
term3_length = [56]
term_ybars = [-0.2]
ax.bar(term1, term_ybars, term12_length, color='#ffd500', align='edge', label='Autumn Term', alpha=0.8)
ax.bar(term2, term_ybars, term12_length, color='#ff7500', align='edge', label='Spring Term', alpha=0.8)
ax.bar(term3, term_ybars, term3_length, color='#ff1500', align='edge', label='Summer Term', alpha=0.8)
###############################################################################
# Plot holidays
#
christ_hols = [112]
east_hols = [217]
sum_hols = [301]
chirsteast_length = [28]
sum_length = [63]
hol_ybars = [0.2]
ax.bar(christ_hols, hol_ybars, chirsteast_length, color='#00ff00', align='edge', label='Christmas Hols', alpha=0.8)
ax.bar(east_hols, hol_ybars, chirsteast_length, color='#00b100', align='edge', label='Easter Hols', alpha=0.8)
ax.bar(sum_hols, hol_ybars, sum_length, color='#008100', align='edge', label='Summer Hols', alpha=0.8)
###############################################################################
# Plot Mondays, date markers and bank holidays
#
mondays = []
date_labels = []
bank_hols = []
term = []
exam = []
for i in range(0,year_length,7):
mondays.append(i)
date_labels.append((start_date+datetime.timedelta(days=i)).strftime('%d %B'))
if i/7 in bank_hol_dates:
bank_hols.append(i)
#ax.eventplot(mondays, orientation='horizontal', colors='y', linelengths=0.4, lineoffset=0, linewidths=0.75, label='Mondays')
plt.xticks(mondays, date_labels, rotation=80, fontsize=5)
ax.scatter(bank_hols, np.zeros((len(bank_hols),), dtype=np.int), marker='x', color='k', label='Bank Holidays')
###############################################################################
# Plot deadlines
#
var = 0
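# (editor's note) `var` alternates between 0 and 1 so that consecutive deadline
# labels are drawn alternately above and below the timeline, reducing overlap
# between neighbouring annotations.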
for assignment in assignments:
if year_length > (assignment['due_date']-start_date).days > 0:
deadline_plot.append((assignment['due_date']-start_date).days)
if len(assignment['name']) > 20:
annotation_text = str(assignment['name'][:18]+'..')
else:
annotation_text = assignment['name']
if var == 0:
ax.annotate(annotation_text, xy=(assignment['day_delta'],0.6), xycoords='data', xytext=(assignment['day_delta'],3.9), textcoords='data', arrowprops=dict(facecolor='black', arrowstyle='-'), horizontalalignment='left', verticalalignment='top', rotation=75, fontsize=7)
var = 1
else:
ax.annotate(annotation_text, xy=(assignment['day_delta'],-0.6), xycoords='data', xytext=(assignment['day_delta'],-3.9), textcoords='data', arrowprops=dict(facecolor='black', arrowstyle='-'), horizontalalignment='right', verticalalignment='bottom', rotation=75, fontsize=7)
var = 0
ax.eventplot(deadline_plot, orientation='horizontal', colors='b', linelengths=1, lineoffset=0, linewidths=0.75, label='Assignment Deadline')
###############################################################################
# Format timeline
#
ax.set_title(label='Deadline Timetable')
plt.ylim(-5, 5)
plt.xlim(0,year_length+100)
plt.xlabel('Date (Monday)', fontsize=8)
plt.grid(axis='x', linestyle='--', alpha=0.2)
plt.gca().axes.get_yaxis().set_visible(False)
legend = plt.legend(loc='upper right')
legend.get_frame().set_alpha(1)
plt.subplots_adjust(top=0.92, bottom=0.15, left=0.05, right=0.95)
plt.savefig('timeline.png', format='png', dpi=400)
plt.show()
|
mit
|
hdzierz/Kaka
|
mongcore/connectors.py
|
1
|
12040
|
# -*- coding: utf-8 -*-
# Django imports
from django.db import connection, connections
# import data serializers
import gzip
import csv
import xlrd
import pandas as pd
import vcf
# Project imports
from .logger import *
from .algorithms import *
############################
## Data connectors build on the algorithms defined in algorithms.py.
## These connectors see data as a list of rows; the connector loops through them and
## applies an operator to each row of data.
## There are four classes inheriting from DataConnector:
## ExcelConnector
## SqlConnector
## CsvConnector
## DictListConnector
## The aim is to give the programmer the ability to write e.g. an import operator for data which can be applied
## to different input data formats.
############################
class DataConnector:
fn = 'None'
format = "unknown"
header = None
head_mapper = None
current = None
origin_name = None
def __init__(self):
pass
def __next__(self):
pass
def next(self):
return self.__next__()
def all(self):
pass
def close(self):
pass
class ExcelConnector(DataConnector):
fn = None
sheet_name = None
sheet = None
curr_row = 0
max_row = 0
header = None
format = "xls"
def __init__(self, fn, sheet_name=None):
self.fn = fn
self.sheet_name = sheet_name
self.load()
def __iter__(self):
return self
def __next__(self):
num_rows = self.sheet.nrows  # row 0 is the header; data rows run through nrows-1
self.curr_row += 1
if(self.curr_row < num_rows):
r = self.sheet.row_values(self.curr_row)
return dict(list(zip(self.header, r)))
else:
raise StopIteration
def load(self):
workbook = xlrd.open_workbook(self.fn)
if self.sheet_name:
self.sheet = workbook.sheet_by_name(self.sheet_name)
else:
sheet_names = workbook.sheet_names()
self.sheet = workbook.sheet_by_name(sheet_names[0])
self.header = self.get_header()
def get_header(self):
return self.sheet.row_values(0)
def all(self):
res = []
for r in self:
res.append(r)
return res
@staticmethod
def GetSheets(fn):
workbook = xlrd.open_workbook(fn)
return workbook.sheet_names()
class SqlConnector(DataConnector):
fn = "DB"
format = "SQL"
cursor = None
db = None
header = None
limit_mode = False
limit = 10000
def __init__(self, qry, db=None):
self.origin_name = qry
self.db = db
self.load()
def __iter__(self):
return self
def __next__(self):
self.current = self.cursor.fetchone()
if(self.current):
return dict(list(zip(self.header, self.current)))
else:
raise StopIteration
def load(self):
if(self.db):
self.cursor = connections[self.db].cursor()
else:
self.cursor = connection.cursor()
if(self.limit_mode):
# TODO Add limit functionality
self.cursor.execute(self.origin_name)
else:
self.cursor.execute(self.origin_name)
self.header = self.get_header()
def get_header(self):
return [desc[0] for desc in self.cursor.description]
def all(self):
"Returns all rows from a cursor as a dict"
desc = self.cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in self.cursor.fetchall()
]
def close(self):
self.cursor.close()
class PgsqlConnector(SqlConnector):
format = "PSQL"
class CsvConnector(DataConnector):
reader = None
f = None
format = "CSV"
fn = ""
gzipped = False
delimiter = ','
header = None
def __init__(self, fn, delimiter=',', gzipped=False, header=None):
Logger.Message("CsvConnector: Loading " + fn)
self.origin_name = fn
self.fn = fn
self.gzipped = gzipped
self.delimiter = delimiter
self.header=header
self.load()
def __iter__(self):
return self
def load(self):
if(self.gzipped):
self.f = gzip.open(self.origin_name, 'rt')
else:
self.f = open(self.origin_name, 'rt')
self.reader = csv.DictReader(self.f, delimiter=self.delimiter, fieldnames=self.header)
self.header = self.reader.fieldnames
def __next__(self):
self.current = next(self.reader)
if(self.current):
return self.current
else:
raise StopIteration
def all(self):
d = []
for row in self:
d.append(row)
return d
def close(self):
self.f.close()
class FastqConnector(DataConnector):
reader = None
format = "FASTQ"
fn = ""
gzipped = False
header = None
def __init__(self, fn, gzipped=False):
Logger.Message("FastqConnector: Loading " + fn)
self.fn = fn
self.origin_name = fn
self.gzipped = gzipped
self.delimiter = '\t'  # assumed tab; load() below reuses the csv.DictReader machinery and needs origin_name and a delimiter
self.header = ("QUAL", "")
self.load()
def __iter__(self):
return self
def load(self):
if(self.gzipped):
self.f = gzip.open(self.origin_name, 'rt')
else:
self.f = open(self.origin_name, 'rt')
self.reader = csv.DictReader(self.f, delimiter=self.delimiter, fieldnames=self.header)
self.header = self.reader.fieldnames
def __next__(self):
self.current = next(self.reader)
if(self.current):
return self.current
else:
raise StopIteration
def all(self):
d = []
for row in self:
d.append(row)
return d
def close(self):
self.f.close()
class DictListConnector(DataConnector):
header = None
lst = None
fn = "dictlist"
format="Python Dict"
def __init__(self, lst, expand_obs=False):
self.lst = lst
if expand_obs:
self.lst, self.header = self.convert_obs_json()
else:
self.header = list(self.lst[0].keys())
self.current = iter(self.lst)
def make_fields_from_json(self, s, b, header, field):
# Add obs fields to new list
if field in s:
# Get the json content
a_values = json.loads(s[field])
for a in a_values:
if a not in header:
header.append(a)
b[a] = a_values[a]
return b
def convert_obs_json(self):
# Check if objects have an obs field
header = []
#if 'obs' in test:
res = []
# Browse through list of records
for s in self.lst:
b = OrderedDict()
# Add sql fields to new list
for r in s:
if r not in ["obs","obs1","obs2", "values"]:
if r not in header:
header.append(r)
b[r] = s[r]
# Add obs fields to new list
if 'obs' in s:
b = self.make_fields_from_json(s, b, header, 'obs')
if 'values' in s:
b = self.make_fields_from_json(s, b, header, 'values')
if 'obs1' in s:
b = self.make_fields_from_json(s, b, header, 'obs1')
if 'obs2' in s:
b = self.make_fields_from_json(s, b, header, 'obs2')
res.append(b)
return res, header
def load(self):
pass
def rename(self, row, tgt):
if not(self.head_mapper):
return row
n_row = dict(
(self.head_mapper[key], value) for (key, value) in list(row.items())
)
tgt.append(n_row)
return tgt
def reload(self, new_header):
self.head_mapper = dict(list(zip(self.header, new_header)))
self.header = new_header
lst = []
self.lst = accumulate(self, self.rename, lst)
def __iter__(self):
return self
def __next__(self):
cur = next(self.current)
if(cur):
return cur
else:
raise StopIteration
def all(self):
return self.lst
def close(self):
del self.lst
class PandasConnector(DataConnector):
header = None
df = None
current = None
fn = "Pandas"
format = "Pandas"
def __init__(self, df):
Logger.Message(str(df))
if(type(df)==pd.DataFrame):
self.df = df
if(type(df)==dict):
self.df = pd.DataFrame(df)
self.current = iter(self.df.iterrows())
self.header = self.df.columns.values
def __iter__(self):
return self
def __next__(self):
cur = next(self.current)
if(cur):
return cur[1]
else:
raise StopIteration
def next(self):
cur = next(self.current)
if(cur):
return cur[1]
else:
raise StopIteration
def all(self):
return self.df.to_dict(orient="records")
def close(self):
pass
import json
class DjangoModelConnector(DictListConnector):
def make_foreign_fields(self, qs, fields):
res = []
for field in fields:
if('_id' not in field):
f = qs.model._meta.get_field(field)
if(hasattr(f, 'db_constraint')):
res.append(field + '__name')
else:
res.append(field)
return res
def __init__(self, cls, qry, fields=None):
qs = cls.objects.filter(qry)
if(fields):
fields = self.make_foreign_fields(qs, fields)  # pass the list itself, cf. DjangoQuerySetConnector
self.lst = list(qs.values(*fields))
else:
fields = qs.model._meta.get_all_field_names()
fields = self.make_foreign_fields(qs, fields)
self.lst = list(qs.values())
self.lst, self.header = self.convert_obs_json()
self.current = iter(self.lst)  # needed for iteration, cf. DjangoQuerySetConnector
from collections import OrderedDict
def do_nothing():
pass
class DjangoQuerySetConnector(DictListConnector):
def make_foreign_fields(self, qs, fields):
res = []
for field in fields:
if('_id' not in field):
f = qs.model._meta.get_field(field)
if(hasattr(f, 'db_constraint')):
res.append(field + '__name')
elif('Many' in f.__class__.__name__ and 'Rel' in f.__class__.__name__):
do_nothing()
else:
res.append(field)
return res
def __init__(self, qs, fields=None):
# If fields are selected use only those fields
if(fields):
fields = self.make_foreign_fields(qs, fields)
self.lst = list(qs.values(*fields))
else:
fields = qs.model._meta.get_all_field_names()
fields = self.make_foreign_fields(qs, fields)
self.lst = list(qs.values(*fields))
self.lst, self.header = self.convert_obs_json()
self.current = iter(self.lst)
def collect_samples(sample, r):
dat = sample.data._asdict()
for d in dat:
ind = sample.sample + '__' + d
r[ind] = str(dat[d])
return r
def collect(record):
r = {}
r['CHROM'] = record.CHROM
r['POS'] = record.POS
r['REF'] = record.REF
r['ALT'] = str(record.ALT)
r['FORMAT'] = record.FORMAT
#samp = accumulate(record.samples, collect_samples, r)
return r
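# (editor's note) collect() flattens the fixed VCF columns of a record
# (CHROM, POS, REF, ALT, FORMAT) into a dict; the per-sample fields are
# currently left commented out above.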
class VcfConnector(DataConnector):
format="VCF"
reader = None
current = None
def __init__(self, fn):
self.fn = fn
self.reader = vcf.Reader(open(fn,'r'))
def __iter__(self):
return self
def __next__(self):
self.current = next(self.reader)
if(self.current):
return self.ToDict(self.current)
else:
raise StopIteration
def ToDict(self, line):
return collect(line)
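# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# All connectors expose the same iteration protocol: construct, iterate rows as
# dicts, then close. For example, with a hypothetical comma-separated file
# 'samples.csv' that has a header row:
#
#   conn = CsvConnector('samples.csv', delimiter=',')
#   for row in conn:       # each row is a dict keyed by the CSV header
#       print(row)
#   conn.close()
#
#   # or load everything at once
#   rows = CsvConnector('samples.csv').all()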
|
gpl-2.0
|
zetaris/zeppelin
|
python/src/main/resources/python/mpl_config.py
|
41
|
3653
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module provides utilities for users to configure the inline plotting
# backend through a PyZeppelinContext instance (eg, through z.configure_mpl())
import matplotlib
def configure(**kwargs):
"""
Generic configure function.
Usage: configure(prop1='foo', prop2='bar', ...)
Currently supported zeppelin-specific properties are:
interactive - If true show all figures without explicit call to show()
via a post-execute hook.
angular - If true, bind figures to angular display system.
close - If true, close all figures once shown.
width, height - Default width / height of the figure in pixels.
fontsize - Font size.
dpi - dpi of the figure.
fmt - Figure format
supported_formats - Supported figure formats
context - ZeppelinContext instance (requires PY4J)
"""
_config.update(**kwargs)
# Broadcast relevant changes to matplotlib RC
_on_config_change()
def get(key):
"""
Get the configuration info given a key
"""
return _config[key]
def _on_config_change():
# dpi
dpi = _config['dpi']
# For older versions of matplotlib, savefig.dpi is not synced with
# figure.dpi by default
matplotlib.rcParams['figure.dpi'] = dpi
if matplotlib.__version__ < '2.0.0':
matplotlib.rcParams['savefig.dpi'] = dpi
# Width and height
width = float(_config['width']) / dpi
height = float(_config['height']) / dpi
matplotlib.rcParams['figure.figsize'] = (width, height)
# Font size
fontsize = _config['fontsize']
matplotlib.rcParams['font.size'] = fontsize
# Default Figure Format
fmt = _config['format']
supported_formats = _config['supported_formats']
if fmt not in supported_formats:
raise ValueError("Unsupported format %s" %fmt)
if matplotlib.__version__ < '1.2.0':
matplotlib.rcParams.update({'savefig.format': fmt})
else:
matplotlib.rcParams['savefig.format'] = fmt
# Interactive mode
interactive = _config['interactive']
matplotlib.interactive(interactive)
def _init_config():
dpi = matplotlib.rcParams['figure.dpi']
if matplotlib.__version__ < '1.2.0':
matplotlib.rcParams.update({'savefig.format': 'png'})
fmt = matplotlib.rcParams['savefig.format']
width, height = matplotlib.rcParams['figure.figsize']
fontsize = matplotlib.rcParams['font.size']
_config['dpi'] = dpi
_config['format'] = fmt
_config['width'] = width*dpi
_config['height'] = height*dpi
_config['fontsize'] = fontsize
_config['close'] = True
_config['interactive'] = matplotlib.is_interactive()
_config['angular'] = False
_config['supported_formats'] = ['png', 'jpg', 'svg']
_config['context'] = None
_config = {}
_init_config()
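# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# The keys below mirror the ones populated in _init_config(); in a Zeppelin
# note this is typically driven through z.configure_mpl() (see the module
# docstring), which forwards its keyword arguments to configure():
#
#   configure(width=800, height=500, dpi=100, fontsize=12, format='svg')
#   get('format')   # -> 'svg'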
|
apache-2.0
|
rohit21122012/DCASE2013
|
runs/2016/baseline32/src/dataset.py
|
37
|
78389
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import urllib2
import socket
import locale
import zipfile
import tarfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from ui import *
from general import *
from files import *
class Dataset(object):
"""Dataset base class.
The specific dataset classes inherit from this class, and only the needed methods are reimplemented.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if it does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield meta items while they are valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
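# Hedged example of the formatting above (the exact digit grouping depends on
# the active locale): print_bytes(123456789) -> '123,456,789 bytes (117.7 MiB)'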
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
except (urllib2.URLError, socket.timeout), e:
try:
fo.close()
except:
raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
for fold in xrange(1, self.evaluation_folds):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                        if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path,'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                    with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name = 'CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
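    # Illustrative sketch (not part of the original class): the per-chunk
    # 'majorityvote' string is a sequence of the single-letter codes above, so a
    # hypothetical value 'cmv' would map, character by character, to
    # ['child speech', 'adult male speech', 'video game/tv'] via tagcode_to_taglabel,
    # which is how on_after_extract below builds the semicolon-joined tag list.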
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                        if tag == 'b':
                            print file
                        if tag != 'S' and tag != 'U':
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(self.relative_to_absolute_path(os.path.join('chime_home','chunks',row[1]+'.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
fold+= 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
print self.evaluation_setup_path
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
        if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
| mit | nickabattista/IB2d | pyIB2d/Examples/Rubberband_with_Beams/Rubberband.py | 1 | 10247 |
'''-------------------------------------------------------------------------
IB2d is an Immersed Boundary Code (IB) for solving fully coupled non-linear
fluid-structure interaction models. This version of the code is based off of
Peskin's Immersed Boundary Method Paper in Acta Numerica, 2002.
Author: Nicholas A. Battista
Email: nick.battista@unc.edu
Date Created: May 27th, 2015
Python 3.5 port by: Christopher Strickland
Institution: UNC-CH
This code is capable of creating Lagrangian Structures using:
1. Springs
2. Beams (*torsional springs)
3. Target Points
 4. Muscle-Model (combined Force-Length-Velocity model, "Hill+(Length-Tension)")
One is able to update those Lagrangian Structure parameters, e.g.,
spring constants, resting lengths, etc
There are a number of built in Examples, mostly used for teaching purposes.
If you would like us to add a specific muscle model,
please let Nick (nick.battista@unc.edu) know.
----------------------------------------------------------------------------'''
import numpy as np
from math import cos, sin, pi, sqrt
import matplotlib.pyplot as plt
################################################################################
#
# FUNCTION: creates the RUBBERBAND-EXAMPLE geometry and prints associated input files
#
################################################################################
def Rubberband():
#
# Grid Parameters (MAKE SURE MATCHES IN input2d !!!)
#
Nx = 64 # # of Eulerian Grid Pts. in x-Direction (MUST BE EVEN!!!)
Ny = 64 # # of Eulerian Grid Pts. in y-Direction (MUST BE EVEN!!!)
Lx = 1.0 # Length of Eulerian Grid in x-Direction
Ly = 1.0 # Length of Eulerian Grid in y-Direction
# Immersed Structure Geometric / Dynamic Parameters #
N = 2*Nx # Number of Lagrangian Pts. (2x resolution of Eulerian grid)
ds = Lx/(2*Nx) # Lagrangian Spacing
a = 0.4 # Length of semi-major axis.
b = 0.2 # Length of semi-minor axis.
struct_name = 'rubberband' # Name for .vertex, .spring, etc files.
# Call function to construct geometry
xLag,yLag,C = give_Me_Immsersed_Boundary_Geometry(ds,N,a,b)
# Plot Geometry to test
plt.plot(xLag,yLag,'r-',xLag,yLag,'*')
plt.xlabel('x'); plt.ylabel('y')
plt.axis('square')
plt.show(block=True)
# Prints .vertex file!
print_Lagrangian_Vertices(xLag,yLag,struct_name)
# Prints .spring file!
#k_Spring = 1e7
#print_Lagrangian_Springs(xLag,yLag,k_Spring,ds_Rest,struct_name)
# Prints .beam file!
k_Beam = 1e7
print_Lagrangian_Beams(xLag,yLag,k_Beam,C,struct_name)
# Prints .target file!
#k_Target = 1e7
#print_Lagrangian_Target_Pts(xLag,k_Target,struct_name)
########################################################################
#
# FUNCTION: prints VERTEX points to a file called rubberband.vertex
#
########################################################################
def print_Lagrangian_Vertices(xLag,yLag,struct_name):
N = len(xLag)
with open(struct_name + '.vertex', 'w') as vertex_fid:
vertex_fid.write('{0}\n'.format(N))
#Loops over all Lagrangian Pts.
for s in range(N):
X_v = xLag[s]
Y_v = yLag[s]
vertex_fid.write('{0:1.16e} {1:1.16e}\n'.format(X_v, Y_v))
########################################################################
#
# FUNCTION: prints TARGET points to a file called rubberband.target
#
########################################################################
def print_Lagrangian_Target_Pts(xLag,k_Target,struct_name):
N = len(xLag)
with open(struct_name + '.target', 'w') as target_fid:
target_fid.write('{0}\n'.format(N))
#Loops over all Lagrangian Pts.
for s in range(N):
target_fid.write('{0:d} {1:1.16e}\n'.format(s, k_Target))
################################################################################
#
# FUNCTION: prints BEAM (Torsional Spring) points to a file called rubberband.beam
#
################################################################################
def print_Lagrangian_Beams(xLag,yLag,k_Beam,C,struct_name):
# k_Beam: beam stiffness
# C: beam curvature
    N = len(xLag)    # NOTE: the boundary here is closed, so the number of beams equals the number of Lag. Pts. (an open boundary would have N-2)
with open(struct_name + '.beam','w') as beam_fid:
#v = range(0,N-1,2)
beam_fid.write('{0:d}\n'.format(N))
#beam_fid.write('{0:d}\n'.format(len(v)))
#spring_force = kappa_spring*ds/(ds**2)
#BEAMS BETWEEN VERTICES
#for s = 1:2:N-1
for s in range(N):
if s==0:
beam_fid.write('{0:d} {1:d} {2:d} {3:1.16e} {4:1.16e}\n'.format(\
N-1, s, s+1, k_Beam, C[s] ))
elif s <= N-2:
beam_fid.write('{0:d} {1:d} {2:d} {3:1.16e} {4:1.16e}\n'.format(\
s-1, s, s+1, k_Beam, C[s] ))
else:
#Case s=N-1
beam_fid.write('{0:d} {1:d} {2:d} {3:1.16e} {4:1.16e}\n'.format(\
s-1, s, 0, k_Beam, C[s] ))
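# For reference, each data line written to rubberband.beam by the loop above has
# the layout (a sketch matching the format strings used, not an official spec):
#
#   <left idx> <middle idx> <right idx> <beam stiffness k_Beam> <curvature C[s]>
#
# The first line of the file holds the total number of beams; the indices wrap
# around (N-1, 0, 1 for the first beam and N-2, N-1, 0 for the last) because the
# rubberband is a closed boundary.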
########################################################################
#
# FUNCTION: prints SPRING points to a file called rubberband.spring
#
########################################################################
def print_Lagrangian_Springs(xLag,yLag,k_Spring,ds_Rest,struct_name):
N = len(xLag)
with open(struct_name + '.spring', 'w') as spring_fid:
spring_fid.write('{0:d}\n'.format(N))
#spring_force = kappa_spring*ds/(ds**2)
#SPRINGS BETWEEN VERTICES
for s in range(N):
if s < N-1:
spring_fid.write('{0:d} {1:d} {2:1.16e} {3:1.16e}\n'.format(\
s, s+1, k_Spring, ds_Rest))
else:
#Case s=N-1
spring_fid.write('{0:d} {1:d} {2:1.16e} {3:1.16e}\n'.format(\
s, 0, k_Spring, ds_Rest))
########################################################################
#
# FUNCTION: computes the actual ELLIPTICAL piece of the geometry
#
########################################################################
def compute_ELLIPTIC_Branch(ds,rmin,rmax):
#initiate
t = [0]
xN = rmin*cos(0); x = [xN]
yN = rmax*sin(0); y = [yN]
while t[-1] <= 2*pi-ds:
xP = x[-1] # x-Prev
yP = y[-1] # y-Prev
tN = t[-1] # previous angle
tF = tN + pi/20 # far guess
tGuess = (tN + tF)/2 # guess
xN1 = rmin*cos(tGuess) # x-guess
yN1 = rmax*sin(tGuess) # y-guess
err = ( ds - sqrt( (xN1-xP)**2 + (yN1-yP)**2 ) )
while abs(err) > 1e-6:
if err > 0:
tN = tGuess # Update 'close' PT. [tN,tGuess,tF]
tGuess = (tN+tF)/2 # New Guess
xN1 = rmin*cos(tGuess) # x-guess
yN1 = rmax*sin(tGuess) # y-guess
elif err < 0:
tF = tGuess # Update FAR PT. [tN,tGuess,tF]
tGuess = (tF+tN)/2 # New Guess
xN1 = rmin*cos(tGuess) # x-guess
yN1 = rmax*sin(tGuess) # y-guess
#compute error
err = ( ds - sqrt( (xN1-xP)**2 + (yN1-yP)**2 ) )
#save values
x.append(xN1)
y.append(yN1)
#update
t.append(tGuess)
#x.append(rmin*cos(angEnd))
#y.append(rmax*sin(angEnd))
return (x,y,t)
########################################################################
#
# FUNCTION: creates the Lagrangian structure geometry
#
########################################################################
def give_Me_Immsersed_Boundary_Geometry(ds,N,rmin,rmax):
    # The immersed structure is an ellipse #
xLag,yLag,angs = compute_ELLIPTIC_Branch(ds,rmin,rmax)
xLag = [item+0.5 for item in xLag]
yLag = [item+0.5 for item in yLag]
    # COMPUTES CURVATURE IF YOU WANT TO STAY IN THE INITIAL CONFIGURATION
C = compute_Curvatures(ds,angs,rmin,rmax,xLag,yLag)
N = len(xLag)
r_eff = sqrt(rmin*rmax)
xLag2 = []; yLag2 = []
for ii in range(N):
xLag2.append(0.5 + r_eff * cos( 2*pi/N*ii ))
yLag2.append(0.5 + r_eff * sin( 2*pi/N*ii ))
    # COMPUTES CURVATURE IF YOU WANT TO SETTLE INTO A CIRCLE
C = compute_Curvatures(ds,angs,rmin,rmax,xLag2,yLag2)
return (xLag,yLag,C)
########################################################################
#
# FUNCTION: computes "curvature" of ellipse
#
# NOTE: not curvature in the traditional geometric sense, in the 'discrete'
# sense through cross product.
#
########################################################################
def compute_Curvatures(ds,angs,rmin,rmax,xLag,yLag):
#a-x component (rmin)
#b-y component (rmax)
#C = ab / ( sqrt( a^2*sin(t)^2 + b^2*cos(t)^2 ) )^3
N = len(xLag)
C = np.zeros(len(angs))
    #Note: -needs to be done in the same order as you print the .beam file!
    #      -THIS MAKES THE INITIAL BEAM CONFIGURATION THE DESIRED CURVATURE!!
for ii in range(N):
if ii==0:
# Pts Xp -> Xq -> Xr (same as beam force calc.)
Xp = xLag[-1]; Xq = xLag[ii]; Xr = xLag[ii+1]
Yp = yLag[-1]; Yq = yLag[ii]; Yr = yLag[ii+1]
elif ii<N-1:
# Pts Xp -> Xq -> Xr (same as beam force calc.)
Xp = xLag[ii-1]; Xq = xLag[ii]; Xr = xLag[ii+1]
Yp = yLag[ii-1]; Yq = yLag[ii]; Yr = yLag[ii+1]
else:
# Pts Xp -> Xq -> Xr (same as beam force calc.)
Xp = xLag[ii-1]; Xq = xLag[ii]; Xr = xLag[0]
Yp = yLag[ii-1]; Yq = yLag[ii]; Yr = yLag[0]
# Small numbers here, roundoff error can make result slightly different
# from what you get in MATLAB.
C[ii] = (Xr-Xq)*(Yq-Yp) - (Yr-Yq)*(Xq-Xp) #Cross product btwn vectors
return C
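# Illustrative helper (not used by the driver above): the comment at the top of
# compute_Curvatures quotes the standard analytic curvature of an ellipse,
# kappa(t) = a*b / ( a^2*sin(t)^2 + b^2*cos(t)^2 )^(3/2), with a the x semi-axis
# (rmin) and b the y semi-axis (rmax). A minimal sketch of that formula is given
# below for comparison against the discrete cross-product values; the function
# name and its use here are assumptions for illustration only.
def analytic_ellipse_curvature(rmin, rmax, t):
    # Curvature of the parametrization x = rmin*cos(t), y = rmax*sin(t)
    return (rmin * rmax) / (rmin**2 * np.sin(t)**2 + rmax**2 * np.cos(t)**2)**1.5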
if __name__ == "__main__":
Rubberband()
| gpl-3.0 | BiaDarkia/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 17 | 7819 |
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
# #############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
# #############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
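# For example (sketch): starting with redirects = {} and index_map = {},
#   index({}, index_map, 'A') -> 0
#   index({}, index_map, 'B') -> 1
#   index({}, index_map, 'A') -> 0   # already present, same index returned
# i.e. setdefault hands out consecutive integer ids in first-seen order.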
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
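# Worked example (sketch): if the parsed map is {'A': 'B', 'B': 'C'}, the closure
# loop above rewrites it to {'A': 'C', 'B': 'C'}, so every source points directly
# at its final, non-redirecting target (cycles are cut by the `seen` set).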
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause | henridwyer/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 |
"""
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause | plotly/python-api | packages/python/plotly/plotly/graph_objs/_carpet.py | 1 | 63024 |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Carpet(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "carpet"
_valid_props = {
"a",
"a0",
"aaxis",
"asrc",
"b",
"b0",
"baxis",
"bsrc",
"carpet",
"cheaterslope",
"color",
"customdata",
"customdatasrc",
"da",
"db",
"font",
"ids",
"idssrc",
"meta",
"metasrc",
"name",
"opacity",
"stream",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xsrc",
"y",
"yaxis",
"ysrc",
}
# a
# -
@property
def a(self):
"""
        An array containing values of the first parameter
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["a"]
@a.setter
def a(self, val):
self["a"] = val
# a0
# --
@property
def a0(self):
"""
Alternate to `a`. Builds a linear space of a coordinates. Use
with `da` where `a0` is the starting coordinate and `da` the
step.
The 'a0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["a0"]
@a0.setter
def a0(self, val):
self["a0"] = val
# aaxis
# -----
@property
def aaxis(self):
"""
The 'aaxis' property is an instance of Aaxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Aaxis`
- A dict of string/value properties that will be passed
to the Aaxis constructor
Supported dict properties:
arraydtick
The stride between grid lines along the axis
arraytick0
The starting index of grid lines along the axis
autorange
Determines whether or not the range of this
axis is computed in relation to the input data.
See `rangemode` for more info. If `range` is
provided, then `autorange` is set to False.
categoryarray
Sets the order in which categories on this axis
appear. Only has an effect if `categoryorder`
is set to "array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud
for categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses
"trace", which specifies the order that is
present in the data supplied. Set
`categoryorder` to *category ascending* or
*category descending* if order should be
determined by the alphanumerical order of the
category names. Set `categoryorder` to "array"
to derive the ordering from the attribute
`categoryarray`. If a category is not found in
the `categoryarray` array, the sorting behavior
for that attribute will be identical to the
"trace" mode. The unspecified categories will
follow the categories in `categoryarray`.
cheatertype
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background
Individual pieces can override this.
dtick
The stride between grid lines along the axis
endline
                    Determines whether or not a line is drawn
                    along the final value of this axis. If True,
the end line is drawn on top of the grid lines.
endlinecolor
Sets the line color of the end line.
endlinewidth
Sets the width (in px) of the end line.
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
fixedrange
Determines whether or not this axis is zoom-
able. If true, then zoom is disabled.
                gridcolor
                    Sets the color of the grid lines.
                gridwidth
                    Sets the width (in px) of the grid lines.
labelpadding
Extra padding between label and the axis
                labelprefix
                    Sets an axis label prefix.
                labelsuffix
                    Sets an axis label suffix.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minorgridcolor
Sets the color of the grid lines.
minorgridcount
Sets the number of minor grid ticks per major
grid tick
minorgridwidth
Sets the width (in px) of the grid lines.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
range
Sets the range of this axis. If the axis `type`
is "log", then you must take the log of your
desired range (e.g. to set the range from 1 to
100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings,
like date data, though Date objects and unix
milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it
should be numbers, using the scale where each
category is assigned a serial number from zero
in the order it appears.
rangemode
If "normal", the range is computed in relation
                    to the extrema of the input data. If "tozero",
                    the range extends to 0, regardless of the input
                    data. If "nonnegative", the range is non-
negative, regardless of the input data.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showticklabels
Determines whether axis labels are drawn on the
low side, the high side, both, or neither side
of the axis.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                    last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
smoothing
startline
                    Determines whether or not a line is drawn
                    along the starting value of this axis. If True,
the start line is drawn on top of the grid
lines.
startlinecolor
Sets the line color of the start line.
startlinewidth
Sets the width (in px) of the start line.
tick0
The starting index of grid lines along the axis
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see: We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.carpet.
aaxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.carpet.aaxis.tickformatstopdefaults), sets
the default property values to use for elements
of carpet.aaxis.tickformatstops
tickmode
tickprefix
Sets a tick label prefix.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
title
:class:`plotly.graph_objects.carpet.aaxis.Title
` instance or dict with compatible properties
titlefont
Deprecated: Please use carpet.aaxis.title.font
instead. Sets this axis' title font. Note that
the title's font used to be set by the now
deprecated `titlefont` attribute.
titleoffset
Deprecated: Please use
carpet.aaxis.title.offset instead. An
additional amount by which to offset the title
from the tick labels, given in pixels. Note
that this used to be set by the now deprecated
`titleoffset` attribute.
type
Sets the axis type. By default, plotly attempts
                    to determine the axis type by looking into the
data of the traces that referenced the axis in
question.
Returns
-------
plotly.graph_objs.carpet.Aaxis
"""
return self["aaxis"]
@aaxis.setter
def aaxis(self, val):
self["aaxis"] = val
# asrc
# ----
@property
def asrc(self):
"""
Sets the source reference on Chart Studio Cloud for a .
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["asrc"]
@asrc.setter
def asrc(self, val):
self["asrc"] = val
# b
# -
@property
def b(self):
"""
        An array containing values of the second parameter
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
# b0
# --
@property
def b0(self):
"""
        Alternate to `b`. Builds a linear space of b coordinates. Use
with `db` where `b0` is the starting coordinate and `db` the
step.
The 'b0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["b0"]
@b0.setter
def b0(self, val):
self["b0"] = val
# baxis
# -----
@property
def baxis(self):
"""
The 'baxis' property is an instance of Baxis
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Baxis`
- A dict of string/value properties that will be passed
to the Baxis constructor
Supported dict properties:
arraydtick
The stride between grid lines along the axis
arraytick0
The starting index of grid lines along the axis
autorange
Determines whether or not the range of this
axis is computed in relation to the input data.
See `rangemode` for more info. If `range` is
provided, then `autorange` is set to False.
categoryarray
Sets the order in which categories on this axis
appear. Only has an effect if `categoryorder`
is set to "array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud
for categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses
"trace", which specifies the order that is
present in the data supplied. Set
`categoryorder` to *category ascending* or
*category descending* if order should be
determined by the alphanumerical order of the
category names. Set `categoryorder` to "array"
to derive the ordering from the attribute
`categoryarray`. If a category is not found in
the `categoryarray` array, the sorting behavior
for that attribute will be identical to the
"trace" mode. The unspecified categories will
follow the categories in `categoryarray`.
cheatertype
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background
Individual pieces can override this.
dtick
The stride between grid lines along the axis
endline
                    Determines whether or not a line is drawn
                    along the final value of this axis. If True,
the end line is drawn on top of the grid lines.
endlinecolor
Sets the line color of the end line.
endlinewidth
Sets the width (in px) of the end line.
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
fixedrange
Determines whether or not this axis is zoom-
able. If true, then zoom is disabled.
                gridcolor
                    Sets the color of the grid lines.
                gridwidth
                    Sets the width (in px) of the grid lines.
labelpadding
Extra padding between label and the axis
                labelprefix
                    Sets an axis label prefix.
                labelsuffix
                    Sets an axis label suffix.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minorgridcolor
Sets the color of the grid lines.
minorgridcount
Sets the number of minor grid ticks per major
grid tick
minorgridwidth
Sets the width (in px) of the grid lines.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
range
Sets the range of this axis. If the axis `type`
is "log", then you must take the log of your
desired range (e.g. to set the range from 1 to
100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings,
like date data, though Date objects and unix
milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it
should be numbers, using the scale where each
category is assigned a serial number from zero
in the order it appears.
rangemode
If "normal", the range is computed in relation
                    to the extrema of the input data. If "tozero",
                    the range extends to 0, regardless of the input
                    data. If "nonnegative", the range is non-
negative, regardless of the input data.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showticklabels
Determines whether axis labels are drawn on the
low side, the high side, both, or neither side
of the axis.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                    last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
smoothing
startline
                    Determines whether or not a line is drawn
                    along the starting value of this axis. If True,
the start line is drawn on top of the grid
lines.
startlinecolor
Sets the line color of the start line.
startlinewidth
Sets the width (in px) of the start line.
tick0
The starting index of grid lines along the axis
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see: We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.carpet.
baxis.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.carpet.baxis.tickformatstopdefaults), sets
the default property values to use for elements
of carpet.baxis.tickformatstops
tickmode
tickprefix
Sets a tick label prefix.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
title
:class:`plotly.graph_objects.carpet.baxis.Title
` instance or dict with compatible properties
titlefont
Deprecated: Please use carpet.baxis.title.font
instead. Sets this axis' title font. Note that
the title's font used to be set by the now
deprecated `titlefont` attribute.
titleoffset
Deprecated: Please use
carpet.baxis.title.offset instead. An
additional amount by which to offset the title
from the tick labels, given in pixels. Note
that this used to be set by the now deprecated
`titleoffset` attribute.
type
Sets the axis type. By default, plotly attempts
                    to determine the axis type by looking into the
data of the traces that referenced the axis in
question.
Returns
-------
plotly.graph_objs.carpet.Baxis
"""
return self["baxis"]
@baxis.setter
def baxis(self, val):
self["baxis"] = val
# bsrc
# ----
@property
def bsrc(self):
"""
Sets the source reference on Chart Studio Cloud for b .
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bsrc"]
@bsrc.setter
def bsrc(self, val):
self["bsrc"] = val
# carpet
# ------
@property
def carpet(self):
"""
An identifier for this carpet, so that `scattercarpet` and
`contourcarpet` traces can specify a carpet plot on which they
lie
The 'carpet' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
# cheaterslope
# ------------
@property
def cheaterslope(self):
"""
The shift applied to each successive row of data in creating a
        cheater plot. Only used if `x` has been omitted.
The 'cheaterslope' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cheaterslope"]
@cheaterslope.setter
def cheaterslope(self, val):
self["cheaterslope"] = val
# color
# -----
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# da
# --
@property
def da(self):
"""
Sets the a coordinate step. See `a0` for more info.
The 'da' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["da"]
@da.setter
def da(self, val):
self["da"] = val
# db
# --
@property
def db(self):
"""
Sets the b coordinate step. See `b0` for more info.
The 'db' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["db"]
@db.setter
def db(self, val):
self["db"] = val
# font
# ----
@property
def font(self):
"""
The default font used for axis & tick labels on this carpet
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.carpet.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text` `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.carpet.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
A two dimensional array of x coordinates at each carpet point.
        If omitted, the plot is a cheater plot and the xaxis is hidden
by default.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
A two dimensional array of y coordinates at each carpet point.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
a
            An array containing values of the first parameter
a0
Alternate to `a`. Builds a linear space of a
coordinates. Use with `da` where `a0` is the starting
coordinate and `da` the step.
aaxis
:class:`plotly.graph_objects.carpet.Aaxis` instance or
dict with compatible properties
asrc
Sets the source reference on Chart Studio Cloud for a
.
b
            An array containing values of the second parameter
b0
            Alternate to `b`. Builds a linear space of b
            coordinates. Use with `db` where `b0` is the starting
coordinate and `db` the step.
baxis
:class:`plotly.graph_objects.carpet.Baxis` instance or
dict with compatible properties
bsrc
Sets the source reference on Chart Studio Cloud for b
.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they lie
cheaterslope
The shift applied to each successive row of data in
            creating a cheater plot. Only used if `x` has been
            omitted.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
da
Sets the a coordinate step. See `a0` for more info.
db
Sets the b coordinate step. See `b0` for more info.
font
The default font used for axis & tick labels on this
carpet
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
stream
:class:`plotly.graph_objects.carpet.Stream` instance or
dict with compatible properties
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
A two dimensional array of x coordinates at each carpet
            point. If omitted, the plot is a cheater plot and the
xaxis is hidden by default.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
A two dimensional array of y coordinates at each carpet
point.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
"""
def __init__(
self,
arg=None,
a=None,
a0=None,
aaxis=None,
asrc=None,
b=None,
b0=None,
baxis=None,
bsrc=None,
carpet=None,
cheaterslope=None,
color=None,
customdata=None,
customdatasrc=None,
da=None,
db=None,
font=None,
ids=None,
idssrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
stream=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xsrc=None,
y=None,
yaxis=None,
ysrc=None,
**kwargs
):
"""
Construct a new Carpet object
        The data describing carpet axis layout is set in `y` and
        (optionally) also `x`. If only `y` is present, the plot is
        interpreted as a cheater plot and `x` is filled in using the `y`
        values. `x` and `y` may either be 2D arrays with each
        dimension matching that of `a` and `b`, or they may be 1D
        arrays with total length equal to that of `a` and `b`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Carpet`
a
            An array containing values of the first parameter
a0
Alternate to `a`. Builds a linear space of a
coordinates. Use with `da` where `a0` is the starting
coordinate and `da` the step.
aaxis
:class:`plotly.graph_objects.carpet.Aaxis` instance or
dict with compatible properties
asrc
Sets the source reference on Chart Studio Cloud for a
.
b
            An array containing values of the second parameter
b0
            Alternate to `b`. Builds a linear space of b
            coordinates. Use with `db` where `b0` is the starting
coordinate and `db` the step.
baxis
:class:`plotly.graph_objects.carpet.Baxis` instance or
dict with compatible properties
bsrc
Sets the source reference on Chart Studio Cloud for b
.
carpet
An identifier for this carpet, so that `scattercarpet`
and `contourcarpet` traces can specify a carpet plot on
which they lie
cheaterslope
The shift applied to each successive row of data in
            creating a cheater plot. Only used if `x` has been
            omitted.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
da
Sets the a coordinate step. See `a0` for more info.
db
Sets the b coordinate step. See `b0` for more info.
font
The default font used for axis & tick labels on this
carpet
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
stream
:class:`plotly.graph_objects.carpet.Stream` instance or
dict with compatible properties
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
A two dimensional array of x coordinates at each carpet
            point. If omitted, the plot is a cheater plot and the
xaxis is hidden by default.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
A two dimensional array of y coordinates at each carpet
point.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
Returns
-------
Carpet
"""
super(Carpet, self).__init__("carpet")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Carpet
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Carpet`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("a", None)
_v = a if a is not None else _v
if _v is not None:
self["a"] = _v
_v = arg.pop("a0", None)
_v = a0 if a0 is not None else _v
if _v is not None:
self["a0"] = _v
_v = arg.pop("aaxis", None)
_v = aaxis if aaxis is not None else _v
if _v is not None:
self["aaxis"] = _v
_v = arg.pop("asrc", None)
_v = asrc if asrc is not None else _v
if _v is not None:
self["asrc"] = _v
_v = arg.pop("b", None)
_v = b if b is not None else _v
if _v is not None:
self["b"] = _v
_v = arg.pop("b0", None)
_v = b0 if b0 is not None else _v
if _v is not None:
self["b0"] = _v
_v = arg.pop("baxis", None)
_v = baxis if baxis is not None else _v
if _v is not None:
self["baxis"] = _v
_v = arg.pop("bsrc", None)
_v = bsrc if bsrc is not None else _v
if _v is not None:
self["bsrc"] = _v
_v = arg.pop("carpet", None)
_v = carpet if carpet is not None else _v
if _v is not None:
self["carpet"] = _v
_v = arg.pop("cheaterslope", None)
_v = cheaterslope if cheaterslope is not None else _v
if _v is not None:
self["cheaterslope"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("da", None)
_v = da if da is not None else _v
if _v is not None:
self["da"] = _v
_v = arg.pop("db", None)
_v = db if db is not None else _v
if _v is not None:
self["db"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "carpet"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
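# Editor's addition (illustrative only, not part of the generated module):
# a minimal sketch of the a0/da and b0/db "linear space" shorthand documented
# above, combined with a 2D `y` array. Because `x` is omitted this is a
# cheater plot, so the x axis is hidden by default. The grid values below are
# arbitrary example data.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Carpet(
            a0=4, da=1,  # a = 4, 5, 6 (one value per column of y)
            b0=1, db=1,  # b = 1, 2, 3 (one value per row of y)
            y=[[2, 3.5, 4], [3, 4.5, 5], [5, 6.5, 7]],
            aaxis=dict(title=dict(text="a"), gridcolor="lightgray"),
            baxis=dict(title=dict(text="b"), gridcolor="lightgray"),
        )
    )
    fig.show()  # requires a configured plotly renderer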
|
mit
|
Soya93/Extract-Refactoring
|
python/helpers/pydev/pydev_ipython/matplotlibtools.py
|
12
|
5436
|
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
setattr(matplotlib, "real_use", getattr(matplotlib, "use"))
setattr(matplotlib, "use", patched_use)
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive"))
setattr(matplotlib, "is_interactive", patched_is_interactive)
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
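# ---------------------------------------------------------------------------
# Editor's addition (illustrative only): a minimal, hedged sketch of how these
# helpers are meant to be wired together by the debug console. It assumes it
# runs inside the pydev helper environment (so `_pydev_bundle` and
# `pydev_ipython` are importable) and that matplotlib has already been
# imported, since the helpers look it up in sys.modules.
if __name__ == '__main__':
    import matplotlib  # noqa: F401 -- ensures sys.modules['matplotlib'] exists
    activate_matplotlib(do_enable_gui)   # enable GUI loop + patch use()/is_interactive()
    gui, backend = find_gui_and_backend()
    sys.stdout.write("Selected backend: %s (gui: %s)\n" % (backend, gui))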
|
apache-2.0
|
w1kke/pylearn2
|
pylearn2/models/independent_multiclass_logistic.py
|
44
|
2491
|
"""
Multiclass-classification by taking the max over a set of one-against-rest
logistic classifiers.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import logging
try:
from sklearn.linear_model import LogisticRegression
except ImportError:
LogisticRegression = None
import numpy as np
from theano.compat.six.moves import xrange
logger = logging.getLogger(__name__)
class IndependentMulticlassLogistic:
"""
Fits a separate logistic regression classifier for each class, makes
predictions based on the max output: during training, views a one-hot label
vector as a vector of independent binary labels, rather than correctly
modeling them as one-hot like softmax would do.
This is what Jia+Huang used to get state of the art on CIFAR-100
Parameters
----------
C : WRITEME
"""
def __init__(self, C):
self.C = C
def fit(self, X, y):
"""
Fits the model to the given training data.
Parameters
----------
X : ndarray
2D array, each row is one example
y : ndarray
vector of integer class labels
"""
if LogisticRegression is None:
raise RuntimeError("sklearn not available.")
min_y = y.min()
max_y = y.max()
assert min_y == 0
num_classes = max_y + 1
assert num_classes > 1
logistics = []
for c in xrange(num_classes):
logger.info('fitting class {0}'.format(c))
cur_y = (y == c).astype('int32')
logistics.append(LogisticRegression(C = self.C).fit(X,cur_y))
return Classifier(logistics)
class Classifier:
"""
.. todo::
WRITEME
Parameters
----------
logistics : WRITEME
"""
def __init__(self, logistics):
assert len(logistics) > 1
num_classes = len(logistics)
num_features = logistics[0].coef_.shape[1]
self.W = np.zeros((num_features, num_classes))
self.b = np.zeros((num_classes,))
for i in xrange(num_classes):
            # coef_ and intercept_ come back from scikit-learn as (1, n_features)
            # and (1,) arrays respectively; flatten them before storing.
            self.W[:,i] = logistics[i].coef_.ravel()
            self.b[i] = logistics[i].intercept_[0]
def predict(self, X):
"""
.. todo::
WRITEME
"""
return np.argmax(self.b + np.dot(X,self.W), 1)
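# ---------------------------------------------------------------------------
# Editor's addition (illustrative only, not part of the original module):
# a minimal sketch, assuming scikit-learn is installed, of fitting one
# logistic regression per class on synthetic 3-class data and predicting with
# the argmax rule implemented by Classifier.predict. The data is random and
# only meant to exercise the API.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    rng = np.random.RandomState(0)
    X_demo = rng.randn(150, 5)
    # Derive labels 0..2 from the data so every class is (almost surely) present.
    y_demo = np.argmax(X_demo[:, :3], axis=1)
    clf = IndependentMulticlassLogistic(C=1.0).fit(X_demo, y_demo)
    y_pred = clf.predict(X_demo)
    logger.info('training accuracy: %.3f', (y_pred == y_demo).mean())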
|
bsd-3-clause
|
thomasaarholt/hyperspy
|
hyperspy/tests/drawing/test_plot_signal.py
|
3
|
10437
|
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import numpy as np
import pytest
import traits.api as t
import hyperspy.api as hs
from hyperspy.drawing.signal1d import Signal1DFigure, Signal1DLine
from hyperspy.drawing.image import ImagePlot
from hyperspy.misc.test_utils import update_close_figure, check_closing_plot
scalebar_color = 'blue'
default_tol = 2.0
baseline_dir = 'plot_signal'
style_pytest_mpl = 'default'
class _TestPlot:
def __init__(self, ndim, sdim, data_type='real'):
shape = np.arange(1, ndim + sdim + 1) * 5
n = 1
for i in shape:
n *= i
data = np.arange(n).reshape(shape)
title = 'Signal: %i, Navigator: %i' % (sdim, ndim)
dtype = ''
if 'complex' in data_type:
data = data + 1j * (data + 9)
title += ', complex'
dtype = 'Complex'
s = hs.signals.__dict__['%sSignal%iD' % (dtype, sdim)](data)
if sdim == 1:
s.axes_manager = self._set_signal_axes(s.axes_manager, name='Energy',
units='keV', scale=.5, offset=0.3)
elif sdim == 2:
s.axes_manager = self._set_signal_axes(s.axes_manager, name='Reciprocal distance',
units='1/nm', scale=1, offset=0.0)
if ndim > 0:
s.axes_manager = self._set_navigation_axes(s.axes_manager, name='',
units='nm', scale=1.0,
offset=5.0)
s.metadata.General.title = title
# workaround to be able to access the figure in case of complex 2d
# signals
if 'complex' in data_type and sdim == 2:
real = s.real
real.plot()
self.real_plot = real._plot
imag = s.imag
imag.plot()
self.imag_plot = imag._plot
self.signal = s
self.sdim = sdim
def _set_navigation_axes(self, axes_manager, name=t.Undefined,
units=t.Undefined, scale=1.0, offset=0.0):
for nav_axis in axes_manager.navigation_axes:
nav_axis.units = units
nav_axis.scale = scale
nav_axis.offset = offset
return axes_manager
def _set_signal_axes(self, axes_manager, name=t.Undefined,
units=t.Undefined, scale=1.0, offset=0.0):
for sig_axis in axes_manager.signal_axes:
sig_axis.name = name
sig_axis.units = units
sig_axis.scale = scale
sig_axis.offset = offset
return axes_manager
def _generate_parameter():
parameters = []
for ndim in [0, 1, 2]:
for sdim in [1, 2]:
for plot_type in ['nav', 'sig']:
# For complex 2D, there are 4 figures generated, some of these
                # tests are redundant
for data_type in ['real', 'complex_real', 'complex_imag']:
if ndim == 0 and plot_type == "nav": # in this case, no nav figure
pass
else:
parameters.append([ndim, sdim, plot_type, data_type])
return parameters
@pytest.mark.parametrize(("ndim", "sdim", "plot_type", "data_type"),
_generate_parameter())
@pytest.mark.mpl_image_compare(
baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl)
def test_plot_sig_nav(ndim, sdim, plot_type, data_type):
test_plot = _TestPlot(ndim, sdim, data_type)
test_plot.signal.plot()
return _get_figure(test_plot, data_type, plot_type)
@pytest.mark.parametrize("sdim", [1, 2])
@pytest.mark.mpl_image_compare(
baseline_dir=baseline_dir, tolerance=default_tol, style=style_pytest_mpl)
def test_plot_data_changed_event(sdim):
if sdim == 2:
s = hs.signals.Signal2D(np.arange(25).reshape((5, 5)))
else:
s = hs.signals.Signal1D(np.arange(25))
s.plot()
s.data *= -2
s.events.data_changed.trigger(obj=s)
return plt.gcf()
def _get_figure(test_plot, data_type, plot_type):
if plot_type == "sig":
plot = "signal_plot"
elif plot_type == "nav":
plot = "navigator_plot"
if "complex" in data_type and test_plot.sdim == 2:
if data_type == "complex_real":
plot_part = 'real_plot'
elif data_type == "complex_imag":
            plot_part = 'imag_plot'
fig = getattr(getattr(test_plot, plot_part), plot).figure
else:
fig = getattr(test_plot.signal._plot, plot).figure
return fig
@update_close_figure
def test_plot_nav0_sig1_close():
test_plot = _TestPlot(ndim=0, sdim=1, data_type="real")
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav1_sig1_close():
test_plot = _TestPlot(ndim=1, sdim=1, data_type="real")
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav2_sig1_close():
test_plot = _TestPlot(ndim=2, sdim=1, data_type="real")
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav0_sig2_close():
test_plot = _TestPlot(ndim=0, sdim=2, data_type="real")
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav1_sig2_close():
test_plot = _TestPlot(ndim=1, sdim=2, data_type="real")
test_plot.signal.plot()
return test_plot.signal
@update_close_figure
def test_plot_nav2_sig2_close():
test_plot = _TestPlot(ndim=2, sdim=2, data_type="real")
test_plot.signal.plot()
return test_plot.signal
@pytest.mark.parametrize("sdim", [1, 2])
def test_plot_close_cycle(sdim):
test_plot = _TestPlot(ndim=2, sdim=sdim, data_type="real")
s = test_plot.signal
s.plot()
s._plot.close()
assert s._plot.signal_plot is None
assert s._plot.navigator_plot is None
s.plot()
assert s._plot.signal_plot is not None
assert s._plot.navigator_plot is not None
s._plot.close()
@pytest.mark.parametrize('autoscale', ['', 'x', 'xv', 'v'])
@pytest.mark.parametrize("ndim", [1, 2])
def test_plot_navigator_kwds(ndim, autoscale):
test_plot_nav1d = _TestPlot(ndim=ndim, sdim=2, data_type="real")
s = test_plot_nav1d.signal
s.plot(navigator_kwds={'norm':'log', 'autoscale':autoscale})
if ndim == 1:
assert isinstance(s._plot.navigator_plot, Signal1DFigure)
plot = s._plot.navigator_plot.ax_lines[0]
assert isinstance(plot, Signal1DLine)
else:
plot = s._plot.navigator_plot
assert isinstance(plot, ImagePlot)
assert plot.norm == 'log'
assert plot.autoscale == autoscale
s._plot.close()
def test_plot_signal_dim0():
s = hs.signals.BaseSignal(np.arange(100)).T
s.plot()
assert s._plot.signal_plot is None
assert s._plot.navigator_plot is not None
s._plot.close()
check_closing_plot(s)
@pytest.mark.parametrize('bool_value', [True, False])
@pytest.mark.parametrize("sdim", [1, 2])
def test_data_function_kwargs(sdim, bool_value):
test_plot_nav1d = _TestPlot(ndim=1, sdim=sdim, data_type="complex")
s = test_plot_nav1d.signal
s.plot(power_spectrum=bool_value, fft_shift=bool_value)
if sdim == 1:
for key in ['power_spectrum', 'fft_shift']:
assert s._plot.signal_data_function_kwargs[key] is bool_value
else:
for key in ['power_spectrum', 'fft_shift']:
assert s._plot_kwargs[key] is bool_value
def test_plot_power_spectrum():
s = hs.signals.Signal1D(np.arange(100))
with pytest.raises(ValueError):
s.plot(power_spectrum=True)
s = hs.signals.ComplexSignal1D(np.arange(100))
s.plot(power_spectrum=True)
assert s._plot.signal_data_function_kwargs['power_spectrum'] is True
@pytest.mark.parametrize("sdim", [1, 2])
@pytest.mark.parametrize("ndim", [1, 2, 3])
def test_plot_slider(ndim, sdim):
test_plot_nav1d = _TestPlot(ndim=ndim, sdim=sdim, data_type="real")
s = test_plot_nav1d.signal
# Plot twice to check that the args of the second call are used.
s.plot()
s.plot(navigator='slider')
assert s._plot.signal_plot is not None
assert s._plot.navigator_plot is None
s._plot.close()
check_closing_plot(s)
if ndim > 1:
s.plot(navigator='spectrum')
assert s._plot.signal_plot is not None
assert s._plot.navigator_plot is not None
assert isinstance(s._plot.navigator_plot, Signal1DFigure)
s._plot.close()
check_closing_plot(s)
if ndim > 2:
s.plot()
assert s._plot.signal_plot is not None
assert s._plot.navigator_plot is not None
assert len(s.axes_manager.events.indices_changed.connected) >= 2
s._plot.close()
check_closing_plot(s)
@pytest.mark.parametrize("ndim", [1, 2])
def test_plot_navigator_plot_signal(ndim):
test_plot_nav1d = _TestPlot(ndim=ndim, sdim=1, data_type="real")
s = test_plot_nav1d.signal
navigator = -s.sum(-1).T
s.plot(navigator=navigator)
if ndim == 1:
navigator_data = s._plot.navigator_plot.ax_lines[0]._get_data()
else:
navigator_data = s._plot.navigator_plot._current_data
np.testing.assert_allclose(navigator_data, navigator.data)
s._plot.close()
check_closing_plot(s)
s.plot(navigator=None)
assert s._plot.signal_plot is not None
assert s._plot.navigator_plot is None
s._plot.close()
check_closing_plot(s)
@pytest.mark.parametrize("sdim", [1, 2])
def test_plot_autoscale(sdim):
test_plot_nav1d = _TestPlot(ndim=1, sdim=sdim, data_type="real")
s = test_plot_nav1d.signal
with pytest.raises(ValueError):
s.plot(autoscale='xa')
|
gpl-3.0
|
iModels/demos
|
demos/ethane_box/ethane_box.py
|
1
|
2466
|
import os
import time
import matplotlib.pyplot as plt
import seaborn as sns
import mbuild as mb
import metamds as mds
import mdtraj as md
def build_ethane_box(box, n_molecules, **kwargs):
from mbuild.examples import Ethane
ethane = Ethane()
full_box = mb.fill_box(ethane, n_molecules, box)
full_box.name = '{}_ethanes'.format(n_molecules)
return full_box
def create_run_script(compound, forcefield, input_dir, **kwargs):
name = compound.name
em = os.path.join(input_dir, 'em.mdp')
nvt = os.path.join(input_dir, 'nvt.mdp')
gro = '{name}.gro'.format(name=name)
top = '{name}.top'.format(name=name)
compound.save(top, forcefield=forcefield, overwrite=True)
em_grompp = 'gmx grompp -f {mdp} -c {gro} -p {top} -o em.tpr'.format(mdp=em, gro=gro, top=top)
em_mdrun = 'gmx mdrun -v -deffnm em'
nvt_grompp = 'gmx grompp -f {mdp} -c em.gro -p {top} -o nvt.tpr'.format(mdp=nvt, top=top)
nvt_mdrun = 'gmx mdrun -v -deffnm nvt'
script = (em_grompp, em_mdrun, nvt_grompp, nvt_mdrun)
return script
if __name__ == '__main__':
# Input parameters
parameters = {'n_molecules': 200,
'box': [3, 3, 3],
'forcefield': 'OPLS-aa'}
# Build the initial configuration
compound = build_ethane_box(**parameters)
#compound.visualize()
parameters['compound'] = compound
# Initialize a simulation instance with a template and some metadata
sim = mds.Simulation(name='ethane', template=create_run_script, output_dir='output')
# Parameterize our simulation template
task = sim.parametrize(**parameters)
# Run
#task.execute()
task.execute(hostname='rahman.vuse.vanderbilt.edu', username='ctk3b')
print(task.status())
time.sleep(10)
task.sync()
# Analyze
trajectories = task.get_output_files('trajectories')
topologies = task.get_output_files('topologies')
# Pick which one to select?
trj_path = os.path.join(task.output_dir, 'nvt.xtc')
top_path = os.path.join(task.output_dir, 'em.gro')
traj = md.load(trj_path, top=top_path)
print(traj)
# RDF
# pairs = traj.top.select_pairs('name C', 'name C')
# r, g_r = md.compute_rdf(traj, pairs)
# plt.plot(r, g_r)
# plt.xlabel('r (nm)')
# plt.ylabel('g(r)')
# plt.show()
#
# s2 = md.compute_nematic_order(traj, 'residues')
# plt.plot(traj.time, s2)
# plt.xlabel('time (ps)')
# plt.ylabel('S2')
|
mit
|
tillahoffmann/tensorflow
|
tensorflow/python/estimator/inputs/inputs.py
|
94
|
1290
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = [
'numpy_input_fn',
'pandas_input_fn'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
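# --- Illustrative usage sketch (not part of the original module) ---
# A hedged, minimal example of how the re-exported numpy_input_fn is commonly
# used; the feature key 'x' and the toy arrays below are assumptions made
# purely for illustration.
if __name__ == "__main__":
    import numpy as np
    toy_input_fn = numpy_input_fn(
        x={"x": np.arange(10, dtype=np.float32).reshape(-1, 1)},  # feature dict
        y=np.arange(10, dtype=np.float32),                        # targets
        batch_size=2,
        num_epochs=1,
        shuffle=False)
    features, targets = toy_input_fn()  # builds the (features, targets) tensors
# --- end of sketch ---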
|
apache-2.0
|
mne-tools/mne-tools.github.io
|
0.11/_downloads/plot_ems_filtering.py
|
19
|
2981
|
"""
==============================================
Compute effect-matched-spatial filtering (EMS)
==============================================
This example computes the EMS to reconstruct the time course of
the experimental effect as described in:
Aaron Schurger, Sebastien Marti, and Stanislas Dehaene, "Reducing multi-sensor
data to a single time course that reveals experimental effects",
BMC Neuroscience 2013, 14:122
This technique is used to create spatial filters based on the
difference between two conditions. By projecting the trial onto the
corresponding spatial filters, surrogate single trials are created
in which multi-sensor activity is reduced to one time series which
exposes experimental effects, if present.
We will first plot a trials x times image of the single trials and order the
trials by condition. A second plot shows the average time series for each
condition. Finally a topographic plot is created which exhibits the
temporal evolution of the spatial filters.
"""
# Author: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.decoding import compute_ems
print(__doc__)
data_path = sample.data_path()
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_ids = {'AudL': 1, 'VisL': 3, 'AudR': 2, 'VisR': 4}
tmin = -0.2
tmax = 0.5
# Read data and create epochs
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 45)
events = mne.read_events(event_fname)
include = [] # or stim channels ['STI 014']
ch_type = 'grad'
picks = mne.pick_types(raw.info, meg=ch_type, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
baseline=None, reject=reject)
# Let's equalize the trial counts in each condition
epochs.equalize_event_counts(epochs.event_id, copy=False)
# compute surrogate time series
surrogates, filters, conditions = compute_ems(epochs, ['AudL', 'VisL'])
times = epochs.times * 1e3
plt.figure()
plt.title('single trial surrogates')
plt.imshow(surrogates[conditions.argsort()], origin='lower', aspect='auto',
extent=[times[0], times[-1], 1, len(surrogates)],
cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Trials (reordered by condition)')
plt.figure()
plt.title('Average EMS signal')
mappings = [(k, v) for k, v in event_ids.items() if v in conditions]
for key, value in mappings:
ems_ave = surrogates[conditions == value]
ems_ave *= 1e13
plt.plot(times, ems_ave.mean(0), label=key)
plt.xlabel('Time (ms)')
plt.ylabel('fT/cm')
plt.legend(loc='best')
# visualize spatial filters across time
plt.show()
evoked = epochs.average()
evoked.data = filters
evoked.plot_topomap(ch_type=ch_type)
|
bsd-3-clause
|
ngoix/OCRF
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
ankurankan/scikit-learn
|
sklearn/metrics/setup.py
|
299
|
1024
|
import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
birdsarah/bokeh
|
bokeh/mplexporter/renderers/base.py
|
11
|
14395
|
from __future__ import absolute_import
import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
        if len(path_transforms) == 0:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
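# --- Illustrative sketch (not part of the original module) ---
# A hedged, minimal example of how a concrete Renderer subclass might satisfy
# the draw_path() contract documented above (draw_line, draw_markers and
# draw_path_collection all fall back to it by default). The class name and the
# triangle below are invented purely for illustration; a real exporter would
# translate the path into its own output format instead of printing it.
class _PrintingRenderer(Renderer):
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        # 'M' starts a subpath, each 'L' consumes one vertex, 'Z' closes it.
        print("path: {0} vertices, codes {1}, {2} coordinates".format(
            len(data), pathcodes, coordinates))

if __name__ == "__main__":
    _triangle = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]])
    _PrintingRenderer().draw_path(data=_triangle, coordinates="data",
                                  pathcodes=['M', 'L', 'L', 'Z'], style={})
# --- end of sketch ---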
|
bsd-3-clause
|
schets/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
283
|
1678
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
jayshonzs/ESL
|
PropertypeMethodsAndKNN/LVQ.py
|
1
|
2783
|
'''
Created on 2014-8-7
@author: xiajie
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import simulate_data
import K_means
def euclidean(x1, x2):
return np.linalg.norm(x1-x2)
def move(center, x, eps):
    # Update the prototype in place so the caller's row of the model array actually moves.
    d = x - center
    center += eps*d
def train(X, model, distance=euclidean):
eps = 0.05
while eps > 0.00001:
for i, x in enumerate(X):
c = None
if i < 100:
c = 0
elif i < 200:
c = 1
else:
c = 2
k, t = predict(model, x)
if k == c:
move(model[t], x, eps)
else:
move(model[t], x, -eps)
eps *= 0.95
return model
def predict(model, x, distance=euclidean):
min_distance = 999999999.
best_i = None
best_k = None
for i in range(len(model)):
d = distance(x, model[i])
if d < min_distance:
min_distance = d
best_i = i
if best_i < 5:
best_k = 0
elif best_i < 10:
best_k = 1
else:
best_k = 2
return best_k, best_i
def extract_centers(model):
centers = []
t = []
for k in range(len(model)):
for r in range(len(model[k][1])):
center = model[k][1][r]
centers.append(center)
t.append(k)
return np.array(centers), np.array(t)
def draw(data, classes, model, resolution=100):
mycm = mpl.cm.get_cmap('Paired')
one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
np.arange(two_min, two_max, (two_max-two_min)/resolution))
inputs = np.c_[xx1.ravel(), xx2.ravel()]
z = []
for i in range(len(inputs)):
z.append(predict(model, inputs[i])[0])
result = np.array(z).reshape(xx1.shape)
plt.contourf(xx1, xx2, result, cmap=mycm)
plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
t = np.zeros(15)
for i in range(15):
if i < 5:
t[i] = 0
elif i < 10:
t[i] = 1
else:
t[i] = 2
plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
plt.xlim([0, 10])
plt.ylim([0, 10])
plt.show()
if __name__ == '__main__':
X = simulate_data.loaddata()
t = []
for i in range(300):
if i < 100:
t.append(0)
elif i < 200:
t.append(1)
else:
t.append(2)
k_means_model = K_means.train(X)
init_model = extract_centers(k_means_model)[0]
model = train(X, init_model)
draw(X, np.array(t), model)
|
mit
|
quimaguirre/diana
|
diana/classes/drug.py
|
1
|
51685
|
import os, sys, re
import pickle
import pandas as pd
import hashlib
class Drug(object):
"""
Class defining a Drug object
"""
def __init__(self, drug_name):
"""
@param: drug_name
@pdef: Name of the drug
@ptype: {String}
@raises: {IncorrectTypeID} if the method translate_network is used with
a network of type_id different from 'biana'
"""
self.drug_name = drug_name.lower()
self.type_name = self.recognize_name(drug_name.lower())
self.targets = []
self.targets_in_network = []
self.pfams = []
self.smiles = []
self.ATCs = []
self.level_to_ATCs = {'level1':[], 'level2':[], 'level3':[], 'level4':[], 'level5':[]}
self.SEs = []
self.target_type_id = None
self.target_type_id_to_table = {
'geneid' : 'externalEntityGeneID',
'genesymbol' : 'externalEntityGeneSymbol',
'uniprotentry' : 'externalEntityUniprotEntry',
'uniprotaccession' : 'externalEntityUniprotAccession',
}
self.type_name_to_table = {
'name' : 'externalEntityName',
'drugbankid' : 'externalEntityDrugBankID',
'dcdb' : 'externalEntityDCDB_drugID',
'chemblid' : 'externalEntityCHEMBL',
'pubchemcompound' : 'externalEntityPubChemCompound',
}
###########
# METHODS #
###########
def obtain_targets_from_file(self, targets_file, target_type_id):
"""
Obtains the targets from an input file and stores them into a list.
The file must contain the names of the targets separated by new lines.
The type of ID of the targets must be specified.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
with open(targets_file, 'r') as targets_file_fd:
for line in targets_file_fd:
self.targets.append(line.strip())
# Check if the number of targets provided is sufficient for the analysis
if len(self.targets) < 1:
raise InsufficientTargets(self.targets)
return
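    # --- Illustrative usage sketch (not part of the original class) ---
    # A hedged, minimal example of the expected input: a plain-text file with
    # one target per line, loaded as shown. The file name, drug identifier and
    # gene IDs below are invented purely for illustration.
    #
    #   targets.txt:
    #       5594
    #       5595
    #
    #   drug = Drug('DB00530')
    #   drug.obtain_targets_from_file('targets.txt', 'geneid')
    #   # drug.targets is now ['5594', '5595']
    # --- end of sketch ---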
def obtain_targets_from_pickle(self, drug2targets_file, target_type_id):
"""
Obtains the targets from an input pickle file and stores them into a list.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
drug2targets = pickle.load(open(drug2targets_file))
drug_id = self.drug_name.upper()
if drug_id in drug2targets:
self.targets = list(drug2targets[drug_id])
else:
raise InsufficientTargets(self.targets)
# Check if the number of targets provided is sufficient for the analysis
if len(self.targets) < 1:
raise InsufficientTargets(self.targets)
return
def obtain_drugbankids_from_table(self, drug_mapping_file):
"""
Obtains the drugbankids of a drug from an input table and stores them into a list.
Usually, there is only one drugbankid, but there could be multiple ones in some occasions.
"""
# Get DrugBankID for the input drug
if self.type_name != 'drugbankid':
drug_mapping_df = pd.read_csv(drug_mapping_file, sep='\t', index_col=None)
if self.type_name == 'name':
# Select drugbank ids with the input name
drugnames_df = drug_mapping_df[(drug_mapping_df['type_identifier'] == 'name') & (drug_mapping_df['identifier'] == self.drug_name)]
drugbankids = set(drugnames_df['#drugbankid'].tolist())
if len(drugbankids) == 0:
raise DrugNameNotFound(self.drug_name, self.type_name)
elif len(drugbankids) > 1:
# Check if the input name is unique
if 'unique' in drugnames_df['type_name'].tolist():
drugbankids = set(drugnames_df.loc[drugnames_df['type_name'] == 'unique', '#drugbankid'].tolist())
if len(drugbankids) == 0:
drugbankids = set(drugnames_df['#drugbankid'].tolist())
else:
drugbankids = set(drug_mapping_df.loc[(drug_mapping_df['type_identifier'] == self.type_name) & (drug_mapping_df['identifier'] == self.drug_name), '#drugbankid'].tolist())
if len(drugbankids) == 0:
raise DrugNameNotFound(self.drug_name, self.type_name)
else:
drugbankids = [self.drug_name.upper()]
return drugbankids
def obtain_targets_from_table(self, drugbankids, drug_to_targets_file, target_type_id='geneid'):
"""
Obtains the targets from an input table and stores them into a list.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
# Get targets
targets = set()
drug_to_targets_df = pd.read_csv(drug_to_targets_file, sep='\t', index_col=None)
for group_targets in drug_to_targets_df.loc[drug_to_targets_df['#drugbankid'].isin(drugbankids), 'geneids'].tolist():
targets = targets | set(group_targets.split('; '))
# Check if the number of targets provided is sufficient for the analysis
if len(targets) < 1:
raise InsufficientTargets(self.targets)
else:
self.targets = targets
return
def obtain_targets_from_BIANA(self, biana_cnx, target_type_id, unification_protocol):
"""
Obtains the targets from BIANA database using as query the drug name.
The type of ID of the targets must be specified.
"biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.
"""
self.target_type_id = target_type_id.lower() # Annotate the type of ID of the targets
target_type_id_table = self.return_targets_biana_table(self.target_type_id) # Obtain the table containing the type of ID introduced
type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
# Select the external entity ID of the DCDB drug
query1 = (''' SELECT externalEntityID FROM {} WHERE value = %s
'''.format(type_name_table))
# Select the geneID targets of the drug, only therapeutic ones, and only from DrugBank database!
query2 = (''' SELECT G.value FROM externalEntity E1, {} U1, {} U2, externalEntity E2, externalEntityRelationParticipant R2, externalEntityRelationParticipant R3, externalEntityDrugBank_targetID T, {} U3, {} U4, externalEntityGeneID G
WHERE E1.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = E2.externalEntityID AND E2.type = 'drug'
AND E2.externalEntityID = R2.externalEntityID AND R2.externalEntityRelationID = R3.externalEntityRelationID AND R3.externalEntityID = T.externalEntityID AND T.targetType = "therapeutic"
AND R3.externalEntityID = U3.externalEntityID AND U3.userEntityID = U4.userEntityID AND U4.externalEntityID = G.externalEntityID AND E1.externalEntityID = %s
'''.format(up_table, up_table, up_table, up_table, target_type_id_table))
cursor.execute(query1, (self.drug_name,))
external_entities = set()
geneids = set()
# Search for the external entities corresponding to the name of the drug
for items in cursor:
for ee in items:
external_entities.add(ee)
# Search for the geneIDs interacting with the drug
if len(external_entities) > 0:
for ee in external_entities:
cursor.execute(query2, (ee,))
for items in cursor:
for geneid in items:
geneids.add(geneid)
else:
raise DrugNameNotFound(self.drug_name, self.type_name)
# Why in two steps?
        # Because the table "externalEntityName" is very large, running one complex query can be very time-consuming
        # It is better to split the search into two queries
cursor.close()
self.targets = list(geneids)
# Check if the number of targets provided is sufficient for the analysis
if len(self.targets) < 1:
raise InsufficientTargets(self.targets)
return
def recognize_name(self, drug_name):
"""
Recognizes the type of name of the drug
(dcdb, drugbank or name)
"""
dcdb_pattern = re.compile('^dcc[0-9]{4}$')
drugbank_pattern = re.compile('^db[0-9]{5}$')
chembl_pattern = re.compile('^chembl[0-9]+$')
pubchem_pattern = re.compile('^[0-9]+$')
diana_pattern = re.compile('^diana_.*$')
if dcdb_pattern.match(drug_name):
self.drug_name = drug_name.upper()
return 'dcdb'
elif drugbank_pattern.match(drug_name):
self.drug_name = drug_name.upper()
return 'drugbankid'
elif chembl_pattern.match(drug_name):
self.drug_name = drug_name.upper()
return 'chemblid'
elif pubchem_pattern.match(drug_name):
return 'pubchemcompound'
elif diana_pattern.match(drug_name):
return 'diana'
else:
return 'name'
def return_drug_biana_table(self, type_name):
"""
Returns the table in BIANA where the type of drug name
introduced is stored.
"""
if type_name in self.type_name_to_table:
return self.type_name_to_table[type_name]
def return_targets_biana_table(self, target_type_id):
"""
Returns the table in BIANA where the annotations of the type of ID
introduced are stored.
"""
if target_type_id in self.target_type_id_to_table:
return self.target_type_id_to_table[target_type_id]
else:
raise IncorrectTypeID(target_type_id, self.target_type_id_to_table)
def obtain_pfams_from_file(self, pfam_file):
"""
Obtains the pfams from an input file and stores them into a list.
The file must contain the names of the pfams separated by new lines.
"""
with open(pfam_file, 'r') as pfam_file_fd:
for line in pfam_file_fd:
self.pfams.append(line.strip())
return
def obtain_pfams_from_pickle(self, pfam_pickle_file, output_file):
"""
Obtains the pfams from an input pickle file and stores them into a list.
"""
geneid2pfam = pickle.load(open(pfam_pickle_file))
all_pfams = set()
for target in self.targets:
if target in geneid2pfam:
pfams = geneid2pfam[target]
for pfam in pfams:
all_pfams.add(pfam)
if len(all_pfams) > 0:
self.pfams = list(all_pfams)
with open(output_file, 'w') as pfam_fd:
for pfam in self.pfams:
pfam_fd.write('{}\n'.format(pfam))
else:
print('No PFAMS found for the targets introduced: {}.\n'.format(', '.join(self.targets)))
return
def obtain_pfams_from_geneid_target_table(self, geneids, geneid_target_mapping_file):
"""
Obtains the pfams of a list of targets (in gene ID) from an input table and stores them into a list.
"""
# Get pfams
geneid_mappings_df = pd.read_csv(geneid_target_mapping_file, sep='\t', index_col=None)
pfams_df = geneid_mappings_df[(geneid_mappings_df['#geneid'].isin(geneids)) & (geneid_mappings_df['type_identifier'] == 'pfam')]
self.pfams = set([pfam.upper() for pfam in pfams_df['identifier'].tolist()])
return
def obtain_pfams_from_targets(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the pfams from BIANA database using as query the targets.
"biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.
Stores the PFAMs found in an output file
"""
target_type_id_table = self.return_targets_biana_table(self.target_type_id) # Obtain the table containing the type of ID introduced
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
query = (''' SELECT P.value FROM {} G, {} U1, {} U2, externalEntityPFAM P
WHERE G.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = P.externalEntityID AND G.value = %s
'''.format(target_type_id_table, up_table, up_table))
if len(self.targets) > 0:
cursor = biana_cnx.cursor() # Start cursor to MySQL
            pfams = set()
            for target in self.targets:
                cursor.execute(query, (target,))
for items in cursor:
for pfam in items:
pfams.add(pfam.upper())
cursor.close()
else:
print('There are no targets, so it is impossible to get the PFAMs!\n')
sys.exit(10)
if len(pfams) > 0:
self.pfams = list(pfams)
with open(output_file, 'w') as pfam_fd:
for pfam in self.pfams:
pfam_fd.write('{}\n'.format(pfam))
else:
print('No PFAMS found for the targets introduced: {}.\n'.format(', '.join(self.targets)))
return
def obtain_SMILES_from_file(self, smiles_file):
"""
Obtains the SMILES from an input file and stores them into a list.
The file must contain the SMILES separated by new lines.
"""
with open(smiles_file, 'r') as smiles_file_fd:
for line in smiles_file_fd:
self.smiles.append(line.strip())
return
def obtain_SMILES_from_table(self, drugbankids, drugbank_smiles_file):
"""
Obtains the SMILES of a drug from an input table and stores them into a list.
"""
drugbank_smiles_df = pd.read_csv(drugbank_smiles_file, sep='\t', index_col=None)
self.smiles = set(drugbank_smiles_df.loc[drugbank_smiles_df['#drugbankid'].isin(drugbankids), 'smiles'].tolist())
return
def obtain_SMILES_from_pickle(self, smiles_pickle_file, output_file):
"""
Obtains the SMILES from an input pickle file and stores them into a list.
"""
drug2smiles = pickle.load(open(smiles_pickle_file))
drug_id = self.drug_name.upper()
if drug_id in drug2smiles:
self.smiles = drug2smiles[drug_id]
else:
            self.smiles = set()
if len(self.smiles) > 0:
with open(output_file, 'w') as smiles_fd:
for result in self.smiles:
smiles_fd.write('{}\n'.format(result))
else:
print('No SMILES found for the drug {}.\n'.format(self.drug_name))
return
def obtain_SMILES_from_BIANA(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the SMILES from BIANA database using as query the name of the drug.
"biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.
Stores the SMILES in an output file.
If there is more than one different SMILES, they are printed separated by new lines.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
query = (''' SELECT S.value FROM {} N, {} U1, {} U2, externalEntitySMILES S
WHERE N.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = S.externalEntityID AND N.value = %s
'''.format(type_name_table, up_table, up_table))
cursor = biana_cnx.cursor() # Start cursor to MySQL
cursor.execute(query, (self.drug_name,))
smiles = set()
for items in cursor:
for result in items:
smiles.add(result)
cursor.close()
if len(smiles) > 0:
self.smiles = list(smiles)
with open(output_file, 'w') as smiles_fd:
for result in self.smiles:
smiles_fd.write('{}\n'.format(result))
else:
print('No SMILES found for the drug {}.\n'.format(self.drug_name))
return
def obtain_ATCs_from_file(self, ATCs_file):
"""
Obtains the ATCs from an input file and stores them into a list.
The file must contain the names of the ATCs separated by new lines.
"""
with open(ATCs_file, 'r') as ATC_file_fd:
for line in ATC_file_fd:
self.ATCs.append(line.strip())
self.level_to_ATCs = obtain_ATC_levels(self.ATCs)
return
def obtain_ATCs_from_table(self, drugbankids, drugbank_atc_file):
"""
        Obtains the ATCs of a drug from an input table and stores them into a list.
"""
drugbank_atc_df = pd.read_csv(drugbank_atc_file, sep='\t', index_col=None)
atcs = drugbank_atc_df.loc[drugbank_atc_df['#drugbankid'].isin(drugbankids), 'atc'].tolist()
self.ATCs = set([atc.upper() for atc in atcs])
self.level_to_ATCs = obtain_ATC_levels(self.ATCs)
return
def obtain_ATCs_from_BIANA(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the ATCs from BIANA database using as query the targets.
"biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.
Stores the ATCs found in an output file.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
query = (''' SELECT A.value FROM {} N, {} U1, {} U2, externalEntityATC A
WHERE N.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = A.externalEntityID AND N.value = %s
'''.format(type_name_table, up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query, (self.drug_name,))
ATCs = set()
for items in cursor:
for ATC in items:
ATCs.add(ATC.upper())
cursor.close()
if len(ATCs) > 0:
self.ATCs = list(ATCs)
with open(output_file, 'w') as ATCs_fd:
                for ATC in self.ATCs:
                    ATCs_fd.write('{}\n'.format(ATC))
else:
print(' DIANA INFO:\tNo ATCs for the drug introduced: {}.\n'.format(self.drug_name))
return
def obtain_ATCs_from_pickle(self, atc_pickle_file, output_file):
"""
Obtains the ATCs from an input pickle file and stores them into a list.
"""
drug2atcs = pickle.load(open(atc_pickle_file))
drug_id = self.drug_name.upper()
if drug_id in drug2atcs:
self.ATCs = drug2atcs[drug_id]
else:
self.ATCs = set()
if len(self.ATCs) > 0:
with open(output_file, 'w') as atc_fd:
for result in self.ATCs:
atc_fd.write('{}\n'.format(result))
else:
print('No ATCs found for the drug {}.\n'.format(self.drug_name))
return
def obtain_SE_from_file(self, SE_file):
"""
Obtains the SE from an input file and stores them into a list.
The file must contain the names of the SE separated by new lines.
"""
with open(SE_file, 'r') as SE_file_fd:
for line in SE_file_fd:
self.SEs.append(line.strip())
return
def obtain_SE_from_table(self, drugbankids, drugbank_side_effects_file):
"""
Obtains the side effects of a drug from an input table and stores them into a list.
"""
drugbank_side_effects_df = pd.read_csv(drugbank_side_effects_file, sep='\t', index_col=None)
self.SEs = set(drugbank_side_effects_df.loc[drugbank_side_effects_df['#drugbankid'].isin(drugbankids), 'umls_id'].tolist())
return
def obtain_SE_from_BIANA(self, biana_cnx, output_file, unification_protocol):
"""
Obtains the SE from BIANA database using as query the targets.
"biana_cnx" parameter stands for the variable containing the connexion to the MySQL database.
Stores the SE found in an output file.
"""
        type_name_table = self.return_drug_biana_table(self.type_name) # Obtain the table containing the type of name introduced
        up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
        query = (''' SELECT UD.value FROM {} D, {} U1, {} U2, externalEntityPubChemCompound PC, externalEntityRelationParticipant P1, externalEntityRelationParticipant P2, externalEntityRelation R, externalEntityUMLS_diseaseID UD
                     WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = PC.externalEntityID AND PC.externalEntityID = P1.externalEntityID AND D.value = %s
                     AND P1.externalEntityRelationID = R.externalEntityRelationID AND P2.externalEntityRelationID = R.externalEntityRelationID AND P1.externalEntityID != P2.externalEntityID AND R.type = "drug_phenotype_association"
                     AND P2.externalEntityID = UD.externalEntityID
                 '''.format(type_name_table, up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query, (self.drug_name,))
SEs = set()
for items in cursor:
            for se in items:
                SEs.add(se.upper())
        cursor.close()
        if len(SEs) > 0:
            self.SEs = list(SEs)
            with open(output_file, 'w') as SEs_fd:
                for se in self.SEs:
                    SEs_fd.write('{}\n'.format(se))
else:
print(' DIANA INFO:\tNo Side Effects for the drug introduced: {}.\n'.format(self.drug_name))
return
def obtain_SE_from_pickle(self, se_pickle_file, output_file):
"""
Obtains the side effects from an input pickle file and stores them into a list.
"""
drug2side_effects = pickle.load(open(se_pickle_file))
drug_id = self.drug_name.upper()
if drug_id in drug2side_effects:
self.SEs = drug2side_effects[drug_id]
else:
self.SEs = set()
if len(self.SEs) > 0:
with open(output_file, 'w') as se_fd:
for result in self.SEs:
se_fd.write('{}\n'.format(result))
else:
print('No SEs found for the drug {}.\n'.format(self.drug_name))
return
class InsufficientTargets(Exception):
"""
    Exception raised when the number of targets is below the required minimum
    (1 by default). This exception is raised because GUILD analyses with too
    few targets are not reliable.
"""
def __init__(self, targets, limit_targets=1):
self.targets = targets
self.limit_targets = limit_targets
def __str__(self):
        return 'The number of targets provided ({}) is insufficient.\nGUILD must have at least {} target(s) to run a reliable analysis.\n'.format(len(self.targets), self.limit_targets)
class DrugNameNotFound(Exception):
"""
Exception raised when the drug name is not found in BIANA.
"""
def __init__(self, drug_name, type_name):
self.drug_name = drug_name
self.type_name = type_name
def __str__(self):
return 'The drug {} {} has not been found in BIANA.\nTherefore, any target could be found. Please, introduce another name or the targets of the drug.\n'.format(self.type_name, self.drug_name)
class IncorrectTypeID(Exception):
"""
Exception that raises when a type of IDs of the proteins is not admitted for
the program.
"""
def __init__(self, target_type_id, target_type_id_to_table):
self.target_type_id = target_type_id
self.target_type_id_to_table = target_type_id_to_table
def __str__(self):
return 'The initial type of IDs of the proteins ({}) is not admitted.\nThe types of ID admitted in DIANA are: {}\n'.format(self.target_type_id, ', '.join(self.target_type_id_to_table.keys()))
def generate_diana_id(drug_name, targets, network_name):
"""
Generates an ID for the drug using the drug name, the sorted targets and
the network name.
"""
id_str = 'diana_'
    drug_str = ''.join(drug_name.split()) # Obtain the drug name, and remove any whitespace in the name
id_str += drug_str.lower() # Add the drug name in the ID string
targets = [str(x) for x in targets] # Transform the targets to strings
targets_str = ''.join(sorted(targets)) # Join the targets in one string
id_str += targets_str.lower() # Add the targets in the ID string
id_str += network_name.lower() # Add the network name in the ID string
id_str=id_str.encode('utf-8') # Encode it by utf-8
m = hashlib.md5()
m.update(id_str) # Introduce the string in the hashlib instance
unique_id = m.hexdigest()[:12] # Obtain a unique ID from the string. Only get the first 12 characters
return unique_id
def old_generate_drug_id(drug_name, targets, network_name):
"""
Generates an ID for the drug using the drug name, the sorted targets and
the network name.
"""
id_str = ''
drug_str = ''.join(drug_name.split('\s')) # Obtain the drug name, and remove any space in the name
id_str += drug_str.lower() # Add the drug name in the ID string
targets = [str(x) for x in targets] # Transform the targets to strings
targets_str = ''.join(sorted(targets)) # Join the targets in one string
id_str += targets_str.lower() # Add the targets in the ID string
id_str += network_name.lower() # Add the network name in the ID string
id_str=id_str.encode('utf-8') # Encode it by utf-8
m = hashlib.md5()
m.update(id_str) # Introduce the string in the hashlib instance
unique_id = m.hexdigest()[:12] # Obtain a unique ID from the string. Only get the first 12 characters
return unique_id
def create_targets_file(targets, file_name):
"""
Creates a targets file, containing the targets separated by new line characters
"""
with open(file_name, 'w') as fw:
for target in targets:
fw.write('{}\n'.format(target))
return
def create_number_of_targets_file(drugbank_geneid_mapping_file, number_of_targets_file):
"""
Creates a file with the total number of different targets.
"""
drugbank_geneid_mappings_df = pd.read_csv(drugbank_geneid_mapping_file, sep='\t', index_col=None)
targets = set(drugbank_geneid_mappings_df['geneid'].tolist())
with open(number_of_targets_file, 'w') as number_of_targets_fd:
number_of_targets_fd.write('{}\n'.format(len(targets)))
return len(targets)
def create_number_of_pfams_file(geneid_target_mapping_file, number_of_pfams_file):
"""
Creates a file with the total number of different PFAMs associated to targets.
"""
geneid_mappings_df = pd.read_csv(geneid_target_mapping_file, sep='\t', index_col=None)
pfams_df = geneid_mappings_df[geneid_mappings_df['type_identifier'] == 'pfam']
#pfams_df = geneid_mappings_df[(geneid_mappings_df['#geneid'].isin(network_nodes)) & (geneid_mappings_df['type_identifier'] == 'pfam')]
pfams = set([pfam.upper() for pfam in pfams_df['identifier'].tolist()])
with open(number_of_pfams_file, 'w') as number_of_pfams_fd:
number_of_pfams_fd.write('{}\n'.format(len(pfams)))
return len(pfams)
def get_all_targets_from_mappings(drugbank_geneid_mapping_file):
"""
Get all targets from the drugbank geneid mapping file.
"""
drugbank_geneid_mappings_df = pd.read_csv(drugbank_geneid_mapping_file, sep='\t', index_col=None)
targets = set(map(str, drugbank_geneid_mappings_df['geneid'].tolist()))
return targets
def get_all_pfams_from_mappings(geneid_target_mapping_file):
"""
Get all PFAMs from the geneid target mapping file
"""
geneid_mappings_df = pd.read_csv(geneid_target_mapping_file, sep='\t', index_col=None)
pfams_df = geneid_mappings_df[geneid_mappings_df['type_identifier'] == 'pfam']
pfams = set([pfam.upper() for pfam in pfams_df['identifier'].tolist()])
return pfams
def get_all_atcs_from_mappings(drugbank_atc_file):
"""
Get all ATCs from the drugbank ATC mapping file
"""
drugbank_atc_df = pd.read_csv(drugbank_atc_file, sep='\t', index_col=None)
atcs = set(drugbank_atc_df['atc'].tolist())
level_to_ATCs = obtain_ATC_levels(atcs)
return level_to_ATCs
def get_all_ses_from_mappings(drugbank_side_effects_file):
"""
Get all side effects from the drugbank side effects mapping file
"""
drugbank_side_effects_df = pd.read_csv(drugbank_side_effects_file, sep='\t', index_col=None)
ses = set(drugbank_side_effects_df['umls_id'].tolist())
return ses
def read_number_file(number_file):
"""
Reads the file with the total number of targets/PFAMs.
"""
with open(number_file, 'r') as number_fd:
number_of_entities = int(number_fd.readline().strip("\n"))
return number_of_entities
def return_unification_protocol_table(biana_cnx, unification_protocol):
"""
Returns the table that contains the Unification Protocol
introduced as query
"""
query = (''' SELECT unificationProtocolID FROM userEntityUnificationProtocol
WHERE description = %s ''')
cursor = biana_cnx.cursor() # Start cursor to MySQL
cursor.execute(query, (unification_protocol,))
up_ids = []
for items in cursor:
for up in items:
up_ids.append(up)
up_id = up_ids[0]
up_table = 'userEntityUnification_protocol_'+str(up_id)
cursor.close()
return up_table
def obtain_ATC_levels(ATCs):
"""
Obtain the 5 levels of ATCs from an ATC list
"""
level_to_ATCs = {'level1':[], 'level2':[], 'level3':[], 'level4':[], 'level5':[]}
for ATC in ATCs:
level_to_ATCs['level1'].append(ATC[0]) # e.g. A
level_to_ATCs['level2'].append(ATC[0:3]) # e.g. A10
level_to_ATCs['level3'].append(ATC[0:4]) # e.g. A10B
level_to_ATCs['level4'].append(ATC[0:5]) # e.g. A10BA
level_to_ATCs['level5'].append(ATC) # e.g. A10BA02
return level_to_ATCs
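# --- Illustrative usage sketch (not part of the original module) ---
# A hedged, minimal example of what obtain_ATC_levels() returns; the ATC code
# 'A10BA02' is taken from the inline comments above and is used purely for
# illustration.
#
#   obtain_ATC_levels(['A10BA02'])
#   # -> {'level1': ['A'], 'level2': ['A10'], 'level3': ['A10B'],
#   #     'level4': ['A10BA'], 'level5': ['A10BA02']}
# --- end of sketch ---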
def obtain_drugbank_to_targets(biana_cnx, unification_protocol, sif_file, output_pickle_file):
"""
Obtains a file containing the targets of every DrugBank drug
"""
# Get all the nodes in the network
all_nodes = set()
with open(sif_file, 'r') as sif_file_fd:
for line in sif_file_fd:
node1, score, node2 = line.strip().split('\t')
all_nodes.add(int(node1))
all_nodes.add(int(node2))
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
query1 = (''' SELECT value FROM externalEntityDrugBankID ''')
query2 = (''' SELECT G.value FROM externalEntityDrugBankID D, externalEntityRelationParticipant R1, externalEntityRelationParticipant R2, externalEntityDrugBank_targetID T, {} U2, {} U3, externalEntityGeneID G
WHERE D.externalEntityID = R1.externalEntityID AND R1.externalEntityRelationID = R2.externalEntityRelationID AND R2.externalEntityID = T.externalEntityID AND T.targetType = "therapeutic"
AND R2.externalEntityID = U2.externalEntityID AND U2.userEntityID = U3.userEntityID AND U3.externalEntityID = G.externalEntityID AND D.value = %s
'''.format(up_table, up_table))
cursor.execute(query1)
drugbank2targets = {}
drugbank_ids = set()
# Search for all the DrugbankIDs
for items in cursor:
for drugbankid in items:
drugbank_ids.add(drugbankid)
# Search for the geneIDs interacting with the drugs
for drugbankid in drugbank_ids:
geneids = set()
cursor.execute(query2, (drugbankid,))
for items in cursor:
for geneid in items:
geneids.add(geneid)
if len(geneids) > 0:
for geneid in geneids:
if geneid in all_nodes:
drugbank2targets.setdefault(drugbankid, set())
drugbank2targets[drugbankid].add(geneid)
cursor.close()
print(drugbank2targets)
pickle.dump(drugbank2targets, open(output_pickle_file, 'wb'))
return drugbank2targets
def obtain_dcdb_to_targets(biana_cnx, unification_protocol, sif_file, output_pickle_file):
"""
Obtains a file containing the targets of every DCDB drug
"""
# Get all the nodes in the network
all_nodes = set()
with open(sif_file, 'r') as sif_file_fd:
for line in sif_file_fd:
node1, score, node2 = line.strip().split('\t')
all_nodes.add(int(node1))
all_nodes.add(int(node2))
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
query1 = (''' SELECT value FROM externalEntityDCDB_drugID ''')
query2 = (''' SELECT G.value FROM externalEntityDCDB_drugID D, {} U1, {} U2, externalEntity E2, externalEntityRelationParticipant R2, externalEntityRelationParticipant R3, externalEntityDrugBank_targetID T, {} U3, {} U4, externalEntityGeneID G
WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = E2.externalEntityID AND E2.type = 'drug'
AND E2.externalEntityID = R2.externalEntityID AND R2.externalEntityRelationID = R3.externalEntityRelationID AND R3.externalEntityID = T.externalEntityID AND T.targetType = "therapeutic"
AND R3.externalEntityID = U3.externalEntityID AND U3.userEntityID = U4.userEntityID AND U4.externalEntityID = G.externalEntityID AND D.value = %s
'''.format(up_table, up_table, up_table, up_table))
cursor.execute(query1)
dcdb2targets = {}
dcdb_ids = set()
# Search for all the DCDB drug IDs
for items in cursor:
for dcdbid in items:
dcdb_ids.add(dcdbid)
# Search for the geneIDs interacting with the drugs
for dcdbid in dcdb_ids:
geneids = set()
cursor.execute(query2, (dcdbid,))
for items in cursor:
for geneid in items:
geneids.add(geneid)
if len(geneids) > 0:
for geneid in geneids:
if geneid in all_nodes:
dcdb2targets.setdefault(dcdbid, set())
dcdb2targets[dcdbid].add(geneid)
cursor.close()
print(dcdb2targets)
pickle.dump(dcdb2targets, open(output_pickle_file, 'wb'))
return dcdb2targets
def obtain_target_to_pfam(biana_cnx, unification_protocol, all_targets, output_pickle_file):
"""
Obtains a file containing the PFAMs of every target
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT P.value FROM externalEntityGeneID G, {} U1, {} U2, externalEntityPFAM P
WHERE G.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = P.externalEntityID AND G.value = %s
'''.format(up_table, up_table))
geneid2pfam = {}
# Search the PFAMS of the targets
for target in all_targets:
pfams = set()
cursor.execute(query, (target,))
for items in cursor:
for pfam in items:
pfams.add(pfam)
if len(pfams) > 0:
geneid2pfam[target] = pfams
cursor.close()
print(geneid2pfam)
pickle.dump(geneid2pfam, open(output_pickle_file, 'wb'))
return geneid2pfam
def obtain_drug_to_smiles(biana_cnx, unification_protocol, all_drugs, type_drug_name, output_pickle_file):
"""
Obtains a file containing the SMILES of every drug.
The type of drug name must be indicated (drugbank, dcdb, name)
"""
type_name_to_table = {
'name' : 'externalEntityName',
'drugbank' : 'externalEntityDrugBankID',
'dcdb' : 'externalEntityDCDB_drugID',
}
type_drug_table = type_name_to_table[type_drug_name] # Obtain the table containing the type of name introduced
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
# Obtain the SMILES for the drugs
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT S.value FROM {} D, {} U1, {} U2, externalEntitySMILES S
WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = S.externalEntityID AND D.value = %s
'''.format(type_drug_table, up_table, up_table))
drug2smiles = {}
for drug in all_drugs:
smiles = set()
cursor.execute(query, (drug,))
for items in cursor:
for result in items:
smiles.add(result)
if len(smiles) > 0:
drug2smiles[drug] = smiles
cursor.close()
# Obtain the PubChem IDs for the drugs
cursor = biana_cnx.cursor()
query = (''' SELECT D.value, P.value FROM {} D, {} U1, {} U2, externalEntityPubChemCompound P
WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = P.externalEntityID
'''.format(type_drug_table, up_table, up_table))
cursor.execute(query)
drug2pubchems = {}
for items in cursor:
drug2pubchems.setdefault(items[0], set())
drug2pubchems[items[0]].add(items[1])
cursor.close()
for drug in all_drugs:
if drug not in drug2smiles:
if drug in drug2pubchems:
for pubchem in drug2pubchems[drug]:
command = 'wget https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/{}/property/CanonicalSMILES/XML -O out.xml'.format(pubchem)
os.system(command)
smiles_regex = re.compile("<CanonicalSMILES>(.+)</CanonicalSMILES>")
with open('out.xml', 'r') as f:
for line in f:
m = smiles_regex.search(line)
if m:
smiles = m.group(1)
print(smiles)
drug2smiles.setdefault(drug, set())
drug2smiles[drug].add(smiles)
os.system('rm out.xml')
print(drug2smiles)
pickle.dump(drug2smiles, open(output_pickle_file, 'wb'))
return drug2smiles
def obtain_drug_to_atcs(biana_cnx, unification_protocol, all_drugs, type_drug_name, output_pickle_file):
"""
Obtains a file containing the ATCs of every drug.
The type of drug name must be indicated (drugbank, dcdb, name)
"""
type_name_to_table = {
'name' : 'externalEntityName',
'drugbank' : 'externalEntityDrugBankID',
'dcdb' : 'externalEntityDCDB_drugID',
}
type_drug_table = type_name_to_table[type_drug_name] # Obtain the table containing the type of name introduced
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
# Obtain the ATCs for the drugs
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT A.value FROM {} D, {} U1, {} U2, externalEntityATC A
WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = A.externalEntityID AND D.value = %s
'''.format(type_drug_table, up_table, up_table))
drug2atcs = {}
for drug in all_drugs:
ATCs = set()
cursor.execute(query, (drug,))
for items in cursor:
for result in items:
ATCs.add(result)
if len(ATCs) > 0:
drug2atcs[drug] = ATCs
print(drug2atcs)
pickle.dump(drug2atcs, open(output_pickle_file, 'wb'))
cursor.close()
def obtain_drug_to_side_effects(biana_cnx, unification_protocol, all_drugs, type_drug_name, output_pickle_file):
"""
Obtains a file containing the side effects associated to every drug.
The type of drug name must be indicated (drugbank, dcdb, name).
"""
type_name_to_table = {
'name' : 'externalEntityName',
'drugbank' : 'externalEntityDrugBankID',
'dcdb' : 'externalEntityDCDB_drugID',
}
type_drug_table = type_name_to_table[type_drug_name] # Obtain the table containing the type of name introduced
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
# Obtain the side effects for the drugs
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT UD.value FROM {} D, {} U1, {} U2, externalEntityPubChemCompound PC, externalEntityRelationParticipant P1, externalEntityRelationParticipant P2, externalEntityRelation R, externalEntityUMLS_diseaseID UD
WHERE D.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = PC.externalEntityID AND PC.externalEntityID = P1.externalEntityID AND D.value = %s
AND P1.externalEntityRelationID = R.externalEntityRelationID AND P2.externalEntityRelationID = R.externalEntityRelationID AND P1.externalEntityID != P2.externalEntityID AND R.type = "drug_phenotype_association"
AND P2.externalEntityID = UD.externalEntityID
'''.format(type_drug_table, up_table, up_table))
drug2side_effects = {}
for drug in all_drugs:
side_effects = set()
cursor.execute(query, (drug,))
for items in cursor:
for result in items:
side_effects.add(result)
if len(side_effects) > 0:
drug2side_effects[drug] = side_effects
print(drug2side_effects)
pickle.dump(drug2side_effects, open(output_pickle_file, 'wb'))
cursor.close()
def obtain_drugbank_to_names(biana_cnx, drugs, output_pickle_file):
"""
Obtains the names associated to DrugbankIDs.
"""
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT N.value
FROM externalEntityDrugBankID D, externalEntityName N
WHERE D.externalEntityID = N.externalEntityID AND (N.type = "unique" OR N.type = "brand") AND D.value = %s
''')
drugbank_to_names = {}
for drug in drugs:
cursor.execute(query, (drug,))
names = set()
for items in cursor:
for name in items:
names.add(name)
if len(names)>0:
for name in names:
drugbank_to_names.setdefault(drug, set())
drugbank_to_names[drug].add(name.lower())
cursor.close()
print(drugbank_to_names)
pickle.dump(drugbank_to_names, open(output_pickle_file, 'wb'))
return drugbank_to_names
def obtain_drug_interaction_to_drugs(biana_cnx, output_pickle_file):
"""
Obtain a dictionary drug_interaction : [drugs]
"""
cursor = biana_cnx.cursor()
query = ('''SELECT I.value, D.value from externalEntityRelation R, externalEntityRelationParticipant P, externalEntityDCDB_drugID D, externalEntityDCDB_druginteractionID I
WHERE R.externalEntityRelationID = P.externalEntityRelationID AND R.type = "interaction" AND P.externalEntityID = D.externalEntityID AND R.externalEntityRelationID = I.externalEntityID
''')
cursor.execute(query)
drug_int_2_drugs = {}
for items in cursor:
drug_int = items[0]
drug = items[1]
drug_int_2_drugs.setdefault(drug_int, [])
drug_int_2_drugs[drug_int].append(drug)
cursor.close()
print(drug_int_2_drugs)
pickle.dump(drug_int_2_drugs, open(output_pickle_file, 'wb'))
return drug_int_2_drugs
def obtain_drug_interaction_to_info(biana_cnx, output_pickle_file):
"""
Obtain a dictionary drug_interaction : { 'type' : ... , 'classification' : ... }
"""
cursor = biana_cnx.cursor()
query = (''' SELECT value, interactionType, classification from externalEntityDCDB_druginteractionID ''')
cursor.execute(query)
drug_int_2_info = {}
for items in cursor:
drug_int = items[0]
type_int = items[1]
class_int = items[2]
drug_int_2_info.setdefault(drug_int, {})
drug_int_2_info[drug_int]['type'] = type_int
drug_int_2_info[drug_int]['classification'] = class_int
cursor.close()
print(drug_int_2_info)
pickle.dump(drug_int_2_info, open(output_pickle_file, 'wb'))
return drug_int_2_info
def obtain_dcdb_to_drugbank(biana_cnx, unification_protocol, output_pickle_file):
"""
Obtain a dictionary {dcdb : drugbank}
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
query = ('''SELECT DC.value, DB.value FROM externalEntityDCDB_drugID DC, {} U1, {} U2, externalEntityDrugBankID DB
WHERE DC.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = DB.externalEntityID
'''.format(up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query)
dcdb_to_drugbank = {}
for items in cursor:
dcdb = items[0]
drugbank = items[1]
dcdb_to_drugbank.setdefault(dcdb, set())
dcdb_to_drugbank[dcdb].add(drugbank)
cursor.close()
print(dcdb_to_drugbank)
pickle.dump(dcdb_to_drugbank, open(output_pickle_file, 'wb'))
return dcdb_to_drugbank
def obtain_pubchem_to_drugbank(biana_cnx, unification_protocol, output_pickle_file):
"""
Obtain a dictionary {pubchem : drugbank}
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
query = ('''SELECT PC.value, DB.value FROM externalEntityPubChemCompound PC, {} U1, {} U2, externalEntityDrugBankID DB
WHERE PC.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = DB.externalEntityID
'''.format(up_table, up_table))
cursor = biana_cnx.cursor()
cursor.execute(query)
pubchem_to_drugbank = {}
for items in cursor:
pubchem = items[0]
drugbank = items[1].upper()
pubchem_to_drugbank.setdefault(pubchem, set())
pubchem_to_drugbank[pubchem].add(drugbank)
cursor.close()
print(pubchem_to_drugbank)
pickle.dump(pubchem_to_drugbank, open(output_pickle_file, 'wb'))
return pubchem_to_drugbank
def obtain_target_to_bio_processes(biana_cnx, unification_protocol, all_targets, output_pickle_file):
"""
Obtains a dictionary containing the targets (in Entrez GeneID) and their corresponding biological processes.
Records the result in a pickle file.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
query1 = (''' SELECT GO.value FROM externalEntityGeneID G, {} U1, {} U2, externalEntityGO GO
WHERE G.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = GO.externalEntityID AND G.value = %s
'''.format(up_table, up_table))
query2 = (''' SELECT GO.value FROM externalEntityGO GO, externalEntityGO_type T
WHERE GO.externalEntityID = T.externalEntityID AND T.value = 'biological_process' AND GO.value = %s
''')
target_to_bio_processes = {}
go_to_bio = {}
num_tar = len(all_targets)
for target in all_targets:
print(target)
GOs = set()
cursor.execute(query1, (target,))
for items in cursor:
for go in items:
GOs.add(go)
bio_proc = set()
for go in GOs:
if go in go_to_bio:
for bp in go_to_bio[go]:
bio_proc.add(bp)
else:
cursor.execute(query2, (go,))
for items in cursor:
for bp in items:
bio_proc.add(bp)
go_to_bio.setdefault(go, set())
go_to_bio[go].add(bp)
if len(bio_proc) > 0:
target_to_bio_processes[target] = bio_proc
print(bio_proc)
num_tar -= 1
print('{} left'.format(num_tar))
cursor.close()
print(target_to_bio_processes)
pickle.dump(target_to_bio_processes, open(output_pickle_file, 'wb'))
return target_to_bio_processes
def obtain_target_to_pathways(biana_cnx, unification_protocol, all_targets, output_pickle_file):
"""
Obtains a dictionary containing the targets (in Entrez GeneID) and their corresponding pathways.
Records the result in a pickle file.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT Re.value FROM externalEntityGeneID G, {} U1, {} U2, externalEntityRelationParticipant P1, externalEntityRelationParticipant P2, externalEntityReactome Re
WHERE G.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID
AND U2.externalEntityID = P1.externalEntityID AND P1.externalEntityRelationID = P2.externalEntityRelationID AND P2.externalEntityID = Re.externalEntityID AND G.value = %s
'''.format(up_table, up_table))
target_to_pathways = {}
num_tar = len(all_targets)
for target in all_targets:
print(target)
pathways = set()
cursor.execute(query, (target,))
for items in cursor:
for path in items:
pathways.add(path)
if len(pathways) > 0:
print(pathways)
target_to_pathways[target] = pathways
num_tar -= 1
print('{} left'.format(num_tar))
cursor.close()
print(target_to_pathways)
pickle.dump(target_to_pathways, open(output_pickle_file, 'wb'))
return target_to_pathways
def find_drugbank_id_from_name(biana_cnx, unification_protocol, drug_name):
"""
Obtains the DrugBank ID of the drug from its name, if it is in the database.
If it is not in the Database, it returns None.
"""
up_table = return_unification_protocol_table(biana_cnx, unification_protocol)
cursor = biana_cnx.cursor() # Start cursor to MySQL
query = (''' SELECT D.value FROM externalEntityName N, {} U1, {} U2, externalEntityDrugBankID D
WHERE N.externalEntityID = U1.externalEntityID AND U1.userEntityID = U2.userEntityID AND U2.externalEntityID = D.externalEntityID AND N.value = %s
'''.format(up_table, up_table))
cursor.execute(query, (drug_name,))
drugbank_ids = set()
for items in cursor:
for db in items:
drugbank_ids.add(db)
cursor.close()
if len(drugbank_ids) == 0:
return None
else:
return drugbank_ids
|
mit
|
gpotter2/scapy
|
setup.py
|
2
|
3463
|
#! /usr/bin/env python
"""
Distutils setup file for Scapy.
"""
try:
from setuptools import setup, find_packages
except ImportError:
raise ImportError("setuptools is required to install scapy !")
import io
import os
def get_long_description():
"""Extract description from README.md, for PyPI's usage"""
def process_ignore_tags(buffer):
return "\n".join(
x for x in buffer.split("\n") if "<!-- ignore_ppi -->" not in x
)
try:
fpath = os.path.join(os.path.dirname(__file__), "README.md")
with io.open(fpath, encoding="utf-8") as f:
readme = f.read()
desc = readme.partition("<!-- start_ppi_description -->")[2]
desc = desc.partition("<!-- stop_ppi_description -->")[0]
return process_ignore_tags(desc.strip())
except IOError:
return None
# https://packaging.python.org/guides/distributing-packages-using-setuptools/
setup(
name='scapy',
version=__import__('scapy').VERSION,
packages=find_packages(),
data_files=[('share/man/man1', ["doc/scapy.1"])],
package_data={
'scapy': ['VERSION'],
},
# Build starting scripts automatically
entry_points={
'console_scripts': [
'scapy = scapy.main:interact',
'UTscapy = scapy.tools.UTscapy:main'
]
},
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
# pip > 9 handles all the versioning
extras_require={
'basic': ["ipython"],
'complete': [
'ipython',
'pyx',
'cryptography>=2.0',
'matplotlib'
],
'docs': [
'sphinx>=3.0.0',
'sphinx_rtd_theme>=0.4.3',
'tox>=3.0.0'
]
},
# We use __file__ in scapy/__init__.py, therefore Scapy isn't zip safe
zip_safe=False,
# Metadata
author='Philippe BIONDI',
author_email='phil(at)secdev.org',
maintainer='Pierre LALET, Gabriel POTTER, Guillaume VALADON',
description='Scapy: interactive packet manipulation tool',
long_description=get_long_description(),
long_description_content_type='text/markdown',
license='GPLv2',
url='https://scapy.net',
project_urls={
'Documentation': 'https://scapy.readthedocs.io',
'Source Code': 'https://github.com/secdev/scapy/',
},
download_url='https://github.com/secdev/scapy/tarball/master',
keywords=["network"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Security",
"Topic :: System :: Networking",
"Topic :: System :: Networking :: Monitoring",
]
)
|
gpl-2.0
|
fzenke/morla
|
scripts/compute_gramian.py
|
1
|
5542
|
#!/usr/bin/python3
from __future__ import print_function
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import numpy as np
import scipy
from scipy import sparse
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
import gzip
import pickle
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from time import time
import django
from django.utils import timezone
from django.db.models import Max
django.setup()
from papers.models import Article, Feature, Profile, Recommendation, Similarity
import papers.utils as utils
from compute_feature_vectors import *
feature_weights = { 'title' : 1.0, 'authors' : 0.5, 'abstract' : 1.0, 'keywords' : 1.0 }
def compute_gramian(start_block=0, maxblock=None, cutoff=0.33, block_size=1000):
""" Computes the gramian matrix from feature vectores stored in the databse
The function computes the sparse Gramian from feature vectors stored in the database.
To avoid loading the entire Gramian in memory, it is build from block matrices which are
computed individually, sparsified (with a cutoff value) and then combined to one large
sparse matrix.
params:
block_size The block size for the sub matrices
start_block Assumes that the Gramian until start_block is already computed and only computes the rest
maxblock Limits the block number to a maximum (for testing). When set to None the entire matrix is computed
cutoff The cutoff value for the scalar product.
returns:
Sparse matrix Gramian in coo format
"""
articles = Article.objects.all()
nb_articles = articles.count()
bs = block_size
if maxblock is None:
lb = nb_articles//bs+1
else:
lb = maxblock
# Building metric M
offset = 0
row = []
data = []
for key in utils.feature_dims.keys():
print(key)
n = utils.feature_dims[key]
row.append(np.arange(offset,offset+n))
data.append(feature_weights[key]*np.ones(n))
offset += n
row = np.concatenate(row)
data = np.concatenate(data)
col = row # we want a diagonal matrix
M = scipy.sparse.coo_matrix((data, (row, col)))
M = scipy.sparse.csc_matrix(M) # convert to csr format
print("Computing Gramian for %i articles in blocks of %i..."%(nb_articles, block_size))
blocks = [ [None for i in range(lb)] for j in range(lb) ]
for i in tqdm(range(lb)):
lower_row = int(i*bs)
upper_row = int((i+1)*bs)
if i==lb-1: upper_row=nb_articles
for j in range(i,lb):
if j<start_block: continue
lower_col = int(j*bs)
upper_col = int((j+1)*bs)
if j==lb-1: upper_col=nb_articles
tmp = utils.get_features_from_db(Article.objects.all()[lower_row:upper_row])
A = scipy.sparse.csc_matrix(tmp)
tmp = utils.get_features_from_db(Article.objects.all()[lower_col:upper_col])
# B = scipy.sparse.csr_matrix(tmp)
B = scipy.sparse.csc_matrix(tmp.transpose())
C = A.dot(M.dot(B))
if i==j:
C = sparse.tril(C,-1)
C = C.multiply(C >= cutoff )
blocks[i][j] = C
print("Saving to db...")
data = sparse.bmat(blocks,'coo')
# Remove any stale similarities beyond the resume point before re-inserting
Similarity.objects.filter(a__gt=start_block*block_size).delete()
Similarity.objects.filter(b__gt=start_block*block_size).delete()
print("sparseness=%f"%(1.0*data.nnz/np.prod(data.shape)))
add_similarities_to_db(articles[:nb_articles], data)
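# Minimal standalone sketch of the block-assembly pattern used in compute_gramian
# (illustrative only; the shapes, density and cutoff below are arbitrary assumptions):
# each block of pairwise similarities is computed, thresholded, and the sparse
# blocks are then stitched together with scipy.sparse.bmat.
#
#   from scipy import sparse
#   X = sparse.random(2000, 50, density=0.05, format='csr')  # 2000 feature vectors
#   cutoff, bs = 0.33, 1000
#   nb = X.shape[0] // bs
#   blocks = [[None] * nb for _ in range(nb)]
#   for i in range(nb):
#       A = X[i * bs:(i + 1) * bs]
#       for j in range(i, nb):
#           B = X[j * bs:(j + 1) * bs]
#           C = A.dot(B.T)                       # one block of the Gramian
#           if i == j:
#               C = sparse.tril(C, -1)           # keep strictly lower triangle
#           blocks[i][j] = C.multiply(C >= cutoff)
#   G = sparse.bmat(blocks, 'coo')               # sparse Gramian, upper-block form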
def add_similarities_to_db(articles, C, commit_count=5000):
off = C.shape[0]-C.shape[1]
transaction = []
for i in tqdm(range(len(articles))):
a = articles[i]
# Add all elements in this row of the similarity matrix
row = C.getrow(i)
_,idx,vals = sparse.find(row)
transaction.extend( [ Similarity( a=a, b=articles[int(k+off)], value=v) for k,v in zip(idx, vals) ] )
if len(transaction)>commit_count:
Similarity.objects.bulk_create( transaction )
transaction = []
Similarity.objects.bulk_create( transaction )
def compute_features():
articles, data = get_articles_without_features()
if len(data):
print("Computing features for %i articles..."%(len(data)))
features = get_features(data)
print("Adding %i feature vectors to db... "%(features.shape[0]))
add_features_to_db(articles, features)
def update_gramian(block_size=1000):
# Find first article without similarities
start_block = 0
if Similarity.objects.all().count():
qres = Similarity.objects.all().aggregate(Max('a'))
start_block = qres['a__max']//block_size
print("Resuming at article_id=%i"%qres['a__max'])
# Compute the remaining blocks and add them to DB
compute_gramian(start_block=start_block, block_size=block_size)
def rebuild_full_gramian():
Similarity.objects.all().delete()
compute_gramian()
if __name__ == "__main__":
print("Checking for missing feature vectors...")
compute_features()
print("Updating full Gramian...")
# Similarity.objects.all().delete()
rebuild_full_gramian()
# update_gramian()
|
mit
|
blaisb/cfdemUtilities
|
mixing/pca/pcaGenerator.py
|
2
|
4377
|
#--------------------------------------------------------------------------------------------------
#
# Description : Sample program to generate random trajectories and to analyse them using PCA
#
# Usage : python pcaMixingRadial
#
#
# Author : Bruno Blais
#
#--------------------------------------------------------------------------------------------------
# Imports
import os
import sys
import numpy
import time
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
plot = False
write = True
writeFreq=20
analysis = False
#*********************************************
# User input parameters
# Available styles : gauss, rotation, taylor
#*********************************************
vStyle="lamb"
vR=0.0
vTheta=2*3.14159* 0.005
timeEnd=320
timeStart=100
if (vStyle=="rotation" or vStyle=="gauss"):
vTheta=2*3.14159* 0.005
elif (vStyle=="taylor"):
vTheta=2*3.14159* 0.05
elif (vStyle=="lamb"):
vTheta=2*3.14159* 0.20
R1=0.50
# Calculation of reduced deviation
def reDev(x):
y = 1./numpy.std(x,ddof=1) * (x-numpy.mean(x))
return y
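# Hedged note (illustrative): reDev standardizes a sample to zero mean and unit
# sample standard deviation (ddof=1), e.g.
#   reDev(numpy.array([1., 2., 3.]))  # -> array([-1., 0., 1.])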
# Write LAMMPS format output file
def writeFile(i,x,y,z,ptype,npart):
outname=sys.argv[1]+"_00"+str(i).zfill(7)+".dump"
print "Writing the file : ", outname
outfile=open(outname,'w')
outfile.write("ITEM: TIMESTEP\n")
outfile.write("%i\n" %i);
outfile.write("ITEM: NUMBER OF ATOMS\n")
outfile.write("%i\n" %npart);
outfile.write("ITEM: BOX BOUNDS ff ff ff\n-0.15 0.15\n-0.15 0.15\n-5e-06 0.300005\n")
outfile.write("ITEM: ATOMS id type type x y z vx vy vz fx fy fz radius\n")
x2=numpy.reshape(x,numpy.size(x))
y2=numpy.reshape(y,numpy.size(x))
z2=numpy.reshape(z,numpy.size(x))
ptype2=numpy.reshape(ptype,numpy.size(x))
for i in range(0,numpy.size(x)):
if (x2[i]**2+y2[i]**2<=1):
outfile.write("%i 1 %i %f %f %f 1 1 1 1 1 1 1\n" %(i,ptype2[i],x2[i],y2[i],z2[i]))
nx, ny = (20, 20)
x = numpy.linspace(-1, 1, nx)
y = numpy.linspace(-1, 1, ny)
xv=numpy.zeros([nx,ny])
yv=numpy.zeros([nx,ny])
ptype=numpy.ones([nx,ny])
numberWithinCircle=0
#Clean array to generate circle
for i,xx in enumerate(x):
for j,yy in enumerate(y):
if ((xx*xx+yy*yy)<=1):
numberWithinCircle+=1
xv[i,j]=xx
yv[i,j]=yy
if (((xx*xx+yy*yy)<=R1) and xx>0):
ptype[i,j]=1
if (((xx*xx+yy*yy)<=R1) and xx<0):
ptype[i,j]=2
if (((xx*xx+yy*yy)>=R1) and xx>0):
ptype[i,j]=3
if (((xx*xx+yy*yy)>=R1) and xx<0):
ptype[i,j]=4
if (xx==0):
ptype[i,j]=1
rv=(xv*xv+yv*yv)**(1./2.)+1e-20
tv=numpy.arctan2(yv,xv)
#r = numpy.linspace(0.01,1,nx)
#tv = numpy.linspace(0.,2*3.14159,ny)
# Initialize figure for trajectories
#-------------------------------------
rvl=rv
tvl=tv
C=numpy.zeros([2,2])
lamL=[]
tt=[]
lamt=[]
laml1=[]
laml2=[]
laml3=[]
for t in range(0,timeEnd):
if (t>timeStart and t<timeEnd-timeStart):
if (vStyle=="gauss"):
ur = 0
ut = vTheta + 10*vTheta*(numpy.random.random_sample([ny,nx])-0.5)
else:
ur = 0
ut = vTheta
rvl = rvl + ur
if (vStyle=="taylor"):
tvl = tvl + ut/rvl*0.1
elif (vStyle=="lamb"):
tvl = tvl + ut / (2.*numpy.pi*rvl) * (1- numpy.exp(-rvl**2))
else:
tvl = tvl + ut
zvl = numpy.random.random_sample([ny,nx])
xvl = rvl * numpy.cos(tvl)
yvl = rvl * numpy.sin(tvl)
if (t%5==0 and plot):
ax.scatter(xvl[::nx+1],yvl[::nx+1],zvl[::nx+1],'o')
if (analysis):
#Construct correlation matrix
C[0,0]=numpy.mean(reDev(rvl)*reDev(rv))
C[1,0]=numpy.mean(reDev(tvl)*reDev(rv))
C[0,1]=numpy.mean(reDev(rvl)*reDev(tv))
C[1,1]=numpy.mean(reDev(tvl)*reDev(tv))
M = C*C.transpose()
lam,R=numpy.linalg.eig(M)
if (t==0):
lamInit=lam
RInit=R
lam0=numpy.max(lam)
lamt.append([numpy.max(lam)/lam0])
tt.append([t])
#lAx.scatter(t,numpy.sqrt(numpy.max(lam)/lam0))
lamL.extend([lam])
laml1.extend([lam[0]])
laml2.append([lam[1]])
if (write and t%writeFreq ==0 ):
writeFile(t,xvl,yvl,zvl,ptype,numberWithinCircle)
plt.plot(tt,laml1,tt,laml2)
plt.show()
|
lgpl-3.0
|
manashmndl/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
348
|
6232
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual maximum likelihood estimate of the covariance is very
sensitive to the presence of outliers in the data set, and therefore so are
the corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is resistant to
"erroneous" observations in the data set and that the associated Mahalanobis
distances accurately reflect the true organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking and clustering.
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
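# Hedged illustration (not part of the original example): the mahalanobis() method
# used below returns squared distances d^2 = (x - mu)' Sigma^-1 (x - mu); an
# equivalent manual computation for the empirical fit would be, roughly,
#
#   diff = X - emp_cov.location_
#   d2 = np.einsum('ij,jk,ik->i', diff, np.linalg.inv(emp_cov.covariance_), diff)
#   assert np.allclose(d2, emp_cov.mahalanobis(X))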
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
bsd-3-clause
|
mbaumBielefeld/popkin
|
popkin/visualization/placefieldvisualizer.py
|
1
|
3753
|
import matplotlib.pyplot as plt
import numpy as np
import math
class PlaceFieldVisualizer:
def __init__(self, fields):
self.fields=fields
self.n_fields=len(fields)
self.callbacks={}
self.callbacks_click={}
self.fignum=56
fig=plt.figure(self.fignum)
cid = fig.canvas.mpl_connect('button_press_event', self.onClick)
cid_k=fig.canvas.mpl_connect('key_press_event', self.onKeyPress)
#layout_rows=2
#layout_cols=max(len(self.arm.fields_sm),len(self.arm.fields_hidden)+1)
#layout_cols=max(layout_cols,4)
layout_rows=(self.n_fields+2)/3
layout_cols=min(self.n_fields,3)
panel_layout=layout_rows*100+10*layout_cols
self.subplot_activation={}
for i,f in enumerate(fields):
self.subplot_activation[f]=plt.subplot(panel_layout+i+1)
self.subplot_activation[f].set_title(f.name)
f.plot_activation(self.subplot_activation[f])
'''
self.subplot_arm_schematic=plt.subplot(panel_layout+len(self.arm.fields_sm)+1)
self.subplot_arm_schematic.set_xlim(-self.arm.field_ee.radius_max,self.arm.field_ee.radius_max)
self.subplot_arm_schematic.set_ylim(-self.arm.field_ee.radius_max,self.arm.field_ee.radius_max)
self.subplot_arm_target_line=plt.Line2D([0.0,0.0],[0.0,0.0])
self.subplot_arm_schematic.add_line(self.subplot_arm_target_line)
'''
'''
self.subplot_poses={}
for i,hidden in enumerate(self.arm.fields_hidden):
field_sum,field_summand0,field_summand1=hidden.fields_sm
radius_max=field_sum.radius_max
plot_id=len(self.arm.fields_sm)+2+i
self.subplot_poses[hidden]=plt.subplot(layout_rows,layout_cols,plot_id)
self.subplot_poses[hidden].set_xlim(-radius_max,radius_max)
self.subplot_poses[hidden].set_ylim(-radius_max,radius_max)
self.subplot_poses[hidden].set_title(hidden.name)
'''
def start(self):
self.refreshPlot()
plt.show()
def onClick(self, event):
#print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# event.button, event.x, event.y, event.xdata, event.ydata)
position=np.array([event.xdata,event.ydata])
#click_position_polar=np.array([(click_position[0]**2.0+click_position[1]**2.0)**0.5,math.atan2(click_position[1],click_position[0])])
#left mouse-button
if event.button in self.callbacks_click.keys():
for f in self.fields:
if f in self.subplot_activation:
if event.inaxes == self.subplot_activation[f]:
position[0]=position[0]/f.estimation_grid_resolution*(f.domain[0][1]-f.domain[0][0])+f.domain[0][0]
position[1]=-(position[1]/f.estimation_grid_resolution*(f.domain[1][1]-f.domain[1][0])+f.domain[1][0])
print "EVIDENCE: ", f, position
self.callbacks_click[event.button](f,position)
self.refreshPlot()
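# Hedged note (illustrative): in onClick the click position is rescaled from
# grid-pixel coordinates into the field's domain, roughly
#   x_domain = x_pixel / estimation_grid_resolution * (x_max - x_min) + x_min
# (and the y coordinate is additionally negated).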
def onKeyPress(self, event):
if event.key in self.callbacks.keys():
self.callbacks[event.key]()
self.refreshPlot()
def addCallbackClick(self, button, callback):
self.callbacks_click[button]=callback
def addCallback(self, key, callback):
self.callbacks[key]=callback
def refreshPlot(self):
plt.figure(self.fignum)
for field, subplot in self.subplot_activation.items():
subplot.cla()
subplot.set_title(field.name)
field.plot_activation(subplot)
plt.draw()
|
gpl-2.0
|
Xinglab/rmats2sashimiplot
|
src/MISO/misopy/sashimi_plot/plot_utils/plot_gene.py
|
1
|
33223
|
##
## Draw gene structure from a GFF file
##
import os, sys, operator, subprocess
import math
import pysam
import numpy as np
import glob
from pylab import *
from matplotlib.patches import PathPatch
from matplotlib.path import Path
import matplotlib.cm as cm
import misopy
import misopy.gff_utils as gff_utils
import misopy.sam_utils as sam_utils
from misopy.sashimi_plot.Sashimi import Sashimi
import misopy.sashimi_plot.plot_utils.plotting as plotting
import misopy.sashimi_plot.plot_utils.plot_settings as plot_settings
from misopy.sashimi_plot.plot_utils.plotting import show_spines
from misopy.parse_gene import parseGene
def plot_density_single(settings, sample_label,
tx_start, tx_end, gene_obj, mRNAs, strand,
graphcoords, graphToGene, bam_group, axvar, chrom,
paired_end=False,
intron_scale=30,
exon_scale=4,
color='r',
ymax=None,
logged=False,
coverage=1,
number_junctions=True,
resolution=.5,
showXaxis=True,
showYaxis=True,
nyticks=3,
nxticks=4,
show_ylabel=True,
show_xlabel=True,
font_size=6,
junction_log_base=10,
plot_title=None,
plot_label=None):
"""
Plot MISO events using BAM files and posterior distribution files.
TODO: If comparison files are available, plot Bayes factors too.
"""
wiggle = zeros((tx_end - tx_start + 1), dtype='f')
jxns = {}
bamfile_num = len(bam_group)
all_c = []
for i in range(bamfile_num):
file_name = os.path.expanduser(bam_group[i])
bamfile = pysam.Samfile(file_name, 'rb')
try:
subset_reads = bamfile.fetch(reference=chrom, start=tx_start,end=tx_end)
except ValueError as e:
print "Error retrieving files from %s: %s" %(chrom, str(e))
print "Are you sure %s appears in your BAM file?" %(chrom)
print "Aborting plot..."
return axvar
# wiggle, jxns = readsToWiggle_pysam(subset_reads, tx_start, tx_end)
# p1 = subprocess.Popen(["samtools", "view", "-F", "0x4", file_name,], stdout=subprocess.PIPE)
# p2 = subprocess.Popen(["cut", "-f", "1",], stdin=p1.stdout, stdout=subprocess.PIPE)
# p3 = subprocess.Popen(["sort",], stdin=p2.stdout, stdout=subprocess.PIPE)
# p4 = subprocess.Popen(["uniq",], stdin=p3.stdout, stdout=subprocess.PIPE)
# p5 = subprocess.Popen(["wc", "-l",], stdin=p4.stdout, stdout=subprocess.PIPE)
# p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
# p2.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
# p3.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
# p4.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
# output,err = p5.communicate()
p1 = subprocess.Popen(["samtools", "idxstats", file_name,], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk", "{s+=$3} END {print s}",], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output,err = p2.communicate()
if err:
print err
print 'Setting the number of mapped read to 1.'
cover = 1
else:
cover = int(output) / 1e6
all_c.append(cover)
readsToWiggle_pysam(subset_reads, tx_start, tx_end, wiggle, jxns)
coverage = np.mean(all_c)
wiggle = 1e3 * wiggle / coverage / bamfile_num
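# Hedged note (illustrative): this rescales the per-base signal to the RPKM-like
# units used for the y-axis label -- the wiggle is divided by millions of mapped
# reads (coverage) and by the number of BAM files in the group, with 1e3 as the
# per-kilobase factor.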
# junction_width_scale = settings["junction_width_scale"]
for j_key in jxns.keys():
jxns[j_key] = int(round(1.0 * jxns[j_key] / bamfile_num, 0))
# gene_reads = sam_utils.fetch_bam_reads_in_gene(bamfile, gene_obj.chrom,\
# tx_start, tx_end, gene_obj)
# reads, num_raw_reads = sam_utils.sam_parse_reads(gene_reads,\
# paired_end=paired_end)
# wiggle, jxns = readsToWiggle(reads, tx_start, tx_end)
#wiggle = 1e3 * wiggle / coverage
if logged:
wiggle = log10(wiggle + 1)
maxheight = max(wiggle)
if ymax is None:
ymax = 1.1 * maxheight
else:
ymax = ymax
ymin = -.5 * ymax
# Reduce memory footprint by using incremented graphcoords.
compressed_x = []
compressed_wiggle = []
prevx = graphcoords[0]
tmpval = []
for i in range(len(graphcoords)):
tmpval.append(wiggle[i])
if abs(graphcoords[i] - prevx) > resolution:
compressed_wiggle.append(mean(tmpval))
compressed_x.append(prevx)
prevx = graphcoords[i]
tmpval = []
fill_between(compressed_x, compressed_wiggle,\
y2=0, color=color, lw=0)
sslists = []
for mRNA in mRNAs:
tmp = []
for s, e in mRNA:
tmp.extend([s, e])
sslists.append(tmp)
min_counts = settings["min_counts"] # if the jxn is smaller than it, then omit the text plotting
show_text_background = settings["text_background"]
maxy = 0
for jxn in jxns:
leftss, rightss = map(int, jxn.split(":"))
ss1, ss2 = [graphcoords[leftss - tx_start - 1],\
graphcoords[rightss - tx_start]]
mid = (ss1 + ss2) / 2
h = -3 * ymin / 4
numisoforms = 0
for i in range(len(mRNAs)):
if leftss in sslists[i] and \
rightss in sslists[i]:
numisoforms += 1
if numisoforms > 0:
if numisoforms % 2 == 0: # put on bottom
pts = [(ss1, 0), (ss1, -h), (ss2, -h), (ss2, 0)]
midpt = cubic_bezier(pts, .5)
else: # put on top
leftdens = wiggle[leftss - tx_start - 1]
rightdens = wiggle[rightss - tx_start]
pts = [(ss1, leftdens),
(ss1, leftdens + h),
(ss2, rightdens + h),
(ss2, rightdens)]
midpt = cubic_bezier(pts, .5)
if min_counts == 0 or jxns[jxn] >= min_counts:
if number_junctions:
if show_text_background:
txt = text(midpt[0], midpt[1], '%s'%(jxns[jxn]),
fontsize=font_size-2, ha='center', va='center', backgroundcolor='w')
else:
txt = text(midpt[0], midpt[1], '%s' % (jxns[jxn]),
fontsize=font_size-2, ha='center', va='center')
interval = axvar.get_ylim()[1]*0.05
y = interval + midpt[1]
maxy = max(maxy, y)
a = Path(pts, [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4])
p = PathPatch(a, ec=color, lw=log(jxns[jxn] + 1) /\
log(junction_log_base) * (jxns[jxn] + 1)**0.33 * 0.1, fc='none', clip_on=False)
axvar.add_patch(p)
# Format plot
# ylim(ymin, ymax)
# axvar.spines['left'].set_bounds(0, ymax)
axvar.spines['right'].set_color('none')
axvar.spines['top'].set_color('none')
if showXaxis:
axvar.xaxis.set_ticks_position('bottom')
xlabel('Genomic coordinate (%s), "%s" strand'%(gene_obj.chrom,
strand),
fontsize=font_size)
max_graphcoords = max(graphcoords) - 1
coords_fontsize = font_size - (font_size * 0.2)
xticks(linspace(0, max_graphcoords, nxticks),
[graphToGene[int(x)] for x in \
linspace(0, max_graphcoords, nxticks)],
fontsize=coords_fontsize)
else:
axvar.spines['bottom'].set_color('none')
xticks([])
# if showYaxis:
# axvar.yaxis.set_ticks_position('left')
# yticks(linspace(0, ymax, nyticks), ['%d'%(x) for x in \
# linspace(0, ymax, nyticks)],
# fontsize=font_size)
# else:
# axvar.spines['left'].set_color('none')
# yticks([])
xlim(0, max(graphcoords))
# Return modified axis
return axvar, maxy
def analyze_group_info(group_info, bam_files, original_labels):
"""
Analyzes the grouping file ('*.gf').
:return: group_files, sample_labels, sample_colors
"""
has_inc_level = True
inc_levels = []
label_prefixs = []
for label in original_labels:
# the original label can look like '.* IncLevel: 0.78'; extract the IncLevel value (0.78)
label_split = label.split(' ')
label_prefixs.append(label_split[0])
if 'IncLevel' not in label:
has_inc_level = False
continue
inc_levels.append(float(label_split[-1]))
gf_path = os.path.expanduser(group_info)
group_file = open(gf_path, 'r')
group_files = []
sample_labels = []
group_num = 0
for line in group_file:
try:
line = line.strip() # line = 'group 1: 1-3, 4, 5-6'
if line == '':
continue # if there are blank lines
group_name, file_names = line.split(':')
file_names = file_names.split(',')
# split the file index and find the corresponding bam_file
files = []
inc = 0
num_file = 0
prefix = ''
for item in file_names:
if '-' in item:
start, end = map(int, item.split('-'))
for i in range(start, end+1):
files.append(bam_files[i-1]) # file indices in the group file are 1-based
prefix = label_prefixs[i-1]
if not has_inc_level:
continue
inc += inc_levels[i-1]
num_file += end + 1 - start
else:
files.append(bam_files[int(item)-1])
prefix = label_prefixs[int(item)-1]
num_file += 1
if not has_inc_level:
continue
inc += inc_levels[int(item)-1]
group_files.append(files)
pre_group_name = prefix + ' ' + group_name
group_num += 1
if not has_inc_level:
sample_labels.append(group_name)
continue
pre_group_name += " IncLevel: {0:.2f}".format(inc/num_file)
sample_labels.append(pre_group_name)
except:
print 'read grouping info failed.'
group_file.close()
sys.exit(1)
sample_colors = cm.rainbow(np.linspace(0, 1, group_num)) * 0.85
group_file.close()
# the factor 0.85 above darkens the rainbow colors
return group_files, sample_labels, sample_colors
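# Hedged example of a grouping file accepted by analyze_group_info (illustrative;
# the group names are made up). Each line is 'group_name: indices', where the
# indices are 1-based positions in the bam_files list:
#
#   SAMPLE_1: 1-3
#   SAMPLE_2: 4,5-6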
# Plot density for a series of bam files.
def plot_density(sashimi_obj, pickle_filename, event, plot_title=None, group_info=None):
# intron_scale=30, exon_scale=1, gene_posterior_ratio=5, posterior_bins=40,
# colors=None, ymax=None, logged=False, show_posteriors=True, coverages=None,
# number_junctions=True, resolution=.5, fig_width=8.5, fig_height=11,
# font_size=6, junction_log_base=10, reverse_minus=False,
# bar_posterior=False):
# Get the settings we need
settings = sashimi_obj.settings
bam_files = settings["bam_files"]
miso_files = settings["miso_files"]
intron_scale = settings["intron_scale"]
exon_scale = settings["exon_scale"]
gene_posterior_ratio = settings["gene_posterior_ratio"]
posterior_bins = settings["posterior_bins"]
colors = settings["colors"]
ymax = settings["ymax"]
logged = settings["logged"]
show_posteriors = settings["show_posteriors"]
coverages = settings["coverages"]
number_junctions = settings["number_junctions"]
resolution = settings["resolution"]
junction_log_base = settings["junction_log_base"]
reverse_minus = settings["reverse_minus"]
bar_posterior = settings["bar_posteriors"]
font_size = settings["font_size"]
nyticks = settings["nyticks"]
nxticks = settings["nxticks"]
show_ylabel = settings["show_ylabel"]
show_xlabel = settings["show_xlabel"]
if plot_title is None:
plot_title = event
print "Using intron scale ", intron_scale
print "Using exon scale ", exon_scale
# Always show y-axis for read densities for now
showYaxis = True
# Parse gene pickle file to get information about gene
tx_start, tx_end, exon_starts, exon_ends, gene_obj, mRNAs, strand, chrom = \
parseGene(pickle_filename, event)
# Get the right scalings
graphcoords, graphToGene = getScaling(tx_start, tx_end, strand,
exon_starts, exon_ends, intron_scale,
exon_scale, reverse_minus)
if group_info is not None:
group_files, group_labels, group_colors = analyze_group_info(group_info, bam_files, settings["sample_labels"])
settings["sample_labels"] = group_labels
# if the group color is customized by the user
if len(colors) != len(group_colors):
print('\033[0;31;m') # change the print color as red
print("The number of custom colors is {0} which doesn't match the group number {1}. The program uses the "
"rainbow color as default.".format(len(colors), len(group_colors)))
print('\033[0m') # set the color as default value
colors = settings["colors"] = group_colors
nfiles = len(group_files)
else:
nfiles = len(bam_files)
group_files = []
# if the group_info is not provided, also produce a group_file = [['bam1'],['bam2'],...]
for i in range(nfiles):
group_files.append([bam_files[i]])
if plot_title is not None:
# Use custom title if given
suptitle(plot_title, fontsize=10)
else:
suptitle(event, fontsize=10)
plotted_axes = []
maxys = np.zeros(nfiles)
for i in range(nfiles):
if colors is not None:
color = colors[i]
else:
color = None
if coverages is not None:
coverage = coverages[i]
else:
coverage = 1
if i < nfiles - 1:
showXaxis = False
else:
showXaxis = True
bam_group = group_files[i] # ['./testData/S1.R1.test.bam','./testData/S1.R2.test.bam']
# bam_file = os.path.expanduser(bam_files[i])
ax1 = subplot2grid((nfiles + 3, gene_posterior_ratio), (i, 0),
colspan=gene_posterior_ratio - 1)
# Read sample label
sample_label = settings["sample_labels"][i]
print "Reading sample label: %s" %(sample_label)
# print "Processing BAM: %s" %(bam_file)
plotted_ax, maxy = plot_density_single(settings, sample_label,
tx_start, tx_end, gene_obj, mRNAs, strand,
graphcoords, graphToGene, bam_group, ax1, chrom,
paired_end=False, intron_scale=intron_scale,
exon_scale=exon_scale, color=color,
ymax=ymax, logged=logged, coverage=coverage,
number_junctions=number_junctions, resolution=resolution,
showXaxis=showXaxis, nyticks=nyticks, nxticks=nxticks,
show_ylabel=show_ylabel, show_xlabel=show_xlabel,
font_size=font_size,
junction_log_base=junction_log_base)
plotted_axes.append(plotted_ax)
maxys[i] = maxy
if show_posteriors:
miso_file = os.path.expanduser(miso_files[i])
try:
ax2 = subplot2grid((nfiles + 3, gene_posterior_ratio),\
(i, gene_posterior_ratio - 1))
if not os.path.isfile(miso_file):
print "Warning: MISO file %s not found" %(miso_file)
print "Loading MISO file: %s" %(miso_file)
plot_posterior_single(miso_file, ax2, posterior_bins,
showXaxis=showXaxis, show_ylabel=False,
font_size=font_size,
bar_posterior=bar_posterior)
except:
box(on=False)
xticks([])
yticks([])
print "Posterior plot failed."
##
## Figure out correct y-axis values
##
ymax_vals = []
if ymax != None:
# Use user-given ymax values if provided
max_used_yval = ymax
else:
# Compute best ymax value for all samples: take
# maximum y across all.
used_yvals = [curr_ax.get_ylim()[1] for curr_ax in plotted_axes]
# Round up
max_used_yval = math.ceil(max(used_yvals))
# Reset axes based on this.
# Set fake ymin bound to allow lower junctions to be visible
fake_ymin = -0.6 * max_used_yval
universal_yticks = linspace(0, max_used_yval,
nyticks + 1)
# Round up yticks
universal_ticks = map(math.ceil, universal_yticks)
for sample_num, curr_ax in enumerate(plotted_axes):
if showYaxis:
curr_ax.set_ybound(lower=fake_ymin, upper=max_used_yval)
curr_yticklabels = []
for label in universal_yticks:
if label <= 0:
# Exclude label for 0
curr_yticklabels.append("")
else:
if label % 1 != 0:
curr_yticklabels.append("%.1f" %(label))
else:
curr_yticklabels.append("%d" %(label))
curr_ax.set_yticklabels(curr_yticklabels,
fontsize=font_size)
curr_ax.spines["left"].set_bounds(0, max_used_yval)
curr_ax.set_yticks(universal_yticks)
curr_ax.yaxis.set_ticks_position('left')
curr_ax.spines["right"].set_color('none')
if show_ylabel:
y_horz_alignment = 'left'
if logged:
curr_ax.set_ylabel('RPKM $(\mathregular{\log}_{\mathregular{10}})$',
fontsize=font_size,
ha=y_horz_alignment)
else:
curr_ax.set_ylabel('RPKM',
fontsize=font_size,
va="bottom",
ha=y_horz_alignment)
else:
curr_ax.spines["left"].set_color('none')
curr_ax.spines["right"].set_color('none')
curr_ax.set_yticks([])
##
## Plot sample labels
##
sample_color = colors[sample_num]
# Make sample label y position be halfway between highest
# and next to highest ytick
if len(universal_yticks) >= 2:
halfway_ypos = (universal_yticks[-1] - universal_yticks[-2]) / 2.
label_ypos = universal_yticks[-2] + halfway_ypos
else:
label_ypos = universal_yticks[-1]
curr_label = settings["sample_labels"][sample_num]
curr_ax.text(max(graphcoords), max(label_ypos, maxys[sample_num]),
curr_label,
fontsize=font_size,
va='bottom',
ha='right',
color=sample_color)
# Draw gene structure
ax = subplot2grid((nfiles + 3, gene_posterior_ratio), (nfiles + 1, 0),
colspan=gene_posterior_ratio - 1, rowspan=2)
plot_mRNAs(tx_start, mRNAs, strand, graphcoords, reverse_minus)
subplots_adjust(hspace=.10, wspace=.7)
def getScaling(tx_start, tx_end, strand, exon_starts, exon_ends,
intron_scale, exon_scale, reverse_minus):
"""
Compute the scaling factor across various genic regions.
"""
exoncoords = zeros((tx_end - tx_start + 1))
for i in range(len(exon_starts)):
exoncoords[exon_starts[i] - tx_start : exon_ends[i] - tx_start] = 1
graphToGene = {}
graphcoords = zeros((tx_end - tx_start + 1), dtype='f')
x = 0
if strand == '+' or not reverse_minus:
for i in range(tx_end - tx_start + 1):
graphcoords[i] = x
graphToGene[int(x)] = i + tx_start
if exoncoords[i] == 1:
x += 1. / exon_scale
else:
x += 1. / intron_scale
else:
for i in range(tx_end - tx_start + 1):
graphcoords[-(i + 1)] = x
graphToGene[int(x)] = tx_end - i + 1
if exoncoords[-(i + 1)] == 1:
x += 1. / exon_scale
else:
x += 1. / intron_scale
return graphcoords, graphToGene
def readsToWiggle_pysam(reads, tx_start, tx_end, wiggle, jxns):
"""
Convert reads to wiggles; uses pysam.
"""
# wiggle = zeros((tx_end - tx_start + 1), dtype='f')
# jxns = {}
for read in reads:
# Skip reads with no CIGAR string
if read.cigar is None:
print "Skipping read with no CIGAR string: %s" %(read.cigar)
continue
cigar_str = sam_utils.sam_cigar_to_str(read.cigar)
# if ("N" in cigar_str) and (cigar_str.count("N") > 1):
# print "Skipping read with multiple junctions crossed: %s" \
# %(cigar_str)
# continue
# Check if the read contains an insertion (I)
# or deletion (D) -- if so, skip it
skipit = False
for cigar_part in read.cigar:
if cigar_part[0] == 1 or \
cigar_part[0] == 2:
print "Skipping read with CIGAR %s" \
%(cigar_str)
skipit = True
if skipit:
continue
aligned_positions = read.positions
for i, pos in enumerate(aligned_positions):
if pos < tx_start or pos > tx_end:
# print "=>",pos
continue
wig_index = pos-tx_start
wiggle[wig_index] += 1./read.qlen
try:
# if there is a junction coming up
if aligned_positions[i+1] > pos + 1:
leftss = pos+1
rightss= aligned_positions[i+1]+1
if leftss > tx_start and leftss < tx_end \
and rightss > tx_start and rightss < tx_end:
jxn = ":".join(map(str, [leftss, rightss]))
try:
jxns[jxn] += 1
except:
jxns[jxn] = 1
except:
pass
return
# return wiggle, jxns
# def readsToWiggle(reads, tx_start, tx_end):
# """
# Get wiggle and junction densities from reads.
# """
# read_positions, read_cigars = reads
# wiggle = zeros((tx_end - tx_start + 1), dtype='f')
# jxns = {}
# for i in range(len(read_positions)):
# pos, cigar = [read_positions[i], read_cigars[i]]
# if "N" not in cigar:
# rlen = int(cigar[:-1])
# s = max([pos - tx_start, 0])
# e = min([pos - tx_start + rlen, len(wiggle) - 1])
# wiggle[s : e] += 1. / rlen
# else:
# left, right = cigar.split("N")
# left, middle = map(int, left.split("M"))
# right = int(right[:-1])
# rlen = left + right
# s1 = pos - tx_start
# e1 = pos - tx_start + left
# s2 = pos + left + middle - tx_start
# e2 = pos + left + middle + right - tx_start
# # Include read coverage from adjacent junctions.
# if (e1 >= 0 and e1 < len(wiggle)) or (s1 >= 0 and s1 < len(wiggle)):
# wiggle[max([s1, 0]) : min([e1, len(wiggle)])] += 1. / rlen
# if (e2 >= 0 and e2 < len(wiggle)) or (s2 >= 0 and s2 < len(wiggle)):
# wiggle[max([s2, 0]) : min([e2, len(wiggle)])] += 1. / rlen
# # Plot a junction if both splice sites are within locus.
# leftss = pos + left
# rightss = pos + left + middle + 1
# if leftss - tx_start >= 0 and leftss - tx_start < len(wiggle) \
# and rightss - tx_start >= 0 and rightss - tx_start < \
# len(wiggle):
# jxn = ":".join(map(str, [leftss, rightss]))
# try:
# jxns[jxn] += 1
# except:
# jxns[jxn] = 1
# return wiggle, jxns
def plot_mRNAs(tx_start, mRNAs, strand, graphcoords, reverse_minus):
"""
Draw the gene structure.
"""
yloc = 0
exonwidth = .3
narrows = 50
for mRNA in mRNAs:
for s, e in mRNA:
s = s - tx_start
e = e - tx_start
x = [graphcoords[s], graphcoords[e], graphcoords[e], graphcoords[s]]
y = [yloc - exonwidth / 2, yloc - exonwidth / 2,\
yloc + exonwidth / 2, yloc + exonwidth / 2]
fill(x, y, 'k', lw=.5, zorder=20)
# Draw intron.
axhline(yloc, color='k', lw=.5)
# Draw intron arrows.
spread = .2 * max(graphcoords) / narrows
for i in range(narrows):
loc = float(i) * max(graphcoords) / narrows
if strand == '+' or reverse_minus:
x = [loc - spread, loc, loc - spread]
else:
x = [loc + spread, loc, loc + spread]
y = [yloc - exonwidth / 5, yloc, yloc + exonwidth / 5]
plot(x, y, lw=.5, color='k')
yloc += 1
xlim(0, max(graphcoords))
ylim(-.5, len(mRNAs) + .5)
box(on=False)
xticks([])
yticks([])
def plot_posterior_single(miso_f, axvar, posterior_bins,
showXaxis=True,
showYaxis=True,
show_ylabel=True,
font_size=6,
bar_posterior=False):
"""
Plot a posterior probability distribution for a MISO event.
"""
posterior_bins = int(posterior_bins)
psis = []
for line in open(miso_f):
if not line.startswith("#") and not line.startswith("sampled"):
psi, logodds = line.strip().split("\t")
psis.append(float(psi.split(",")[0]))
ci = .95
alpha = 1 - ci
lidx = int(round((alpha / 2) * len(psis)) - 1)
# the upper bound is the ((1 - alpha/2) * n)-th smallest sample, where n is
# the number of samples
hidx = int(round((1 - alpha / 2) * len(psis)) - 1)
psis.sort()
clow, chigh = [psis[lidx], psis[hidx]]
nyticks = 4
if not bar_posterior:
y, x, p = hist(psis, linspace(0, 1, posterior_bins),\
normed=True, facecolor='k', edgecolor='w', lw=.2)
axvline(clow, ymin=.33, linestyle='--', dashes=(1, 1), color='#CCCCCC', lw=.5)
axvline(chigh, ymin=.33, linestyle='--', dashes=(1, 1), color='#CCCCCC', lw=.5)
axvline(mean(psis), ymin=.33, color='r')
ymax = max(y) * 1.5
ymin = -.5 * ymax
# "$\Psi$ = %.2f\n$\Psi_{0.05}$ = %.2f\n$\Psi_{0.95}$ = %.2f" %\
text(1, ymax,
"$\Psi$ = %.2f\n[%.2f, %.2f]" % \
(mean(psis), clow, chigh),
fontsize=font_size,
va='top',
ha='left')
ylim(ymin, ymax)
axvar.spines['left'].set_bounds(0, ymax)
axvar.spines['right'].set_color('none')
axvar.spines['top'].set_color('none')
axvar.spines['bottom'].set_position(('data', 0))
axvar.xaxis.set_ticks_position('bottom')
axvar.yaxis.set_ticks_position('left')
if showYaxis:
yticks(linspace(0, ymax, nyticks),\
["%d"%(y) for y in linspace(0, ymax, nyticks)],\
fontsize=font_size)
else:
yticks([])
if show_ylabel:
ylabel("Frequency", fontsize=font_size, ha='right', va='center')
else:
##
## Plot a horizontal bar version of the posterior distribution,
## showing only the mean and the confidence bounds.
##
mean_psi_val = mean(psis)
clow_err = mean_psi_val - clow
chigh_err = chigh - mean_psi_val
errorbar([mean_psi_val], [1],
xerr=[[clow_err], [chigh_err]],
fmt='o',
ms=4,
ecolor='k',
markerfacecolor="#ffffff",
markeredgecolor="k")
text(1, 1,
"$\Psi$ = %.2f\n[%.2f, %.2f]" % \
(mean(psis), clow, chigh),
fontsize=font_size,
va='top',
ha='left')
yticks([])
# Use same x-axis for all subplots
# but only show x-axis labels for the bottom plot
xlim([0, 1])
psi_axis_fontsize = font_size - (font_size * 0.3)
xticks([0, .2, .4, .6, .8, 1], fontsize=psi_axis_fontsize)
if (not bar_posterior) and showYaxis:
axes_to_show = ['bottom', 'left']
else:
axes_to_show = ['bottom']
# Adjust x-axis to be lighter
axis_size = 0.2
tick_size = 1.2
axis_color = "k"
for shown_axis in axes_to_show:
if shown_axis in axvar.spines:
print "Setting color on %s axis" %(shown_axis)
axvar.spines[shown_axis].set_linewidth(axis_size)
axvar.xaxis.set_tick_params(size=tick_size,
color=axis_color)
if showXaxis:
from matplotlib.ticker import FormatStrFormatter
majorFormatter = FormatStrFormatter('%g')
axvar.xaxis.set_major_formatter(majorFormatter)
[label.set_visible(True) for label in axvar.get_xticklabels()]
xlabel("MISO $\Psi$", fontsize=font_size)
show_spines(axvar, axes_to_show)
else:
show_spines(axvar, axes_to_show)
[label.set_visible(False) for label in axvar.get_xticklabels()]
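# Hedged note (illustrative): the credible interval above is read off the sorted
# posterior samples; e.g. with 1000 sampled psi values and ci = 0.95 the bounds
# are roughly psis[24] and psis[974].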
def cubic_bezier(pts, t):
"""
Get points in a cubic bezier.
"""
p0, p1, p2, p3 = pts
p0 = array(p0)
p1 = array(p1)
p2 = array(p2)
p3 = array(p3)
return p0 * (1 - t)**3 + 3 * t * p1 * (1 - t) ** 2 + \
3 * t**2 * (1 - t) * p2 + t**3 * p3
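# Hedged usage note (illustrative): plot_density_single evaluates this at t = 0.5
# to place each junction read-count label at the midpoint of its arc, e.g.
#
#   pts = [(0, 0), (0, 2), (10, 2), (10, 0)]
#   mid_x, mid_y = cubic_bezier(pts, 0.5)   # -> (5.0, 1.5)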
def plot_density_from_file(settings_f, pickle_filename, event,
output_dir,
group_info=None,
no_posteriors=False,
plot_title=None,
plot_label=None):
"""
Read MISO estimates given an event name.
"""
##
## Read information about gene
##
tx_start, tx_end, exon_starts, exon_ends, gene_obj, mRNAs, strand, chrom = \
parseGene(pickle_filename, event)
# Override settings flag on whether to show posterior plots
# if --no-posteriors was given to plot.py
sashimi_obj = Sashimi(event, output_dir,
event=event,
chrom=chrom,
settings_filename=settings_f,
no_posteriors=no_posteriors)
print "Plotting read densities and MISO estimates along event..."
print " - Event: %s" %(event)
settings = sashimi_obj.settings
if no_posteriors:
settings["show_posteriors"] = False
# bam_files = settings['bam_files']
# miso_files = settings['miso_files']
# Setup the figure
sashimi_obj.setup_figure()
plot_density(sashimi_obj, pickle_filename, event, group_info=group_info,
plot_title=plot_title)
# Save figure
sashimi_obj.save_plot(plot_label=plot_label)
# intron_scale=settings["intron_scale"],
# exon_scale=settings["exon_scale"],
# gene_posterior_ratio=settings["gene_posterior_ratio"],
# posterior_bins=settings["posterior_bins"],
# show_posteriors=settings["show_posteriors"],
# logged=settings["logged"],
# colors=settings["colors"],
# ymax=settings["ymax"],
# coverages=settings["coverages"],
# number_junctions=settings["number_junctions"],
# resolution=settings["resolution"],
# fig_width=settings["fig_width"],
# fig_height=settings["fig_height"],
# font_size=settings["font_size"],
# junction_log_base=settings["junction_log_base"],
# reverse_minus=settings["reverse_minus"],
# bar_posterior=settings["bar_posteriors"])
|
gpl-2.0
|
JPFrancoia/scikit-learn
|
benchmarks/bench_plot_omp_lars.py
|
28
|
4471
|
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
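            # precomputing the Gram matrix G = X^T X and Xy = X^T y lets
            # lars_path skip forming these products itself; that difference is
            # exactly what the "with Gram" vs "without Gram" timings compare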
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import matplotlib.pyplot as plt
fig = plt.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i+1)
vmax = max(1 - timings.min(), -1 + timings.max())
plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
plt.xlabel('n_samples')
plt.ylabel('n_features')
plt.title(label)
plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = plt.axes([0.1, 0.08, 0.8, 0.06])
plt.colorbar(cax=ax, orientation='horizontal')
plt.show()
|
bsd-3-clause
|
nathanhilbert/ulmo
|
ulmo/util/misc.py
|
3
|
9838
|
from contextlib import contextmanager
import datetime
import email.utils
import ftplib
import functools
import os
import re
import urlparse
import warnings
import appdirs
from lxml import etree
import numpy as np
import pandas
import requests
# pre-compiled regexes for underscore conversion
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
class DependencyError(Exception):
pass
def camel_to_underscore(s):
"""converts camelCase to underscore, originally from
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
first_sub = first_cap_re.sub(r'\1_\2', s)
return all_cap_re.sub(r'\1_\2', first_sub).lower()
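# Hedged usage sketch (not part of the original module): illustrates the
# behaviour of camel_to_underscore on a couple of hypothetical identifiers.
def _example_camel_to_underscore():
    assert camel_to_underscore('camelCase') == 'camel_case'
    assert camel_to_underscore('HTTPResponseCode') == 'http_response_code'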
def convert_date(date):
"""returns a datetime.date object from either a string representation or
date-like object (datetime.date, datetime.datetime, or pandas.Timestamp)
"""
return pandas.Timestamp(date).date()
def convert_datetime(datetime):
"""returns a datetime.date object from either a string representation or
datetime-like object (datetime.date, datetime.datetime, or pandas.Timestamp)
"""
return pandas.Timestamp(datetime).to_datetime()
def dir_list(url):
"""given a path to a ftp directory, returns a list of files in that
directory
"""
parsed = urlparse.urlparse(url)
ftp = ftplib.FTP(parsed.netloc, "anonymous")
ftp.cwd(parsed.path)
return ftp.nlst()
def dict_from_dataframe(dataframe):
if isinstance(dataframe.index, pandas.PeriodIndex)\
or isinstance(dataframe.index, pandas.DatetimeIndex):
dataframe.index = [str(i) for i in dataframe.index]
# convert np.nan objects to None objects; prior to pandas 0.13.0 this could
    # be done in a vectorized way, but as of 0.13, None assigned into a
    # dataframe gets converted back to a nan object, so this has to be done
    # rather inefficiently in a post-processing step
if pandas.__version__ < '0.13.0':
for column_name in dataframe.columns:
dataframe[column_name][pandas.isnull(dataframe[column_name])] = None
df_dict = dataframe.T.to_dict()
else:
df_dict = dict([
(k, _nans_to_nones(v))
for k, v in dataframe.T.to_dict().iteritems()
])
return df_dict
def download_if_new(url, path, check_modified=True):
"""downloads the file located at `url` to `path`, if check_modified is True
it will only download if the url's last-modified header has a more recent
date than the filesystem's last modified date for the file
"""
parsed = urlparse.urlparse(url)
if os.path.exists(path) and not check_modified:
return
if parsed.scheme.startswith('ftp'):
_ftp_download_if_new(url, path, check_modified)
elif parsed.scheme.startswith('http'):
_http_download_if_new(url, path, check_modified)
else:
raise NotImplementedError("only ftp and http urls are currently implemented")
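# Hedged usage sketch (not part of the original module): the URL and local
# path below are hypothetical; the call only re-downloads when the remote
# copy is newer than, or differs in size from, the local file.
def _example_download_if_new():
    download_if_new('http://example.com/data.csv', '/tmp/ulmo_example/data.csv',
                    check_modified=True)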
def get_ulmo_dir(sub_dir=None):
return_dir = appdirs.user_data_dir('ulmo', 'ulmo')
if sub_dir:
return_dir = os.path.join(return_dir, sub_dir)
mkdir_if_doesnt_exist(return_dir)
return return_dir
def mkdir_if_doesnt_exist(dir_path):
"""makes a directory if it doesn't exist"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def module_with_dependency_errors(method_names):
class FakeModule(object):
pass
fake_module = FakeModule()
for method_name in method_names:
def f(*args, **kwargs):
raise_dependency_error()
f.__name__ = method_name
setattr(fake_module, method_name, f)
return fake_module
def module_with_deprecation_warnings(functions, warning_message):
warnings.filterwarnings("always", warning_message, DeprecationWarning)
class DeprecatedModule(object):
pass
deprecated_module = DeprecatedModule()
def warning_decorator(f):
@functools.wraps(f)
def warning_wrapper(*args, **kwargs):
warnings.warn(warning_message, DeprecationWarning)
return f(*args, **kwargs)
return warning_wrapper
for function in functions:
wrapped_function = warning_decorator(function)
setattr(deprecated_module, function.__name__, wrapped_function)
return deprecated_module
@contextmanager
def open_file_for_url(url, path, check_modified=True, use_file=None):
"""Context manager that returns an open file handle for a data file;
downloading if necessary or otherwise using a previously downloaded file.
File downloading will be short-circuited if use_file is either a file path
or an open file-like object (i.e. file handler or StringIO obj), in which
case the file handler pointing to use_file is returned - if use_file is a
file handler then the handler won't be closed upon exit.
"""
leave_open = False
if use_file is not None:
if isinstance(use_file, basestring):
open_path = use_file
        if hasattr(use_file, 'read'):
            leave_open = True
            yield use_file
            # a file-like object was passed in; skip the open() below
            return
else:
download_if_new(url, path, check_modified)
open_path = path
open_file = open(open_path, 'rb')
yield open_file
if not leave_open:
open_file.close()
def parse_fwf(file_path, columns, na_values=None):
"""Convenience function for parsing fixed width formats. Wraps the pandas
read_fwf parser but allows all column information to be kept together.
Columns should be an iterable of lists/tuples with the format (column_name,
start_value, end_value, converter). Returns a pandas dataframe.
"""
names, colspecs = zip(*[(name, (start, end))
for name, start, end, converter in columns])
converters = dict([
(name, converter)
for name, start, end, converter in columns
if not converter is None
])
return pandas.io.parsers.read_fwf(file_path,
colspecs=colspecs, header=None, na_values=na_values, names=names,
converters=converters)
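# Hedged usage sketch (not part of the original module): illustrates the
# (column_name, start, end, converter) tuples that parse_fwf expects. The
# file path and column layout here are hypothetical.
def _example_parse_fwf():
    columns = [
        ('station_id', 0, 8, None),   # characters 0-8, no converter
        ('value', 8, 16, float),      # characters 8-16, parsed as floats
    ]
    return parse_fwf('/tmp/example_fixed_width.txt', columns, na_values=['-999'])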
def raise_dependency_error(*args, **kwargs):
raise DependencyError("Trying to do something that depends on pytables, "
"but pytables has not been installed.")
def save_pretty_printed_xml(filename, response_buffer):
"""saves a nicely indented version of the xml contained in response_buffer
    to filename; handy for debugging or for saving responses to include in tests"""
with open(filename, 'w') as f:
response_buffer.seek(0)
parsed = etree.parse(response_buffer)
f.write(etree.tostring(parsed, pretty_print=True))
response_buffer.seek(0)
def _ftp_download_if_new(url, path, check_modified=True):
parsed = urlparse.urlparse(url)
ftp = ftplib.FTP(parsed.netloc, "anonymous")
directory, filename = parsed.path.rsplit('/', 1)
ftp_last_modified = _ftp_last_modified(ftp, parsed.path)
ftp_file_size = _ftp_file_size(ftp, parsed.path)
if not os.path.exists(path) or os.path.getsize(path) != ftp_file_size:
_ftp_download_file(ftp, parsed.path, path)
elif check_modified and _path_last_modified(path) < ftp_last_modified:
_ftp_download_file(ftp, parsed.path, path)
def _ftp_download_file(ftp, ftp_path, local_path):
with open(local_path, 'wb') as f:
ftp.retrbinary("RETR " + ftp_path, f.write)
def _ftp_file_size(ftp, file_path):
ftp.sendcmd('TYPE I')
return ftp.size(file_path)
def _ftp_last_modified(ftp, file_path):
timestamp = ftp.sendcmd("MDTM " + file_path).split()[-1]
return datetime.datetime.strptime(timestamp, '%Y%m%d%H%M%S')
def _http_download_file(url, path):
request = requests.get(url)
mkdir_if_doesnt_exist(os.path.dirname(path))
chunk_size = 64 * 1024
with open(path, 'wb') as f:
for content in request.iter_content(chunk_size):
f.write(content)
def _http_download_if_new(url, path, check_modified):
head = requests.head(url)
if not os.path.exists(path) or not _request_file_size_matches(head, path):
_http_download_file(url, path)
elif check_modified and _request_is_newer_than_file(head, path):
_http_download_file(url, path)
def _nans_to_nones(nan_dict):
"""takes a dict and if any values are np.nan then it will replace them with
None"""
return dict([
(k, v) if v is not np.nan else (k, None)
for k, v in nan_dict.iteritems()
])
def _parse_rfc_1123_timestamp(timestamp_str):
return datetime.datetime(*email.utils.parsedate(timestamp_str)[:6])
def _path_last_modified(path):
"""returns a datetime.datetime object representing the last time the file at
a given path was last modified
"""
if not os.path.exists(path):
return None
return datetime.datetime.utcfromtimestamp(os.path.getmtime(path))
def _request_file_size_matches(request, path):
"""returns True if request content-length header matches file size"""
content_length = request.headers.get('content-length')
if content_length and int(content_length) == os.path.getsize(path):
return True
else:
return False
def _request_is_newer_than_file(request, path):
"""returns true if a request's last-modified header is more recent than a
file's last modified timestamp
"""
path_last_modified = _path_last_modified(path)
if path_last_modified is None:
return True
if not request.headers.get('last-modified'):
warnings.warn('no last-modified date for request: %s, downloading file again' % request.url)
return True
request_last_modified = _parse_rfc_1123_timestamp(request.headers.get('last-modified'))
if request_last_modified > path_last_modified:
return True
else:
return False
|
bsd-3-clause
|
idf/scipy_util
|
scipy_util/regressions/logistic_regression.py
|
1
|
1748
|
import numpy as np
from scipy.stats import logistic
import matplotlib.pyplot as plt
class LogisticRegressioner(object):
def __init__(self, tol=1e-6):
self.tol = tol
def first_derivative(self, X, Y, w):
"""
Calculate the 1st derivative of log-loss function
"""
d, T = X.shape
sigma = logistic.cdf(np.multiply(-Y, np.dot(w.T, X)))
ret = np.multiply(sigma, Y)
ret = np.multiply(np.repeat(ret[np.newaxis, :], d, axis=0), X)
ret = np.sum(ret, axis=1)
return ret
def log_loss(self, X, Y, w):
L = np.log(logistic.cdf(np.multiply(Y, np.dot(w.T, X))))
L = np.sum(L)
L = -L
return L
def classifier_w(self, X, Y,eta=0.05):
"""
        Returns a classification vector w obtained by gradient descent on the
        logistic regression loss function; a bias term is prepended to X
        internally, so w has one more entry than the number of features.
"""
X = X.T
d, T = X.shape
X = np.vstack([np.ones((T, )), X]) # add bias 1
d += 1
w = np.zeros(d)
t = 0
while True:
t += 1
w_ = w + eta*self.first_derivative(X, Y, w)
L_old, L_new = self.log_loss(X, Y, w_), self.log_loss(X, Y, w)
if np.abs(L_old - L_new) < self.tol: break
w = w_
if t % 300 == 1: yield w # yield for trace the w
yield w
def test_sample(self):
X = np.array([
[2, 1],
[1, 20],
[1, 5],
[4, 1],
[1, 40],
[3, 30],
])
Y = np.array([
-1,
-1,
-1,
1,
1,
1,
])
ws = list(self.classifier_w(X, Y))
print ws[-1]
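# Hedged usage sketch (not part of the original module): runs the gradient
# descent classifier on the same toy data used in test_sample above and
# returns the final weight vector (bias first, then the two feature weights).
def _example_run():
    regressioner = LogisticRegressioner(tol=1e-6)
    X = np.array([[2, 1], [1, 20], [1, 5], [4, 1], [1, 40], [3, 30]])
    Y = np.array([-1, -1, -1, 1, 1, 1])
    ws = list(regressioner.classifier_w(X, Y, eta=0.05))
    return ws[-1]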
|
bsd-3-clause
|
studywolf/blog
|
tracking_control/tracking_control5.py
|
1
|
4790
|
""" An implementation based on the 2-link arm plant and controller from
(Slotine & Sastry, 1983).
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn
class plant:
def __init__(self, dt=.001, theta1=[0.0, 0.0], theta2=[0.0, 0.0]):
"""
dt float: simulation time step
theta1 list/np.array: [position, velocity]
theta2 list/np.array: [position, velocity]
"""
self.dt = dt
self.theta1 = np.asarray(theta1)
self.theta2 = np.asarray(theta2)
def step(self, T):
""" apply torques T and move one time step forward """
Tprime_1 = (2*T[0] + np.sin(self.theta2[0])*self.theta2[1] *
(2*self.theta1[1] + self.theta2[1]))
Tprime_2 = 2*T[1] - np.sin(self.theta2[0])*self.theta2[1]
denom = (16.0/9.0 - np.cos(self.theta2[0])**2)
ddtheta1 = (
(2.0/3.0 * Tprime_1 - (2.0/3.0 +
np.cos(self.theta2[0]) * Tprime_2)) / denom)
ddtheta2 = (
(-(2.0/3.0 + np.cos(self.theta2[0])) * Tprime_1 +
2*(5.0/3.0 + np.cos(self.theta2[0])) * Tprime_2) / denom)
self.theta1 += np.array([self.theta1[1], ddtheta1]) * self.dt
self.theta2 += np.array([self.theta2[1], ddtheta2]) * self.dt
@property
def state(self):
return self.theta1, self.theta2
class controller:
def __init__(self):
pass
def control(self, t, theta1, theta2, theta1d, theta2d):
"""
t float: the current time (desired trajectory is a function of time)
thetas np.array: plant state
"""
# calculate the value of s for generating our gains
s1 = (theta1[0] - theta1d[0]) + 5*(theta1[1] - theta1d[1])
s2 = (theta2[0] - theta2d[0]) + 5*(theta2[1] - theta2d[1])
# gains for u1
b11 = -0.7 if s1*theta2[1]*(2*theta1[1] + theta2[1]) > 0 else 0.7
b12 = -1.2 if s1*(theta1[1]**2) > 0 else 1.2
k11 = -9 if s1*(theta1[1] - theta1d[1]) > 0 else -3.8
# gains for u2
b21 = -1.2 if s2*theta2[1]*(2*theta1[1] + theta2[1]) > 0 else 1.2
b22 = -4.4 if s2*(theta1[1]**2) > 0 else 4.4
k21 = -9 if s2*(theta2[1] - theta2d[1]) > 0 else -3.8
# shared gains
k2 = 3.15
u1 = (
b11*theta2[1]*(2*theta1[1] + theta2[1]) + b12*theta1[1]**2 +
k11*(theta1[1] - theta1d[1]) - k2*np.sign(s1))
u2 = (
b21*theta2[1]*(2*theta1[1] + theta2[1]) + b22*theta1[1]**2 +
k21*(theta2[1] - theta2d[1]) - k2*np.sign(s2))
T2 = ((u2 + (1 + 3.0/2.0*np.cos(theta2[0]))*u1) /
(10.0/3.0 - np.cos(theta2[0])))
T1 = 3.0/4.0*(u1 + (4.0/3.0 + 2*np.cos(theta2[0]))*T2)
return np.array([T1, T2])
T = 5
dt = 0.001
timeline = np.arange(0.0, T, dt)
ctrlr = controller()
theta1 = [-90.0, 0.0]
theta2 = [170.0, 0.0]
plant_uncontrolled = plant(dt=dt, theta1=theta1, theta2=theta2)
theta1_uncontrolled_track = np.zeros((timeline.shape[0], 2))
theta2_uncontrolled_track = np.zeros((timeline.shape[0], 2))
plant_controlled = plant(dt=dt, theta1=theta1, theta2=theta2)
theta1_controlled_track = np.zeros((timeline.shape[0], 2))
theta2_controlled_track = np.zeros((timeline.shape[0], 2))
theta1d_1 = lambda t: -90 + 52.5*(1 - np.cos(1.26*t)) if t <= 2.5 else 15
theta1d_2 = lambda t: 52.5*1.26*np.sin(1.26*t) if t <= 2.5 else 0.0
theta2d_1 = lambda t: 170 - 60*(1 - np.cos(1.26*t)) if t <= 2.5 else 50
theta2d_2 = lambda t: -60*1.26*np.sin(1.26*t) if t < 2.5 else 0.0
thetad_track = np.zeros((timeline.shape[0], 2))
for ii, t in enumerate(timeline):
if ii % int(1.0/dt) == 0:
print('t: ', t)
theta1d = [theta1d_1(t), theta1d_2(t)]
theta2d = [theta2d_1(t), theta2d_2(t)]
thetad_track[ii] = [theta1d[0], theta2d[0]]
(theta1_uncontrolled_track[ii],
theta2_uncontrolled_track[ii]) = plant_uncontrolled.state
(theta1_controlled_track[ii],
theta2_controlled_track[ii]) = plant_controlled.state
u = ctrlr.control(t,
theta1_controlled_track[ii],
theta2_controlled_track[ii],
theta1d, theta2d)
plant_uncontrolled.step(np.array([0.0, 0.0]))
plant_controlled.step(u)
plt.subplot(2, 1, 1)
plt.plot(timeline, theta1_uncontrolled_track[:, 0], lw=2)
plt.plot(timeline, theta1_controlled_track[:, 0], lw=2)
plt.plot(timeline, thetad_track[:, 0], 'r--', lw=2)
plt.legend(['uncontrolled', 'controlled', 'target'])
plt.subplot(2, 1, 2)
plt.plot(timeline, theta2_uncontrolled_track[:, 0], lw=2)
plt.plot(timeline, theta2_controlled_track[:, 0], lw=2)
plt.plot(timeline, thetad_track[:, 1], 'r--', lw=2)
plt.legend(['uncontrolled', 'controlled', 'target'])
plt.tight_layout()
plt.show()
|
gpl-3.0
|
srgblnch/MeasuredFillingPattern
|
tango-ds/MeasuredFillingPatternPhCt/phAnalyser.py
|
1
|
25406
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
##############################################################################
## license : GPLv3+
##============================================================================
##
## File : phAnalyser.py
##
## Project : Filling Pattern from the Photon Counter
##
## description : Python source with the class that has the appropriate methods
## to...
##
## This file is part of Tango device class.
##
## Tango is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Tango is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Tango. If not, see <http://www.gnu.org/licenses/>.
##
## $Author : Laura Torino$ (first developer)
##
## $Revision : $
##
## $Date : $
##
## $HeadUrl : $
##
## copyleft : Cells / Alba Synchrotron
## Cerdanyola/Bellaterra
## Spain
##############################################################################
###############################################################################
# /data/Diagnostics/Laura/PhotonCountingTopUp/phAnalyzer #
# #
# This program analyses data coming from a Photon Counting device server #
# - Data are uploaded #
# - Read the resolution #
# - Calculate the filling status of the different buckets #
###############################################################################
from copy import copy
from numpy import *
import PyTango
from scipy import *
#from scipy import signal
import taurus
import time
import traceback
#class Analyser:
# def __init__(self,parent=None):
# self._parent = parent
#
# ######
# #----# auxiliary methods for logging
# def info(self,msg):
# try:
# if self._parent:
# self._parent.info_stream(msg)
# else:
# print("info: %s"%(msg))
# except: print("cannot print in info stream (%s)"%msg)
# def debug(self,msg):
# try:
# if self._parent:
# self._parent.debug_stream(msg)
# else:
# print("debug: %s"%(msg))
# except: print("cannot print in debug stream (%s)"%msg)
# def warn(self,msg):
# try:
# if self._parent:
# self._parent.warn_stream(msg)
# else:
# print("warn: %s"%(msg))
# except: print("cannot print in warn stream (%s)"%msg)
# def error(self,msg):
# try:
# if self._parent:
# self._parent.error_stream(msg)
# else:
# print("error: %s"%(msg))
# except: print("cannot print in error stream (%s)"%msg)
# # done logging section
# ######
#
class PhCtAnalyzer(object):#(Analyser):
def __init__(self,PhCtDevName,
histogramAttr="histogram",resolutionAttr="resolution",
dcctDev='SR/DI/DCCT',dcctAttr='AverageCurrent',
BucketLenght=2*1e-9,threshold=1,nAcquisitions=30,
parent=None):
super(PhCtAnalyzer,self).__init__()
self._parent = parent#for the logging
#Analyser.__init__(self, parent)
#super(PhCtAnalyzer,self).__init__(parent)
self._PhCtDevName = None
self._PhCtDevProxy = None
self._HistogramAttr = None
self._Histogram = None
self._nAcquisitions = nAcquisitions
self._cyclicBuffer = None
self._resolutionAttr = None
self._dcctDev = None
self._dcctAttr = None
self._BucketLength = None
self._threshold = None
self._Tot_Bucket = None
self.info("contructor setter for PhCt device name")
self._PhCtDevName = PhCtDevName
self.info("contructor has set PhCt device name %s"%(self.PhCtDevName))
self.HistogramAttr = histogramAttr
self._resolutionAttr = resolutionAttr
self._dcctDev = dcctDev
self._dcctAttr = dcctAttr
self.BucketLength = BucketLenght
self.threshold = threshold
self._t0 = []
self._tf = []
#---- outputs
self._bunchIntensity = []
self._bunchIntensityQuality = PyTango.AttrQuality.ATTR_INVALID
self._filledBunches = 0
self._spuriousBunches = 0
self._resultingFrequency = 0.0
@property
def PhCtDevName(self):
return self._PhCtDevName
@PhCtDevName.setter
def PhCtDevName(self,value):
try:
print(".")
self.info("New PhCt device name %s"%(value))
try:
print("..")
self._PhCtDevName = value
print("...")
self._PhCtDevProxy = PyTango.DeviceProxy(self._PhCtDevName)
except Exception,e:
print("!")
self.error("Error making PhCt device proxy: %s"%(e))
raise e
else:
print("+")
self.info("PhCt device proxy made")
except Exception,e:
print("?")
self.error("Agh! %s"%(e))
@property
def PhCtDevProxy(self):
self.info("PhCt device proxy requested")
if self._PhCtDevProxy == None:
if self._PhCtDevName == None:
self.error("Unknown device name to build a proxy")
return None
self.warn("Building proxy on the fly for %s"%(self._PhCtDevName))
self._PhCtDevProxy = PyTango.DeviceProxy(self._PhCtDevName)
self.info("Returning PhCt proxy")
return self._PhCtDevProxy
@property
def HistogramAttr(self):
return self._HistogramAttr
@HistogramAttr.setter
def HistogramAttr(self,value):
self._HistogramAttr = value
@property
def nAcquisitions(self):
return self._nAcquisitions
@nAcquisitions.setter
def nAcquisitions(self,value):
self._nAcquisitions = value
@property
def lenCyclicBuffer(self):
if self._cyclicBuffer is not None:
return self._cyclicBuffer.shape[0]
return 0
@property
def Histogram(self):
return self._Histogram
@Histogram.setter
def Histogram(self,value):
if self._cyclicBuffer == None:
self._cyclicBuffer = array([value])
self.debug("Collected a first array in the cyclic buffer (%s)"
% (str(self._cyclicBuffer.shape)))
else:
self._cyclicBuffer = concatenate((self._cyclicBuffer,
array([value])))
self.debug("Concatenated another array (%s)"
% (str(self._cyclicBuffer.shape)))
while self.lenCyclicBuffer > self.nAcquisitions:
self._cyclicBuffer = delete(self._cyclicBuffer,(0),axis=0)
self.debug("Cyclic buffer shape %s" % (str(self._cyclicBuffer.shape)))
self._Histogram = self._cyclicBuffer.mean(axis=0)
# @property
# def InputSignal(self):
# return self._Histogram
@property
def resolutionAttr(self):
return self._resolutionAttr
@resolutionAttr.setter
def resolutionAttr(self,value):
self._resolutionAttr = value
@property
def Resolution(self):
try:
fullAttrName = "%s/%s"%(self._PhCtDevName,self._resolutionAttr)
return taurus.Attribute(fullAttrName).read().value
except:
return None
@property
def dcctDev(self):
return self._dcctDev
@dcctDev.setter
def dcctDev(self,value):
self._dcctDev = value
@property
def dcctAttr(self):
return self._dcctAttr
@dcctAttr.setter
def dcctAttr(self,value):
self._dcctAttr = value
@property
def Current(self):
try:
fullAttrName = self._dcctDev+'/'+self._dcctAttr
return taurus.Attribute(fullAttrName).read().value
except:
return 0.0
@property
def BucketLenght(self):
return self._BucketLenght
@BucketLenght.setter
def BucketLenght(self,value):
self._BucketLenght = value
@property
def threshold(self):
return self._threshold
@threshold.setter
def threshold(self,value):
self._threshold = value
@property
def ResultingFrequency(self):
return self._resultingFrequency
@property
def TotBucket(self):
return self._Tot_Bucket
@property
def BunchIntensity(self):
return self._bunchIntensity
@property
def BunchIntensityQuality(self):
return self._bunchIntensityQuality
@property
def FilledBunches(self):
return self._filledBunches
@property
def SpuriousBunches(self):
return self._spuriousBunches
#a callback method for the scope channel attribute
def push_event(self,event):
try:
if event != None:
if event.device.dev_name() == self._PhCtDevName:
if event.attr_value != None and \
event.attr_value.value != None:
if event.attr_value.quality in \
[PyTango.AttrQuality.ATTR_VALID,
PyTango.AttrQuality.ATTR_CHANGING]:
if self.isCurrentOk():
if self.areNAcquisitions():
if not self.isRunning():
self.setRunning()
else:
if not self.isStandby():
self.setStandby("Collecting samples")
self.debug("Received valid data! (%d,%s)"
%(len(event.attr_value.value),
event.attr_value.quality))
self.Histogram = event.attr_value.value
self._bunchIntensityQuality = \
event.attr_value.quality
self.calculateMeasurements()
self.calculateResultingFrequency()
self.emit_results()
else:
self.debug("Data is %s"%(event.attr_value.quality))
else:
self.debug("PushEvent() %s: value has None type"
%(event.attr_name))
else:
self.warn("Received an unexpected event from %s"
%(event.device.dev_name()))
else:
self.warn("Received a null event")
except Exception,e:
msg = "cannot process event due to: %s"%e
self.error(msg)
self.setFault(msg)
traceback.print_exc()
def areNAcquisitions(self):
if self.lenCyclicBuffer < self.nAcquisitions:
return False
return True
def isCurrentOk(self):
if self.Current > 0.0:
return True
else:
#when there is no beam, no calculation to be made
if self.isRunning():
self.emit_zeros()
self.setStandby("Beam current")
self._cyclicBuffer = None
return False
def calculateMeasurements(self):
bucket,self._bunchIntensity = self.Fil_Pat_Calc(self.Histogram)
self.debug("len(_bunchIntensity) = %d"%(len(self._bunchIntensity)))
self._filledBunches = self.bunchCount(self._bunchIntensity)
self.debug("FilledBunches = %d"%self._filledBunches)
self._spuriousBunches = self.spuriousBunches(self._bunchIntensity)
self.debug("SpuriousBunches = %d"%self._spuriousBunches)
def calculateResultingFrequency(self):
samples = len(self._tf)
lapses = []
for i in range(samples-1):
lapses.append(self._tf[i+1]-self._tf[i])
self._resultingFrequency = 1/average(lapses)
def areNAcquisitions(self):
if self.lenCyclicBuffer < self.nAcquisitions:
return False
return True
def isCurrentOk(self):
if self.Current > 0.0:
return True
else:
#when there is no beam, no calculation to be made
if not self.isStandby():
self.warn("No beam to do a calculation")
self.emit_zeros()
self.setStandby("Beam current")
self._cyclicBuffer = None
return False
def emit_results(self):
if self._parent:
nBunches = self._filledBunches-self._spuriousBunches
events2emit = []
events2emit.append(['BunchIntensity',self.BunchIntensity,
self.BunchIntensityQuality])
events2emit.append(['InputSignal',self.Histogram,
self.BunchIntensityQuality])
events2emit.append(['resultingFrequency',self._resultingFrequency])
events2emit.append(['FilledBunches',self._filledBunches])
events2emit.append(['SpuriousBunches',self._spuriousBunches])
events2emit.append(['nBunches',nBunches])
if self.areNAcquisitions():
nAcquisitionsQuality = PyTango.AttrQuality.ATTR_VALID
else:
nAcquisitionsQuality = PyTango.AttrQuality.ATTR_CHANGING
events2emit.append(['nAcquisitions',self.lenCyclicBuffer,
nAcquisitionsQuality])
self._parent.fireEventsList(events2emit)
self._parent.attr_BunchIntensity_read = self.BunchIntensity
def emit_zeros(self):
if self._parent:
self._bunchIntensity = array([0]*448)
events2emit = []
events2emit.append(['BunchIntensity',self._bunchIntensity])
events2emit.append(['resultingFrequency',0.0])
events2emit.append(['FilledBunches',0])
events2emit.append(['SpuriousBunches',0])
events2emit.append(['nBunches',0])
events2emit.append(['nAcquisitions',0])
self._parent.fireEventsList(events2emit)
self._parent.attr_BunchIntensity_read = self.BunchIntensity
####
# original methods of the ph analysis
def mov_av(self,data):
data_fil = []
for i in range(len(data)-1):
data_fil.append((data[i]+data[i+1])/2)
data_fil.append(0)
return array(data_fil)
def Fil_Pat_Calc(self,y_data):
'''Calculation of the filling status of the 448 buckets'''
t0 = time.time()
#self.debug("Fil_Pat_Calc()")
        # Useful variables
        secperbin = self.Resolution*1e-12
        #Convert the resolution (ps) to seconds
time_win = round(self.BucketLength/secperbin)
self._Tot_Bucket = round(448*self.BucketLength/secperbin)
#prepare arrays
y_data = y_data[0:self._Tot_Bucket+1]
x_data = range(len(y_data))
fil_pat = []
k = 0
Start = 0
i=0
#Analysis
#self.debug("Data analysis")
while (Start < len(y_data)):
k = 0
time_win_ar = [] #Array representing the time of a bucket
if (Start + time_win < len(y_data)):
for k in range(0, int(time_win)):
time_win_ar.append(y_data[Start+k]) #create the bucket
fil_pat.append(sum(time_win_ar)) #considering all the photons
#in the bucket
Start = Start + time_win #switch to the following bucket
#Impose a threshold (Not sure if needed)
i = 0
Max = max(fil_pat)
#thr = 1 #input('Threshold (%): ')
thr = self.threshold*0.01
#generate the array with the bucket number
bucket = []
fil_pat_thr = array(fil_pat>Max*thr)
fil_pat = fil_pat*fil_pat_thr.astype(int)
#fil_pat = self.mov_av(fil_pat)
cur = self.Current
fil_pat = array(fil_pat)
fil_pat.astype(float)
fil_pat = fil_pat*cur/sum(fil_pat)
tf = time.time()
self._t0.append(t0)
self._tf.append(tf)
self.debug("current calculation in %f"%(tf-t0))
while len(self._tf) > 10*3:#self.NAcquisitions:
self._t0.pop(0)
self._tf.pop(0)
return (bucket,fil_pat)
def bunchCount(self,vec_p_to_p):
'''TODO: document this method'''
        #FIXME: parameters would be inside the class?
count = 0
bunch = 0
#TODO: document the loop
for count in range(0, len(vec_p_to_p)-1):
if(vec_p_to_p[count] > 0):
bunch = bunch + 1
return bunch
def spuriousBunches(self,vec_p_to_p):
'''TODO: document this method'''
        #FIXME: parameters would be inside the class?
i = 0
j = 0
sp_bun = 0
#TODO: document
if (vec_p_to_p [i] != 0 and vec_p_to_p[i+1] == 0):
sp_bun = sp_bun + 1
i = i + 1
#TODO: document the loop
while (i < len(vec_p_to_p)-1):
if (i < len(vec_p_to_p)-10 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+10] == 0):
while (j < 10):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-9 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+9] == 0):
while (j < 9):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-8 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+8] == 0):
while (j < 8):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-7 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+7] == 0):
while (j < 7):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-6 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+6] == 0):
while (j < 6):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-5 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+5] == 0):
while (j < 5):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-4 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+4] == 0):
while (j < 4):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-3 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+3] == 0):
while (j < 3):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-2 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+2] == 0):
while (j < 2):
if (vec_p_to_p[i+j] != 0):
sp_bun = sp_bun +1
j = j + 1
elif (i < len(vec_p_to_p)-1 and \
vec_p_to_p[i-1] == 0 and \
vec_p_to_p[i] != 0 and \
vec_p_to_p[i+1] == 0):
sp_bun = sp_bun +1
j = 1
i = i + j + 1
j = 0
if (vec_p_to_p[len(vec_p_to_p)-1] != 0 and \
vec_p_to_p[len(vec_p_to_p)-2] == 0 ):
sp_bun = sp_bun + 1
return sp_bun
# done original methods of the ph analysis
####
######
#----# auxiliary methods for logging
def info(self,msg):
try:
if self._parent:
self._parent.info_stream(msg)
else:
print("info: %s"%(msg))
except: print("cannot print in info stream (%s)"%msg)
def debug(self,msg):
try:
if self._parent:
self._parent.debug_stream(msg)
else:
print("debug: %s"%(msg))
except: print("cannot print in debug stream (%s)"%msg)
def warn(self,msg):
try:
if self._parent:
self._parent.warn_stream(msg)
else:
print("warn: %s"%(msg))
except: print("cannot print in warn stream (%s)"%msg)
def error(self,msg):
try:
if self._parent:
self._parent.error_stream(msg)
else:
print("error: %s"%(msg))
except: print("cannot print in error stream (%s)"%msg)
# done logging section
######
######
#----# auxiliary methods to manage events
def subscribeHistogram(self):
try:
self._HistogramEvent = self.PhCtDevProxy.subscribe_event(\
self.HistogramAttr,
PyTango.EventType.CHANGE_EVENT,self)
except Exception,e:
self.error("Cannot subscribe to Histogram due to: %s"%(e))
self.info("PhCt proxy type: %s"%(type(self.PhCtDevProxy)))
def unsubscribeHistogram(self):
self.PhCtDevProxy.unsubscribe_event(self._HistogramEvent)
self._parent.change_state(PyTango.DevState.OFF)
# def subscribe_event(self,attrName):
# self._AttrEvent = self.PhCtDevProxy.subscribe_event(attrName,
# PyTango.EventType.CHANGE_EVENT,
# self)
# def unsubscribe_event(self,devName):
# self.PhCtDevProxy.unsubscribe_event(self._AttrEvent)
# self._parent.change_state(PyTango.DevState.OFF)
#---- auxiliary methods to manage events
######
######
#----# auxiliary methods to manage states
def isStandby(self):
if self._parent:
return self._parent.get_state() == PyTango.DevState.STANDBY
return False
def isRunning(self):
if self._parent:
return self._parent.get_state() == PyTango.DevState.RUNNING
return False
def setStandby(self,msg=None):
if self._parent:
self._parent.change_state(PyTango.DevState.STANDBY)
if msg:
self._parent.addStatusMsg("Waiting due to %r" % (msg))
else:
self._parent.addStatusMsg("Waiting...")
def setRunning(self):
if self._parent:
self._parent.change_state(PyTango.DevState.RUNNING)
self._parent.addStatusMsg("Receiving events")
def setFault(self,msg):
if self._parent:
self._parent.change_state(PyTango.DevState.FAULT)
self._parent.addStatusMsg(msg)
#---- auxiliary methods to manage states
######
# Done PhCtBunchAnalyser Class
####
####
# plot methods used when this is called by command line
def plotPhCt(bucket,fil_pat):
from pylab import *
from matplotlib.pyplot import draw, figure, show
f1 = figure()
af1 = f1.add_subplot(111)
af1.plot(bucket, fil_pat)
xlabel('Bucket Number')
ylabel('Current (mA)')
plt.title("Filling Pattern")
# end plot methods
####
def main():
################################# Analysis ################################
FP = PhCtAnalyzer('bl34/di/phct-01')
y = taurus.Attribute('bl34/di/phct-01/Histogram').read().value
bucket,fil_pat = FP.Fil_Pat_Calc(y) #Final output
plotPhCt(bucket,fil_pat)
################################# Output ##################################
show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
airanmehr/bio
|
Scripts/HLI/Kyrgyz/IBD.py
|
1
|
3336
|
import os
import matplotlib as mpl
import pandas as pd;
import numpy as np;
import seaborn as sns
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
from matplotlib.backends.backend_pdf import PdfPages
pd.options.display.max_rows = 50;
pd.options.display.expand_frame_repr = False
import pylab as plt;
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Utils.Estimate as est
import Utils.Plots as pplt
import matplotlib as mpl
mpl.rcParams['text.usetex']=False
path='~/storage/Data/Human/Kyrgyz/share/plink/unphased/'
def load(name=None,f=None):
if f is None:f=path+name
a=pd.read_csv(f,sep=' ')
c=a.columns;c=c[map(lambda x: 'Unnamed' not in x and 'EZ' not in x,c)]
a=a.dropna(1,how='all')
a.columns=c
a=a.set_index(['IID1', 'IID2']).Z2
return pd.concat([a,a.reorder_levels([1,0])]).unstack()
def IBD(CHROM=2):
a=load('chr21.ibd.genome'.format(CHROM))
pop=pd.read_pickle(utl.home+'storage/Data/Human/Kyrgyz/share/info/pop.df').set_index('hap').xs('A')
pop['name']=pop.SampleName.apply(lambda x: x.split('-')[0])
a.index=pop.set_index('SampleID').name.loc[a.index]
a.columns=pop.set_index('SampleID').name.loc[a.columns]
I=pop.sort_values('GroupName').name.values
a=a.loc[I,I]
plt.figure(dpi=150);g=sns.heatmap(a);g.set_xticklabels(g.get_xticklabels(),rotation=90);g.set_yticklabels(g.get_yticklabels(),rotation=0)
tt = (pop.sort_values('GroupName').groupby('GroupName').size().cumsum() - 6)/10
#plt.twiny();plt.yticks(tt.tolist(),tt.index);
l=pop.sort_values('GroupName').groupby('GroupName').size().cumsum().iloc[:-1].tolist()
for x in l:[plt.axvline(x),plt.axhline(x)]
plt.twiny();
plt.xticks(tt.tolist(), list(tt.index))
plt.title('IBD: HA11 is contaminated by HK7 (from control)', y=1.08)
plt.figure(dpi=150);g=sns.heatmap(a[a<0.9]);g.set_xticklabels(g.get_xticklabels(),rotation=90);g.set_yticklabels(g.get_yticklabels(),rotation=0)
for x in l:[plt.axvline(x),plt.axhline(x)]
plt.twiny();
plt.xticks(tt.tolist(), list(tt.index))
plt.title('IBD: Ignoring HA11')
pplt.Manhattan()
plt.figure(dpi=150);
g = sns.heatmap(a[a < 0.9].loc[['HA11','HK7']]);
g.set_xticklabels(g.get_xticklabels(), rotation=90);
g.set_yticklabels(g.get_yticklabels(), rotation=0)
for x in l: [plt.axvline(x), plt.axhline(1)]
plt.twiny();
plt.xticks(tt.tolist(), list(tt.index))
plt.title('IBD: HK7 and HA11 with rest', y=1.08)
b=utl.pca(a.fillna(1),2)
plt.figure(dpi=150)
b.plot.scatter(x=0,s=100,y=1,ax=plt.gca())
for i, txt in enumerate(['HA11','HK7']):plt.annotate(txt, (b.loc[txt,0],b.loc[txt,1]),fontsize=18);plt.xlabel('PC1');plt.xlabel('PC2')
plt.title('PCA of IBD: HK7 and HA11 with rest', y=1.08)
b = utl.pca(a.drop('HA11').drop('HA11',1).fillna(1), 2);b=b.join(pop.set_index('name').GroupName)
#b.plot.scatter(x=0, s=100, y=1, ax=plt.gca())
b.columns=['PC1','PC2','POP']
sns.lmplot( 'PC1','PC2',fit_reg=False,data=b,hue='POP',scatter_kws={"marker": "D","s": 100},size=10)
#for i, txt in enumerate([ 'HK7']): plt.annotate(txt, (b.loc[txt, 'PC1'], b.loc[txt, ]), fontsize=18);
plt.title('PCA of IBD: Ignoring HA11 ', y=1.08)
plt.show()
import Scripts.HLI.Kyrgyz.IBSScan.IBDScan
|
mit
|
laurensdeprez/RMPCDMD
|
experiments/01-single-dimer/plot_msd.py
|
1
|
1627
|
#!/usr/bin/env python
from __future__ import print_function, division
import argparse
description = "Plot the mean square displacement of the dimer's center of mass."
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file', type=str, help='H5MD datafile', nargs='+')
args = parser.parse_args()
import numpy as np
import h5py
import matplotlib.pyplot as plt
msd_data = []
vz_data = []
for filename in args.file:
with h5py.File(filename, 'r') as f:
r = f['particles/dimer/position/value'][:,:,:]
r0 = f['particles/dimer/position/value'][0,:,:]
r_dt = f['particles/dimer/position/time'][()]
im = f['particles/dimer/image/value'][:,:,:]
v = f['particles/dimer/velocity/value'][...]
v_dt = f['particles/dimer/velocity/time'][()]
edges = f['particles/dimer/box/edges'][:].reshape((1,-1))
assert abs(r_dt-v_dt) < 1e-12
assert r.shape[1]==2
assert r.shape[2]==3
r += im*edges
v_com = v.mean(axis=1)
unit_z = r[:,1,:]-r[:,0,:]
unit_z /= np.sqrt(np.sum(unit_z**2, axis=1)).reshape((-1,1))
vz = np.sum(v_com*unit_z, axis=1)
vz_data.append(vz.mean())
r -= r0.reshape((1,2,3))
msd_data.append(np.sum(np.mean(r,axis=1)**2, axis=1))
msd_data = np.array(msd_data)
m = msd_data.mean(axis=0)
s = msd_data.std(axis=0)
vz = np.mean(vz_data, axis=0)
time = np.arange(r.shape[0])*r_dt
plt.plot(time, m, 'k-')
plt.plot(time, m+s, 'k--')
plt.plot(time, m-s, 'k--')
D = np.mean(m[1:]/time[1:])/6
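# 3-D Einstein relation: MSD(t) = 6*D*t, so D is estimated from the mean slope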
plt.plot(time, 6*D*time, 'k:')
print("Estimated D_eff", D)
plt.show()
|
bsd-3-clause
|
christopher-gillies/MultiplePhenotypeAssociationBayesianNetwork
|
tests/test_normal.py
|
1
|
2226
|
from .context import mpabn
from mpabn import bayesian_network as bn
import numpy as np
from scipy import stats
import pandas as pd
from mpabn import helpers
from scipy.stats import norm
np.random.seed(0)
"""
py.test -s tests/test_normal.py
"""
def test_prob():
node = bn.LinearGaussianNode("X1")
#set intercept to b 1 and variance to be 1
node.set_params([1],10)
p_x = np.exp(node.prob({"X1":2}))
np.testing.assert_almost_equal(p_x,stats.norm.pdf(2,loc=1,scale=10))
def test_prob_2():
x1 = bn.BinaryNode("X1")
x1.set_params(0.5)
x2 = bn.BinaryNode("X2")
x2.set_params(0.5)
x3 = bn.LinearGaussianNode("X3")
x3.add_parent(x1)
x3.add_parent(x2)
#set intercept to b 1 and variance to be 1
x3.set_params([1,2,3],2)
p_x = np.exp(x3.prob({"X3":6,"X1":1,"X2":1}))
np.testing.assert_almost_equal(p_x,stats.norm.pdf(6,loc=6,scale=2))
def test_simulate():
x1 = bn.BinaryNode("X1")
x1.set_params(0.5)
x2 = bn.BinaryNode("X2")
x2.set_params(0.5)
x3 = bn.LinearGaussianNode("X3")
x3.add_parent(x1)
x3.add_parent(x2)
#set intercept to b 1 and variance to be 1
x3.set_params([1,2,3],2)
n = 20000
sample = np.zeros(n)
for i in range(0,n):
sample[i] = x3.simulate({"X1":1,"X2":1})
print np.mean(sample)
assert np.abs(np.mean(sample) - 6) < 0.02
def test_forward_sample_and_mle():
x1 = bn.BinaryNode("X1")
x1.set_params(0.5)
x2 = bn.BinaryNode("X2")
x2.set_params(0.5)
x3 = bn.LinearGaussianNode("X3")
x3.add_parent(x1)
x3.add_parent(x2)
#set intercept to b 1 and variance to be 1
x3.set_params([1,2,3],2)
network = bn.BayesianNetwork()
network.set_nodes([x1,x2,x3])
sample = network.forward_sample(10000)
print np.mean(sample["X3"])
assert np.abs(np.mean(sample["X3"]) - 14/4.0) < 0.05
network.mle(sample)
print x3.get_params()
betas, std_dev = x3.get_params()
assert np.abs(betas[0] - 1) < 0.1
assert np.abs(betas[1] - 2) < 0.1
assert np.abs(betas[2] - 3) < 0.1
assert np.abs(std_dev - 2) < 0.1
def test_gaussian_node():
x1 = bn.GaussianNode("X1")
x1.set_params(4.0,5.0)
np.testing.assert_almost_equal(x1.prob({"X1":1},log=False),norm.pdf(1,loc=4,scale=5))
np.testing.assert_almost_equal(x1.prob_easy(1),norm.pdf(1,loc=4,scale=5))
|
mit
|
chatcannon/scipy
|
scipy/interpolate/ndgriddata.py
|
39
|
7457
|
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
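# Hedged usage sketch (not part of the original module): interpolates four
# scattered 2-D samples at a query point with NearestNDInterpolator.
def _example_nearest_nd_interpolator():
    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    values = np.array([0.0, 1.0, 2.0, 3.0])
    interp = NearestNDInterpolator(points, values)
    # (0.9, 0.1) is closest to the sample at (1, 0), whose value is 1.0
    return interp(np.array([[0.9, 0.1]]))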
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
|
bsd-3-clause
|
ChanChiChoi/scikit-learn
|
sklearn/neural_network/rbm.py
|
206
|
12292
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
                order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
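# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of this module): it assumes the
# class above ships as sklearn.neural_network.BernoulliRBM and uses made-up
# binary toy data.
#
#     import numpy as np
#     from sklearn.neural_network import BernoulliRBM
#
#     rng = np.random.RandomState(0)
#     X = (rng.uniform(size=(100, 16)) > 0.5).astype(np.float64)  # binary visibles
#     rbm = BernoulliRBM(n_components=8, learning_rate=0.05, n_iter=5,
#                        random_state=0)
#     rbm.fit(X)                          # SML/PCD training, one _fit() per mini-batch
#     H = rbm.transform(X)                # P(h=1|v), shape (100, 8)
#     v_new = rbm.gibbs(X)                # one Gibbs step v -> h -> v
#     print(rbm.score_samples(X).mean())  # pseudo-likelihood proxy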
|
bsd-3-clause
|
dblalock/bolt
|
experiments/python/datasets/caltech.py
|
1
|
2804
|
#!/bin/env python
# from __future__ import absolute_import, division, print_function
from __future__ import division, print_function
import numpy as np
from . import paths
from . import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR_101 = paths.CALTECH_101
DATADIR_256 = paths.CALTECH_256
# _DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center', verbose=2)
_DEFAULT_CALTECH_KWARGS = dict(resample=(224, 224), crop='center')
_CALTECH_101_KWARGS = dict(
dirpath=DATADIR_101, remove_classes='BACKGROUND_Google')
_CALTECH_256_KWARGS = dict(
dirpath=DATADIR_256, remove_classes='257.clutter')
@_memory.cache
def load_caltech101(**kwargs):
[kwargs.setdefault(*item) for item in _DEFAULT_CALTECH_KWARGS.items()]
return imgs.load_jpegs_from_dir(**_CALTECH_101_KWARGS, **kwargs)
@_memory.cache
def load_caltech256(**kwargs):
[kwargs.setdefault(*item) for item in _DEFAULT_CALTECH_KWARGS.items()]
return imgs.load_jpegs_from_dir(**_CALTECH_256_KWARGS, **kwargs)
@_memory.cache
def load_caltech101_ids(**kwargs):
return imgs.load_jpegs_from_dir(
**_CALTECH_101_KWARGS, only_return_path=True, **kwargs)
@_memory.cache
def load_caltech256_ids(**kwargs):
return imgs.load_jpegs_from_dir(
**_CALTECH_256_KWARGS, only_return_path=True, **kwargs)
# @_memory.cache
def load_caltech_img(img_id, **kwargs):
[kwargs.setdefault(*item) for item in _DEFAULT_CALTECH_KWARGS.items()]
path = img_id # load_jpegs_from_dir returns abs path as id
return imgs.load_jpg(path, **kwargs).astype(np.float32)
# img = imgs.load_jpg(path, **kwargs).astype(np.float32)
# print("img.shape", img.shape)
# assert img.shape[:2] == (224, 224)
# return img
def main():
import matplotlib.pyplot as plt
# caltech 101
(X, y), label2cls = imgs.load_jpegs_from_dir(
# DATADIR_101, remove_classes='BACKGROUND_Google')
# DATADIR_101, remove_classes='BACKGROUND_Google', crop='center')
DATADIR_101, remove_classes='BACKGROUND_Google', pad='square')
# # DATADIR_101, remove_classes='BACKGROUND_Google', resample=(224, 224))
# caltech 256
# (X, y), label2cls = imgs.load_jpegs_from_dir(
# DATADIR_256, remove_classes='257.clutter', verbose=2)
if isinstance(X, np.ndarray):
print("X shape: ", X.shape)
else:
print("X is a list of length", len(X))
print("X[0] has shape: ", X[0].shape)
print("y shape: ", y.shape)
_, axes = plt.subplots(4, 4, figsize=(9, 9))
for i, ax in enumerate(axes.ravel()):
idx = np.random.choice(len(X))
ax.imshow(X[idx])
label = label2cls[y[idx]]
ax.set_title(label)
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
mpl-2.0
|
webmasterraj/GaSiProMo
|
flask/lib/python2.7/site-packages/pandas/tools/rplot.py
|
4
|
29150
|
import random
import warnings
from copy import deepcopy
from pandas.core.common import _values_from_object
import numpy as np
from pandas.compat import range, zip
#
# TODO:
# * Make sure legends work properly
#
warnings.warn("\n"
"The rplot trellis plotting interface is deprecated and will be "
"removed in a future version. We refer to external packages "
"like seaborn for similar but more refined functionality. \n\n"
"See our docs http://pandas.pydata.org/pandas-docs/stable/visualization.html#rplot "
"for some example how to convert your existing code to these "
"packages.", FutureWarning)
class Scale:
"""
Base class for mapping between graphical and data attributes.
"""
pass
class ScaleGradient(Scale):
"""
A mapping between a data attribute value and a
point in colour space between two specified colours.
"""
def __init__(self, column, colour1, colour2):
"""Initialize ScaleGradient instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
        A three element tuple representing an RGB colour somewhere between colour1 and colour2
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
x_scaled = (x - a) / (b - a)
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
class ScaleGradient2(Scale):
"""
Create a mapping between a data attribute value and a
point in colour space in a line of three specified colours.
"""
def __init__(self, column, colour1, colour2, colour3):
"""Initialize ScaleGradient2 instance.
Parameters:
-----------
column: string, pandas DataFrame column name
colour1: tuple, 3 element tuple with float values representing an RGB colour
colour2: tuple, 3 element tuple with float values representing an RGB colour
colour3: tuple, 3 element tuple with float values representing an RGB colour
"""
self.column = column
self.colour1 = colour1
self.colour2 = colour2
self.colour3 = colour3
self.categorical = False
def __call__(self, data, index):
"""Return a colour corresponding to data attribute value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
        A three element tuple representing an RGB colour somewhere along the line
of colour1, colour2 and colour3
"""
x = data[self.column].iget(index)
a = min(data[self.column])
b = max(data[self.column])
r1, g1, b1 = self.colour1
r2, g2, b2 = self.colour2
r3, g3, b3 = self.colour3
x_scaled = (x - a) / (b - a)
if x_scaled < 0.5:
x_scaled *= 2.0
return (r1 + (r2 - r1) * x_scaled,
g1 + (g2 - g1) * x_scaled,
b1 + (b2 - b1) * x_scaled)
else:
x_scaled = (x_scaled - 0.5) * 2.0
return (r2 + (r3 - r2) * x_scaled,
g2 + (g3 - g2) * x_scaled,
b2 + (b3 - b2) * x_scaled)
class ScaleSize(Scale):
"""
Provide a mapping between a DataFrame column and matplotlib
scatter plot shape size.
"""
def __init__(self, column, min_size=5.0, max_size=100.0, transform=lambda x: x):
"""Initialize ScaleSize instance.
Parameters:
-----------
column: string, a column name
min_size: float, minimum point size
max_size: float, maximum point size
transform: a one argument function of form float -> float (e.g. lambda x: log(x))
"""
self.column = column
self.min_size = min_size
self.max_size = max_size
self.transform = transform
self.categorical = False
def __call__(self, data, index):
"""Return matplotlib scatter plot marker shape size.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
x = data[self.column].iget(index)
a = float(min(data[self.column]))
b = float(max(data[self.column]))
return self.transform(self.min_size + ((x - a) / (b - a)) *
(self.max_size - self.min_size))
class ScaleShape(Scale):
"""
Provides a mapping between matplotlib marker shapes
and attribute values.
"""
def __init__(self, column):
"""Initialize ScaleShape instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.shapes = ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x']
self.legends = set([])
self.categorical = True
def __call__(self, data, index):
"""Returns a matplotlib marker identifier.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
a matplotlib marker identifier
"""
values = sorted(list(set(data[self.column])))
if len(values) > len(self.shapes):
raise ValueError("Too many different values of the categorical attribute for ScaleShape")
x = data[self.column].iget(index)
return self.shapes[values.index(x)]
class ScaleRandomColour(Scale):
"""
Maps a random colour to a DataFrame attribute.
"""
def __init__(self, column):
"""Initialize ScaleRandomColour instance.
Parameters:
-----------
column: string, pandas DataFrame column name
"""
self.column = column
self.categorical = True
def __call__(self, data, index):
"""Return a tuple of three floats, representing
an RGB colour.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
"""
random.seed(data[self.column].iget(index))
return [random.random() for _ in range(3)]
class ScaleConstant(Scale):
"""
Constant returning scale. Usually used automatically.
"""
def __init__(self, value):
"""Initialize ScaleConstant instance.
Parameters:
-----------
value: any Python value to be returned when called
"""
self.value = value
self.categorical = False
def __call__(self, data, index):
"""Return the constant value.
Parameters:
-----------
data: pandas DataFrame
index: pandas DataFrame row index
Returns:
--------
A constant value specified during initialisation
"""
return self.value
def default_aes(x=None, y=None):
"""Create the default aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
Returns:
--------
a dictionary with aesthetics bindings
"""
return {
'x' : x,
'y' : y,
'size' : ScaleConstant(40.0),
'colour' : ScaleConstant('grey'),
'shape' : ScaleConstant('o'),
'alpha' : ScaleConstant(1.0),
}
def make_aes(x=None, y=None, size=None, colour=None, shape=None, alpha=None):
"""Create an empty aesthetics dictionary.
Parameters:
-----------
x: string, DataFrame column name
y: string, DataFrame column name
size: function, binding for size attribute of Geoms
colour: function, binding for colour attribute of Geoms
shape: function, binding for shape attribute of Geoms
alpha: function, binding for alpha attribute of Geoms
Returns:
--------
a dictionary with aesthetics bindings
"""
if not hasattr(size, '__call__') and size is not None:
size = ScaleConstant(size)
if not hasattr(colour, '__call__') and colour is not None:
colour = ScaleConstant(colour)
if not hasattr(shape, '__call__') and shape is not None:
shape = ScaleConstant(shape)
if not hasattr(alpha, '__call__') and alpha is not None:
alpha = ScaleConstant(alpha)
if any([isinstance(size, scale) for scale in [ScaleConstant, ScaleSize]]) or size is None:
pass
else:
raise ValueError('size mapping should be done through ScaleConstant or ScaleSize')
if any([isinstance(colour, scale) for scale in [ScaleConstant, ScaleGradient, ScaleGradient2, ScaleRandomColour]]) or colour is None:
pass
else:
raise ValueError('colour mapping should be done through ScaleConstant, ScaleRandomColour, ScaleGradient or ScaleGradient2')
if any([isinstance(shape, scale) for scale in [ScaleConstant, ScaleShape]]) or shape is None:
pass
else:
raise ValueError('shape mapping should be done through ScaleConstant or ScaleShape')
if any([isinstance(alpha, scale) for scale in [ScaleConstant]]) or alpha is None:
pass
else:
raise ValueError('alpha mapping should be done through ScaleConstant')
return {
'x' : x,
'y' : y,
'size' : size,
'colour' : colour,
'shape' : shape,
'alpha' : alpha,
}
class Layer:
"""
Layer object representing a single plot layer.
"""
def __init__(self, data=None, **kwds):
"""Initialize layer object.
Parameters:
-----------
data: pandas DataFrame instance
aes: aesthetics dictionary with bindings
"""
self.data = data
self.aes = make_aes(**kwds)
self.legend = {}
def work(self, fig=None, ax=None):
"""Do the drawing (usually) work.
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis object
Returns:
--------
a tuple with the same figure and axis instances
"""
return fig, ax
class GeomPoint(Layer):
def work(self, fig=None, ax=None):
"""Render the layer on a matplotlib axis.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
for index in range(len(self.data)):
row = self.data.irow(index)
x = row[self.aes['x']]
y = row[self.aes['y']]
size_scaler = self.aes['size']
colour_scaler = self.aes['colour']
shape_scaler = self.aes['shape']
alpha = self.aes['alpha']
size_value = size_scaler(self.data, index)
colour_value = colour_scaler(self.data, index)
marker_value = shape_scaler(self.data, index)
alpha_value = alpha(self.data, index)
patch = ax.scatter(x, y,
s=size_value,
c=colour_value,
marker=marker_value,
alpha=alpha_value)
label = []
if colour_scaler.categorical:
label += [colour_scaler.column, row[colour_scaler.column]]
if shape_scaler.categorical:
label += [shape_scaler.column, row[shape_scaler.column]]
self.legend[tuple(label)] = patch
ax.set_xlabel(self.aes['x'])
ax.set_ylabel(self.aes['y'])
return fig, ax
class GeomPolyFit(Layer):
"""
Draw a polynomial fit of specified degree.
"""
def __init__(self, degree, lw=2.0, colour='grey'):
"""Initialize GeomPolyFit object.
Parameters:
-----------
degree: an integer, polynomial degree
lw: line width
colour: matplotlib colour
"""
self.degree = degree
self.lw = lw
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw the polynomial fit on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from numpy.polynomial.polynomial import polyfit
from numpy.polynomial.polynomial import polyval
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
min_x = min(x)
max_x = max(x)
c = polyfit(x, y, self.degree)
x_ = np.linspace(min_x, max_x, len(x))
y_ = polyval(x_, c)
ax.plot(x_, y_, lw=self.lw, c=self.colour)
return fig, ax
class GeomScatter(Layer):
"""
An efficient scatter plot, use this instead of GeomPoint for speed.
"""
def __init__(self, marker='o', colour='lightblue', alpha=1.0):
"""Initialize GeomScatter instance.
Parameters:
-----------
marker: matplotlib marker string
colour: matplotlib colour
alpha: matplotlib alpha
"""
self.marker = marker
self.colour = colour
self.alpha = alpha
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a scatter plot on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
ax.scatter(x, y, marker=self.marker, c=self.colour, alpha=self.alpha)
return fig, ax
class GeomHistogram(Layer):
"""
An efficient histogram, use this instead of GeomBar for speed.
"""
def __init__(self, bins=10, colour='lightblue'):
"""Initialize GeomHistogram instance.
Parameters:
-----------
bins: integer, number of histogram bins
colour: matplotlib colour
"""
self.bins = bins
self.colour = colour
Layer.__init__(self)
def work(self, fig=None, ax=None):
"""Draw a histogram on matplotlib figure or axis
Parameters:
-----------
fig: matplotlib figure
ax: matplotlib axis
Returns:
--------
a tuple with figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
ax.hist(_values_from_object(x), self.bins, facecolor=self.colour)
ax.set_xlabel(self.aes['x'])
return fig, ax
class GeomDensity(Layer):
"""
A kernel density estimation plot.
"""
def work(self, fig=None, ax=None):
"""Draw a one dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
from scipy.stats import gaussian_kde
x = self.data[self.aes['x']]
gkde = gaussian_kde(x)
ind = np.linspace(x.min(), x.max(), 200)
ax.plot(ind, gkde.evaluate(ind))
return fig, ax
class GeomDensity2D(Layer):
def work(self, fig=None, ax=None):
"""Draw a two dimensional kernel density plot.
You can specify either a figure or an axis to draw on.
Parameters:
-----------
fig: matplotlib figure object
ax: matplotlib axis object to draw on
Returns:
--------
fig, ax: matplotlib figure and axis objects
"""
if ax is None:
if fig is None:
return fig, ax
else:
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
rvs = np.array([x, y])
x_min = x.min()
x_max = x.max()
y_min = y.min()
y_max = y.max()
X, Y = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
import scipy.stats as stats
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(Z, extent=[x_min, x_max, y_min, y_max])
return fig, ax
class TrellisGrid(Layer):
def __init__(self, by):
"""Initialize TreelisGrid instance.
Parameters:
-----------
by: column names to group by
"""
if len(by) != 2:
raise ValueError("You must give a list of length 2 to group by")
elif by[0] == '.' and by[1] == '.':
raise ValueError("At least one of grouping attributes must be not a dot")
self.by = by
def trellis(self, layers):
"""Create a trellis structure for a list of layers.
Each layer will be cloned with different data in to a two dimensional grid.
Parameters:
-----------
layers: a list of Layer objects
Returns:
--------
        trellised_layers: Clones of each layer in the list arranged in a trellised lattice
"""
trellised_layers = []
for layer in layers:
data = layer.data
if self.by[0] == '.':
grouped = data.groupby(self.by[1])
elif self.by[1] == '.':
grouped = data.groupby(self.by[0])
else:
grouped = data.groupby(self.by)
groups = list(grouped.groups.keys())
if self.by[0] == '.' or self.by[1] == '.':
shingle1 = set([g for g in groups])
else:
shingle1 = set([g[0] for g in groups])
shingle2 = set([g[1] for g in groups])
if self.by[0] == '.':
self.rows = 1
self.cols = len(shingle1)
elif self.by[1] == '.':
self.rows = len(shingle1)
self.cols = 1
else:
self.rows = len(shingle1)
self.cols = len(shingle2)
trellised = [[None for _ in range(self.cols)] for _ in range(self.rows)]
self.group_grid = [[None for _ in range(self.cols)] for _ in range(self.rows)]
row = 0
col = 0
for group, data in grouped:
new_layer = deepcopy(layer)
new_layer.data = data
trellised[row][col] = new_layer
self.group_grid[row][col] = group
col += 1
if col >= self.cols:
col = 0
row += 1
trellised_layers.append(trellised)
return trellised_layers
def dictionary_union(dict1, dict2):
"""Take two dictionaries, return dictionary union.
Parameters:
-----------
dict1: Python dictionary
dict2: Python dictionary
Returns:
--------
A union of the dictionaries. It assumes that values
with the same keys are identical.
"""
keys1 = list(dict1.keys())
keys2 = list(dict2.keys())
result = {}
for key1 in keys1:
result[key1] = dict1[key1]
for key2 in keys2:
result[key2] = dict2[key2]
return result
def merge_aes(layer1, layer2):
"""Merges the aesthetics dictionaries for the two layers.
Look up sequence_layers function. Which layer is first and which
one is second is important.
Parameters:
-----------
layer1: Layer object
layer2: Layer object
"""
for key in layer2.aes.keys():
if layer2.aes[key] is None:
layer2.aes[key] = layer1.aes[key]
def sequence_layers(layers):
"""Go through the list of layers and fill in the missing bits of information.
The basic rules are this:
* If the current layer has data set to None, take the data from previous layer.
* For each aesthetic mapping, if that mapping is set to None, take it from previous layer.
Parameters:
-----------
layers: a list of Layer objects
"""
for layer1, layer2 in zip(layers[:-1], layers[1:]):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layers
def sequence_grids(layer_grids):
"""Go through the list of layer girds and perform the same thing as sequence_layers.
Parameters:
-----------
layer_grids: a list of two dimensional layer grids
"""
for grid1, grid2 in zip(layer_grids[:-1], layer_grids[1:]):
for row1, row2 in zip(grid1, grid2):
for layer1, layer2 in zip(row1, row2):
if layer2.data is None:
layer2.data = layer1.data
merge_aes(layer1, layer2)
return layer_grids
def work_grid(grid, fig):
"""Take a two dimensional grid, add subplots to a figure for each cell and do layer work.
Parameters:
-----------
grid: a two dimensional grid of layers
fig: matplotlib figure to draw on
Returns:
--------
axes: a two dimensional list of matplotlib axes
"""
nrows = len(grid)
ncols = len(grid[0])
axes = [[None for _ in range(ncols)] for _ in range(nrows)]
for row in range(nrows):
for col in range(ncols):
axes[row][col] = fig.add_subplot(nrows, ncols, ncols * row + col + 1)
grid[row][col].work(ax=axes[row][col])
return axes
def adjust_subplots(fig, axes, trellis, layers):
"""Adjust the subtplots on matplotlib figure with the
fact that we have a trellis plot in mind.
Parameters:
-----------
fig: matplotlib figure
axes: a two dimensional grid of matplotlib axes
trellis: TrellisGrid object
layers: last grid of layers in the plot
"""
# Flatten the axes grid
axes = [ax for row in axes for ax in row]
min_x = min([ax.get_xlim()[0] for ax in axes])
max_x = max([ax.get_xlim()[1] for ax in axes])
min_y = min([ax.get_ylim()[0] for ax in axes])
max_y = max([ax.get_ylim()[1] for ax in axes])
[ax.set_xlim(min_x, max_x) for ax in axes]
[ax.set_ylim(min_y, max_y) for ax in axes]
for index, axis in enumerate(axes):
if index % trellis.cols == 0:
pass
else:
axis.get_yaxis().set_ticks([])
axis.set_ylabel('')
        if index // trellis.cols == trellis.rows - 1:
pass
else:
axis.get_xaxis().set_ticks([])
axis.set_xlabel('')
if trellis.by[0] == '.':
label1 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
elif trellis.by[1] == '.':
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols])
label2 = None
else:
label1 = "%s = %s" % (trellis.by[0], trellis.group_grid[index // trellis.cols][index % trellis.cols][0])
label2 = "%s = %s" % (trellis.by[1], trellis.group_grid[index // trellis.cols][index % trellis.cols][1])
if label2 is not None:
axis.table(cellText=[[label1], [label2]],
loc='top', cellLoc='center',
cellColours=[['lightgrey'], ['lightgrey']])
else:
axis.table(cellText=[[label1]], loc='top', cellLoc='center', cellColours=[['lightgrey']])
# Flatten the layer grid
layers = [layer for row in layers for layer in row]
legend = {}
for layer in layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
fig.subplots_adjust(wspace=0.05, hspace=0.2)
class RPlot:
"""
The main plot object. Add layers to an instance of this object to create a plot.
"""
def __init__(self, data, x=None, y=None):
"""Initialize RPlot instance.
Parameters:
-----------
data: pandas DataFrame instance
x: string, DataFrame column name
y: string, DataFrame column name
"""
self.layers = [Layer(data, **default_aes(x=x, y=y))]
        self.trellised = False
def add(self, layer):
"""Add a layer to RPlot instance.
Parameters:
-----------
layer: Layer instance
"""
if not isinstance(layer, Layer):
raise TypeError("The operand on the right side of + must be a Layer instance")
self.layers.append(layer)
def render(self, fig=None):
"""Render all the layers on a matplotlib figure.
Parameters:
-----------
fig: matplotlib figure
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
# Look for the last TrellisGrid instance in the layer list
last_trellis = None
for layer in self.layers:
if isinstance(layer, TrellisGrid):
last_trellis = layer
if last_trellis is None:
# We have a simple, non-trellised plot
new_layers = sequence_layers(self.layers)
for layer in new_layers:
layer.work(fig=fig)
legend = {}
for layer in new_layers:
legend = dictionary_union(legend, layer.legend)
patches = []
labels = []
if len(list(legend.keys())) == 0:
key_function = lambda tup: tup
elif len(list(legend.keys())[0]) == 2:
key_function = lambda tup: (tup[1])
else:
key_function = lambda tup: (tup[1], tup[3])
for key in sorted(list(legend.keys()), key=key_function):
value = legend[key]
patches.append(value)
if len(key) == 2:
col, val = key
labels.append("%s" % str(val))
elif len(key) == 4:
col1, val1, col2, val2 = key
labels.append("%s, %s" % (str(val1), str(val2)))
else:
raise ValueError("Maximum 2 categorical attributes to display a lengend of")
if len(legend):
fig.legend(patches, labels, loc='upper right')
else:
# We have a trellised plot.
# First let's remove all other TrellisGrid instances from the layer list,
# including this one.
new_layers = []
for layer in self.layers:
if not isinstance(layer, TrellisGrid):
new_layers.append(layer)
new_layers = sequence_layers(new_layers)
# Now replace the old layers by their trellised versions
new_layers = last_trellis.trellis(new_layers)
# Prepare the subplots and draw on them
new_layers = sequence_grids(new_layers)
axes_grids = [work_grid(grid, fig) for grid in new_layers]
axes_grid = axes_grids[-1]
adjust_subplots(fig, axes_grid, last_trellis, new_layers[-1])
# And we're done
return fig
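# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; 'tips_data' and its column names are
# assumptions, not part of this module): stack layers on an RPlot and render
# a trellised scatter plot on the current matplotlib figure.
#
#     import matplotlib.pyplot as plt
#     import pandas.tools.rplot as rplot
#
#     plot = rplot.RPlot(tips_data, x='total_bill', y='tip')
#     plot.add(rplot.TrellisGrid(['sex', 'smoker']))       # 2 x 2 grid of panels
#     plot.add(rplot.GeomPoint(size=rplot.ScaleSize('size'),
#                              colour=rplot.ScaleRandomColour('day')))
#     fig = plot.render(plt.gcf())
#     plt.show()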
|
gpl-2.0
|
gawrysz/piernik
|
python/interactive_plot_crs.py
|
3
|
29896
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from colored_io import die, prtinfo, prtwarn, read_var
from copy import copy
from crs_h5 import crs_initialize, crs_plot_main, crs_plot_main_fpq
from crs_pf import initialize_pf_arrays
from math import isnan, pi
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from numpy import array as np_array, log, log10, mean, rot90
from os import getcwd, makedirs, path
from optparse import OptionParser
from re import search
from read_h5 import read_par, input_names_array
from sys import argv, version
from warnings import simplefilter
try:
import yt
from yt.units import dimensions
except:
die("You must make yt available somehow")
if (version[0:3] != "2.7"):
raw_input = input
not_py27 = True
else:
not_py27 = False
# ------- Parse arguments
parser = OptionParser("Usage: %prog FILE [options] [args] or %prog [options] [args] -F FILENAME")
parser.add_option("-F", "--file", dest="filename", default="None", help=u"File to use", type="str")
parser.add_option("-v", "--var", dest="var_name", default="e", help=u"Variable to plot the spectrum (default: e)")
parser.add_option("-f", "--field", dest="fieldname", default="cree_tot", help=u"DS fieldname to image (default:cree_tot)")
parser.add_option("-z", "--zlim", dest="plot_range", default=("x", "y"), help=u"Plot image with this range", nargs=2, metavar="ZMIN ZMAX")
parser.add_option("-s", "--slice", dest="slice_info", default=("a", "Nan"), help=u"DS slice coords to image (default:cree_tot)", metavar="AX COORDINATE", nargs=2)
parser.add_option("-d", "--def", dest="default_range", default=False, help=u"Use min/max on yt.ds(fieldname) for clickable image", action="store_true")
parser.add_option("-l", "--lin", dest="use_linscale", default=False, help=u"Use linear scale for clickable plot (default: log)", action="store_true")
parser.add_option("-V", "--vel", dest="plot_vel", default=False, help=u"Plot velocity field vectors ", action="store_true")
parser.add_option("-m", "--mag", dest="plot_mag", default=False, help=u"Plot magnetic field vectors ", action="store_true")
parser.add_option("-t", "--time", dest="annotate_time", default=False, help=u"Annotate time on resulting DS plot", action="store_true")
parser.add_option("", "--nosave", dest="not_save_spec", default=False, help=u"Do not save output spectrum ", action="store_true")
parser.add_option("-a", "--average", "--avg", dest="avg_layer", default=False, help=u"Plot mean spectrum at pointed layer at ordinate axis", action="store_true")
parser.add_option("-O", "--overlap", dest="overlap_layer", default=False, help=u"Overlap all spectra at pointed layer at ordinate axis", action="store_true")
parser.add_option("", "--verbose", dest="yt_verbose", default=40, help=u"Append yt verbosity (value from 10 [high] to 50 [low])")
parser.add_option("-q", "--quiet", dest="py_quiet", default=False, help=u"Suppress ALL python warnings (not advised)", action="store_true")
parser.add_option("-c", "--coords", dest="coords_dflt", default=("x", "y", "z"), help=u"Provides coordinates for the first plot (non-clickable)", nargs=3, metavar="xc yc zc")
parser.add_option("-k", "--keep", dest="clean_plot", default=True, help=u"Keep spectrum plot (do not clean spectrum field)", action="store_false")
parser.add_option("", "--fontsize", dest="fontsize", default=18, help=u"Set fontsize for SlicePlot (default is 18)")
parser.add_option("", "--noxlabels", dest="no_xlabels", default=False, help=u"Do not show labels of X axis on resulting SlicePlot", action="store_true")
parser.add_option("", "--noylabels", dest="no_ylabels", default=False, help=u"Do not show labels of Y axis on resulting SlicePlot", action="store_true")
parser.add_option("", "--nocbar", dest="no_cbar", default=False, help=u"Do not show colorbar on the resulting SlicePlot", action="store_true")
parser.add_option("", "--noaxes", dest="no_axes", default=False, help=u"Hide axes on the spectrum plot (useful when combining spectra)", action="store_true")
parser.add_option("", "--rectangle", dest="annotate_rect", default=False, help=u"Annotate and average over width/height rectangle surface", action="store_true")
parser.add_option("", "--width", dest="usr_width", default=0., help=u"Set custom frb width")
parser.add_option("", "--height", dest="usr_height", default=0., help=u"Set custom frb width")
parser.add_option("", "--center", dest="usr_center", default=(0., 0.), help=u"Set custom frb center", nargs=2, metavar="XC YC")
(options, args) = parser.parse_args(argv[1:]) # argv[1] is filename
yt.mylog.setLevel(int(options.yt_verbose)) # Reduces the output to desired level, 50 - least output
if (options.py_quiet is True):
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore', category=Warning)
prtwarn("Python warnings are turned off from here (-q, --quiet switch)")
plot_var = options.var_name
user_draw_timebox = options.annotate_time
user_limits = (options.default_range is not True)
save_spectrum = (options.not_save_spec is not True)
use_logscale = (options.use_linscale is not True)
use_linscale = (options.use_linscale)
plot_field = options.fieldname
plot_var = options.var_name
plot_vel = options.plot_vel
plot_mag = options.plot_mag
if (plot_vel is True):
plot_mag = False
if (plot_mag is True):
plot_vel = False
user_annot_line = False
user_annot_time = True
plot_layer = options.avg_layer
plot_ovlp = options.overlap_layer
options.fontsize = int(options.fontsize)
display_bin_no = False
user_coords_provided = ((options.coords_dflt[0] != "x") and (options.coords_dflt[1] != "y") and (options.coords_dflt[2] != "z"))
if user_coords_provided:
try:
user_coords = [float(options.coords_dflt[0]), float(options.coords_dflt[1]), float(options.coords_dflt[2])]
except:
die("Got spectrum coordinates %s, but failed to convert it to float." % str(options.coords_dflt))
#####################################
# ------- Parameters ----------------
par_epsilon = 1.0e-15
f_run = True
pf_initialized = False
# ------- Local functions -----------
def _total_cree(field, data):
list_cree = []
for element in h5ds.field_list:
if search("cree", str(element[1])):
list_cree.append(element[1])
cree_tot = data[str(list_cree[0])]
for element in list_cree[1:]:
cree_tot = cree_tot + data[element]
return cree_tot
def _total_cren(field, data):
list_cren = []
for element in h5ds.field_list:
if search("cren", str(element[1])):
list_cren.append(element[1])
cren_tot = data[str(list_cren[0])]
for element in list_cren[1:]:
cren_tot = cren_tot + data[element]
return cren_tot
def _total_B(field, data):
    b_tot = 2.85 * (data["mag_field_x"]**2 + data["mag_field_y"]**2 + data["mag_field_z"]**2)**0.5
return b_tot
def en_ratio(field, data): # DEPRECATED (?)
bin_nr = field.name[1][-2:]
for element in h5ds.field_list:
if search("cree" + str(bin_nr.zfill(2)), str(element[1])):
cren_data = data["cren" + str(bin_nr.zfill(2))]
cren_data[cren_data <= par_epsilon**2] = par_epsilon # necessary to avoid FPEs
cree_data = data["cree" + str(bin_nr.zfill(2))]
en_ratio = cree_data / cren_data
return en_ratio
def copy_field(field, data):
field_name_to_copy = field.name[1][:].split("_")[0]
copied_field = data[field_name_to_copy]
return copied_field
def add_cren_tot_to(h5_dataset):
try:
if (h5ds.all_data()["cren01"].units is "dimensionless"):
h5ds.add_field(("gdf", "cren_tot"), units="", function=_total_cren, display_name="Total CR electron number density", sampling_type="cell")
else:
h5ds.add_field(("gdf", "cren_tot"), units="1/(pc**3)", function=_total_cren, display_name="Total CR electron number density", dimensions=dimensions.energy / dimensions.volume, sampling_type="cell", take_log=True)
except:
die("Failed to construct field 'cren_tot'")
return h5_dataset
def add_cree_tot_to(h5_dataset):
try:
if (h5ds.all_data()["cree01"].units is "dimensionless"):
h5ds.add_field(("gdf", "cree_tot"), units="", function=_total_cree, display_name="Total CR electron energy density", sampling_type="cell")
else:
h5ds.add_field(("gdf", "cree_tot"), units="Msun/(Myr**2*pc)", function=_total_cree, display_name="Total CR electron energy density", dimensions=dimensions.energy / dimensions.volume, sampling_type="cell")
except:
die("Failed to construct field 'cree_tot'")
return h5_dataset
def add_tot_fields(h5_dataset):
h5_dataset = add_cree_tot_to(h5_dataset)
h5_dataset = add_cren_tot_to(h5_dataset)
return h5_dataset
# ---------- reading parameters
if (options.filename != "None"):
filename = options.filename
else:
filename = argv[1]
filename_trimmed = filename.split("/")[-1]
filename_ext = filename_trimmed.split('.')[-1]
filename_nam = filename_trimmed.split('.')[0].split('/')[-1]
if (filename_ext != 'h5'):
die("Script requires a (list of) hdf5 file(s) on input")
if f_run:
if not path.exists('results'):
makedirs('results')
prtinfo("Output directory created: %s" % (getcwd() + '/results'))
var_array = []
if f_run is True:
var_names = []
var_names = ["ncre", "p_min_fix", "p_max_fix", "e_small", "cre_eff", "q_big"]
var_def = [20, 10., 1.e5, 1.e-6, 0.01, 30., ]
if len(var_names) == 0:
prtwarn("Empty list of parameter names provided: enter names of parameters to read")
var_names = input_names_array()
var_array = read_par(filename, var_names, var_def)
for i in range(len(var_names)):
exec("%s=%s" % (var_names[i], var_array[i]))
prtinfo("\n*** Values read from problem.par@hdf5 file: *** \n")
for i in range(len(var_names)):
prtinfo(" %15s = %10s ( %15s ) " % (var_names[i], var_array[i], type(var_array[i])))
# ---------- Open file
h5ds = yt.load(filename)
initialize_pf_arrays(filename, pf_initialized)
# ---------- bounds on domain size
grid_dim = h5ds.domain_dimensions
dim_map = {'x': 0, 'y': 1, 'z': 2}
dom_l = np_array(h5ds.domain_left_edge[0:3])
dom_r = np_array(h5ds.domain_right_edge[0:3])
prtinfo("Max level of refinement is %i." % (h5ds.max_level))
# ----------- Loading other data
t = h5ds.current_time[0]
time = t.in_units('Myr')
# ----------- Checking user image limits
try:
plot_user_min = float(options.plot_range[0])
plot_user_max = float(options.plot_range[1])
except:
prtwarn("No provided ZLIM or failed to convert it into float -- using default (min/max of ds(field) )")
user_limits = False
# ------------ Organizing domain data
length_unit = 'pc'
prtinfo("Domain shape of in provided file (i, j, k): [%i, %i, %i] \033[0m" % (grid_dim[0], grid_dim[1], grid_dim[2]))
prtinfo("Domain physical dimensions in provided file (x, y, z): [%9.3f,%9.3f,%9.3f]:[%9.3f,%9.3f,%9.3f] %s \033[0m" % (dom_l[0], dom_l[1], dom_l[2], dom_r[0], dom_r[1], dom_r[2], length_unit))
avail_dim = [0, 1, 2]
avail_dims_by_slice = [[1, 2], [0, 2], [0, 1]]
if len(grid_dim) == 3 and min(grid_dim) != 1:
slice_ax = str(options.slice_info[0])
slice_coord = float(options.slice_info[1])
while slice_ax not in dim_map.keys():
slice_ax = read_var("Choose slice ax (x, y, z) : ")
while (slice_coord < dom_l[dim_map[slice_ax]]) or (slice_coord > dom_r[dim_map[slice_ax]] or isnan(slice_coord) is True): # or slice_coord < -10000:
try:
slice_coord = float(read_var("Choose slice coordinate (%d:%d %s ) (if empty, middle is assumed): \033[0m" % (dom_l[dim_map[slice_ax]], dom_r[dim_map[slice_ax]], length_unit)))
except:
slice_coord = (dom_l[dim_map[slice_ax]] + dom_r[dim_map[slice_ax]]) / 2.
prtwarn(" (empty / incorrect input): Setting slice coordinate to %s %s.\033[0m" % (slice_coord, length_unit))
elif min(grid_dim) == 1:
slice_coord = 0.0
if grid_dim[0] == 1:
slice_ax = 'x'
elif grid_dim[1] == 1:
slice_ax = 'y'
else:
slice_ax = 'z'
avail_dim = avail_dims_by_slice[dim_map[slice_ax]]
prtinfo("Slice ax set to %s, coordinate = %f %s \033[0m" % (slice_ax, slice_coord, length_unit))
resolution = [grid_dim[avail_dim[0]] * 2**h5ds.max_level, grid_dim[avail_dim[1]] * 2**h5ds.max_level]
# --------- Preparing clickable image
s = plt.figure(figsize=(12, 8), dpi=100)
s1 = plt.subplot(121)
dsSlice = h5ds.slice(slice_ax, slice_coord)
click_coords = [0, 0]
image_number = 0
if (plot_field == "b_tot"):
try:
h5ds.add_field(("gdf", plot_field), dimensions=dimensions.magnetic_field, units="", function=_total_B, display_name="Total magnetic field ($\mu$G)", sampling_type="cell")
except:
die("Failed to construct field %s" % plot_field)
if (plot_field[0:-2] == "en_ratio"):
try:
if str(dsSlice["cren01"].units) is "dimensionless": # DEPRECATED
h5ds.add_field(("gdf", plot_field), units="", function=en_ratio, display_name="Ratio e/n in %i-th bin" % int(plot_field[-2:]), sampling_type="cell")
else:
h5ds.add_field(("gdf", plot_field), units="Msun*pc**2/Myr**2", function=en_ratio, display_name="Ratio e/n in %i-th bin" % int(plot_field[-2:]), dimensions=dimensions.energy, sampling_type="cell", take_log=True)
except:
die("Failed to construct field %s" % plot_field)
dsSlice = add_tot_fields(dsSlice)
# For elegant labels when plot_field is cree?? or cren??
if (plot_field[-3:] != "tot" and plot_field[0:3] == "cre" and plot_field[3:-2] != "ratio"):
if (plot_field[0:4] == "cree"):
disp_name = "energy"
new_field_dimensions = dimensions.energy / dimensions.volume
elif (plot_field[0:4] == "cren"):
disp_name = "number"
new_field_dimensions = 1. / dimensions.volume
prtinfo("Adding display name: %s density" % disp_name)
if (display_bin_no):
disp_name = "CR electron %s density (bin %2i)" % (disp_name, int(plot_field[-2:]))
else:
disp_name = "CR electron %s density" % (disp_name)
new_field = str(plot_field + "_updated")
new_field_units = dsSlice[plot_field].units
h5ds.add_field(("gdf", new_field), units=new_field_units, function=copy_field, display_name=disp_name, dimensions=new_field_dimensions, sampling_type="cell")
plot_field = new_field
try:
field_max = h5ds.find_max("cr01")[0].v # WARNING - this makes field_max unitless
except:
field_max = h5ds.find_max("cr1")[0].v # WARNING - this makes field_max unitless
# prepare limits for framebuffer
# if (options.usr_width == 0.):
frb_w = dom_r[avail_dim[0]] + abs(dom_l[avail_dim[0]])
if (options.usr_width != 0. and not options.annotate_rect):
frb_w = float(options.usr_width)
# if (options.usr_height == 0.):
frb_h = dom_r[avail_dim[1]] + abs(dom_l[avail_dim[1]])
if (options.usr_height != 0. and not options.annotate_rect):
frb_h = float(options.usr_height)
frb_center = None
if (options.usr_center == [0., 0.] and not options.annotate_rect):
frb_center = None
elif (options.usr_center != [0., 0.] and not options.annotate_rect):
frb_center = [0, 0]
frb_center[0] = float(options.usr_center[0])
frb_center[1] = float(options.usr_center[1])
slice_center = "c"
slice_center = [0, 0, 0]
if (options.usr_center != [0., 0.] and not options.annotate_rect):
slice_center = [0, 0, 0]
slice_center[avail_dim[0]] = frb_center[0]
slice_center[avail_dim[1]] = frb_center[1]
slice_center[dim_map[slice_ax]] = slice_coord
# construct framebuffer
if (slice_ax == "y"):
frb = np_array(dsSlice.to_frb(width=frb_h, resolution=resolution, center=slice_center, height=frb_w)[plot_field])
frb = rot90(frb)
else:
frb = np_array(dsSlice.to_frb(width=frb_w, resolution=resolution, center=slice_center, height=frb_h, periodic=False)[plot_field])
if (not user_limits):
plot_max = h5ds.find_max(plot_field)[0]
if (not user_limits):
plot_min = h5ds.find_min(plot_field)[0]
plot_units = str(h5ds.all_data()[plot_field].units)
if (user_limits is True): # Overwrites previously found values
plot_min = plot_user_min
plot_max = plot_user_max
if (not_py27):
plt.xlabel("Domain cooridnates " + list(dim_map.keys())[list(dim_map.values()).index(avail_dim[0])] + " (" + length_unit + ")")
plt.ylabel("Domain cooridnates " + list(dim_map.keys())[list(dim_map.values()).index(avail_dim[1])] + " (" + length_unit + ")")
else:
plt.xlabel("Domain cooridnates " + dim_map.keys()[dim_map.values().index(avail_dim[0])] + " (" + length_unit + ")")
plt.ylabel("Domain cooridnates " + dim_map.keys()[dim_map.values().index(avail_dim[1])] + " (" + length_unit + ")")
if (options.annotate_rect):
yt_data_plot = yt.SlicePlot(h5ds, slice_ax, plot_field, width=(dom_r[avail_dim[0]] + abs(dom_l[avail_dim[0]]), dom_r[avail_dim[1]] + abs(dom_l[avail_dim[1]])), center=slice_center)
else:
yt_data_plot = yt.SlicePlot(h5ds, slice_ax, plot_field, width=(frb_w, frb_h), center=slice_center)
yt_data_plot.set_font({'size': options.fontsize})
encountered_nans = False
plot_max = float(plot_max)
plot_min = max(float(plot_min), par_epsilon)
if (isnan(plot_min) is True or isnan(plot_max) is True):
encountered_nans = True
prtwarn("Invalid data encountered (NaN), ZLIM will be adjusted")
colormap_my = copy(plt.cm.viridis)
colormap_my.set_bad(colormap_my(par_epsilon))
im_orig = "lower"
if (use_logscale):
plt.imshow(frb, extent=[dom_l[avail_dim[0]], dom_r[avail_dim[0]], dom_l[avail_dim[1]], dom_r[avail_dim[1]]], origin=im_orig, cmap=colormap_my, norm=LogNorm(vmin=plot_min, vmax=plot_max) if (encountered_nans is False) else LogNorm())
elif (use_linscale):
plt.imshow(frb, extent=[dom_l[avail_dim[0]], dom_r[avail_dim[0]], dom_l[avail_dim[1]], dom_r[avail_dim[1]]], origin=im_orig, cmap=colormap_my)
plt.title("Component: " + plot_field + " | t = %9.3f Myr" % time)
try:
cbar = plt.colorbar(shrink=0.9, pad=0.01, label=plot_units)
except:
die("An empty field might have been picked.")
if (user_annot_line is True):
prtinfo("Marking line on yt.plot at (0 0 0) : (500 500 0)")
yt_data_plot.annotate_line((0., 0., 0.), (500., 500.0, 0), plot_args={'color': 'white', "lw": 2.0})
if (plot_vel):
yt_data_plot.annotate_velocity(factor=32, scale=3e7)
if (plot_mag):
yt_data_plot.annotate_magnetic_field(factor=32, scale=40)
yt_data_plot.set_cmap(field=plot_field, cmap=colormap_my)
yt_data_plot.set_zlim(plot_field, plot_min, plot_max)
marker_l = ["x", "+", "*", "X", ".", "^", "v", "<", ">", "1"]
m_size_l = [350, 500, 400, 400, 500, 350, 350, 350, 350, 500]
m_e_width = 5
marker_index = 0
plt.subplots_adjust(left=0.075, right=0.975, hspace=0.12)
plt.tight_layout()
print("")
crs_initialize(var_names, var_array)
mplot = yt_data_plot.plots[plot_field]
xticklabels = mplot.axes.xaxis.get_ticklabels()
yticklabels = mplot.axes.yaxis.get_ticklabels()
# ---------
def read_click_and_plot(event):
global click_coords, image_number, f_run, marker_index
exit_code = True
if (marker_index == len(marker_l) or marker_index == len(m_size_l)):
marker_index = 0
click_coords = [event.xdata, event.ydata]
coords = [slice_coord, slice_coord, slice_coord]
if slice_ax == "x":
coords[1] = click_coords[0]
coords[2] = click_coords[1]
elif slice_ax == "y":
coords[0] = click_coords[0]
coords[2] = click_coords[1]
else: # slice_ax = "z"
coords[0] = click_coords[0]
coords[1] = click_coords[1]
mark_plot_save(coords)
def mark_plot_save(coords):
global click_coords, image_number, f_run, marker_index
# ------------ preparing data and passing -------------------------
position = h5ds.r[coords:coords]
if (plot_field[0:-2] != "en_ratio"):
prtinfo(">>>>>>>>>>>>>>>>>>> Value of %s at point [%f, %f, %f] = %f " % (plot_field, coords[0], coords[1], coords[2], position[plot_field]))
else:
prtinfo("Value of %s at point [%f, %f, %f] = %f " % (plot_field, coords[0], coords[1], coords[2], position["cree" + str(plot_field[-2:])] / position["cren" + str(plot_field[-2:])]))
plot_max = h5ds.find_max("cre" + plot_var + str(plot_field[-2:]))[0] # once again appended - needed as ylimit for the plot
btot = (position["mag_field_x"].v**2 + position["mag_field_y"].v**2 + position["mag_field_z"].v**2)**0.5
btot_uG = 2.85 * btot # WARNING magic number @btot - conversion factor
prtinfo("B_tot = %f = %f (uG)" % (btot, btot_uG))
if (True): # TODO DEPRECATED save_fqp
ecrs = []
ncrs = []
if (plot_ovlp is not True): # overwrites position
if (plot_layer is True):
prtinfo("Plotting layer...")
position = h5ds.r[[coords[0], dom_l[avail_dim[0]], coords[2]]: [coords[0], dom_r[avail_dim[0]], coords[2]]]
elif (options.annotate_rect):
# define borders of the selected region (plane)
usr_w = float(options.usr_width)
usr_h = float(options.usr_height)
usr_c = [0, 0]
usr_c[:] = [float(options.usr_center[0]), float(options.usr_center[1])]
lb = [0, 0, 0]
lb[avail_dim[0]] = usr_c[0] - usr_w * 0.5
lb[avail_dim[1]] = usr_c[1] - usr_h * 0.5
lb[dim_map[slice_ax]] = slice_coord - (dom_r[avail_dim[0]] - dom_l[avail_dim[0]]) / int(h5ds.domain_dimensions[avail_dim[0]])
rb = [0, 0, 0]
rb[avail_dim[0]] = usr_c[0] + usr_w * 0.5
rb[avail_dim[1]] = usr_c[1] - usr_h * 0.5
rb[dim_map[slice_ax]] = slice_coord
lt = [0, 0, 0]
lt[avail_dim[0]] = usr_c[0] - usr_w * 0.5
lt[avail_dim[1]] = usr_c[1] + usr_h * 0.5
lt[dim_map[slice_ax]] = slice_coord
rt = [0, 0, 0]
rt[avail_dim[0]] = usr_c[0] + usr_w * 0.5
rt[avail_dim[1]] = usr_c[1] + usr_h * 0.5
rt[dim_map[slice_ax]] = slice_coord + (dom_r[avail_dim[0]] - dom_l[avail_dim[0]]) / int(h5ds.domain_dimensions[avail_dim[0]])
# select region as plane spreading between lb and rt corners
position = yt.data_objects.selection_data_containers.YTRegion(left_edge=lb, right_edge=rt, center=usr_c, ds=h5ds) # (center, left_edge, right_edge
            coords[avail_dim[0]] = usr_c[0]
            coords[avail_dim[1]] = usr_c[1]
coords[dim_map[slice_ax]] = slice_coord
for ind in range(1, ncre + 1):
ecrs.append(float(mean(position['cree' + str(ind).zfill(2)][0].v)))
ncrs.append(float(mean(position['cren' + str(ind).zfill(2)][0].v)))
fig2, exit_code = crs_plot_main(plot_var, ncrs, ecrs, time, coords, marker=marker_l[marker_index], clean_plot=options.clean_plot, hide_axes=options.no_axes)
elif (plot_ovlp is True): # for overlap_layer
prtinfo("Plotting layer with overlap...")
dnum = int(h5ds.domain_dimensions[avail_dim[0]])
dl = (dom_r[avail_dim[0]] - dom_l[avail_dim[0]]) / float(dnum)
for j in range(dnum):
                position = h5ds.r[[coords[0], dom_l[avail_dim[0]] + dl * j, coords[2]]: [coords[0], dom_l[avail_dim[0]] + dl * j, coords[2]]]
for ind in range(1, ncre + 1):
ecrs.append(position['cree' + str(ind).zfill(2)][0].v)
ncrs.append(position['cren' + str(ind).zfill(2)][0].v)
fig2, exit_code_tmp = crs_plot_main(plot_var, ncrs, ecrs, time, coords, marker=marker_l[marker_index], i_plot=image_number, clean_plot=options.clean_plot, hide_axes=options.no_axes)
if (exit_code_tmp is False):
                    exit_code = exit_code_tmp  # just one successful plot is enough
ecrs = []
ncrs = []
else: # for fqp, DEPRECATED probably
fcrs = []
qcrs = []
pcut = [0., 0.]
ecrs = []
ncrs = []
for ind in range(1, ncre + 1):
ecrs.append(float(position['cree' + str(ind).zfill(2)][0].v))
ncrs.append(float(position['cren' + str(ind).zfill(2)][0].v))
for ind in range(1, ncre + 2):
fcrs.append(float(position['cref' + str(ind).zfill(2)][0].v))
for ind in range(1, ncre + 1):
qcrs.append(float(position['creq' + str(ind).zfill(2)][0].v))
pcut[:] = [position['crep01'][0].v, position['crep02'][0].v]
fig2, exit_code = crs_plot_main_fpq(var_names, var_array, plot_var, fcrs, qcrs, pcut, field_max, time, coords, marker=marker_l[marker_index], clean_plot=options.clean_plot)
if (exit_code is not True):
if ((plot_layer is True) or (plot_ovlp is True)):
line = s1.plot([dom_l[avail_dim[0]], dom_r[avail_dim[0]]], [coords[2], coords[2]], color="white") # plot line (layer) if cell not empty WARNING - for now only works with mcrwind
else:
point = s1.plot(coords[avail_dim[0]], coords[avail_dim[1]], marker=marker_l[marker_index], color="red") # plot point if cell not empty
s.savefig('results/' + filename_nam + '_' + plot_var + '_%04d.png' % image_number, transparent='True')
# prtinfo(" ---> Saved plot to: %s " %str('results/'+filename_nam+'_'+plot_var+'_%04d.png' %image_number))
if (plot_layer is True): # Mark averaged level
yt_data_plot.annotate_line([coords[0], dom_l[avail_dim[0]], coords[2]], [coords[0], dom_r[avail_dim[0]], coords[2]], plot_args={'color': 'white', "lw": 10.0})
else:
if (not options.annotate_rect and marker_index > 0):
yt_data_plot.annotate_marker(coords, marker=marker_l[marker_index - 1], plot_args={'color': 'red', 's': m_size_l[marker_index - 1], "lw": 4.5}) # cumulatively annotate all clicked coordinates
if (options.annotate_rect):
yt_data_plot.annotate_line(lb, lt, plot_args={'color': 'white', "lw": 2.0})
yt_data_plot.annotate_line(lt, rt, plot_args={'color': 'white', "lw": 2.0})
yt_data_plot.annotate_line(rt, rb, plot_args={'color': 'white', "lw": 2.0})
yt_data_plot.annotate_line(rb, lb, plot_args={'color': 'white', "lw": 2.0})
marker_index = marker_index + 1
image_number = image_number + 1
# ------------- saving just the spectrum
if (save_spectrum):
extent = fig2.get_window_extent().transformed(s.dpi_scale_trans.inverted())
s.savefig('results/' + filename_nam + '_' + 'slice_' + slice_ax + '_' + plot_var + '_spec_%03d.pdf' % image_number, transparent='True', bbox_inches="tight", dpi=150) # bbox not working in py27 FIXME
prtinfo(" ---> Saved plot to: %s.\n\033[44mPress 'q' to quit and save yt.SlicePlot with marked coordinates." % str('results/' + filename_nam + '_' + 'slice_' + slice_ax + '_' + plot_var + '_spectrum_%04d.pdf' % image_number))
else:
prtwarn("Empty cell - not saving.")
if (f_run):
f_run = False
def plot_with_coords_provided(coords_in):
mark_plot_save(coords_in)
if (user_coords_provided):
prtinfo("Provided coordintates %s (clickable map will not be shown) , processing image." % str(user_coords))
plot_with_coords_provided(user_coords)
else:
prtinfo("\033[44mClick LMB on the colormap to display spectrum ('q' to exit)")
cid = s.canvas.mpl_connect('button_press_event', read_click_and_plot)
plt.show()
text_coords = [0., 0., 0.]
text_coords[dim_map.get(slice_ax)] = slice_coord
text_coords[avail_dim[0]] = dom_l[avail_dim[0]]
text_coords[avail_dim[1]] = dom_l[avail_dim[1]]
text_coords = [item * 0.9 for item in text_coords]
if (user_annot_time):
if (user_draw_timebox is True):
yt_data_plot.annotate_text(text_coords, 'T = {:0.2f} Myr'.format(float(t.in_units('Myr'))), text_args={'fontsize': options.fontsize, 'color': 'white', 'alpha': '0.0'}, inset_box_args={'boxstyle': 'round', 'pad': 0.2, 'alpha': 0.8})
else:
prtinfo("Not marking line on yt.plot (user_draw_timebox = %s)" % (user_draw_timebox))
yt_data_plot.annotate_title('T = {:0.2f} Myr'.format(float(t.in_units('Myr'))))
if (options.no_cbar):
yt_data_plot.hide_colorbar()
if (options.no_xlabels and options.no_ylabels):
yt_data_plot.hide_axes()
yt_data_plot.save('results/' + filename_nam + '_' + plot_field + '_sliceplot_' + slice_ax + '.pdf') # save image (spectrum already saved) when finished.
if (not user_coords_provided):
s.canvas.mpl_disconnect(cid)
|
gpl-3.0
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/4_categories/test11_cross_validate_categories_1200ms_scaled_method_i.py
|
1
|
5041
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_I import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
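# Editorial aside (illustrative only, not part of the original experiment): the
# cumulative variance curve can also pick the number of PCs automatically, e.g.
# the smallest k that explains 95% of the total variance:
#     k_95 = int(np.argmax(np.real(perc_total) >= 0.95)) + 1
#     print 'PCs needed for 95% of the variance:', k_95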
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
mit
|
NunoEdgarGub1/scikit-learn
|
sklearn/decomposition/__init__.py
|
147
|
1421
|
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
|
bsd-3-clause
|
xyguo/scikit-learn
|
examples/svm/plot_svm_anova.py
|
85
|
2024
|
"""
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data to put ourselves in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
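# Editorial aside: pipeline step parameters are addressed as
# '<step_name>__<parameter>', which is what the loop below relies on. A
# hypothetical one-off use of the combined estimator would look like:
#     clf.set_params(anova__percentile=10)
#     clf.fit(X, y)
#     print(clf.predict(X[:5]))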
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
ibukanov/boulder
|
test/load-generator/latency-charter.py
|
3
|
5438
|
#!/usr/bin/python
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import datetime
import json
import pandas
import matplotlib
import argparse
import os
matplotlib.style.use('ggplot')
# sacrificial plot used only to build a single shared legend
matplotlib.rcParams['figure.figsize'] = 1, 1
randFig = plt.figure()
randAx = plt.subplot()
randAx.plot(0, 0, color='green', label='good', marker='+')
randAx.plot(0, 0, color='red', label='failed', marker='x')
randAx.plot(0, 0, color='black', label='sent', linestyle='--')
randAx.plot(0, 0, color='green', label='50th quantile')
randAx.plot(0, 0, color='orange', label='90th quantile')
randAx.plot(0, 0, color='red', label='99th quantile')
handles, labels = randAx.get_legend_handles_labels()
# big ol' plotting method
def plot_section(all_data, title, outputPath):
# group calls by the endpoint/method
actions = all_data.groupby('action')
h = len(actions.groups.keys())
matplotlib.rcParams['figure.figsize'] = 20, 3 * h
fig = plt.figure()
fig.legend(handles, labels, ncol=6, fontsize=16, framealpha=0, loc='upper center')
if title is not None:
fig.suptitle(title, fontsize=20, y=0.93)
gs = gridspec.GridSpec(h, 3)
# figure out left and right datetime bounds
started = all_data['sent'].min()
stopped = all_data['finished'].max()
i = 0
# plot one row of charts for each endpoint/method combination
for section in actions.groups.keys():
# set up the three charts for this endpoint/method
ax = fig.add_subplot(gs[i, 0])
ax.set_title(section)
ax.set_xlim(started, stopped)
ax2 = fig.add_subplot(gs[i, 2])
ax2.set_xlim(started, stopped)
ax3 = fig.add_subplot(gs[i, 1])
ax3.set_xlim(started, stopped)
# find the maximum y value and set it across all three charts
calls = actions.get_group(section)
tookMax = calls['took'].max()
ax.set_ylim(0, tookMax+tookMax*0.1)
ax2.set_ylim(0, tookMax+tookMax*0.1)
ax3.set_ylim(0, tookMax+tookMax*0.1)
groups = calls.groupby('type')
if groups.groups.get('error', False) is not False:
bad = groups.get_group('error')
ax.plot_date(bad['finished'], bad['took'], color='red', marker='x', label='error')
bad_rate = bad.set_index('finished')
bad_rate['rate'] = [0] * len(bad_rate.index)
bad_rate = bad_rate.resample('5S').count()
bad_rate['rate'] = bad_rate['rate'].divide(5)
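# (Editorial note) resample('5S').count() gives the number of requests falling
# into each 5-second bucket; dividing by 5 converts that into an approximate
# requests-per-second rate, e.g. 40 requests in a bucket -> 8 req/s. The same
# pattern is reused for the 'good' and 'sent' series below.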
rateMax = bad_rate['rate'].max()
ax2.plot_date(bad_rate.index, bad_rate['rate'], linestyle='-', marker='', color='red', label='error')
if groups.groups.get('good', False) is not False:
good = groups.get_group('good')
ax.plot_date(good['finished'], good['took'], color='green', marker='+', label='good')
good_rate = good.set_index('finished')
good_rate['rate'] = [0] * len(good_rate.index)
good_rate = good_rate.resample('5S').count()
good_rate['rate'] = good_rate['rate'].divide(5)
rateMax = good_rate['rate'].max()
ax2.plot_date(good_rate.index, good_rate['rate'], linestyle='-', marker='', color='green', label='good')
ax.set_ylabel('Latency (ms)')
# calculate the request rate
sent_rate = pandas.DataFrame(calls['sent'])
sent_rate = sent_rate.set_index('sent')
sent_rate['rate'] = [0] * len(sent_rate.index)
sent_rate = sent_rate.resample('5S').count()
sent_rate['rate'] = sent_rate['rate'].divide(5)
if sent_rate['rate'].max() > rateMax:
rateMax = sent_rate['rate'].max()
ax2.plot_date(sent_rate.index, sent_rate['rate'], linestyle='--', marker='', color='black', label='sent')
ax2.set_ylim(0, rateMax+rateMax*0.1)
ax2.set_ylabel('Rate (per second)')
# calculate and plot latency quantiles
calls = calls.set_index('finished')
calls = calls.sort_index()
quan = pandas.DataFrame(calls['took'])
for q, c in [[.5, 'green'], [.9, 'orange'], [.99, 'red']]:
quanN = quan.rolling(500, center=True).quantile(q)
ax3.plot(quanN['took'].index, quanN['took'], color=c)
ax3.set_ylabel('Latency quantiles (ms)')
i += 1
# format x axes
for ax in fig.axes:
matplotlib.pyplot.sca(ax)
plt.xticks(rotation=30, ha='right')
majorFormatter = matplotlib.dates.DateFormatter('%H:%M:%S')
ax.xaxis.set_major_formatter(majorFormatter)
# save image
gs.update(wspace=0.275, hspace=0.5)
fig.savefig(outputPath, bbox_inches='tight')
# and the main event
parser = argparse.ArgumentParser()
parser.add_argument('chartData', type=str, help='Path to file containing JSON chart output from load-generator')
parser.add_argument('--output', type=str, help='Path to save output to', default='latency-chart.png')
parser.add_argument('--title', type=str, help='Chart title')
args = parser.parse_args()
with open(args.chartData) as data_file:
stuff = []
for l in data_file.readlines():
stuff.append(json.loads(l))
df = pandas.DataFrame(stuff)
df['finished'] = pandas.to_datetime(df['finished']).astype(datetime.datetime)
df['sent'] = pandas.to_datetime(df['sent']).astype(datetime.datetime)
df['took'] = df['took'].divide(1000000)
plot_section(df, args.title, args.output)
|
mpl-2.0
|
ndingwall/scikit-learn
|
examples/model_selection/plot_learning_curve.py
|
5
|
7001
|
"""
========================
Plotting Learning Curves
========================
In the first column, first row, the learning curve of a naive Bayes classifier
is shown for the digits dataset. Note that the training score and the
cross-validation score are both not very good at the end. However, this shape
of curve is found very often in more complex datasets: the training score is
very high at the beginning and decreases, while the cross-validation score is
very low at the beginning and increases. In the second column, first row, we
see the learning curve of an SVM with RBF kernel. We can see clearly that the
training score is still around the maximum and the validation score could be
increased with more training samples. The plots in the second row show the
times required by the models to train with various sizes of training dataset.
The plots in the third row show how the test score varies with the time
required to fit the model.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, axes=None, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate 3 plots: the test and training learning curve, the training
samples vs fit times curve, the fit times vs score curve.
Parameters
----------
estimator : estimator instance
An estimator instance implementing `fit` and `predict` methods which
will be cloned for each validation.
title : str
Title for the chart.
X : array-like of shape (n_samples, n_features)
Training vector, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
y : array-like of shape (n_samples) or (n_samples, n_features)
Target relative to ``X`` for classification or regression;
None for unsupervised learning.
axes : array-like of shape (3,), default=None
Axes to use for plotting the curves.
ylim : tuple of shape (2,), default=None
Defines minimum and maximum y-values plotted, e.g. (ymin, ymax).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like of shape (n_ticks,), dtype={int, float}
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the ``dtype`` is float, it is regarded
as a fraction of the maximum size of the training set (that is
determined by the selected validation method), i.e. it has to be within
(0, 1]. Otherwise it is interpreted as absolute sizes of the training
sets. Note that for classification the number of samples usually has
to be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
"""
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ = \
learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
# Plot learning curve
axes[0].grid()
axes[0].fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
axes[0].fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1,
color="g")
axes[0].plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
axes[0].plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
axes[0].legend(loc="best")
# Plot n_samples vs fit_times
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, 'o-')
axes[1].fill_between(train_sizes, fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std, alpha=0.1)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
# Plot fit_time vs score
axes[2].grid()
axes[2].plot(fit_times_mean, test_scores_mean, 'o-')
axes[2].fill_between(fit_times_mean, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
fig, axes = plt.subplots(3, 2, figsize=(10, 15))
X, y = load_digits(return_X_y=True)
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, axes=axes[:, 0], ylim=(0.7, 1.01),
cv=cv, n_jobs=4)
title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, axes=axes[:, 1], ylim=(0.7, 1.01),
cv=cv, n_jobs=4)
plt.show()
|
bsd-3-clause
|
sangwook236/general-development-and-testing
|
sw_dev/python/rnd/test/image_processing/skimage/skimage_thresholding.py
|
2
|
6105
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import skimage
import skimage.filters, skimage.morphology
import matplotlib
import matplotlib.pyplot as plt
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_thresholding.html
def try_all_threshold_example():
img = skimage.data.page()
# Specify a radius for local thresholding algorithms.
# If it is not specified, only global algorithms are called.
fig, ax = skimage.filters.try_all_threshold(img, figsize=(10, 8), verbose=False)
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_thresholding.html
def threshold_example():
image = skimage.data.camera()
#thresh = skimage.filters.threshold_isodata(image)
#thresh = skimage.filters.threshold_li(image)
#thresh = skimage.filters.threshold_mean(image)
#thresh = skimage.filters.threshold_minimum(image)
thresh = skimage.filters.threshold_otsu(image)
#thresh = skimage.filters.threshold_triangle(image)
#thresh = skimage.filters.threshold_yen(image)
binary = image > thresh
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5))
ax = axes.ravel()
ax[0] = plt.subplot(1, 3, 1)
ax[1] = plt.subplot(1, 3, 2)
ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0])
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].hist(image.ravel(), bins=256)
ax[1].set_title('Histogram')
ax[1].axvline(thresh, color='r')
ax[2].imshow(binary, cmap=plt.cm.gray)
ax[2].set_title('Thresholded')
ax[2].axis('off')
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_multiotsu.html
def multiotsu_example():
# Setting the font size for all plots.
matplotlib.rcParams['font.size'] = 9
# The input image.
image = skimage.data.camera()
# Applying multi-Otsu threshold for the default value, generating three classes.
thresholds = skimage.filters.threshold_multiotsu(image)
# Using the threshold values, we generate the three regions.
regions = np.digitize(image, bins=thresholds)
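# (Editorial note) with two thresholds [t1, t2], np.digitize labels pixels
# below t1 as 0, pixels in [t1, t2) as 1, and pixels >= t2 as 2, which is what
# produces the three regions plotted below.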
#regions_colorized = skimage.color.label2rgb(regions)
#plt.imshow(regions_colorized)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(10, 3.5))
# Plotting the original image.
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Original')
ax[0].axis('off')
# Plotting the histogram and the two thresholds obtained from multi-Otsu.
ax[1].hist(image.ravel(), bins=255)
ax[1].set_title('Histogram')
for thresh in thresholds:
ax[1].axvline(thresh, color='r')
# Plotting the Multi Otsu result.
ax[2].imshow(regions, cmap='Accent')
ax[2].set_title('Multi-Otsu result')
ax[2].axis('off')
plt.subplots_adjust()
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/applications/plot_thresholding.html
def local_otsu_threshold_example():
# Otsu's threshold method can be applied locally.
# For each pixel, an "optimal" threshold is determined by maximizing the variance between two classes of pixels of the local neighborhood defined by a structuring element.
img = skimage.util.img_as_ubyte(skimage.data.page())
radius = 15
selem = skimage.morphology.disk(radius)
local_otsu = skimage.filters.rank.otsu(img, selem)
threshold_global_otsu = skimage.filters.threshold_otsu(img)
global_otsu = img >= threshold_global_otsu
fig, axes = plt.subplots(2, 2, figsize=(8, 5), sharex=True, sharey=True)
ax = axes.ravel()
plt.tight_layout()
fig.colorbar(ax[0].imshow(img, cmap=plt.cm.gray), ax=ax[0], orientation='horizontal')
ax[0].set_title('Original')
ax[0].axis('off')
fig.colorbar(ax[1].imshow(local_otsu, cmap=plt.cm.gray), ax=ax[1], orientation='horizontal')
ax[1].set_title('Local Otsu (radius=%d)' % radius)
ax[1].axis('off')
ax[2].imshow(img >= local_otsu, cmap=plt.cm.gray)
ax[2].set_title('Original >= Local Otsu')
ax[2].axis('off')
ax[3].imshow(global_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu (threshold = %d)' % threshold_global_otsu)
ax[3].axis('off')
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/applications/plot_thresholding.html
def local_threshold_example():
image = skimage.data.page()
global_thresh = skimage.filters.threshold_otsu(image)
binary_global = image > global_thresh
block_size = 35
local_thresh = skimage.filters.threshold_local(image, block_size, offset=10)
binary_local = image > local_thresh
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax = axes.ravel()
plt.gray()
ax[0].imshow(image)
ax[0].set_title('Original')
ax[1].imshow(binary_global)
ax[1].set_title('Global thresholding')
ax[2].imshow(binary_local)
ax[2].set_title('Local thresholding')
for a in ax:
a.axis('off')
plt.show()
# REF [site] >> https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_niblack_sauvola.html
def niblack_and_sauvola_example():
matplotlib.rcParams['font.size'] = 9
image = skimage.data.page()
binary_global = image > skimage.filters.threshold_otsu(image)
window_size = 25
thresh_niblack = skimage.filters.threshold_niblack(image, window_size=window_size, k=0.8)
thresh_sauvola = skimage.filters.threshold_sauvola(image, window_size=window_size)
binary_niblack = image > thresh_niblack
binary_sauvola = image > thresh_sauvola
plt.figure(figsize=(8, 7))
plt.subplot(2, 2, 1)
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Original')
plt.axis('off')
plt.subplot(2, 2, 2)
plt.title('Global Threshold')
plt.imshow(binary_global, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(binary_niblack, cmap=plt.cm.gray)
plt.title('Niblack Threshold')
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(binary_sauvola, cmap=plt.cm.gray)
plt.title('Sauvola Threshold')
plt.axis('off')
plt.show()
def main():
#try_all_threshold_example()
#threshold_example()
#multiotsu_example()
# Local/adaptive thresholding.
local_otsu_threshold_example()
#local_threshold_example()
#niblack_and_sauvola_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
|
gpl-2.0
|
Chaparqanatoos/kaggle-knowledge
|
src/main/python/BagOfWords.py
|
1
|
4030
|
#!/usr/bin/env python
# Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Part 1 of the tutorial on Natural Language Processing.
#
# *************************************** #
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
import pandas as pd
import numpy as np
if __name__ == '__main__':
train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0, \
delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t", \
quoting=3)
print 'The first review is:'
print train["review"][0]
raw_input("Press Enter to continue...")
print 'Download text data sets. If you already have NLTK datasets downloaded, just close the Python download window...'
# nltk.download() # Download text data sets, including stop words
# Initialize an empty list to hold the clean reviews
clean_train_reviews = []
# Loop over each review; create an index i that goes from 0 to the length
# of the movie review list
print "Cleaning and parsing the training set movie reviews...\n"
for i in xrange(0, len(train["review"])):
clean_train_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(train["review"][i], True)))
# ****** Create a bag of words from the training set
#
print "Creating the bag of words...\n"
# Initialize the "CountVectorizer" object, which is scikit-learn's
# bag of words tool.
vectorizer = CountVectorizer(analyzer="word", \
tokenizer=None, \
preprocessor=None, \
stop_words=None, \
max_features=5000)
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of
# strings.
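# For intuition (editorial aside on a toy corpus, not the movie reviews): given
# the documents ["the cat sat", "the cat ran"], CountVectorizer would learn the
# vocabulary ['cat', 'ran', 'sat', 'the'] and fit_transform would return the
# 2 x 4 count matrix
#     [[1, 0, 1, 1],
#      [1, 1, 0, 1]]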
train_data_features = vectorizer.fit_transform(clean_train_reviews)
# Numpy arrays are easy to work with, so convert the result to an
# array
train_data_features = train_data_features.toarray()
# ******* Train a random forest using the bag of words
#
print "Training the random forest (this may take a while)..."
# Initialize a Random Forest classifier with 100 trees
forest = RandomForestClassifier(n_estimators=100)
# Fit the forest to the training set, using the bag of words as
# features and the sentiment labels as the response variable
#
# This may take a few minutes to run
forest = forest.fit(train_data_features, train["sentiment"])
# Create an empty list and append the clean reviews one by one
clean_test_reviews = []
print "Cleaning and parsing the test set movie reviews...\n"
for i in xrange(0, len(test["review"])):
clean_test_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(test["review"][i], True)))
# Get a bag of words for the test set, and convert to a numpy array
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
# Use the random forest to make sentiment label predictions
print "Predicting test labels...\n"
result = forest.predict(test_data_features)
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
# Use pandas to write the comma-separated output file
output.to_csv(os.path.join(os.path.dirname(__file__), 'data', 'Bag_of_Words_model.csv'), index=False, quoting=3)
print "Wrote results to Bag_of_Words_model.csv"
|
apache-2.0
|
bousmalis/models
|
autoencoder/AutoencoderRunner.py
|
12
|
1660
|
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.Autoencoder import Autoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = Autoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
|
apache-2.0
|
chrinide/theanets
|
examples/recurrent-text.py
|
1
|
2017
|
#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
import utils
climate.enable_default_logging()
COLORS = ['#d62728', '#1f77b4', '#2ca02c', '#9467bd', '#ff7f0e',
'#e377c2', '#8c564b', '#bcbd22', '#7f7f7f', '#17becf']
URL = 'http://www.gutenberg.org/cache/epub/2701/pg2701.txt'
with open(utils.find('moby.txt', URL)) as handle:
text = theanets.recurrent.Text(handle.read().lower().replace('\n', ' '))
seed = text.encode(text.text[200000:200010])
for i, layer in enumerate((
dict(form='rnn', activation='sigmoid'),
dict(form='gru', activation='sigmoid'),
dict(form='scrn', activation='linear'),
dict(form='lstm'),
dict(form='mrnn', activation='sigmoid', factors=len(text.alpha)),
dict(form='clockwork', activation='linear', periods=(1, 2, 4, 8, 16)))):
losses = []
layer.update(size=100)
net = theanets.recurrent.Classifier([
1 + len(text.alpha), layer, 1000, 1 + len(text.alpha)])
for tm, _ in net.itertrain(text.classifier_batches(30, 16),
min_improvement=0.99,
validate_every=50,
patience=0,
algo='rmsprop',
learning_rate=0.0001):
if np.isnan(tm['loss']):
break
print('{}|{} ({:.1f}%)'.format(
text.decode(seed),
text.decode(net.predict_sequence(seed, 30)),
100 * tm['acc']))
losses.append(tm['loss'])
plt.plot(losses, label=layer['form'], alpha=0.7, color=COLORS[i])
plt.gca().xaxis.tick_bottom()
plt.gca().yaxis.tick_left()
plt.gca().spines['top'].set_color('none')
plt.gca().spines['right'].set_color('none')
plt.gca().spines['bottom'].set_position(('outward', 6))
plt.gca().spines['left'].set_position(('outward', 6))
plt.gca().set_ylabel('Loss')
plt.gca().set_xlabel('Training Epoch')
plt.gca().grid(True)
plt.legend()
plt.show()
|
mit
|
google/audio-to-tactile
|
extras/python/phonetics/phone_model.py
|
1
|
22911
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Train and eval a network for mapping audio to 2D vowel space coordinate."""
import datetime
import functools
import os
import os.path
import random
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple
from absl import flags
import dataclasses
import haiku as hk
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.figure
import numpy as np
import optax
import scipy.ndimage
from extras.python.phonetics import hk_util
from extras.python.phonetics import phone_util
from extras.python.phonetics import plot
from extras.python.phonetics import stats
# By default, train to classify these monophthong vowel classes.
# Additionally, TIMIT has these consonant classes (after merging several very
# similar classes):
# r,z,n,f,dh,s,v,m,l,sh,hh,ng,w,q,y,k,th,el,p,ch,t,en,jh,g,b,zh,d,em,dx,nx,eng
# and there is a "sil" (silence) class for pauses between speech.
DEFAULT_CLASSES = 'aa,uw,ih,iy,eh,ae,ah,er'
FLAGS = flags.FLAGS
# Model hyperparameters.
flags.DEFINE_list('classes', DEFAULT_CLASSES,
'The model is trained to classify these phoneme classes.')
flags.DEFINE_list('hidden_units', ['16', '16'],
'List where the ith element represents the number of units '
'in the ith hidden layer.')
flags.DEFINE_float('h1_penalty', 1e-4,
'h1 regularizer penalty weight on the first layer.')
flags.DEFINE_float('l1_penalty', 1e-4,
'L1 regularizer penalty weight on the other layers.')
flags.DEFINE_float('disperse_penalty', 0.75,
'Penalty to disperse embedded points of different labels.')
flags.DEFINE_float('disperse_separation', 0.3,
'Parameter of disperse penalty.')
flags.DEFINE_float('mapping_penalty', 400.0,
'Penalty to encourage matching MAPPING_TARGETS.')
flags.DEFINE_float('mapping_delta', 0.1,
'Charbonnier delta parameter in mapping penalty.')
# Training flags.
flags.DEFINE_float('validation_fraction', 0.05,
'Fraction of training dataset to use for validation.')
flags.DEFINE_integer('num_epochs', 10,
'Number of training epochs.')
flags.DEFINE_integer('batch_size', 512,
'Number of training examples per batch.')
def angle2cart(angle_deg: float, mag: float = 1.0) -> Tuple[float, float]:
theta = angle_deg * np.pi / 180.0
return (mag * np.cos(theta), mag * np.sin(theta))
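# Worked example (editorial comment): angle2cart(90) is approximately
# (0.0, 1.0) and angle2cart(-90, 0.5) is approximately (0.0, -0.5), i.e. an
# angle in degrees plus an optional magnitude mapped onto Cartesian
# coordinates.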
# 2D target mapping coordinates for each class.
MAPPING_TARGETS = {
'aa': angle2cart(-90),
'uw': angle2cart(-30),
'ih': angle2cart(30),
'iy': angle2cart(90),
'eh': angle2cart(150),
'ae': angle2cart(-150),
'ah': (0.0, 0.0),
'er': angle2cart(0),
'uh': angle2cart(-90, 0.5),
}
@dataclasses.dataclass
class Metadata:
"""Metadata for phone model."""
classes: Sequence[str]
hidden_units: Sequence[int]
h1_penalty: float
l1_penalty: float
disperse_penalty: float
disperse_separation: float
mapping_penalty: float
mapping_delta: float
validation_fraction: float
num_epochs: int
batch_size: int
dataset_metadata: Optional[Mapping[str, Any]] = None
@staticmethod
def from_flags() -> 'Metadata':
"""Construct Metadata from flags."""
return Metadata(
classes=FLAGS.classes,
hidden_units=tuple(int(units) for units in FLAGS.hidden_units),
h1_penalty=FLAGS.h1_penalty,
l1_penalty=FLAGS.l1_penalty,
disperse_penalty=FLAGS.disperse_penalty,
disperse_separation=FLAGS.disperse_separation,
mapping_penalty=FLAGS.mapping_penalty,
mapping_delta=FLAGS.mapping_delta,
validation_fraction=FLAGS.validation_fraction,
num_epochs=FLAGS.num_epochs,
batch_size=FLAGS.batch_size,
)
def load_dataset(npz_file: str,
classes: Sequence[str],
class_weights: Optional[Dict[str, float]] = None,
) -> phone_util.Dataset:
"""Loads training or testing data from a numpy .npz file.
The .npz file holds 3D arrays of examples. The arrays are named according to
which class they represent, e.g. an array named 'ae' represents examples with
ground truth label 'ae'.
Args:
npz_file: String, npz filename.
classes: List of phoneme class names to train the model to classify.
class_weights: Dict, class weights for randomly subsampling the data. The
fraction of examples retained for class `phone` is
`class_weights.get(phone, 1.0) / max(class_weights.values())`
Returns:
Dataset.
"""
if class_weights is None:
class_weights = {}
max_weight = max(class_weights.values()) if class_weights else 1.0
class_weights = {phone: class_weights.get(phone, 1.0) / max_weight
for phone in classes}
dataset = phone_util.read_dataset_npz(npz_file)
dataset.subsample(class_weights)
return dataset
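# Worked example of the subsampling rule above (editorial comment): with
# class_weights = {'aa': 2.0, 'uw': 1.0}, max_weight is 2.0, so class 'aa'
# keeps all of its examples while 'uw' and every unlisted class keep roughly
# half of theirs.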
def embedding_regularizer(embedded: jnp.ndarray,
labels: jnp.ndarray,
meta: Metadata) -> jnp.ndarray:
"""Penalty to encourage good distribution in the embedding space."""
embedded = embedded[:, -1, :]
batch_size = embedded.shape[0]
# Penalize close points of different labels according to
# 1 / (1 + (min(distance, separation) / separation)^2)
# Comparing all pairs of points would cost quadratically with batch size. To
# reduce the cost to linear, we compare only the first point to the rest of
# the batch.
p0 = embedded[0]
others = embedded[1:]
separation_sqr = meta.disperse_separation**2
dist_sqr = (p0[0] - others[:, 0])**2 + (p0[1] - others[:, 1])**2
penalties = meta.disperse_penalty * jnp.sum(
jnp.where(labels[0] != labels[1:], 1, 0.0) *
1 / (1 + jnp.minimum(dist_sqr, separation_sqr) / separation_sqr))
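# Editorial worked example of the weight above (with the default
# disperse_separation = 0.3): the factor 1 / (1 + (min(d, 0.3) / 0.3)**2) is
# 1.0 at distance d = 0, about 0.69 at d = 0.2, and saturates at 0.5 for any
# d >= 0.3, so differently-labelled points that sit close together are
# penalised roughly twice as hard as well-separated ones.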
# For phones in MAPPING_TARGETS, use a Charbonnier loss to encourage points of
# those labels to be close to the target.
for i, phone in enumerate(meta.classes):
if phone in MAPPING_TARGETS:
tx, ty = MAPPING_TARGETS[phone]
penalties += meta.mapping_penalty * hk_util.charbonnier_loss_on_squared(
jnp.dot(labels == i,
(embedded[:, 0] - tx)**2 + (embedded[:, 1] - ty)**2),
meta.mapping_delta)
return penalties / batch_size
def model_fun(batch, meta: Metadata) -> Dict[str, jnp.ndarray]:
"""Builds model for phone mapping."""
num_frames = 1 + meta.dataset_metadata['num_frames_left_context']
num_channels = meta.dataset_metadata['num_channels']
penalties = 0.0
# The network input x has shape (batch, num_frames, num_channels), where
# typically batch=512, num_frames=3, num_channels=56.
x = batch['observed'].astype(jnp.float32)
# Compute mean PCEN power of the frame.
mean_power = jnp.mean(x, axis=-1, keepdims=True)
#### Encoder. ####
# The first few layers of the network process each frame independently. We
# temporarily reshape to (batch * num_frames, num_channels), flattening the
# frames dimension into the batch dimension.
x = jnp.reshape(x, (-1, num_channels))
h1_regularizer = lambda w: FLAGS.h1_penalty * hk_util.h1_loss(w)
l1_regularizer = lambda w: FLAGS.l1_penalty * hk_util.l1_loss(w)
# Apply several fully-connected layers. Use H1 regularization on the first
# layer to encourage smoothness along the channel dimension.
for i, units in enumerate(meta.hidden_units):
w_regularizer = h1_regularizer if i == 0 else l1_regularizer
x, penalty_term = hk_util.Linear(units, w_regularizer=w_regularizer)(x)
penalties += penalty_term
x = jax.nn.relu(x)
# Bottleneck layer, mapping the frame down to a 2D embedding space. We use
# tanh activation to restrict embedding to the square [-1, 1] x [-1, 1].
x, penalty_term = hk_util.Linear(2, w_regularizer=None)(x)
penalties += penalty_term
# Constrain embedded point to the hexagon.
embed_r = 1e-4 + hk_util.hexagon_norm(x[:, 0], x[:, 1])
x *= (jax.lax.tanh(embed_r) / embed_r).reshape(-1, 1)
# Now we reshape the frame dimension back out of the batch dimension. The next
# steps will process the embedded frames jointly.
embedded = x = jnp.reshape(x, (-1, num_frames, x.shape[-1]))
# Concatenate with mean_power to make a 3D embedding space. This extra
# dimension is meant as a proxy for the information in the energy envelope.
x = jnp.concatenate((x, mean_power), axis=-1)
#### Decoder. ####
# Decoder with a fixed architecture of 16-unit hidden layer.
x = hk.Flatten()(x)
x, penalty_term = hk_util.Linear(16, w_regularizer=l1_regularizer)(x)
penalties += penalty_term
x = jax.nn.relu(x)
# Final layer producing a score for each phone class.
scores, penalty_term = hk_util.Linear(len(meta.classes),
w_regularizer=l1_regularizer)(x)
penalties += penalty_term
return {'embedded': embedded,
'scores': scores,
'penalties': penalties}
def train_model(meta: Metadata,
dataset: phone_util.Dataset) -> hk_util.TrainedModel:
"""Train the model."""
model = hk_util.transform(functools.partial(model_fun, meta=meta))
# Split off a separate validation dataset.
dataset_val, dataset_train = dataset.split(meta.validation_fraction)
def generate_batches(dataset: phone_util.Dataset, batch_size: int):
"""Partition into batches. Examples in any partial batch are dropped."""
x, y = dataset.get_xy_arrays(meta.classes, shuffle=True)
batch_size = min(batch_size, len(x))
num_batches = len(x) // batch_size
batches_x = x[:num_batches * batch_size].reshape(
num_batches, batch_size, *x.shape[1:])
batches_y = y[:num_batches * batch_size].reshape(
num_batches, batch_size)
return batches_x, batches_y
train_x, train_y = generate_batches(dataset_train, batch_size=meta.batch_size)
t_eval_x, t_eval_y = generate_batches(dataset_train, batch_size=10000)
t_eval_batch = {'observed': t_eval_x[0], 'label': t_eval_y[0]}
v_eval_x, v_eval_y = generate_batches(dataset_val, batch_size=10000)
v_eval_batch = {'observed': v_eval_x[0], 'label': v_eval_y[0]}
# Initialize network and optimizer.
seed = np.uint64(random.getrandbits(64))
params = model.init(jax.random.PRNGKey(seed),
{'observed': train_x[0], 'label': train_y[0]})
optimizer = optax.adam(1e-3)
opt_state = optimizer.init(params)
# Print model summary.
print(hk_util.summarize_model(params))
def loss_fun(params, batch):
"""Training loss to optimize."""
outputs = model.apply(params, None, batch)
labels = hk.one_hot(batch['label'], len(meta.classes))
softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(outputs['scores']))
softmax_xent /= labels.shape[0]
disperse = embedding_regularizer(outputs['embedded'], batch['label'], meta)
return softmax_xent + disperse + outputs['penalties']
@jax.jit
def train_step(params, opt_state, batch):
"""Learning update rule."""
grads = jax.grad(loss_fun)(params, batch)
updates, opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
@jax.jit
def accuracy(params, batch):
"""Evaluate classification accuracy."""
scores = model.apply(params, None, batch)['scores']
return jnp.mean(jnp.argmax(scores, axis=-1) == batch['label'])
# Training loop.
num_steps = len(train_x) * meta.num_epochs
step_digits = len(str(num_steps))
step = 0
for _ in range(meta.num_epochs):
for batch_x, batch_y in zip(train_x, train_y):
step += 1
train_batch = {'observed': batch_x, 'label': batch_y}
final_step = (step == num_steps)
if final_step or step % 500 == 0:
# Periodically evaluate classification accuracy on train & test sets.
train_accuracy = accuracy(params, t_eval_batch)
val_accuracy = accuracy(params, v_eval_batch)
train_accuracy, val_accuracy = jax.device_get(
(train_accuracy, val_accuracy))
print(f'[{step:-{step_digits}d}/{num_steps}] train acc = '
f'{train_accuracy:.4f}, val acc = {val_accuracy:.4f}')
params, opt_state = train_step(params, opt_state, train_batch)
return hk_util.TrainedModel(model, meta=meta, params=params)
def compute_2d_hists(labels: np.ndarray,
coords: np.ndarray,
num_classes: int,
num_bins: int = 50,
hist_smoothing_stddev=0.05) -> np.ndarray:
"""Compute a 2D histogram over [-1, 1] x [-1, 1] for each class."""
bin_width = 2 / num_bins
hist = np.empty((num_classes, num_bins, num_bins))
for i in range(num_classes):
mask = (labels == i)
coords_i = coords[mask].reshape(-1, 2)
x, y = coords_i[:, 0], coords_i[:, 1]
hist[i] = np.histogram2d(y, x, bins=num_bins, range=([-1, 1], [-1, 1]))[0]
# For a more reliable density estimate, smooth the histogram with a Gaussian
# kernel with stddev `hist_smoothing_stddev`.
sigma = hist_smoothing_stddev / bin_width
hist[i] = scipy.ndimage.gaussian_filter(hist[i], sigma)
# Normalize density to integrate to one.
hist[i] /= 1e-12 + bin_width**2 * hist[i].sum()
return hist
def get_subplot_shape(num_subplots: int) -> Tuple[int, int]:
subplot_rows = max(1, int(np.sqrt(num_subplots)))
subplot_cols = -(-num_subplots // subplot_rows)
return subplot_rows, subplot_cols
def plot_spatial_hists(labels: np.ndarray,
logits: np.ndarray,
classes: Sequence[str]) -> matplotlib.figure.Figure:
"""Plot histograms of how each phoneme class maps spatially to tactors."""
# Get the classes that are in both `classes` and MAPPING_TARGETS.
targeted_classes = [phone for phone in classes if phone in MAPPING_TARGETS]
logits = np.compress([phone in targeted_classes for phone in classes],
logits, axis=1)
targets = np.vstack([MAPPING_TARGETS[phone] for phone in targeted_classes])
# Map logits down to 2D coordinates by weighted average.
softmax_logits = np.exp2(4.0 * logits)
softmax_logits /= np.sum(softmax_logits, axis=1, keepdims=True)
coords = np.dot(softmax_logits, targets)
hist = compute_2d_hists(labels, coords, len(classes))
# Line segment data for plotting Voronoi cell boundaries.
voronoi_x = [[0.2454, 0.4907, 0.2454, -0.2454, -0.4907, -0.2454, 0.2454,
0.4907, 0.2454, -0.2454, -0.4907, -0.2454],
[0.4907, 0.2454, -0.2454, -0.4907, -0.2454, 0.2454, 0.5774, 1.0,
0.5774, -0.5774, -1.0, -0.5774]]
voronoi_y = [[-0.425, 0.0, 0.425, 0.425, 0.0, -0.425, -0.425, 0.0, 0.425,
0.425, 0.0, -0.425],
[0.0, 0.425, 0.425, 0.0, -0.425, -0.425, -1.0, 0.0, 1.0, 1.0,
0.0, -1.0]]
# For more plot contrast, allow a small number of pixels to clip.
vmax = np.percentile(hist, 99.7)
fig = matplotlib.figure.Figure(figsize=(9, 6))
subplot_rows, subplot_cols = get_subplot_shape(len(classes))
for i in range(len(classes)):
ax = fig.add_subplot(subplot_rows, subplot_cols, i + 1)
ax.imshow(hist[i], origin='lower', aspect='equal', interpolation='bicubic',
cmap='density', vmin=0.0, vmax=vmax, extent=(-1, 1, -1, 1))
ax.plot(voronoi_x, voronoi_y, 'w-', linewidth=0.7, alpha=0.9)
ax.plot(0.98 * targets[:, 0], 0.98 * targets[:, 1], 'ko', alpha=0.3)
ax.set_title(classes[i], fontsize=14)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
fig.suptitle('Spatial histograms', fontsize=15)
return fig
def draw_hexagon(ax, **kw) -> None:
"""Draws a hexagon centered at (0, 0) on axis `ax`."""
hexagon = np.exp(1j * (np.pi / 3) * np.arange(7))
ax.plot(np.imag(hexagon), np.real(hexagon), **kw)
def plot_embedded_hists(
labels: np.ndarray,
embedded: np.ndarray,
classes: Sequence[str],
) -> Tuple[matplotlib.figure.Figure, matplotlib.figure.Figure]:
"""Plot histograms of how each phoneme class maps in the embedding space."""
hist = compute_2d_hists(labels, embedded, len(classes))
# Make a figure that shows all classes together on the same plot.
fig_merged = matplotlib.figure.Figure(figsize=(6, 6))
ax = fig_merged.add_subplot(1, 1, 1)
# Get a set of distinct colors by sampling from the 'rainbow' colormap.
cmap = matplotlib.cm.get_cmap('rainbow')
n = np.arange(len(classes))
n = (11 * n) % 37 # Scramble order so that adjacent colors are dissimilar.
x = n / 37.0
colors = ['#%02X%02X%02X' % (r, g, b)
for r, g, b in (200 * cmap(x)[:, :3]).astype(int)]
for i in range(len(classes)):
level_thresholds = np.max(hist[i]) * np.array([0.5, 1])
kwargs = {'colors': colors[i], 'origin': 'lower',
'extent': (-1, 1, -1, 1)}
contour = ax.contour(hist[i], levels=level_thresholds, **kwargs)
ax.clabel(contour, fmt=classes[i], colors=colors[i])
ax.contourf(hist[i], levels=level_thresholds, alpha=0.2, **kwargs)
draw_hexagon(ax, color='k', linewidth=0.7)
ax.set_aspect('equal', 'datalim')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
fig_merged.suptitle('Embedding histograms merged', fontsize=15)
# Make another figure with a separate plot for each class.
# For more plot contrast, allow a small number of pixels to clip.
vmax = np.percentile(hist, 99.7)
fig_separate = matplotlib.figure.Figure(figsize=(9, 6))
subplot_rows, subplot_cols = get_subplot_shape(len(classes))
for i in range(len(classes)):
ax = fig_separate.add_subplot(subplot_rows, subplot_cols, i + 1)
ax.imshow(hist[i], origin='lower', aspect='equal', interpolation='bicubic',
cmap='density', vmin=0.0, vmax=vmax, extent=(-1, 1, -1, 1))
draw_hexagon(ax, color='w', linewidth=0.7, alpha=0.9)
ax.set_title(classes[i], fontsize=14)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
fig_separate.suptitle('Embedding histograms', fontsize=15)
return fig_merged, fig_separate
def plot_kernels(params: hk.Params,
layer_name: str,
max_num: int = 9) -> matplotlib.figure.Figure:
"""Plot kernels from layer `layer_name`.
Args:
params: Model params dict.
layer_name: String.
max_num: Integer, max number of kernels to plot. The kernels with the most
energy are plotted.
Returns:
Matplotlib figure.
"""
kernel = np.asarray(params[layer_name]['w'])
top_index = np.argsort(np.sum(kernel**2, axis=0))[::-1][:max_num]
num_taps = kernel.shape[0]
fig = matplotlib.figure.Figure(figsize=(9, 6))
subplot_rows, subplot_cols = get_subplot_shape(len(top_index))
for i in range(len(top_index)):
ax = fig.add_subplot(subplot_rows, subplot_cols, i + 1)
ax.plot(kernel[:, top_index[i]], '.-' if num_taps < 60 else '-')
ax.axhline(y=0, color='k')
fig.suptitle(f'{layer_name} kernels', fontsize=15)
return fig
def eval_model(model: hk_util.TrainedModel,
dataset: phone_util.Dataset,
output_dir: str) -> None:
"""Evaluate model and write HTML report to output directory."""
classes = model.meta.classes
x_test, y_test = dataset.get_xy_arrays(classes)
outputs = model(None, {'observed': x_test})
scores = np.asarray(outputs['scores'])
s = stats.MulticlassClassifierStats(len(classes))
s.accum(y_test, scores)
d_primes = s.d_prime
confusion = stats.Confusion(s.confusion_matrix, classes)
information_transfer = confusion.transfer_bits
mean_per_class_accuracy = np.mean(np.diag(confusion.normalized_matrix))
print('mean d-prime: %.4f' % d_primes.mean())
print('information transfer: %.2f' % information_transfer)
print('mean per class accuracy: %.4f' % mean_per_class_accuracy)
# Write HTML report.
def output_file(*args):
return os.path.join(output_dir, *args)
confusion.save_csv(output_file('confusion.csv'))
os.makedirs(os.path.join(output_dir, 'images'), exist_ok=True)
report = plot.HtmlReport(output_file('report.html'), 'Eval')
report.write('<p>training completed: %s</p>'
% datetime.datetime.now().strftime('%Y-%m-%d %H:%M'))
report.write('<p>mean d-prime: %.4f</p>' % d_primes.mean())
report.write('<p>information transfer: %.2f</p>'
% information_transfer)
report.write('<p>mean per class accuracy: %.4f</p>'
% mean_per_class_accuracy)
report.write('<pre>')
report.write(hk_util.summarize_model(model.params))
report.write('</pre>')
report.write('<table><tr><th>phone</th><th>d-prime</th></tr>')
for phone, d_prime in zip(classes, d_primes):
report.write(f'<tr><td>{phone}</td><td>{d_prime:.4f}</td></tr>')
report.write('</table>')
# Plot confusion matrix.
fig = plot.plot_matrix_figure(
confusion.normalized_matrix, classes, title='Normalized confusion matrix',
row_label='True phone', col_label='Predicted phone')
report.save_figure(output_file('images', 'confusion.png'), fig)
del fig
# Plot histograms of how each phoneme class maps spatially to tactors.
fig = plot_spatial_hists(y_test, scores, classes)
report.save_figure(output_file('images', 'spatial_hists.png'), fig)
del fig
if 'embedded' in outputs:
embedded = np.asarray(outputs['embedded'])
fig_merged, fig_separate = plot_embedded_hists(y_test, embedded, classes)
report.save_figure(output_file('images', 'embedded_hists_merged.png'),
fig_merged)
report.save_figure(output_file('images', 'embedded_hists.png'),
fig_separate)
del fig_merged
del fig_separate
# Plot kernels for each layer. This is useful for tuning regularization.
layer_names = sorted(model.params.keys())
for i, layer_name in enumerate(layer_names):
if layer_name.startswith('linear'):
fig = plot_kernels(model.params, layer_name)
report.save_figure(output_file('images', 'kernels%d.png' % i), fig)
del fig
report.close()
print('\nfile://' + os.path.abspath(os.path.join(output_dir, 'report.html')))
|
apache-2.0
|
JJGO/Parallel-Computing
|
4 Homework 4/3 Testing/2 Analysis OLD/13/time_analysis.py
|
1
|
3821
|
from matplotlib import pyplot
# from mpltools import style
import prettyplotlib as ppl
# from mpltools import layout
# style.use('ggplot')
# figsize = layout.figaspect(scale=1.2)
ps = [1,2,4,6,8,12,16,24,28,30,31,32,33,34]
# ps = [1,2,4,8]
# ps = range(1,9)
best_paths = {}
best_paths[13] = [0, 9, 1, 8, 7, 2, 3, 4, 11, 6, 10, 12, 5]
best_paths[15] = [0, 13, 9, 1, 8, 14, 7, 2, 3, 4, 11, 6, 10, 12, 5]
best_costs = {}
best_costs[13] = 778.211287
best_costs[15] = 782.989290
# paths_explored = { p : [] for p in ps}
n = 13
modes = ["DFS_DEBUG"]
for mode in modes:
times = { p : [] for p in ps}
changes = { p : [] for p in ps}
paths = { p : [] for p in ps}
pushes = { p : [] for p in ps}
print mode
with open("Results_"+mode+".txt",'r') as f:
# for i in range(9*8):
# for j in range(8):
# f.readline()
while(f.readline()):
n = int(f.readline()[:2])
p = int(f.readline().split()[0])
path,cost = f.readline().split(':')
path = map(int,path.split(',')[:-1])
cost = float(cost[:-1])
f.readline()
c,np,pu = [],[],[]
for i in range(p):
values = map(int,f.readline().split())
c.append(values[1])
np.append(values[2])
pu.append(values[3])
assert best_costs[n] == cost and best_paths[n] == path
time = f.readline().split()[-2]
times[p].append(float(time))
changes[p].append(c)
paths[p].append(np)
pushes[p].append(pu)
f.readline()
f.readline()
# print times
for k in times:
times[k] = sorted(times[k])[:20]
avs = { p : sum(times[p])/len(times[p]) for p in ps}
mins = { p : min(times[p]) for p in ps }
for p in ps:
time = times[p]
av = avs[p]
print "{0} & {1} - {2:.5f} {3:.5f} {4:.5f}".format(n,p,av,min(time),max(time))
# " ".join(map(str,time))
paths_per_thread = {}
total_paths = {}
for proc in ps:
x = [sum(p) for p in paths[proc]]
total_paths[proc] = sum(x)/len(x)
y = [sum(p)*1.0/len(p) for p in paths[proc]]
paths_per_thread[proc] = sum(y)/len(y)
print "{0} - {1:09.1f} {2:09.1f} {3:7d}".format(proc, paths_per_thread[proc], total_paths[proc]*1.0/proc, total_paths[proc])
# for k in sorted(times):
# print k,len(times[k])
ideals = map(lambda x: avs[ps[0]]/x,ps)
fig = pyplot.figure()
ppl.plot(ps,ideals, 'go-')
ppl.plot(ps,[avs[p] for p in ps], 'ro-')
ppl.plot(ps,[mins[p] for p in ps], 'bo-')
pyplot.xlabel('Processors')
pyplot.ylabel('Time (s)')
pyplot.title('Running Times for n = '+str(n))
pyplot.legend(['Ideal Case','Average Case','Best Case'],loc=3)
pyplot.yscale('log')
pyplot.savefig(str(n)+'_'+mode+'.png')
# pyplot.show()
SpeedUp = { p : avs[1]/avs[p] for p in ps }
Efficiency = { p : SpeedUp[p]/p for p in ps }
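# Editorial note on the two metrics above: SpeedUp(p) = T(1) / T(p) and
# Efficiency(p) = SpeedUp(p) / p; e.g. if the average run takes 10 s on one
# processor and 2 s on eight, the speedup is 5x and the efficiency is
# 5 / 8 = 0.625.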
# for n in ns:
fig = pyplot.figure()
ppl.plot(ps,ps, 'go-')
ppl.plot(ps,[SpeedUp[p] for p in ps], 'ro-')
pyplot.xlabel('Processors')
pyplot.ylabel('SpeedUp')
pyplot.title('Comparison of SpeedUp')
pyplot.legend(['Ideal SpeedUp','n = '+str(n)],loc=2)
pyplot.savefig('SpeedUp_'+mode+'.png')
# pyplot.show()
fig = pyplot.figure()
ppl.plot(ps,[1]*len(ps), 'go-')
ppl.plot(ps,[Efficiency[p] for p in ps], 'ro-')
pyplot.xlabel('Processors')
pyplot.ylabel('Efficiency')
axes = pyplot.gca()
# axes.set_xlim([1,35])
axes.set_ylim([0,1.1])
pyplot.title('Comparison of Efficiencies')
pyplot.legend(['Ideal Efficiency','n = '+str(n)],loc=3)
pyplot.savefig('Efficiency_'+mode+'.png')
# pyplot.show()
|
gpl-2.0
|
NunoEdgarGub1/scikit-learn
|
sklearn/cluster/mean_shift_.py
|
106
|
14056
|
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large
datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
        Maximum number of iterations, per seed point, before the clustering
        operation terminates (for that seed point) if it has not yet converged.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
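# A minimal usage sketch for mean_shift (illustrative only; the toy data and
# the bandwidth value are assumptions chosen for demonstration):
#
#     >>> import numpy as np
#     >>> X = np.array([[1.0, 1.0], [1.1, 0.9], [0.9, 1.1],
#     ...               [5.0, 5.0], [5.1, 4.9]])
#     >>> centers, labels = mean_shift(X, bandwidth=1.0)
#     >>> len(centers) == len(np.unique(labels))
#     True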
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
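# A worked example of the binning above (illustrative only; the seeds are the
# occupied grid cells scaled back by bin_size, and their order may depend on
# dict iteration, hence the sort):
#
#     >>> import numpy as np
#     >>> X = np.array([[1.1, 1.2], [0.9, 1.0], [4.6, 4.8]])
#     >>> seeds = get_bin_seeds(X, bin_size=1.0)
#     >>> sorted(map(tuple, seeds))
#     [(1.0, 1.0), (5.0, 5.0)]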
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
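# A minimal end-to-end sketch with the estimator API (illustrative only; the
# toy data, bandwidth and bin_seeding choices are assumptions):
#
#     >>> import numpy as np
#     >>> X = np.array([[1.0, 1.0], [1.1, 0.9], [5.0, 5.0], [5.1, 4.9]])
#     >>> ms = MeanShift(bandwidth=1.0, bin_seeding=True).fit(X)
#     >>> ms.cluster_centers_.shape
#     (2, 2)
#     >>> int(ms.predict([[0.8, 1.2]])[0]) == int(ms.labels_[0])
#     True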
|
bsd-3-clause
|
eroicaleo/MachineLearningUW
|
course1/week2/quiz2/PredictingHousePrices.py
|
1
|
6201
|
# coding: utf-8
# #Fire up graphlab create
# In[35]:
import graphlab
# #Load some house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# In[36]:
sales = graphlab.SFrame('home_data.gl/')
# In[37]:
sales
# #Exploring the data for housing sales
# The house price is correlated with the number of square feet of living space.
# In[38]:
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
# #Create a simple regression model of sqft_living to price
# Split data into training and testing.
# We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
# In[39]:
train_data,test_data = sales.random_split(.8,seed=0)
# ##Build the regression model using only sqft_living as a feature
# In[40]:
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'])
# #Evaluate the simple model
# In[41]:
print test_data['price'].mean()
# In[42]:
print sqft_model.evaluate(test_data)
# RMSE of about \$255,170!
# #Let's show what our predictions look like
# Matplotlib is a Python plotting library that we can use to visualize these predictions. You can install it with:
#
# 'pip install matplotlib'
# In[43]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[44]:
plt.plot(test_data['sqft_living'],test_data['price'],'.',
test_data['sqft_living'],sqft_model.predict(test_data),'-')
# Above: blue dots are original data, green line is the prediction from the simple regression.
#
# Below: we can view the learned regression coefficients.
# In[45]:
sqft_model.get('coefficients')
# #Explore other features in the data
#
# To build a more elaborate model, we will explore using more features.
# In[46]:
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
# In[47]:
sales[my_features].show()
# In[48]:
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
# Pull the bar at the bottom to view more of the data.
#
# 98039 is the most expensive zip code.
# #Build a regression model with more features
# In[49]:
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features)
# In[50]:
print my_features
# ##Comparing the results of the simple model with adding more features
# In[51]:
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
# The RMSE goes down from \$255,170 to \$179,508 with more features.
# #Apply learned models to predict prices of 3 houses
# The first house we will use is considered an "average" house in Seattle.
# In[52]:
house1 = sales[sales['id']=='5309101200']
# In[53]:
house1
# <img src="house-5309101200.jpg">
# In[54]:
print house1['price']
# In[55]:
print sqft_model.predict(house1)
# In[56]:
print my_features_model.predict(house1)
# In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
# ##Prediction for a second, fancier house
#
# We will now examine the predictions for a fancier house.
# In[57]:
house2 = sales[sales['id']=='1925069082']
# In[58]:
house2
# <img src="house-1925069082.jpg">
# In[59]:
print sqft_model.predict(house2)
# In[60]:
print my_features_model.predict(house2)
# In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
# ##Last house, super fancy
#
# Our last house is a very large one owned by a famous Seattleite.
# In[61]:
bill_gates = {'bedrooms':[8],
'bathrooms':[25],
'sqft_living':[50000],
'sqft_lot':[225000],
'floors':[4],
'zipcode':['98039'],
'condition':[10],
'grade':[10],
'waterfront':[1],
'view':[4],
'sqft_above':[37500],
'sqft_basement':[12500],
'yr_built':[1994],
'yr_renovated':[2010],
'lat':[47.627606],
'long':[-122.242054],
'sqft_living15':[5000],
'sqft_lot15':[40000]}
# <img src="house-bill-gates.jpg">
# In[62]:
print my_features_model.predict(graphlab.SFrame(bill_gates))
# The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
# In[63]:
house_zip_code = sales[sales["zipcode"] == "98039"]
# In[64]:
house_zip_code
# In[65]:
house_zip_code['price'].mean()
# In[66]:
house_zip_code_range = house_zip_code[house_zip_code.apply(lambda x: x['sqft_living'] > 2000.0 and x['sqft_living'] <= 4000.0)]
# In[67]:
house_zip_code_range.head()
# In[68]:
house_zip_code_range.num_rows()
# In[69]:
house_zip_code.num_rows()
# In[70]:
advanced_features = [
'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
'condition', # condition of house
'grade', # measure of quality of construction
'waterfront', # waterfront property
'view', # type of view
'sqft_above', # square feet above ground
'sqft_basement', # square feet in basement
'yr_built', # the year built
'yr_renovated', # the year renovated
'lat', 'long', # the lat-long of the parcel
'sqft_living15', # average sq.ft. of 15 nearest neighbors
'sqft_lot15', # average lot size of 15 nearest neighbors
]
# In[71]:
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features)
# In[72]:
print advanced_features_model.evaluate(test_data)
# In[73]:
advanced_features_model.evaluate(test_data)['rmse'] - my_features_model.evaluate(test_data)['rmse']
# In[ ]:
# In[ ]:
|
mit
|
bthirion/scikit-learn
|
examples/decomposition/plot_pca_3d.py
|
354
|
2432
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
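    # Note: the scaled axes above are immediately overwritten below; only the
    # fixed-length axes 3 * V.T are actually used to build the plotted plane.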
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
|
bsd-3-clause
|
fbcotter/dataset_loading
|
dataset_loading/tensorboard_logging.py
|
1
|
2987
|
"""Simple example on how to log scalars and images to tensorboard without
tensor ops."""
__author__ = "Michael Gygli"
import tensorflow as tf
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
class Logger(object):
"""Logging in tensorboard without tensorflow ops."""
def __init__(self, log_dir='./log', writer=None):
"""Creates a summary writer logging to log_dir."""
if writer is None:
self.writer = tf.summary.FileWriter(log_dir)
else:
self.writer = writer
def log_scalar(self, tag, value, step):
"""Log a scalar variable.
        Parameters
        ----------
        tag : basestring
            Name of the scalar
        value : float
            Value of the scalar
        step : int
            training iteration
"""
summary = tf.Summary(
value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def log_images(self, tag, images, step):
"""Logs a list of images."""
im_summaries = []
for nr, img in enumerate(images):
            # Encode the image as a PNG into an in-memory binary buffer
            # (plt.imsave writes bytes, so BytesIO rather than StringIO is needed)
            s = BytesIO()
            plt.imsave(s, img, format='png')
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
im_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, nr),
image=img_sum))
# Create and write Summary
summary = tf.Summary(value=im_summaries)
self.writer.add_summary(summary, step)
def log_histogram(self, tag, values, step, bins=1000):
"""Logs the histogram of a list/vector of values."""
# Create histogram using numpy
if type(values) is list:
values = np.array(values)
counts, bin_edges = np.histogram(values, bins=bins)
# Fill fields of histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Requires equal number as bins, where the first goes from -DBL_MAX to
# bin_edges[1]. See
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
# Thus, we drop the start of the first bin
bin_edges = bin_edges[1:]
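        # For example, np.histogram(values, bins=3) returns 3 counts and 4
        # edges; dropping the first edge leaves exactly one bucket_limit per
        # count, which is what the HistogramProto expects.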
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
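# A minimal usage sketch (illustrative only; the log directory and the logged
# values are assumptions, not part of the original module):
#
#     >>> logger = Logger(log_dir='./log')
#     >>> logger.log_scalar('train/loss', 0.25, step=1)
#     >>> logger.log_histogram('weights/fc1', np.random.randn(1000), step=1)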
|
mit
|
ak681443/mana-deep
|
conv_ae/final_model.py
|
2
|
3922
|
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping ,LearningRateScheduler
from keras import regularizers
import tensorflow as tf
tf.python.control_flow_ops = tf
import os
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from keras.optimizers import Adam
mypath = '/home/arvind/arvind/cleaned_data/train/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
images = []
for filen in files:
img = cv2.imread(mypath+filen)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = np.invert(img)
img = img/np.float32(np.max(img))
img[img>0.50] = 1
img[img!=1] = 0
img = cv2.resize(img, (224,224))
images.append(np.array([img]))
images.append(np.array([np.fliplr(img)]))
images.append(np.array([np.flipud(img)]))
images.append(np.array([np.fliplr(np.flipud(img))]))
print 'Training with ', len(images), ' samples'
autoencoder = None
with tf.device('/gpu:0'):
input_img = Input(shape=(224, 224, 1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224, 1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(16, 3, 3,activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Convolution2D(1 , 3, 3, activation='hard_sigmoid', border_mode='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer="adamax", loss='mse')
images_train = np.array(images[:-400])
images_test = np.array(images[-400:])
images_train = images_train.astype('float32')#/255. # / float(np.max(images_train))
images_test = images_test.astype('float32') #/255.# / float(np.max(images_test))
#print np.max(images_train_op[0])
#plt.imshow(np.reshape(images_train_op[50],(224,224)))
#plt.show()
# images_train_op = np.array(images_train_op)
# images_test_op = np.array(images_test_op)
images_train = np.reshape(images_train, (len(images_train),224, 224, 1))
images_test = np.reshape(images_test, (len(images_test), 224, 224, 1))
#images_train_op = np.reshape(images_train_op, (len(images_train_op), 224, 224, 1))
#images_test_op = np.reshape(images_test_op, (len(images_test_op), 224, 224, 1))
#print images_test_op.shape
#print images_train_op.shape
import math
# Exponential decay schedule, floored at 1e-4. Note that it is shadowed by the
# step-wise schedule defined just below, which is the definition actually used.
def step_decay(epoch):
    initial_lrate = 0.1
    lrate = initial_lrate * (0.999 ** epoch)
    if lrate < 0.0001:
        return 0.0001
    return lrate
# Step-wise decay: halve the learning rate every 10 epochs.
def step_decay(epoch):
    initial_lrate = 0.1
    drop = 0.5
    epochs_drop = 10.0
    lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return lrate
print autoencoder.summary()
autoencoder.load_weights("model_best_4.h5")
saver = ModelCheckpoint("model_iter.h5", monitor='loss', verbose=1, save_best_only=True, mode='auto')
stopper = EarlyStopping(monitor='loss', patience=50, verbose=1, mode='auto')
lrdecay = LearningRateScheduler(step_decay)
autoencoder.fit(images_train, images_train,
nb_epoch=10000,
batch_size=256,
shuffle=True,
validation_data=(images_test,images_test), verbose=1, callbacks=[saver, stopper])
# serialize model to JSON
model_json = autoencoder.to_json()
with open("model4.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
autoencoder.save_weights("model_best_4.h5")
print("Saved model to disk")
|
apache-2.0
|
gundramleifert/exp_tf
|
models/lp_stn/lp_stn_v1.py
|
1
|
19610
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
from itertools import chain
import tensorflow as tf
from util.spatial_transformer import transformer
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.contrib.layers import batch_norm
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from util.saver import PrefixSaver
from util.saver import get_op
from util.CharacterMapper import get_cm_lp
from util.variables import get_uninitialized_variables
from random import shuffle
import os
import time
import numpy as np
import matplotlib.pyplot as plt
nEpochs = 1000
batchSize = 16
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp0.lst'
INPUT_PATH_TRAIN1 = './private/lists/lp1.lst'
INPUT_PATH_TRAIN2 = './private/lists/lp2.lst'
INPUT_PATH_TRAIN3 = './private/lists/lp3.lst'
INPUT_PATH_TRAIN4 = './private/lists/lp4.lst'
INPUT_PATH_TRAIN5 = './private/lists/lp_enlarge_train.lst'
INPUT_PATH_VAL = './private/lists/lp_enlarge_val.lst'
cm = get_cm_lp()
# Additional NaC Channel
nClasses = cm.size() + 1
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
trainList1 = read_image_list(INPUT_PATH_TRAIN1)
trainList2 = read_image_list(INPUT_PATH_TRAIN2)
trainList3 = read_image_list(INPUT_PATH_TRAIN3)
trainList4 = read_image_list(INPUT_PATH_TRAIN4)
trainList5 = read_image_list(INPUT_PATH_TRAIN5)
numT = 16000
stepsPerEpocheTrain = numT / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
channels = 1
learningRate = 0.001
momentum = 0.9
##### This is the image size for the READ part of the net, i.e. the output of the last STN.
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
nHiddenLSTM1 = 256
imgInW = 512
imgInH = 512
def inference(images, seqLen, keep_prob, phase_train):
with tf.variable_scope('findPart') as scope:
imagesRes = tf.image.resize_bilinear(images, (128,128))
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(imagesRes, kernel, [1, 2, 2, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.tanh(pre_activation, name=scope.name)
pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool1, kernel, [1, 2, 2, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.tanh(pre_activation, name=scope.name)
pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 128, 512], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 2, 2, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[512]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.tanh(pre_activation, name=scope.name)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
h_conv3_flat = tf.reshape(pool3, [batchSize, 2048])
with tf.variable_scope('ff1') as scope:
# h_conv2_flat = tf.reshape(imagesRes, [batchSize, 16384])
# W_fc_loc1 = tf.Variable(tf.zeros([128,20]), name='weights')
# b_fc_loc1 = tf.Variable(tf.zeros([20]), name='bias')
# W_fc_loc2 = tf.Variable(tf.zeros([20,6]), name='weights')
W_fc_loc1 = tf.Variable(tf.truncated_normal([2048, 20], stddev=5e-4), name='weights')
b_fc_loc1 = tf.Variable(tf.truncated_normal([20], stddev=5e-4), name='bias')
W_fc_loc2 = tf.Variable(tf.truncated_normal([20, 4], stddev=5e-4), name='weights')
# Use identity transformation as starting point
# initial = np.array([[0.5, 0, 128], [0, 0.1, 232]])
# s_x = tf.Variable(1.0, name='s_x',dtype='float32')
s_x = tf.Variable(0.5, name='s_x',dtype='float32')
# s_y = tf.Variable(1.0, name='s_y',dtype='float32')
s_y = tf.Variable(0.093, name='s_y',dtype='float32')
t_x = tf.Variable(0.0, name='t_x',dtype='float32')
t_y = tf.Variable(0.0, name='t_y',dtype='float32')
# r_1 = tf.constant(0.0, name="r_1")
# r_2 = tf.constant(0.0, name="r_2")
# b_fc_loc2 = [s_x, r_1, t_x, r_2, s_y, t_y]
b_fc_loc2 = [s_x, t_x, s_y, t_y]
# b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
# %% Define the two layer localisation network
h_fc_loc1 = tf.nn.tanh(tf.matmul(h_conv3_flat, W_fc_loc1) + b_fc_loc1)
# %% We can add dropout for regularizing and to reduce overfitting like so:
h_fc_loc1_drop = tf.nn.dropout(h_fc_loc1, keep_prob)
# %% Second layer
h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1_drop, W_fc_loc2) + b_fc_loc2)
# print(h_fc_loc2[:,0].get_shape())
# print(tf.constant(0,dtype='float32', shape=[batchSize]).get_shape())
aff = [h_fc_loc2[:,0], tf.constant(0,dtype='float32', shape=[batchSize]), h_fc_loc2[:,1], tf.constant(0,dtype='float32', shape=[batchSize]), h_fc_loc2[:,2], h_fc_loc2[:,3]]
# print(aff)
finAff = tf.pack(aff)
finAff = tf.transpose(finAff, [1, 0])
# print(finAff.get_shape())
# %% We'll create a spatial transformer module to identify discriminative
# %% patches
out_size = (imgH, imgW)
stn_out = transformer(images, finAff, out_size)
stn_out = tf.reshape(stn_out, [batchSize, imgH, imgW, 1])
mean, var = tf.nn.moments(stn_out, axes=[1,2], keep_dims=True)
# print(mean.get_shape())
stn_out = tf.nn.batch_normalization(stn_out, mean=mean, variance=var, offset=None, scale=None, variance_epsilon=1e-6)
with tf.variable_scope('readPart') as scope:
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(stn_out, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN1")
conv1 = tf.nn.relu(conv1_bn, name=scope.name)
norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN2")
conv2 = tf.nn.relu(conv2_bn, name=scope.name)
norm2 = tf.nn.local_response_normalization(conv2, name='norm2')
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(norm2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3_bn = batch_norm(pre_activation, decay=0.999, is_training=phase_train, scope="BN3")
conv3 = tf.nn.relu(conv3_bn, name=scope.name)
norm3 = tf.nn.local_response_normalization(conv3, name='norm3')
pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
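            # Rough shape trace for the defaults above (imgH=48, imgW=256,
            # batchSize=16), assuming SAME padding as configured: pool3 is
            # (16, 1, 43, 128), so rnnIn ends up as a list of 43 time-step
            # tensors, each of shape (16, 128). These numbers only illustrate
            # the reshaping and are not checked at runtime.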
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedFW = rnn_cell.DropoutWrapper(forwardH1, output_keep_prob=keep_prob)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedBW = rnn_cell.DropoutWrapper(backwardH1, output_keep_prob=keep_prob)
outputs, _, _ = bidirectional_rnn(droppedFW, droppedBW, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv, stn_out, imagesRes, finAff
def loss(logits3d, tgt, seqLenAfterConv):
loss = tf.reduce_sum(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
loss = control_flow_ops.with_dependencies([updates], loss)
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgInH, imgInW, channels))
# inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
# seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
seqLengths = imgW*tf.ones(shape=(batchSize))
keep_prob = tf.placeholder(tf.float32)
trainIN = tf.placeholder_with_default(tf.constant(False), [])
logits3d, seqAfterConv, stn_o, img_sub, f_aff = inference(inputX, seqLengths, keep_prob, trainIN)
loss = loss(logits3d, targetY, seqAfterConv)
# saver1 = PrefixSaver('readPart', './private/models/lpReadInit/')
dict1 = get_op('readPart')
# print(dict1)
saver1 = tf.train.Saver(dict1)
# vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# for var in vars:
# print(var.name)
# saver = tf.train.Saver()
#Optimize ONLY new vars.
toOpt = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='findPart')
print('To Train')
for v in toOpt:
print(v.name)
# optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss, var_list=toOpt)
optimizer = tf.train.AdamOptimizer().minimize(loss, var_list=toOpt)
# optimizer = tf.train.AdamOptimizer().minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
# tf.global_variables_initializer().run()
saver1.restore(session, './private/models/lpReadInit/checkpoint-14')
uVar = get_uninitialized_variables()
tf.variables_initializer(var_list=uVar).run()
uVarB = get_uninitialized_variables()
for var in uVarB:
print(var.name)
# ckpt = tf.train.get_checkpoint_state("./private/models/lp_stn1/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
#
# print(ckpt)
# workList = trainList5[:]
# workList = trainList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgInW,
# mvn=False)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, keep_prob: 1.0, trainIN: False}
# lossB, aErr, p, s_o, f_a = session.run([loss, err, pred, stn_o, f_aff], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cm.get_value(idx))
# print(res)
# print(f_a)
# # print(p)
# plt.imshow(s_o[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
if(epoch == 10):
trainList.extend(trainList1)
if (epoch == 30):
trainList.extend(trainList2)
if (epoch == 50):
trainList.extend(trainList3)
if (epoch == 70):
trainList.extend(trainList4)
if (epoch == 90):
trainList.extend(trainList5)
workList = trainList[:]
shuffle(workList)
workList = workList[0:numT]
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgInW,
mvn=False)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, keep_prob: 0.5, trainIN: True}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# lossB, aErr = session.run([loss, err], feed_dict=feedDict)
lossT += lossB
errT += aErr
# plt.imshow(s_o[0, :, :, 0], cmap=plt.cm.gray)
# plt.show()
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgInW,
mvn=False)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, keep_prob: 1.0, trainIN: False}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp_stn1/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
|
apache-2.0
|
matthew-tucker/mne-python
|
mne/viz/tests/test_topomap.py
|
5
|
6899
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from nose.tools import assert_true, assert_equal
from mne import io, read_evokeds, read_proj
from mne.io.constants import FIFF
from mne.channels import read_layout, make_eeg_layout
from mne.datasets import testing
from mne.time_frequency.tfr import AverageTFR
from mne.utils import slow_test
from mne.viz import plot_evoked_topomap, plot_projs_topomap
from mne.viz.topomap import _check_outlines
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
@slow_test
@testing.requires_testing_data
def test_plot_topomap():
"""Test topomap plotting
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
# evoked
warnings.simplefilter('always')
res = 16
evoked = read_evokeds(evoked_fname, 'Left Auditory',
baseline=(None, 0))
ev_bad = evoked.pick_types(meg=False, eeg=True, copy=True)
ev_bad.pick_channels(ev_bad.ch_names[:2])
ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6) # auto, should plot EEG
assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
assert_raises(ValueError, ev_bad.plot_topomap, times=[-100]) # bad time
assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]]) # bad time
evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
plt.close('all')
mask = np.zeros_like(evoked.data, dtype=bool)
mask[[1, 5], :] = True
evoked.plot_topomap(None, ch_type='mag', outlines=None)
times = [0.1]
evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
evoked.plot_topomap(times, ch_type='planar1', res=res)
evoked.plot_topomap(times, ch_type='planar2', res=res)
evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
show_names=True, mask_params={'marker': 'x'})
plt.close('all')
assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
res=res, average=-1000)
assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
res=res, average='hahahahah')
p = evoked.plot_topomap(times, ch_type='grad', res=res,
show_names=lambda x: x.replace('MEG', ''),
image_interp='bilinear')
subplot = [x for x in p.get_children() if
isinstance(x, matplotlib.axes.Subplot)][0]
assert_true(all('MEG' not in x.get_text()
for x in subplot.get_children()
if isinstance(x, matplotlib.text.Text)))
# Test title
def get_texts(p):
return [x.get_text() for x in p.get_children() if
isinstance(x, matplotlib.text.Text)]
p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
assert_equal(len(get_texts(p)), 0)
p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
texts = get_texts(p)
assert_equal(len(texts), 1)
assert_equal(texts[0], 'Custom')
plt.close('all')
# delaunay triangulation warning
with warnings.catch_warnings(record=True): # can't show
warnings.simplefilter('always')
evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
proj='interactive') # projs have already been applied
# change to no-proj mode
evoked = read_evokeds(evoked_fname, 'Left Auditory',
baseline=(None, 0), proj=False)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
assert_raises(RuntimeError, plot_evoked_topomap, evoked,
np.repeat(.1, 50))
assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
with warnings.catch_warnings(record=True): # file conventions
warnings.simplefilter('always')
projs = read_proj(ecg_fname)
projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
plot_projs_topomap(projs, res=res)
plt.close('all')
for ch in evoked.info['chs']:
if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
if ch['eeg_loc'] is not None:
ch['eeg_loc'].fill(0)
ch['loc'].fill(0)
# Remove extra digitization point, so EEG digitization points
# correspond with the EEG electrodes
del evoked.info['dig'][85]
pos = make_eeg_layout(evoked.info).pos
pos, outlines = _check_outlines(pos, 'head')
# test 1: pass custom outlines without patch
def patch():
return Circle((0.5, 0.4687), radius=.46,
clip_on=True, transform=plt.gca().transAxes)
# test 2: pass custom outlines with patch callable
outlines['patch'] = patch
plot_evoked_topomap(evoked, times, ch_type='eeg', outlines='head')
# Remove digitization points. Now topomap should fail
evoked.info['dig'] = None
assert_raises(RuntimeError, plot_evoked_topomap, evoked,
times, ch_type='eeg')
plt.close('all')
def test_plot_tfr_topomap():
"""Test plotting of TFR data
"""
import matplotlib.pyplot as plt
raw = _get_raw()
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
res=16)
plt.close('all')
def test_prepare_topo_plot():
"""Test obtaining 2D coordinates from 3D sensor locations"""
|
bsd-3-clause
|
AOSP-S4-KK/platform_external_chromium_org
|
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
|
26
|
11131
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, so look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
      raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
|
bsd-3-clause
|
MartinSavc/scikit-learn
|
doc/conf.py
|
210
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
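# Sphinx setup hook: registers the prompt-hiding copy-button script and hooks
# the example-stub generator into autodoc docstring processing.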
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
Vimos/scikit-learn
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/io/tests/test_json/test_ujson.py
|
5
|
53941
|
# -*- coding: utf-8 -*-
from unittest import TestCase
try:
import json
except ImportError:
import simplejson as json
import math
import nose
import platform
import sys
import time
import datetime
import calendar
import re
import decimal
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas.json as ujson
import pandas.compat as compat
import numpy as np
from numpy.testing import (assert_array_equal,
assert_array_almost_equal_nulp,
assert_approx_equal)
import pytz
import dateutil
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
def _skip_if_python_ver(skip_major, skip_minor=None):
major, minor = sys.version_info[:2]
if major == skip_major and (skip_minor is None or minor == skip_minor):
raise nose.SkipTest("skipping Python version %d.%d" % (major, minor))
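# json.dumps on Python 2 needs an explicit encoding for byte strings, while
# Python 3 handles unicode natively; json_unicode hides that difference so the
# tests can compare ujson output against the stdlib uniformly.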
json_unicode = (json.dumps if sys.version_info[0] >= 3
else partial(json.dumps, encoding="utf-8"))
class UltraJSONTests(TestCase):
def test_encodeDecimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(decoded, 1337.1337)
def test_encodeStringConversion(self):
input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
def helper(expected_output, **encode_kwargs):
output = ujson.encode(input, **encode_kwargs)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, expected_output)
self.assertEqual(input, ujson.decode(output))
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded, ensure_ascii=True)
helper(not_html_encoded, ensure_ascii=False)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
def test_doubleLongIssue(self):
sut = {u('a'): -4342969734183514}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
self.assertEqual(sut, decoded)
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(sut, decoded)
def test_doubleLongDecimalIssue(self):
sut = {u('a'): -12345678901234.56789012}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
self.assertEqual(sut, decoded)
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
self.assertEqual(sut, decoded)
def test_encodeNonCLocale(self):
import locale
savedlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
except:
try:
locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
except:
raise nose.SkipTest('Could not set locale for testing')
self.assertEqual(ujson.loads(ujson.dumps(4.78e60)), 4.78e60)
self.assertEqual(ujson.loads('4.78', precise_float=True), 4.78)
locale.setlocale(locale.LC_NUMERIC, savedlocale)
def test_encodeDecodeLongDecimal(self):
sut = {u('a'): -528656961.4399388}
encoded = ujson.dumps(sut, double_precision=15)
ujson.decode(encoded)
def test_decimalDecodeTestPrecise(self):
sut = {u('a'): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
self.assertEqual(sut, decoded)
def test_encodeDoubleTinyExponential(self):
num = 1e-40
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = 1e-100
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-45
self.assertEqual(num, ujson.decode(ujson.encode(num)))
num = -1e-145
self.assertTrue(np.allclose(num, ujson.decode(ujson.encode(num))))
def test_encodeDictWithUnicodeKeys(self):
input = {u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1"),
u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1")}
output = ujson.encode(input)
input = {u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1")}
output = ujson.encode(input)
pass
def test_encodeDoubleConversion(self):
input = math.pi
output = ujson.encode(input)
self.assertEqual(round(input, 5), round(json.loads(output), 5))
self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
def test_encodeWithDecimal(self):
input = 1.0
output = ujson.encode(input)
self.assertEqual(output, "1.0")
def test_encodeDoubleNegConversion(self):
input = -math.pi
output = ujson.encode(input)
self.assertEqual(round(input, 5), round(json.loads(output), 5))
self.assertEqual(round(input, 5), round(ujson.decode(output), 5))
def test_encodeArrayOfNestedArrays(self):
input = [[[[]]]] * 20
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = np.array(input)
assert_array_equal(input, ujson.decode(output, numpy=True, dtype=input.dtype))
def test_encodeArrayOfDoubles(self):
input = [ 31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
#self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
def test_doublePrecisionTest(self):
input = 30.012345678901234
output = ujson.encode(input, double_precision = 15)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
output = ujson.encode(input, double_precision = 9)
self.assertEqual(round(input, 9), json.loads(output))
self.assertEqual(round(input, 9), ujson.decode(output))
output = ujson.encode(input, double_precision = 3)
self.assertEqual(round(input, 3), json.loads(output))
self.assertEqual(round(input, 3), ujson.decode(output))
def test_invalidDoublePrecision(self):
input = 30.12345678901234567890
self.assertRaises(ValueError, ujson.encode, input, double_precision = 20)
self.assertRaises(ValueError, ujson.encode, input, double_precision = -1)
# will throw typeError
self.assertRaises(TypeError, ujson.encode, input, double_precision = '9')
# will throw typeError
self.assertRaises(TypeError, ujson.encode, input, double_precision = None)
    def test_encodeStringConversion2(self):
input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, '"A string \\\\ \\/ \\b \\f \\n \\r \\t"')
self.assertEqual(input, ujson.decode(output))
pass
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeControlEscaping(self):
input = "\x19"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(input, dec)
self.assertEqual(enc, json_unicode(input))
def test_encodeUnicodeConversion2(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicodeSurrogatePair(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf0\x90\x8d\x86"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeUnicode4BytesUTF8Highest(self):
_skip_if_python_ver(2, 5)
_skip_if_python_ver(2, 6)
input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input))
self.assertEqual(dec, json.loads(enc))
def test_encodeArrayInArray(self):
input = [[[[]]]]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeIntConversion(self):
input = 31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeIntNegConversion(self):
input = -31337
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeLongNegConversion(self):
input = -9223372036854775808
output = ujson.encode(input)
outputjson = json.loads(output)
outputujson = ujson.decode(output)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeListConversion(self):
input = [ 1, 2, 3, 4 ]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
assert_array_equal(np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeDictConversion(self):
input = { "k1": 1, "k2": 2, "k3": 3, "k4": 4 }
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeNoneConversion(self):
input = None
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeTrueConversion(self):
input = True
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_encodeFalseConversion(self):
input = False
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
def test_encodeDatetimeConversion(self):
ts = time.time()
input = datetime.datetime.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
expected = calendar.timegm(input.utctimetuple())
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeDateConversion(self):
ts = time.time()
input = datetime.date.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
tup = (input.year, input.month, input.day, 0, 0, 0)
expected = calendar.timegm(tup)
self.assertEqual(int(expected), json.loads(output))
self.assertEqual(int(expected), ujson.decode(output))
def test_encodeTimeConversion(self):
tests = [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
datetime.time(10, 12, 15, 343243, pytz.utc),
# datetime.time(10, 12, 15, 343243, dateutil.tz.gettz('UTC')), # this segfaults! No idea why.
]
for test in tests:
output = ujson.encode(test)
expected = '"%s"' % test.isoformat()
self.assertEqual(expected, output)
def test_nat(self):
input = NaT
assert ujson.encode(input) == 'null', "Expected null"
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise nose.SkipTest("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
from pandas.lib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
self.assertEqual(roundtrip, stamp.value // 10**9)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
self.assertEqual(roundtrip, stamp.value // 10**6)
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
self.assertEqual(roundtrip, stamp.value // 10**3)
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
self.assertEqual(roundtrip, stamp.value)
self.assertRaises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
_skip_if_python_ver(2, 5)
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
self.assertEqual(enc, json_unicode(input, ensure_ascii=False))
self.assertEqual(dec, json.loads(enc))
def test_decodeFromUnicode(self):
input = u("{\"obj\": 31337}")
dec1 = ujson.decode(input)
dec2 = ujson.decode(str(input))
self.assertEqual(dec1, dec2)
def test_encodeRecursionMax(self):
# 8 is the max recursion depth
class O2:
member = 0
pass
class O1:
member = 0
pass
input = O1()
input.member = O2()
input.member.member = input
try:
output = ujson.encode(input)
assert False, "Expected overflow exception"
except(OverflowError):
pass
def test_encodeDoubleNan(self):
input = np.nan
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleInf(self):
input = np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleNegInf(self):
input = -np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_decodeJibberish(self):
input = "fdsa sda v9sa fdsa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayStart(self):
input = "["
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectStart(self):
input = "{"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayEnd(self):
input = "]"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeArrayDepthTooBig(self):
input = '[' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeObjectDepthTooBig(self):
input = '{' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUntermEscapeSequence(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringBadEscape(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeTrueBroken(self):
input = "tru"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeFalseBroken(self):
input = "fa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNullBroken(self):
input = "n"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenDictKeyTypeLeakTest(self):
input = '{{1337:""}}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except ValueError as e:
continue
assert False, "Wrong exception"
def test_decodeBrokenDictLeakTest(self):
input = '{{"key":"}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeBrokenListLeakTest(self):
input = '[[[true'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeDictWithNoKey(self):
input = "{{{{31337}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoColonOrValue(self):
input = "{{{{\"key\"}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoValue(self):
input = "{{{{\"key\":}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNumericIntPos(self):
input = "31337"
self.assertEqual(31337, ujson.decode(input))
def test_decodeNumericIntNeg(self):
input = "-31337"
self.assertEqual(-31337, ujson.decode(input))
def test_encodeUnicode4BytesUTF8Fail(self):
_skip_if_python_ver(3)
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input)
assert False, "Expected exception"
except OverflowError:
pass
def test_encodeNullCharacter(self):
input = "31337 \x00 1337"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
input = "\x00"
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
self.assertEqual('" \\u0000\\r\\n "', ujson.dumps(u(" \u0000\r\n ")))
pass
def test_decodeNullCharacter(self):
input = "\"31337 \\u0000 31337\""
self.assertEqual(ujson.decode(input), json.loads(input))
def test_encodeListLongConversion(self):
input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807, 9223372036854775807 ]
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(input, ujson.decode(output))
assert_array_equal(np.array(input), ujson.decode(output, numpy=True,
dtype=np.int64))
pass
def test_encodeLongConversion(self):
input = 9223372036854775807
output = ujson.encode(input)
self.assertEqual(input, json.loads(output))
self.assertEqual(output, json.dumps(input))
self.assertEqual(input, ujson.decode(output))
pass
def test_numericIntExp(self):
input = "1337E40"
output = ujson.decode(input)
self.assertEqual(output, json.loads(input))
def test_numericIntFrcExp(self):
input = "1.337E40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEPLUS(self):
input = "1337E+9"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpePLUS(self):
input = "1.337e+40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpE(self):
input = "1337E40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpe(self):
input = "1337e40"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpEMinus(self):
input = "1.337E-4"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_decodeNumericIntExpeMinus(self):
input = "1.337e-4"
output = ujson.decode(input)
self.assertAlmostEqual(output, json.loads(input))
def test_dumpToFile(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
self.assertEqual("[1,2,3]", f.getvalue())
def test_dumpToFileLikeObject(self):
class filelike:
def __init__(self):
self.bytes = ''
def write(self, bytes):
self.bytes += bytes
f = filelike()
ujson.dump([1, 2, 3], f)
self.assertEqual("[1,2,3]", f.bytes)
def test_dumpFileArgsError(self):
try:
ujson.dump([], '')
except TypeError:
pass
else:
assert False, 'expected TypeError'
def test_loadFile(self):
f = StringIO("[1,2,3,4]")
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = StringIO("[1,2,3,4]")
assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileLikeObject(self):
class filelike:
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
f = filelike()
self.assertEqual([1, 2, 3, 4], ujson.load(f))
f = filelike()
assert_array_equal(np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
try:
ujson.load("[]")
except TypeError:
pass
else:
assert False, "expected TypeError"
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encodeNumericOverflow(self):
try:
ujson.encode(12839128391289382193812939)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_encodeNumericOverflowNested(self):
for n in range(0, 100):
class Nested:
x = 12839128391289382193812939
nested = Nested()
try:
ujson.encode(nested)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_decodeNumberWith32bitSignBit(self):
        # Test that numbers that fit within 32 bits but would have the
        # sign bit set (2**31 <= x < 2**32) are decoded properly.
boundary1 = 2**31
boundary2 = 2**32
docs = (
'{"id": 3590016419}',
'{"id": %s}' % 2**31,
'{"id": %s}' % 2**32,
'{"id": %s}' % ((2**32)-1),
)
results = (3590016419, 2**31, 2**32, 2**32-1)
for doc,result in zip(docs, results):
self.assertEqual(ujson.decode(doc)['id'], result)
def test_encodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
input = base * 1024 * 1024 * 2
output = ujson.encode(input)
def test_decodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
quote = compat.str_to_bytes("\"")
input = quote + (base * 1024 * 1024 * 2) + quote
output = ujson.decode(input)
def test_toDict(self):
d = {u("key"): 31337}
class DictTest:
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
self.assertEqual(dec, d)
def test_defaultHandler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
self.assertRaises(OverflowError, ujson.encode, _TestObject("foo"))
self.assertEqual('"foo"', ujson.encode(_TestObject("foo"),
default_handler=str))
def my_handler(obj):
return "foobar"
self.assertEqual('"foobar"', ujson.encode(_TestObject("foo"),
default_handler=my_handler))
def my_handler_raises(obj):
raise TypeError("I raise for anything")
with tm.assertRaisesRegexp(TypeError, "I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(obj):
return 42
self.assertEqual(
42, ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)))
def my_obj_handler(obj):
return datetime.datetime(2013, 2, 3)
self.assertEqual(
ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))),
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
l = [_TestObject("foo"), _TestObject("bar")]
self.assertEqual(json.loads(json.dumps(l, default=str)),
ujson.decode(ujson.encode(l, default_handler=str)))
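# Round-trip tests for numpy scalars and arrays through ujson, including the
# numpy=True decoding path.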
class NumpyJSONTests(TestCase):
def testBool(self):
b = np.bool(True)
self.assertEqual(ujson.decode(ujson.encode(b)), b)
def testBoolArray(self):
inpt = np.array([True, False, True, True, False, True, False , False],
dtype=np.bool)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
assert_array_equal(inpt, outp)
def testInt(self):
num = np.int(2562010)
self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
num = np.int8(127)
self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
num = np.int16(2562010)
self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
num = np.int32(2562010)
self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
num = np.int64(2562010)
self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
num = np.uint8(255)
self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
num = np.uint16(2562010)
self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
num = np.uint32(2562010)
self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
num = np.uint64(2562010)
self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
def testIntArray(self):
arr = np.arange(100, dtype=np.int)
dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
assert_array_equal(inpt, outp)
def testIntMax(self):
num = np.int(np.iinfo(np.int).max)
self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)
num = np.int8(np.iinfo(np.int8).max)
self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)
num = np.int16(np.iinfo(np.int16).max)
self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)
num = np.int32(np.iinfo(np.int32).max)
self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)
num = np.uint8(np.iinfo(np.uint8).max)
self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)
num = np.uint16(np.iinfo(np.uint16).max)
self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)
num = np.uint32(np.iinfo(np.uint32).max)
self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)
if platform.architecture()[0] != '32bit':
num = np.int64(np.iinfo(np.int64).max)
self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)
# uint64 max will always overflow as it's encoded to signed
num = np.uint64(np.iinfo(np.int64).max)
self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
def testFloat(self):
num = np.float(256.2013)
self.assertEqual(np.float(ujson.decode(ujson.encode(num))), num)
num = np.float32(256.2013)
self.assertEqual(np.float32(ujson.decode(ujson.encode(num))), num)
num = np.float64(256.2013)
self.assertEqual(np.float64(ujson.decode(ujson.encode(num))), num)
def testFloatArray(self):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
dtypes = (np.float, np.float32, np.float64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt, double_precision=15)), dtype=dtype)
assert_array_almost_equal_nulp(inpt, outp)
def testFloatMax(self):
num = np.float(np.finfo(np.float).max/10)
assert_approx_equal(np.float(ujson.decode(ujson.encode(num, double_precision=15))), num, 15)
num = np.float32(np.finfo(np.float32).max/10)
assert_approx_equal(np.float32(ujson.decode(ujson.encode(num, double_precision=15))), num, 15)
num = np.float64(np.finfo(np.float64).max/10)
assert_approx_equal(np.float64(ujson.decode(ujson.encode(num, double_precision=15))), num, 15)
def testArrays(self):
        arr = np.arange(100)
arr = arr.reshape((10, 10))
assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((5, 5, 4))
assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((100, 1))
assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
        arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
assert_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)
l = ['a', list(), dict(), dict(), list(),
42, 97.8, ['a', 'b'], {'key': 'val'}]
arr = np.array(l)
assert_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)
        arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
arr = arr.reshape((5, 5, 4))
outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
assert_array_almost_equal_nulp(arr, outp)
outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
assert_array_almost_equal_nulp(arr, outp)
def testArrayNumpyExcept(self):
input = ujson.dumps([42, {}, 'a'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps(['a', 'b', [], 'c'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, ['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{}, []])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, None])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 'b'}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps({'a': {'b': {'c': 42}}})
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
def testArrayNumpyLabelled(self):
input = {'a': []}
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
self.assertTrue((np.empty((1, 0)) == output[0]).all())
self.assertTrue((np.array(['a']) == output[1]).all())
self.assertTrue(output[2] is None)
input = [{'a': 42}]
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
self.assertTrue((np.array([42]) == output[0]).all())
self.assertTrue(output[1] is None)
self.assertTrue((np.array([u('a')]) == output[2]).all())
        # py3 is non-deterministic on the ordering
if not compat.PY3:
input = [{'a': 42, 'b':31}, {'a': 24, 'c': 99}, {'a': 2.4, 'b': 78}]
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2))
self.assertTrue((expectedvals == output[0]).all())
self.assertTrue(output[1] is None)
self.assertTrue((np.array([u('a'), 'b']) == output[2]).all())
input = {1: {'a': 42, 'b':31}, 2: {'a': 24, 'c': 99}, 3: {'a': 2.4, 'b': 78}}
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
expectedvals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3,2))
self.assertTrue((expectedvals == output[0]).all())
self.assertTrue((np.array(['1','2','3']) == output[1]).all())
self.assertTrue((np.array(['a', 'b']) == output[2]).all())
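# Round-trip tests for pandas DataFrame, Series, Index and DatetimeIndex across
# the supported orient values, plus assorted decoder edge cases.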
class PandasJSONTests(TestCase):
def testDataFrame(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df)))
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
assert_array_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
outp = DataFrame(**dec)
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
assert_array_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
outp.index = df.index
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
outp.index = df.index
self.assertTrue((df.values == outp.values).all())
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
self.assertTrue((df.transpose() == outp).values.all())
assert_array_equal(df.transpose().columns, outp.columns)
assert_array_equal(df.transpose().index, outp.index)
def testDataFrameNumpy(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
assert_array_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
numpy=True))
outp = DataFrame(**dec)
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
assert_array_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"), numpy=True))
self.assertTrue((df.transpose() == outp).values.all())
assert_array_equal(df.transpose().columns, outp.columns)
assert_array_equal(df.transpose().index, outp.index)
def testDataFrameNested(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
nested = {'df1': df, 'df2': df.copy()}
exp = {'df1': ujson.decode(ujson.encode(df)),
'df2': ujson.decode(ujson.encode(df))}
self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
'df2': ujson.decode(ujson.encode(df, orient="index"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
'df2': ujson.decode(ujson.encode(df, orient="records"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
'df2': ujson.decode(ujson.encode(df, orient="values"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
'df2': ujson.decode(ujson.encode(df, orient="split"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
def testDataFrameNumpyLabelled(self):
df = DataFrame([[1,2,3], [4,5,6]], index=['a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(*ujson.decode(ujson.encode(df), numpy=True, labelled=True))
self.assertTrue((df.T == outp).values.all())
assert_array_equal(df.T.columns, outp.columns)
assert_array_equal(df.T.index, outp.index)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="records"), numpy=True, labelled=True))
outp.index = df.index
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
outp = DataFrame(*ujson.decode(ujson.encode(df, orient="index"), numpy=True, labelled=True))
self.assertTrue((df == outp).values.all())
assert_array_equal(df.columns, outp.columns)
assert_array_equal(df.index, outp.index)
def testSeries(self):
s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
s.sort()
# column indexed
outp = Series(ujson.decode(ujson.encode(s)))
outp.sort()
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s), numpy=True))
outp.sort()
self.assertTrue((s == outp).values.all())
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
outp = Series(**dec)
self.assertTrue((s == outp).values.all())
self.assertTrue(s.name == outp.name)
dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
numpy=True))
outp = Series(**dec)
self.assertTrue((s == outp).values.all())
self.assertTrue(s.name == outp.name)
outp = Series(ujson.decode(ujson.encode(s, orient="records"), numpy=True))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="records")))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="values"), numpy=True))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="values")))
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="index")))
outp.sort()
self.assertTrue((s == outp).values.all())
outp = Series(ujson.decode(ujson.encode(s, orient="index"), numpy=True))
outp.sort()
self.assertTrue((s == outp).values.all())
def testSeriesNested(self):
s = Series([10, 20, 30, 40, 50, 60], name="series", index=[6,7,8,9,10,15])
s.sort()
nested = {'s1': s, 's2': s.copy()}
exp = {'s1': ujson.decode(ujson.encode(s)),
's2': ujson.decode(ujson.encode(s))}
self.assertTrue(ujson.decode(ujson.encode(nested)) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="split")),
's2': ujson.decode(ujson.encode(s, orient="split"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="split")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="records")),
's2': ujson.decode(ujson.encode(s, orient="records"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="records")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="values")),
's2': ujson.decode(ujson.encode(s, orient="values"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="values")) == exp)
exp = {'s1': ujson.decode(ujson.encode(s, orient="index")),
's2': ujson.decode(ujson.encode(s, orient="index"))}
self.assertTrue(ujson.decode(ujson.encode(nested, orient="index")) == exp)
def testIndex(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# column indexed
outp = Index(ujson.decode(ujson.encode(i)))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i), numpy=True))
self.assertTrue(i.equals(outp))
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
outp = Index(**dec)
self.assertTrue(i.equals(outp))
self.assertTrue(i.name == outp.name)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
outp = Index(**dec)
self.assertTrue(i.equals(outp))
self.assertTrue(i.name == outp.name)
outp = Index(ujson.decode(ujson.encode(i, orient="values")))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="values"), numpy=True))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="records")))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="records"), numpy=True))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="index")))
self.assertTrue(i.equals(outp))
outp = Index(ujson.decode(ujson.encode(i, orient="index"), numpy=True))
self.assertTrue(i.equals(outp))
def test_datetimeindex(self):
from pandas.tseries.index import date_range
rng = date_range('1/1/2000', periods=20)
encoded = ujson.encode(rng, date_unit='ns')
decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
self.assertTrue(rng.equals(decoded))
ts = Series(np.random.randn(len(rng)), index=rng)
decoded = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
tm.assert_series_equal(ts, decoded)
def test_decodeArrayTrailingCommaFail(self):
input = "[31337,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayLeadingCommaFail(self):
input = "[,31337]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayOnlyCommaFail(self):
input = "[,]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayUnmatchedBracketFail(self):
input = "[]]"
try:
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayEmpty(self):
input = "[]"
ujson.decode(input)
def test_decodeArrayOneItem(self):
input = "[31337]"
ujson.decode(input)
def test_decodeBigValue(self):
input = "9223372036854775807"
ujson.decode(input)
def test_decodeSmallValue(self):
input = "-9223372036854775808"
ujson.decode(input)
def test_decodeTooBigValue(self):
try:
input = "9223372036854775808"
ujson.decode(input)
except ValueError as e:
pass
else:
assert False, "expected ValueError"
def test_decodeTooSmallValue(self):
try:
input = "-90223372036854775809"
ujson.decode(input)
except ValueError as e:
pass
else:
assert False, "expected ValueError"
def test_decodeVeryTooBigValue(self):
try:
input = "9223372036854775808"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeVeryTooSmallValue(self):
try:
input = "-90223372036854775809"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeWithTrailingWhitespaces(self):
input = "{}\n\t "
ujson.decode(input)
def test_decodeWithTrailingNonWhitespaces(self):
try:
input = "{}\n\t a"
ujson.decode(input)
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayWithBigInt(self):
try:
ujson.loads('[18446098363113800555]')
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeArrayFaultyUnicode(self):
try:
ujson.loads('[18446098363113800555]')
except ValueError:
pass
else:
assert False, "expected ValueError"
def test_decodeFloatingPointAdditionalTests(self):
places = 15
self.assertAlmostEqual(-1.1234567893, ujson.loads("-1.1234567893"), places=places)
self.assertAlmostEqual(-1.234567893, ujson.loads("-1.234567893"), places=places)
self.assertAlmostEqual(-1.34567893, ujson.loads("-1.34567893"), places=places)
self.assertAlmostEqual(-1.4567893, ujson.loads("-1.4567893"), places=places)
self.assertAlmostEqual(-1.567893, ujson.loads("-1.567893"), places=places)
self.assertAlmostEqual(-1.67893, ujson.loads("-1.67893"), places=places)
self.assertAlmostEqual(-1.7893, ujson.loads("-1.7893"), places=places)
self.assertAlmostEqual(-1.893, ujson.loads("-1.893"), places=places)
self.assertAlmostEqual(-1.3, ujson.loads("-1.3"), places=places)
self.assertAlmostEqual(1.1234567893, ujson.loads("1.1234567893"), places=places)
self.assertAlmostEqual(1.234567893, ujson.loads("1.234567893"), places=places)
self.assertAlmostEqual(1.34567893, ujson.loads("1.34567893"), places=places)
self.assertAlmostEqual(1.4567893, ujson.loads("1.4567893"), places=places)
self.assertAlmostEqual(1.567893, ujson.loads("1.567893"), places=places)
self.assertAlmostEqual(1.67893, ujson.loads("1.67893"), places=places)
self.assertAlmostEqual(1.7893, ujson.loads("1.7893"), places=places)
self.assertAlmostEqual(1.893, ujson.loads("1.893"), places=places)
self.assertAlmostEqual(1.3, ujson.loads("1.3"), places=places)
def test_encodeBigSet(self):
s = set()
for x in range(0, 100000):
s.add(x)
ujson.encode(s)
def test_encodeEmptySet(self):
s = set()
self.assertEqual("[]", ujson.encode(s))
def test_encodeSet(self):
s = set([1,2,3,4,5,6,7,8,9])
enc = ujson.encode(s)
dec = ujson.decode(enc)
for v in dec:
self.assertTrue(v in s)
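# Keys decoded from "split"-oriented JSON may come back as unicode objects;
# stringify them so the dict can be splatted as keyword arguments into the
# pandas constructors used above.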
def _clean_dict(d):
return dict((str(k), v) for k, v in compat.iteritems(d))
if __name__ == '__main__':
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
gpl-2.0
|
pfnet/chainercv
|
chainercv/visualizations/vis_bbox.py
|
2
|
5273
|
import numpy as np
from chainercv.visualizations.vis_image import vis_image
def vis_bbox(img, bbox, label=None, score=None, label_names=None,
instance_colors=None, alpha=1., linewidth=3.,
sort_by_score=True, ax=None):
"""Visualize bounding boxes inside image.
Example:
>>> from chainercv.datasets import VOCBboxDataset
>>> from chainercv.datasets import voc_bbox_label_names
>>> from chainercv.visualizations import vis_bbox
>>> import matplotlib.pyplot as plt
>>> dataset = VOCBboxDataset()
>>> img, bbox, label = dataset[60]
>>> vis_bbox(img, bbox, label,
... label_names=voc_bbox_label_names)
>>> plt.show()
    This example visualizes bounding boxes assigned to the same label
    with the same color.
>>> from chainercv.datasets import VOCBboxDataset
>>> from chainercv.datasets import voc_bbox_label_names
>>> from chainercv.visualizations import vis_bbox
>>> from chainercv.visualizations.colormap import voc_colormap
>>> import matplotlib.pyplot as plt
>>> dataset = VOCBboxDataset()
>>> img, bbox, label = dataset[61]
>>> colors = voc_colormap(label + 1)
>>> vis_bbox(img, bbox, label,
... label_names=voc_bbox_label_names,
... instance_colors=colors)
>>> plt.show()
Args:
img (~numpy.ndarray): See the table below. If this is :obj:`None`,
no image is displayed.
bbox (~numpy.ndarray): See the table below.
label (~numpy.ndarray): See the table below. This is optional.
score (~numpy.ndarray): See the table below. This is optional.
label_names (iterable of strings): Name of labels ordered according
to label ids. If this is :obj:`None`, labels will be skipped.
instance_colors (iterable of tuples): List of colors.
Each color is RGB format and the range of its values is
:math:`[0, 255]`. The :obj:`i`-th element is the color used
to visualize the :obj:`i`-th instance.
            If :obj:`instance_colors` is :obj:`None`, red is used for
            all boxes.
alpha (float): The value which determines transparency of the
bounding boxes. The range of this value is :math:`[0, 1]`.
linewidth (float): The thickness of the edges of the bounding boxes.
sort_by_score (bool): When :obj:`True`, instances with high scores
are always visualized in front of instances with low scores.
ax (matplotlib.axes.Axis): The visualization is displayed on this
axis. If this is :obj:`None` (default), a new axis is created.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
:obj:`bbox`, ":math:`(R, 4)`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`label`, ":math:`(R,)`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
:obj:`score`, ":math:`(R,)`", :obj:`float32`, --
Returns:
~matploblib.axes.Axes:
Returns the Axes object with the plot for further tweaking.
"""
from matplotlib import pyplot as plt
    if label is not None and len(bbox) != len(label):
        raise ValueError('The length of label must be the same as that of bbox')
    if score is not None and len(bbox) != len(score):
        raise ValueError('The length of score must be the same as that of bbox')
if sort_by_score and score is not None:
order = np.argsort(score)
bbox = bbox[order]
score = score[order]
if label is not None:
label = label[order]
if instance_colors is not None:
instance_colors = np.array(instance_colors)[order]
# Returns newly instantiated matplotlib.axes.Axes object if ax is None
ax = vis_image(img, ax=ax)
# If there is no bounding box to display, visualize the image and exit.
if len(bbox) == 0:
return ax
if instance_colors is None:
# Red
instance_colors = np.zeros((len(bbox), 3), dtype=np.float32)
instance_colors[:, 0] = 255
instance_colors = np.array(instance_colors)
for i, bb in enumerate(bbox):
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
color = instance_colors[i % len(instance_colors)] / 255
ax.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if len(caption) > 0:
ax.text(bb[1], bb[0],
': '.join(caption),
style='italic',
bbox={'facecolor': 'white', 'alpha': 0.7, 'pad': 10})
return ax
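# Minimal usage sketch (illustrative only, not part of the original module):
# the arrays below are synthetic stand-ins for a real image and detections.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    img = np.zeros((3, 100, 100), dtype=np.float32)  # CHW, RGB in [0, 255]
    bbox = np.array([[10., 10., 50., 60.]], dtype=np.float32)  # (y_min, x_min, y_max, x_max)
    label = np.array([0], dtype=np.int32)
    score = np.array([0.9], dtype=np.float32)
    vis_bbox(img, bbox, label, score, label_names=('object',))
    plt.show()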
|
mit
|
yyjiang/scikit-learn
|
examples/manifold/plot_manifold_sphere.py
|
258
|
5101
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_.
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except Exception:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
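# Illustrative check (not part of the original example): MDS is meant to
# preserve pairwise distances, so the distances in the 2D embedding should
# correlate strongly with the distances between the original 3D sphere points.
from scipy.spatial.distance import pdist
dist_corr = np.corrcoef(pdist(sphere_data), pdist(trans_data.T))[0, 1]
print("MDS distance correlation: %.2f" % dist_corr)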
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
cuguilke/Treelogy
|
Treelogy_Server/Treelogy_Identifier.py
|
1
|
5673
|
#!/usr/bin/env python
import numpy as np
import pickle
import sys
from time import gmtime, strftime
from sklearn.externals import joblib
# one must change 'cuguilke' to their own username
caffe_root = '/home/cuguilke/caffe/'
svm_root = caffe_root + 'SVM'
sys.path.insert(0, caffe_root + 'python')
import caffe
import os
#CPU mode
#caffe.set_mode_cpu()
#GPU mode
caffe.set_device(0)
caffe.set_mode_gpu()
class Treelogy_Identifier:
def __init__(self, ID):
global caffe_root
self.idle = True
self.ID = ID
self.caffe_root = caffe_root
if not os.path.isfile(self.caffe_root + 'models/finetune_flickr_style/tree_identification_v7.caffemodel'):
print "[" + self.get_time() + "] " + "WARNING: Treelogy_Identifier" + str(self.ID) + " is NOT initialized"
print "[" + self.get_time() + "] " + "You need to download pre-trained tree_identification_v7 CaffeNet model..."
else:
print "[" + self.get_time() + "] " + "Treelogy_Identifier " + str(self.ID) + " is initialized"
self.guesses = {} #result dictionary
self.image_filename = ""
self.net = caffe.Net(self.caffe_root + 'models/finetune_flickr_style/deploy.prototxt', self.caffe_root + 'models/finetune_flickr_style/tree_identification_v7.caffemodel', caffe.TEST)
self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
self.svm_identifier = SVM_Tree_Identifier(self.ID)
def run(self, imagename, mode=1):
# mode: 0 -> CNN Result | 1 -> Merged SVM & CNN Result
print "[" + self.get_time() + "] " + "Treelogy_Identifier " + str(self.ID) + " is running..."
self.image_filename = imagename
self.guesses = {}
self.transformer.set_transpose('data', (2,0,1))
self.transformer.set_mean('data', np.load(self.caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
self.transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
self.transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# larger (max. 50) batch size may be used for faster computation of multiple images
# set net to batch size of 1
self.net.blobs['data'].reshape(1,3,227,227)
#Feed in the image (with some preprocessing) and classify with a forward pass.
#In order to feed your own images, put them under ~/caffe/examples/images/
self.net.blobs['data'].data[...] = self.transformer.preprocess('data', caffe.io.load_image(self.caffe_root + 'examples/images/' + self.image_filename))
out = self.net.forward()
fc6_feature_vector = self.net.blobs['fc6'].data[0]
predicted_class = "Predicted class is #{}.".format(out['prob'][0].argmax())
# load labels
imagenet_labels_filename = self.caffe_root + 'data/flickr_style/synset_words.txt'
try:
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
except:
print "You should run Ilke Cugu's create_train_val_text_v4.py"
# sort top k predictions from softmax output
top_k = self.net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
label = labels[top_k]
self.net.forward()
# run SVM_Tree_Identifier with fc6_feature_vector
svm_prediction = self.svm_identifier.predict(fc6_feature_vector)
# add Caffe's predictions
for i in range(0,len(label)):
guess = str(label[i]).lower().split(" ")
guess_string = guess[1]
#guess_string = guess_string.replace("_", " ")
guess_list = guess_string.split("|") #separate common name & latin name
guess = {"name": guess_list[0], "latin_name": guess_list[1], "percentage": str("{0:.2f}".format(100 * self.net.blobs['prob'].data[0][top_k[i]]))}
self.guesses.update({(i+1): guess})
if mode == 1: # Merge results of Caffe & SVM
in_top5 = True
if self.guesses[1]["name"] != svm_prediction:
in_top5 = False
for i in range(2, 6):
if self.guesses[i]["name"] == svm_prediction:
in_top5 = True
temp = self.guesses[i]["latin_name"]
names = ["dummy","","","","",""]
latin_names = ["dummy","","","","",""]
for j in range(1,6):
names[j] = self.guesses[j]["name"]
latin_names[j] = self.guesses[j]["latin_name"]
# shift names
for j in range(2,6):
if names[j-1] != svm_prediction:
self.guesses[j]["name"] = names[j-1]
self.guesses[j]["latin_name"] = latin_names[j-1]
else:
break
# set top prediction as SVM's prediction
self.guesses[1]["name"] = svm_prediction
self.guesses[1]["latin_name"] = temp
break
if not in_top5:
self.guesses[5]["name"] = svm_prediction
self.guesses[5]["latin_name"] = ""
print "[" + self.get_time() + "] " + "Treelogy_Identifier " + str(self.ID) + " completed the identification"
def get_results(self):
return self.guesses
def get_time(self):
return strftime("%a, %d %b %Y %X", gmtime())
# Directory schema must be like this:
# ./-
# |-- SVM_Tree_Identifier.py
# |-- SVM/-
# |-- svmClassifier_Pickle_caffe_finetune_fc6_v9.p
# |-- synset_words_Pickle_caffe_finetune_fc6_v9.p
# |-- svmClassifier_Pickle_caffe_finetune_fc6_v9.p_01.npy.z
class SVM_Tree_Identifier:
def __init__(self, ID):
global svm_root
self.ID = ID
self.predicted = None
self.clf = joblib.load(svm_root + "/svmClassifier_Pickle_caffe_finetune_fc6_v9.p")
self.synset_words_dict = joblib.load(svm_root + "/synset_words_Pickle_caffe_finetune_fc6_v9.p")
self.terminate = False
def predict(self, featureVector):
instance = np.array(featureVector)
self.predicted = self.clf.predict([instance])
for k,l in self.synset_words_dict.items():
if l[1] == self.predicted[0]:
break
return (l[0].split('|')[0]).lower()
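# Illustrative sketch (not part of the original file): as noted in
# Treelogy_Identifier.run, the data blob can be reshaped to a small batch so
# that several images are classified with a single forward pass. The file
# names are hypothetical and are resolved under caffe_root/examples/images/
# like in run(); this assumes run() has already configured the transformer
# (transpose, mean, raw scale, channel swap).
def classify_batch(identifier, filenames):
    n = len(filenames)
    identifier.net.blobs['data'].reshape(n, 3, 227, 227)
    for k, fname in enumerate(filenames):
        image = caffe.io.load_image(identifier.caffe_root + 'examples/images/' + fname)
        identifier.net.blobs['data'].data[k, ...] = identifier.transformer.preprocess('data', image)
    out = identifier.net.forward()
    return out['prob'].argmax(axis=1)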
|
gpl-3.0
|
tosolveit/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
176
|
12155
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # With min_samples=4 (and eps=1) no sample has enough neighbors to be
        # a core sample: everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
bsd-3-clause
|
PrashntS/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
280
|
2541
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
|
bsd-3-clause
|
uhjish/seaborn
|
seaborn/utils.py
|
19
|
15509
|
"""Small plotting-related utility functions."""
from __future__ import print_function, division
import colorsys
import warnings
import os
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.colors as mplcol
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from .external.six.moves.urllib.request import urlopen, urlretrieve
def ci_to_errsize(cis, heights):
"""Convert intervals to error arguments relative to plot heights.
Parameters
----------
cis: 2 x n sequence
sequence of confidence interval limits
heights : n sequence
sequence of plot heights
Returns
-------
errsize : 2 x n array
sequence of error size relative to height values in correct
format as argument for plt.bar
"""
cis = np.atleast_2d(cis).reshape(2, -1)
heights = np.atleast_1d(heights)
errsize = []
for i, (low, high) in enumerate(np.transpose(cis)):
h = heights[i]
elow = h - low
ehigh = high - h
errsize.append([elow, ehigh])
errsize = np.asarray(errsize).T
return errsize
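# Illustrative usage (a sketch, not part of the module): turning confidence
# interval limits into the asymmetric ``yerr`` argument expected by plt.bar.
# The heights and interval limits below are made up.
def _ci_to_errsize_example():
    heights = [3.0, 5.0]
    cis = [[2.5, 3.6],   # lower limits
           [4.2, 5.9]]   # upper limits
    yerr = ci_to_errsize(cis, heights)
    return plt.bar([0, 1], heights, yerr=yerr)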
def pmf_hist(a, bins=10):
"""Return arguments to plt.bar for pmf-like histogram of an array.
Parameters
----------
a: array-like
array to make histogram of
bins: int
number of bins
Returns
-------
x: array
left x position of bars
h: array
height of bars
w: float
width of bars
"""
n, x = np.histogram(a, bins)
h = n / n.sum()
w = x[1] - x[0]
return x[:-1], h, w
def desaturate(color, prop):
"""Decrease the saturation channel of a color by some percent.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
prop : float
saturation channel of color will be multiplied by this value
Returns
-------
new_color : rgb tuple
desaturated color code in RGB tuple representation
"""
# Check inputs
if not 0 <= prop <= 1:
raise ValueError("prop must be between 0 and 1")
# Get rgb tuple rep
rgb = mplcol.colorConverter.to_rgb(color)
# Convert to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# Desaturate the saturation channel
s *= prop
# Convert back to rgb
new_color = colorsys.hls_to_rgb(h, l, s)
return new_color
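# Illustrative usage (a sketch, not part of the module): halving the
# saturation of pure red gives a muted red, roughly (0.75, 0.25, 0.25).
def _desaturate_example():
    return desaturate("red", 0.5)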
def saturate(color):
"""Return a fully saturated color with the same hue.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
Returns
-------
new_color : rgb tuple
saturated color code in RGB tuple representation
"""
return set_hls_values(color, s=1)
def set_hls_values(color, h=None, l=None, s=None):
"""Independently manipulate the h, l, or s channels of a color.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
h, l, s : floats between 0 and 1, or None
new values for each channel in hls space
Returns
-------
new_color : rgb tuple
new color code in RGB tuple representation
"""
# Get rgb tuple representation
rgb = mplcol.colorConverter.to_rgb(color)
vals = list(colorsys.rgb_to_hls(*rgb))
for i, val in enumerate([h, l, s]):
if val is not None:
vals[i] = val
rgb = colorsys.hls_to_rgb(*vals)
return rgb
def axlabel(xlabel, ylabel, **kwargs):
"""Grab current axis and label it."""
ax = plt.gca()
ax.set_xlabel(xlabel, **kwargs)
ax.set_ylabel(ylabel, **kwargs)
def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
    Parameters
    ----------
    fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or None (default), optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward).
trim : bool, optional
If true, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
"""
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
# Toggle the spine objects
is_visible = not locals()[side]
ax_i.spines[side].set_visible(is_visible)
if offset is not None and is_visible:
_set_spine_position(ax_i.spines[side], ('outward', offset))
# Set the ticks appropriately
if bottom:
ax_i.xaxis.tick_top()
if top:
ax_i.xaxis.tick_bottom()
if left:
ax_i.yaxis.tick_right()
if right:
ax_i.yaxis.tick_left()
if trim:
# clip off the parts of the spines that extend past major ticks
xticks = ax_i.get_xticks()
if xticks.size:
firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
xticks)[0]
lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
xticks)[-1]
ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
ax_i.spines['top'].set_bounds(firsttick, lasttick)
newticks = xticks.compress(xticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_xticks(newticks)
yticks = ax_i.get_yticks()
if yticks.size:
firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
yticks)[-1]
ax_i.spines['left'].set_bounds(firsttick, lasttick)
ax_i.spines['right'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_yticks(newticks)
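# Illustrative usage (a sketch, not part of the module): drop the top and
# right spines of a single axes, push the remaining spines outward by ten
# points, and trim them to the outermost major ticks.
def _despine_example():
    f, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    despine(ax=ax, offset=10, trim=True)
    return ax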
def offset_spines(offset=10, fig=None, ax=None):
"""Simple function to offset spines away from axes.
Use this immediately after creating figure and axes objects.
Offsetting spines after plotting or manipulating the axes
objects may result in loss of labels, ticks, and formatting.
Parameters
----------
offset : int, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward).
fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine
Returns
-------
None
"""
warn_msg = "`offset_spines` is deprecated and will be removed in v0.5"
warnings.warn(warn_msg, UserWarning)
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for spine in ax_i.spines.values():
_set_spine_position(spine, ('outward', offset))
def _set_spine_position(spine, position):
"""
Set the spine's position without resetting an associated axis.
As of matplotlib v. 1.0.0, if a spine has an associated axis, then
spine.set_position() calls axis.cla(), which resets locators, formatters,
etc. We temporarily replace that call with axis.reset_ticks(), which is
sufficient for our purposes.
"""
axis = spine.axis
if axis is not None:
cla = axis.cla
axis.cla = axis.reset_ticks
spine.set_position(position)
if axis is not None:
axis.cla = cla
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores
def ci(a, which=95, axis=None):
"""Return a percentile range from an array of values."""
p = 50 - which / 2, 50 + which / 2
return percentiles(a, p, axis)
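# Illustrative usage (a sketch, not part of the module): the default 95%
# interval of the integers 0..100 spans the 2.5th and 97.5th percentiles,
# i.e. roughly (2.5, 97.5).
def _ci_example():
    return ci(np.arange(101), which=95)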
def sig_stars(p):
"""Return a R-style significance string corresponding to p values."""
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers."""
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
def get_dataset_names():
"""Report available example datasets, useful for reporting issues."""
# delayed import to not demand bs4 unless this function is actually used
from bs4 import BeautifulSoup
http = urlopen('https://github.com/mwaskom/seaborn-data/')
gh_list = BeautifulSoup(http)
return [l.text.replace('.csv', '')
for l in gh_list.find_all("a", {"class": "js-directory-link"})
if l.text.endswith('.csv')]
def get_data_home(data_home=None):
"""Return the path of the seaborn data directory.
This is used by the ``load_dataset`` function.
If the ``data_home`` argument is not specified, the default location
is ``~/seaborn-data``.
Alternatively, a different default location can be specified using the
environment variable ``SEABORN_DATA``.
"""
if data_home is None:
data_home = os.environ.get('SEABORN_DATA',
os.path.join('~', 'seaborn-data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_dataset(name, cache=True, data_home=None, **kws):
"""Load a dataset from the online repository (requires internet).
Parameters
----------
name : str
Name of the dataset (`name`.csv on
        https://github.com/mwaskom/seaborn-data). You can obtain a list of
        available datasets using :func:`get_dataset_names`.
cache : boolean, optional
If True, then cache data locally and use the cache on subsequent calls
data_home : string, optional
        The directory in which to cache data. By default, uses ~/seaborn-data/
kws : dict, optional
Passed to pandas.read_csv
"""
path = "https://github.com/mwaskom/seaborn-data/raw/master/{0}.csv"
full_path = path.format(name)
if cache:
cache_path = os.path.join(get_data_home(data_home),
os.path.basename(full_path))
if not os.path.exists(cache_path):
urlretrieve(full_path, cache_path)
full_path = cache_path
df = pd.read_csv(full_path, **kws)
if df.iloc[-1].isnull().all():
df = df.iloc[:-1]
if not pandas_has_categoricals:
return df
# Set some columns as a categorical type with ordered levels
if name == "tips":
df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
if name == "flights":
df["month"] = pd.Categorical(df["month"], df.month.unique())
if name == "exercise":
df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
if name == "titanic":
df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))
return df
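# Illustrative usage (a sketch, not part of the module): the first call
# downloads the CSV from the seaborn-data repository; later calls read it
# from the local cache directory returned by get_data_home().
def _load_dataset_example():
    tips = load_dataset("tips")
    return tips.groupby("day")["tip"].mean()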
def axis_ticklabels_overlap(labels):
"""Return a boolean for whether the list of ticklabels have overlaps.
Parameters
----------
labels : list of ticklabels
Returns
-------
overlap : boolean
True if any of the labels overlap.
"""
if not labels:
return False
try:
bboxes = [l.get_window_extent() for l in labels]
overlaps = [b.count_overlaps(bboxes) for b in bboxes]
return max(overlaps) > 1
except RuntimeError:
        # Issue on macosx backend raises an error in the above code
return False
def axes_ticklabels_overlap(ax):
"""Return booleans for whether the x and y ticklabels on an Axes overlap.
Parameters
----------
ax : matplotlib Axes
Returns
-------
x_overlap, y_overlap : booleans
True when the labels on that axis overlap.
"""
return (axis_ticklabels_overlap(ax.get_xticklabels()),
axis_ticklabels_overlap(ax.get_yticklabels()))
def categorical_order(values, order=None):
"""Return a list of unique data values.
Determine an ordered list of levels in ``values``.
Parameters
----------
values : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
from the ``values`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
"""
if order is None:
if hasattr(values, "categories"):
order = values.categories
else:
try:
order = values.cat.categories
except (TypeError, AttributeError):
try:
order = values.unique()
except AttributeError:
order = pd.unique(values)
try:
np.asarray(values).astype(np.float)
order = np.sort(order)
except (ValueError, TypeError):
order = order
order = filter(pd.notnull, order)
return list(order)
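# Illustrative usage (a sketch, not part of the module): string values keep
# their order of appearance, numeric values are sorted, and an explicit
# ``order`` is passed through unchanged.
def _categorical_order_example():
    assert categorical_order(["b", "a", "c", "a"]) == ["b", "a", "c"]
    assert categorical_order([3, 1, 2]) == [1, 2, 3]
    assert categorical_order(["b", "a"], order=["a", "b"]) == ["a", "b"]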
|
bsd-3-clause
|
zooniverse/aggregation
|
experimental/penguins/clusterAnalysis/distance_.py
|
2
|
3492
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import sys
import cPickle as pickle
import math
import matplotlib.pyplot as plt
import pymongo
import urllib
import matplotlib.cbook as cbook
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
#from divisiveDBSCAN_multi import DivisiveDBSCAN
#from clusterCompare import metric,metric2
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
penguins = pickle.load(open(base_directory+"/Databases/penguins_vote__.pickle","rb"))
#does this cluster have a corresponding cluster in the gold standard data?
#ie. does this cluster represent an actual penguin?
# #user penguins for first image - with 5 images
# print len(penguins[5][0])
# #user data
# print penguins[5][0][0]
# #gold standard data
# #print penguins[5][0][1]
#
# #users who annotated the first "penguin" in the first image
# print penguins[5][0][0][0][1]
# #and their corresponds points
# print penguins[5][0][0][0][0]
client = pymongo.MongoClient()
db = client['penguin_2014-10-22']
subject_collection = db["penguin_subjects"]
lowest_cluster = float("inf")
highest_cluster = -float('inf')
#print gold_standard
#RESET
max_users = 20
cluster_list = []
image = penguins[max_users][0]
for image in penguins[max_users]:
#first - create a list of ALL users - so we can figure out who has annotated a "penguin" or hasn't
zooniverse_id = image[0]
for cluster in image[1]:
X = np.mean(zip(*cluster[0])[0])
Y = np.mean(zip(*cluster[0])[1])
cluster_list.append((X,Y,cluster[1]))
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
object_id= str(subject["_id"])
image_path = base_directory+"/Databases/penguins/images/"+object_id+".JPG"
if not(os.path.isfile(image_path)):
urllib.urlretrieve(url, image_path)
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+object_id+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
to_plot = False
for i in range(len(cluster_list)):
for j in range(i+1,len(cluster_list)):
users_1 = cluster_list[i][2]
users_2 = cluster_list[j][2]
overlap = len([u for u in users_1 if (u in users_2)])
if overlap == 1:
x_1,y_1 = cluster_list[i][0],cluster_list[i][1]
x_2,y_2 = cluster_list[j][0],cluster_list[j][1]
first_dist = math.sqrt((x_1-x_2)**2 + (y_1-y_2)**2)
min_dist = float("inf")
#find the closest or next-closest cluster
for k in range(len(cluster_list)):
if k in [i,j]:
continue
x_3,y_3 = cluster_list[k][0],cluster_list[k][1]
dist = math.sqrt((x_1-x_3)**2 + (y_1-y_3)**2)
min_dist = min(min_dist,dist)
if first_dist< (min_dist):
to_plot = True
print first_dist,min_dist
plt.plot((x_1,x_2),(y_1,y_2))
if to_plot:
plt.show()
plt.close()
else:
plt.close()
print "===="
|
apache-2.0
|
Edu-Glez/Bank_sentiment_analysis
|
env/lib/python3.6/site-packages/jupyter_core/tests/dotipython_empty/profile_default/ipython_console_config.py
|
24
|
21691
|
# Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are its options. Raw image data
# is given as STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporary file and the program is called with the temporary file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are its options. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
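# For example, other hash names understood by hashlib should also work
# (assumption based on the 'hmac-HASH' form described above):
# c.Session.signature_scheme = 'hmac-sha512'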
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
|
apache-2.0
|
timmeinhardt/ProxImaL
|
proximal/examples/test_noise_est.py
|
2
|
1703
|
# Proximal
import sys
sys.path.append('../../')
from proximal.utils.utils import *
from proximal.utils.metrics import *
from proximal.lin_ops import *
from proximal.prox_fns import *
import cvxpy as cvx
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from PIL import Image
import cv2
# matlab.engine is only needed for the commented-out MATLAB comparison below,
# and StringIO is currently unused; both imports can be dropped otherwise.
import matlab.engine
import StringIO
############################################################
# Load image
img = Image.open('./data/angela.jpg') # opens the file using Pillow - it's not an array yet
I = np.asfortranarray(im2nparray(img))
I = np.maximum(cv2.resize(I, (512, 512), interpolation=cv2.INTER_LINEAR), 0)
I = np.mean(I, axis=2)
I = np.asfortranarray(I)
I = np.maximum(I, 0.0)
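# At this point I is a single-channel float image, roughly in [0, 1]
# (the plots below assume this range via clim=(0.0, 1.0)).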
# Generate observation
sigma_noise = 0.01
b = I + sigma_noise * np.random.randn(*I.shape)
# Display data
plt.ion()
plt.figure()
imgplot = plt.imshow(I, interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Original Image')
plt.show()
plt.figure()
imgplot = plt.imshow(np.clip(b, 0, 1), interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Observation')
plt.show()
# # Optional: compare with the MATLAB version of the estimator
# eng = matlab.engine.start_matlab()
# eng.addpath(r'../../apps/ihdr/code',nargout=0)
# vmat = matlab.double(b.tolist())
# method = matlab.double([2])
# result = np.array( eng.function_stdEst2D(vmat, method) )
# eng.quit()
# print 'Matlab Estimate:', result
# Estimate the noise
tic()
ndev = estimate_std(b, 'daub_replicate')
print('Estimation took: {0:.1f}ms'.format(toc()))
# Result
print('Noise estimate is: {0:1.4f}, Original was {1:1.4f}'.format(np.mean(ndev), sigma_noise))
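# Extra sanity check (not part of the original script): the classic
# median-absolute-deviation estimate on the finest-scale diagonal Haar detail
# coefficients should land in the same ballpark as estimate_std().
hh = (b[0::2, 0::2] - b[1::2, 0::2] - b[0::2, 1::2] + b[1::2, 1::2]) / 2.0
sigma_mad = np.median(np.abs(hh)) / 0.6745
print('MAD/Haar estimate is: {0:1.4f}'.format(sigma_mad))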
# Wait until done
raw_input("Press Enter to continue...")
|
mit
|
pnedunuri/scikit-learn
|
examples/linear_model/plot_sgd_weighted_samples.py
|
344
|
1458
|
"""
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples (the class +1 cluster)
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
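# Small follow-up (not part of the upstream example): query a hypothetical
# point with the weighted model, which `clf` still refers to at this point.
query_point = np.array([[0.0, 0.0]])
print("decision value:", clf.decision_function(query_point))
print("predicted class:", clf.predict(query_point))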
|
bsd-3-clause
|
tridesclous/tridesclous
|
doc/script_for_figures/generate_peeler_sequence_example.py
|
1
|
7284
|
"""
Find a good example of collision in the olfactory bulb rat dataset.
"""
import os,shutil
from tridesclous import DataIO, CatalogueConstructor, Peeler
from tridesclous import download_dataset
from tridesclous.cataloguetools import apply_all_catalogue_steps
from tridesclous.peeler import make_prediction_signals
from tridesclous.tools import int32_to_rgba
from tridesclous.matplotlibplot import plot_waveforms_with_geometry, plot_features_scatter_2d
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
dirname = 'tdc_olfactory_bulb'
channels = [5,6,7,8]
def make_catalogue():
if os.path.exists(dirname):
shutil.rmtree(dirname)
dataio = DataIO(dirname=dirname)
localdir, filenames, params = download_dataset(name='olfactory_bulb')
dataio.set_data_source(type='RawData', filenames=filenames, **params)
dataio.add_one_channel_group(channels = channels)
cc = CatalogueConstructor(dataio=dataio)
params = {
'duration' : 300.,
'preprocessor' : {
'highpass_freq' : 300.,
'chunksize' : 1024,
'pad_width' : 100,
},
'peak_detector' : {
'peak_sign' : '-',
'relative_threshold' : 7.,
'peak_span' : 0.0005,
#~ 'peak_span' : 0.000,
},
'extract_waveforms' : {
'n_left' : -25,
'n_right' : 40,
'nb_max' : 10000,
},
'clean_waveforms' : {
'alien_value_threshold' : 60.,
},
'noise_snippet' : {
'nb_snippet' : 300,
},
'feature_method': 'global_pca',
'feature_kargs': {'n_components': 20},
'cluster_method': 'kmeans',
'cluster_kargs': {'n_clusters': 5},
'clean_cluster' : False,
'clean_cluster_kargs' : {},
}
apply_all_catalogue_steps(cc, params, verbose=True)
cc.order_clusters(by='waveforms_rms')
cc.move_cluster_to_trash(4)
cc.make_catalogue_for_peeler()
def apply_peeler():
dataio = DataIO(dirname=dirname)
catalogue = dataio.load_catalogue(chan_grp=0)
peeler = Peeler(dataio)
peeler.change_params(catalogue=catalogue,chunksize=1024)
peeler.run(progressbar=True)
def make_animation():
"""
    A good example of a collision lies between t = 1.272 s and t = 1.302 s.
"""
dataio = DataIO(dirname=dirname)
catalogue = dataio.load_catalogue(chan_grp=0)
clusters = catalogue['clusters']
sr = dataio.sample_rate
    # also a good one at 11.356 - 11.366
t1, t2 = 1.272, 1.295
i1, i2 = int(t1*sr), int(t2*sr)
spikes = dataio.get_spikes()
spike_times = spikes['index'] / sr
keep = (spike_times>=t1) & (spike_times<=t2)
spikes = spikes[keep]
print(spikes)
sigs = dataio.get_signals_chunk(i_start=i1, i_stop=i2,
signal_type='processed')
sigs = sigs.copy()
times = np.arange(sigs.shape[0])/dataio.sample_rate
def plot_spread_sigs(sigs, ax, ratioY = 0.02, **kargs):
#spread signals
sigs2 = sigs * ratioY
sigs2 += np.arange(0, len(channels))[np.newaxis, :]
ax.plot(times, sigs2, **kargs)
ax.set_ylim(-0.5, len(channels)-.5)
ax.set_xticks([])
ax.set_yticks([])
residuals = sigs.copy()
local_spikes = spikes.copy()
local_spikes['index'] -= i1
#~ fig, ax = plt.subplots()
#~ plot_spread_sigs(sigs, ax, color='k')
num_fig = 0
fig_pred, ax_predictions = plt.subplots()
ax_predictions.set_title('All detected templates from catalogue')
fig, ax = plt.subplots()
plot_spread_sigs(residuals, ax, color='k', lw=2)
ax.set_title('Initial filtered signals with spikes')
fig.savefig('../img/peeler_animation_sigs.png')
fig.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
for i in range(local_spikes.size):
label = local_spikes['cluster_label'][i]
color = clusters[clusters['cluster_label']==label]['color'][0]
color = int32_to_rgba(color, mode='float')
pred = make_prediction_signals(local_spikes[i:i+1], 'float32', (i2-i1, len(channels)), catalogue)
fig, ax = plt.subplots()
plot_spread_sigs(residuals, ax, color='k', lw=2)
plot_spread_sigs(pred, ax, color=color, lw=1.5)
        ax.set_title('Detected spike label {}'.format(label))
fig.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
residuals -= pred
plot_spread_sigs(pred, ax_predictions, color=color, lw=1.5)
fig, ax = plt.subplots()
plot_spread_sigs(residuals, ax, color='k', lw=2)
plot_spread_sigs(pred, ax, color=color, lw=1, ls='--')
        ax.set_title('New residual after subtraction')
fig.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
fig_pred.savefig('png/fig{}.png'.format(num_fig))
num_fig += 1
#~ plt.show()
def make_catalogue_figure():
dataio = DataIO(dirname=dirname)
catalogue = dataio.load_catalogue(chan_grp=0)
clusters = catalogue['clusters']
geometry = dataio.get_geometry(chan_grp=0)
fig, ax = plt.subplots()
    ax.set_title('Catalogue has 4 templates')
for i in range(clusters.size):
color = clusters[i]['color']
color = int32_to_rgba(color, mode='float')
waveforms = catalogue['centers0' ][i:i+1]
plot_waveforms_with_geometry(waveforms, channels, geometry,
ax=ax, ratioY=3, deltaX= 50, margin=50, color=color,
linewidth=3, alpha=1, show_amplitude=True, ratio_mad=8)
fig.savefig('../img/peeler_templates_for_animation.png')
#~ plt.show()
def make_pca_collision_figure():
dataio = DataIO(dirname=dirname)
cc = CatalogueConstructor(dataio=dataio)
clusters = cc.clusters
#~ plot_features_scatter_2d(cc, labels=None, nb_max=500)
#~ plot_features_scatter_2d
fig, ax = plt.subplots()
ax.set_title('Collision problem')
ax.set_aspect('equal')
features = cc.some_features
labels = cc.all_peaks[cc.some_peaks_index]['cluster_label']
for k in [0,1,2,3]:
color = clusters[clusters['cluster_label']==k]['color'][0]
color = int32_to_rgba(color, mode='float')
keep = labels==k
feat = features[keep]
print(np.unique(labels))
ax.plot(feat[:,0], feat[:,1], ls='None', marker='o', color=color, markersize=3, alpha=.5)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_xlabel('pca0')
ax.set_ylabel('pca1')
ax.annotate('Collision', xy=(17.6, -16.4), xytext=(30, -30),
arrowprops=dict(facecolor='black', shrink=0.05))
#~
fig.savefig('../img/collision_proble_pca.png')
#~ plt.show()
if __name__ == '__main__':
#~ make_catalogue()
#~ apply_peeler()
make_animation()
make_catalogue_figure()
make_pca_collision_figure()
#convert -delay 250 -loop 0 png/*.png ../img/peeler_animation.gif
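# Roughly equivalent Python call (illustrative only; assumes ImageMagick's
# `convert` is available on the PATH):
# import glob, subprocess
# subprocess.check_call(['convert', '-delay', '250', '-loop', '0']
#                       + sorted(glob.glob('png/*.png'))
#                       + ['../img/peeler_animation.gif'])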
|
mit
|
MohammedWasim/scikit-learn
|
sklearn/neighbors/tests/test_ball_tree.py
|
159
|
10196
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
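# Minkowski-style distance (Euclidean when called with p=2), used below as a
# custom callable metric to exercise BallTree's user-defined-metric support.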
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
|
bsd-3-clause
|
jlee335/cells
|
cells python/source_predator.py
|
1
|
8877
|
import math
import numpy as np
import random
import pickle
import matplotlib.pyplot as plt
import threading
from matplotlib.pyplot import plot, draw, ion, show
import os
from multiprocessing import Process
maparray = np.zeros((1024,1024,1))
totalscore = 0
fitness = 0
def sigmoid(x):
output = 1 / (1 + np.exp(-x))
return output
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
return output * (1 - output)
# def score(self) will determine the game rules eventually
def select(cell):
if (cell.score<(totalscore/50)):
cell.kill()
##################################CELL CLASS STARTS######################################
class prey:
x=0
y=0
life = False
    def generate(self, x, y):
        self.x = x
        self.y = y
        self.life = True
    def kill(self):
        self.life = False
def wander(self):
self.x += random.randrange(0,2)-1
self.y += random.randrange(0,2)-1
class cell: ## Cell class
#the 6 Input Nodes
x = 0
y = 0
up = 0
down = 0
left = 0
right = 0
layer_0 = []
score = 0
    ## weight matrices of the neural net: 6 inputs -> 6 -> 4 -> 1 output
synapse_0 = []
synapse_1 = []
synapse_2 = []
output = 0
life = False
visionarray = []
visionarray = np.zeros((129, 129))
    def generate(self, x, y):
        self.x = x
        self.y = y
        self.life = True
    def kill(self):
        self.life = False
def checklife(self):
return self.life
def getscore(self):
return self.score
def see(self):
#Setting VisionArray
rep = 0
for width in range(0,128):
for height in range(0,128):
if (self.x>64 and self.y>64 and self.x<895 and self.y<895):
self.visionarray[width, height] = maparray[self.x - 64 + width, self.y - 64 + height]
#Setting Up
for y in range(64,128):
rep += 1
for x in range(64-rep,64+rep):
if (self.visionarray[x,y] == 2):
self.up += 64-rep
rep = 0
#Setting Down
for y in range(0,64):
rep += 1
for x in range(0+rep,128-rep):
if (self.visionarray[x,y] == 2):
self.down += 64-rep
rep = 0
#Setting Left
for x in range(0,64):
rep += 1
for y in range(0+rep,128-rep):
if (self.visionarray[x,y] == 2):
self.left += 64-rep
rep = 0
#Setting Right
for x in range(64,128):
rep += 1
for y in range(64-rep,64+rep):
if (self.visionarray[x,y] == 2):
self.right += 64-rep
def decide(self):
        # (the weights were randomly initialized with mean 0 where the cells are created)
        # Feed forward through layers 0, 1, and 2
layer_0 = np.array([[self.x,self.y,self.up,self.down,self.left,self.right]])
layer_1 = sigmoid(np.dot(layer_0, self.synapse_0))
layer_2 = sigmoid(np.dot(layer_1, self.synapse_1))
self.output = sigmoid(np.dot(layer_2, self.synapse_2))
def act(self):
        # Decide which direction to move in:
        # sin/cos of the scaled output determine the movement direction
sin = math.sin(self.output*10)
cos = math.cos(self.output*10)
if sin>=0 and cos>=0:
self.y += 1
if sin<0 and cos<0:
self.y -= 1
if sin<0 and cos>=0:
self.x += 1
if sin>=0 and cos<0:
self.x -= 1
def mutate(self):
for i in range (0,len(self.synapse_0)-1):
self.synapse_0[i] += 10*(random.random()-0.5)
#print(i)
#print (self.synapse_0[i])
for i in range (0,len(self.synapse_1)-1):
self.synapse_1[i] += 10*(random.random()-0.5)
#print(i)
#print(self.synapse_1[i])
for i in range (0,len(self.synapse_2)-1):
self.synapse_2[i] += 10*(random.random()-0.5)
#print(i)
#print(self.synapse_2[i])
def addscore(self):
        # New rule: score a point when the cell lands on a prey cell (map value 2)
if (maparray[self.x,self.y]==2):
self.score += 1
def initialize(self):
self.up = 0
self.down = 0
self.left = 0
self.right = 0
########################END OF CELL CLASS###########################
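# Quick shape check of the feed-forward path used in cell.decide() above
# (illustrative only, not part of the simulation): the 1x6 input is mapped
# through 6x6, 6x4 and 4x1 weight matrices, so the output is a 1x1 array.
_demo = cell()
_demo.synapse_0 = 2 * np.random.random((6, 6)) - 1
_demo.synapse_1 = 2 * np.random.random((6, 4)) - 1
_demo.synapse_2 = 2 * np.random.random((4, 1)) - 1
_demo.generate(512, 512)
_demo.decide()
print("demo feed-forward output shape:", np.shape(_demo.output))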
cells = []
for x in range(5):
cells.append(cell())
cells[x].synapse_0 = 2 * np.random.random((6, 6)) - 1
cells[x].synapse_1 = 2 * np.random.random((6, 4)) - 1
cells[x].synapse_2 = 2 * np.random.random((4, 1)) - 1
with open("save.file", "rb") as f:
for j in range(0, 5):
cells[j] = pickle.load(f)
# Now you can use the dump object as the original one
########################GAME LOOP BEGINNING#########################
# Generation of the cell and prey lists (numcell of each)
numcell = 20
preys = []
cells = []
for x in range(numcell):
cells.append(cell())
preys.append(prey())
cells[x].synapse_0 = 2 * np.random.random((6, 6)) - 1
cells[x].synapse_1 = 2 * np.random.random((6, 4)) - 1
cells[x].synapse_2 = 2 * np.random.random((4, 1)) - 1
#Initial generation of random weight values for ANN
#Generation loop beginning
for f in range(500): ##Every Generation Loop
for i in range(numcell):
#Generate Cells in Area
preys[i].generate(random.randrange(500,700),random.randrange(500,700))
cells[i].generate(random.randrange(500,700),random.randrange(500,700))
    for z in range(0, 150):  ## each generation runs 150 simulation steps
print(z)
#see-> loads input, decide-> process through ANN, act-> output to action, score-> give score, initialize-> reset vis.
for i in range(numcell):
if (cells[i].x>1023):
cells[i].x = 1023
if (cells[i].y>1023):
cells[i].y = 1023
if (cells[i].x < 0):
cells[i].x = 0
if (cells[i].y < 0):
cells[i].y = 0
maparray[cells[i].x, cells[i].y] = 1
for i in range(numcell):
if (preys[i].x>1023):
preys[i].x = 1023
if (preys[i].y>1023):
preys[i].y = 1023
if (preys[i].x < 0):
preys[i].x = 0
if (preys[i].y < 0):
preys[i].y = 0
maparray[preys[i].x, preys[i].y] = 2
for i in range(numcell):
cells[i].see()
for i in range(numcell):
cells[i].decide()
for i in range(numcell):
maparray[cells[i].x,cells[i].y] = 0
for i in range(numcell):
cells[i].act()
for i in range(numcell):
maparray[preys[i].x,preys[i].y] = 0
for i in range(numcell):
preys[i].wander()
for i in range(numcell):
cells[i].addscore()
for i in range(numcell):
cells[i].initialize()
for i in range(numcell):
plt.scatter(cells[i].x, cells[i].y, s=2, c=1, alpha=0.5)
plt.scatter(preys[i].x, preys[i].y, s=2, c="g", alpha=0.5)
for i in range(0, numcell):
fitness += cells[i].score
#now, after 10000 movements, the cells will be:
#given average score
#latter half will be life = False
#In 25 length array, the cell numbers of those deceased will be remembered
#Each of survivors will be copied into the 25length array.
with open("save.file", "wb") as f:
for j in range(0, numcell):
pickle.dump(cells[j], f, pickle.HIGHEST_PROTOCOL)
# plt.show()
#RANDOM MUTATION WILL OCCUR
for j in range(0, numcell):
print("Score")
print(cells[j].score)
for i in range(0,numcell):
totalscore += cells[i].score
averagescore = totalscore/50
deadlist = []
for i in range(numcell):
if (cells[i].getscore()<averagescore):
cells[i].kill()
deadlist.append(i)
k = 0
for j in range(numcell):
        # copy each survivor over a dead slot, stopping when the dead slots run out
        if (cells[j].checklife() == True and k < len(deadlist)):
            cells[deadlist[k]] = cells[j]
            k += 1
k=0
for i in range(numcell):
randnum = random.random()
if (randnum>0.7):
cells[i].mutate()
if (cells[i].score == 0):
cells[i].mutate()
plt.show()
#Next objectives
#After wanted generations, save/load ANNs of each cell
#Graphical interface showing cell movements
#Set what determines the value of Up, Down, Left, Right
for j in range(0,numcell):
print (cells[j].synapse_0)
print (cells[j].synapse_1)
print (cells[j].synapse_2)
for j in range(0,numcell):
print (cells[j].score)
print("Fitness")
print(fitness)
|
apache-2.0
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Phylo/BaseTree.py
|
1
|
45007
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Base classes for Bio.Phylo objects.
All object representations for phylogenetic trees should derive from these base
classes in order to use the common methods defined on them.
"""
__docformat__ = "restructuredtext en"
import sys
# Add path to Bio
sys.path.append('../..')
from Bio._py3k import zip
from Bio._py3k import filter
from Bio._py3k import basestring
from Bio._py3k import unicode
import collections
import copy
import itertools
import random
import re
from Bio import _utils
# General tree-traversal algorithms
def _level_traverse(root, get_children):
"""Traverse a tree in breadth-first (level) order."""
Q = collections.deque([root])
while Q:
v = Q.popleft()
yield v
Q.extend(get_children(v))
def _preorder_traverse(root, get_children):
"""Traverse a tree in depth-first pre-order (parent before children)."""
def dfs(elem):
yield elem
for v in get_children(elem):
for u in dfs(v):
yield u
for elem in dfs(root):
yield elem
def _postorder_traverse(root, get_children):
"""Traverse a tree in depth-first post-order (children before parent)."""
def dfs(elem):
for v in get_children(elem):
for u in dfs(v):
yield u
yield elem
for elem in dfs(root):
yield elem
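def _traversal_order_demo():
    """Illustrative only (not part of Biopython): compare the three orders.
    Nodes are modelled as (name, children) tuples purely for this sketch:
    level order visits r,a,b,c; preorder visits r,a,b,c; postorder a,c,b,r.
    """
    toy = ('r', [('a', []), ('b', [('c', [])])])
    get_children = lambda node: node[1]
    return {order: [name for name, _ in traverse(toy, get_children)]
            for order, traverse in (('level', _level_traverse),
                                    ('preorder', _preorder_traverse),
                                    ('postorder', _postorder_traverse))}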
def _sorted_attrs(elem):
"""Get a flat list of elem's attributes, sorted for consistency."""
singles = []
lists = []
# Sort attributes for consistent results
for attrname, child in sorted(elem.__dict__.items(),
key=lambda kv: kv[0]):
if child is None:
continue
if isinstance(child, list):
lists.extend(child)
else:
singles.append(child)
return (x for x in singles + lists
if isinstance(x, TreeElement))
# Factory functions to generalize searching for clades/nodes
def _identity_matcher(target):
"""Match a node to the target object by identity."""
def match(node):
return (node is target)
return match
def _class_matcher(target_cls):
"""Match a node if it's an instance of the given class."""
def match(node):
return isinstance(node, target_cls)
return match
def _string_matcher(target):
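    """Match a node whose string representation equals the given string."""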
def match(node):
return unicode(node) == target
return match
def _attribute_matcher(kwargs):
"""Match a node by specified attribute values.
``terminal`` is a special case: True restricts the search to external (leaf)
nodes, False restricts to internal nodes, and None allows all tree elements
to be searched, including phyloXML annotations.
Otherwise, for a tree element to match the specification (i.e. for the
function produced by `_attribute_matcher` to return True when given a tree
element), it must have each of the attributes specified by the keys and
match each of the corresponding values -- think 'and', not 'or', for
multiple keys.
"""
def match(node):
if 'terminal' in kwargs:
# Special case: restrict to internal/external/any nodes
kwa_copy = kwargs.copy()
pattern = kwa_copy.pop('terminal')
if (pattern is not None and
(not hasattr(node, 'is_terminal') or
node.is_terminal() != pattern)):
return False
else:
kwa_copy = kwargs
for key, pattern in kwa_copy.items():
# Nodes must match all other specified attributes
if not hasattr(node, key):
return False
target = getattr(node, key)
if isinstance(pattern, basestring):
return (isinstance(target, basestring) and
re.match(pattern + '$', target))
if isinstance(pattern, bool):
return (pattern == bool(target))
if isinstance(pattern, int):
return (pattern == target)
if pattern is None:
return (target is None)
raise TypeError('invalid query type: %s' % type(pattern))
return True
return match
def _function_matcher(matcher_func):
"""Safer attribute lookup -- returns False instead of raising an error."""
def match(node):
try:
return matcher_func(node)
except (LookupError, AttributeError, ValueError, TypeError):
return False
return match
def _object_matcher(obj):
"""Retrieve a matcher function by passing an arbitrary object.
i.e. passing a `TreeElement` such as a `Clade` or `Tree` instance returns an
identity matcher, passing a type such as the `PhyloXML.Taxonomy` class
returns a class matcher, and passing a dictionary returns an attribute
matcher.
The resulting 'match' function returns True when given an object matching
the specification (identity, type or attribute values), otherwise False.
This is useful for writing functions that search the tree, and probably
shouldn't be used directly by the end user.
"""
if isinstance(obj, TreeElement):
return _identity_matcher(obj)
if isinstance(obj, type):
return _class_matcher(obj)
if isinstance(obj, basestring):
return _string_matcher(obj)
if isinstance(obj, dict):
return _attribute_matcher(obj)
if callable(obj):
return _function_matcher(obj)
raise ValueError("%s (type %s) is not a valid type for comparison."
% (obj, type(obj)))
def _combine_matchers(target, kwargs, require_spec):
"""Merge target specifications with keyword arguments.
Dispatch the components to the various matcher functions, then merge into a
single boolean function.
"""
if not target:
if not kwargs:
if require_spec:
raise ValueError("you must specify a target object or keyword "
"arguments.")
return lambda x: True
return _attribute_matcher(kwargs)
match_obj = _object_matcher(target)
if not kwargs:
return match_obj
match_kwargs = _attribute_matcher(kwargs)
return (lambda x: match_obj(x) and match_kwargs(x))
def _combine_args(first, *rest):
"""Convert ``[targets]`` or ``*targets`` arguments to a single iterable.
This helps other functions work like the built-in functions `max` and
`min`.
"""
# Background: is_monophyletic takes a single list or iterable (like the
# same method in Bio.Nexus.Trees); root_with_outgroup and common_ancestor
# take separate arguments. This mismatch was in the initial release and I
# didn't notice the inconsistency until after Biopython 1.55. I can think
# of cases where either style is more convenient, so let's support both
# (for backward compatibility and consistency between methods).
if hasattr(first, '__iter__') and not (isinstance(first, TreeElement) or
isinstance(first, type) or
isinstance(first, basestring) or
isinstance(first, dict)):
# `terminals` is an iterable of targets
if rest:
raise ValueError("Arguments must be either a single list of "
"targets, or separately specified targets "
"(e.g. foo(t1, t2, t3)), but not both.")
return first
# `terminals` is a single target -- wrap in a container
return itertools.chain([first], rest)
# Class definitions
class TreeElement(object):
"""Base class for all Bio.Phylo classes."""
def __repr__(self):
"""Show this object's constructor with its primitive arguments."""
def pair_as_kwarg_string(key, val):
if isinstance(val, basestring):
return "%s='%s'" % (key, _utils.trim_str(unicode(val), 60,
u'...'))
return "%s=%s" % (key, val)
return u'%s(%s)' % (self.__class__.__name__,
', '.join(pair_as_kwarg_string(key, val)
for key, val in sorted(self.__dict__.items())
if val is not None and
type(val) in (
str, int, float, bool, unicode)
))
__str__ = __repr__
class TreeMixin(object):
"""Methods for Tree- and Clade-based classes.
This lets `Tree` and `Clade` support the same traversal and searching
operations without requiring Clade to inherit from Tree, so Clade isn't
required to have all of Tree's attributes -- just ``root`` (a Clade
instance) and ``is_terminal``.
"""
# Traversal methods
def _filter_search(self, filter_func, order, follow_attrs):
"""Perform a BFS or DFS traversal through all elements in this tree.
:returns: generator of all elements for which `filter_func` is True.
"""
order_opts = {'preorder': _preorder_traverse,
'postorder': _postorder_traverse,
'level': _level_traverse}
try:
order_func = order_opts[order]
except KeyError:
raise ValueError("Invalid order '%s'; must be one of: %s"
% (order, tuple(order_opts)))
if follow_attrs:
get_children = _sorted_attrs
root = self
else:
get_children = lambda elem: elem.clades
root = self.root
return filter(filter_func, order_func(root, get_children))
def find_any(self, *args, **kwargs):
"""Return the first element found by find_elements(), or None.
This is also useful for checking whether any matching element exists in
the tree, and can be used in a conditional expression.
"""
hits = self.find_elements(*args, **kwargs)
try:
return next(hits)
except StopIteration:
return None
def find_elements(self, target=None, terminal=None, order='preorder',
**kwargs):
"""Find all tree elements matching the given attributes.
The arbitrary keyword arguments indicate the attribute name of the
sub-element and the value to match: string, integer or boolean. Strings
are evaluated as regular expression matches; integers are compared
directly for equality, and booleans evaluate the attribute's truth value
(True or False) before comparing. To handle nonzero floats, search with
a boolean argument, then filter the result manually.
If no keyword arguments are given, then just the class type is used for
matching.
The result is an iterable through all matching objects, by depth-first
search. (Not necessarily the same order as the elements appear in the
source file!)
:Parameters:
target : TreeElement instance, type, dict, or callable
Specifies the characteristics to search for. (The default,
TreeElement, matches any standard Bio.Phylo type.)
terminal : bool
A boolean value to select for or against terminal nodes (a.k.a.
leaf nodes). True searches for only terminal nodes, False
excludes terminal nodes, and the default, None, searches both
terminal and non-terminal nodes, as well as any tree elements
lacking the ``is_terminal`` method.
order : {'preorder', 'postorder', 'level'}
Tree traversal order: 'preorder' (default) is depth-first
search, 'postorder' is DFS with child nodes preceding parents,
and 'level' is breadth-first search.
Example
-------
        >>> from Bio.Phylo.IO import PhyloXMLIO
>>> phx = PhyloXMLIO.read('phyloxml_examples.xml')
>>> matches = phx.phylogenies[5].find_elements(code='OCTVU')
>>> next(matches)
Taxonomy(code='OCTVU', scientific_name='Octopus vulgaris')
"""
if terminal is not None:
kwargs['terminal'] = terminal
is_matching_elem = _combine_matchers(target, kwargs, False)
return self._filter_search(is_matching_elem, order, True)
def find_clades(self, target=None, terminal=None, order='preorder',
**kwargs):
"""Find each clade containing a matching element.
That is, find each element as with find_elements(), but return the
corresponding clade object. (This is usually what you want.)
:returns: an iterable through all matching objects, searching
depth-first (preorder) by default.
"""
def match_attrs(elem):
orig_clades = elem.__dict__.pop('clades')
found = elem.find_any(target, **kwargs)
elem.clades = orig_clades
return (found is not None)
if terminal is None:
is_matching_elem = match_attrs
else:
def is_matching_elem(elem):
return ((elem.is_terminal() == terminal) and
match_attrs(elem))
return self._filter_search(is_matching_elem, order, False)
def get_path(self, target=None, **kwargs):
"""List the clades directly between this root and the given target.
:returns: list of all clade objects along this path, ending with the
given target, but excluding the root clade.
"""
# Only one path will work -- ignore weights and visits
path = []
match = _combine_matchers(target, kwargs, True)
def check_in_path(v):
if match(v):
path.append(v)
return True
elif v.is_terminal():
return False
for child in v:
if check_in_path(child):
path.append(v)
return True
return False
if not check_in_path(self.root):
return None
return path[-2::-1]
def get_nonterminals(self, order='preorder'):
"""Get a list of all of this tree's nonterminal (internal) nodes."""
return list(self.find_clades(terminal=False, order=order))
def get_terminals(self, order='preorder'):
"""Get a list of all of this tree's terminal (leaf) nodes."""
return list(self.find_clades(terminal=True, order=order))
def trace(self, start, finish):
"""List of all clade object between two targets in this tree.
Excluding `start`, including `finish`.
"""
mrca = self.common_ancestor(start, finish)
fromstart = mrca.get_path(start)[-2::-1]
to = mrca.get_path(finish)
return fromstart + [mrca] + to
# Information methods
def common_ancestor(self, targets, *more_targets):
"""Most recent common ancestor (clade) of all the given targets.
Edge cases:
- If no target is given, returns self.root
- If 1 target is given, returns the target
- If any target is not found in this tree, raises a ValueError
"""
paths = [self.get_path(t)
for t in _combine_args(targets, *more_targets)]
# Validation -- otherwise izip throws a spooky error below
for p, t in zip(paths, targets):
if p is None:
raise ValueError("target %s is not in this tree" % repr(t))
mrca = self.root
for level in zip(*paths):
ref = level[0]
for other in level[1:]:
if ref is not other:
break
else:
mrca = ref
if ref is not mrca:
break
return mrca
def count_terminals(self):
"""Counts the number of terminal (leaf) nodes within this tree."""
return _utils.iterlen(self.find_clades(terminal=True))
def depths(self, unit_branch_lengths=False):
"""Create a mapping of tree clades to depths (by branch length).
:Parameters:
unit_branch_lengths : bool
If True, count only the number of branches (levels in the tree).
By default the distance is the cumulative branch length leading
to the clade.
:returns: dict of {clade: depth}, where keys are all of the Clade
instances in the tree, and values are the distance from the root to
each clade (including terminals).
"""
if unit_branch_lengths:
depth_of = lambda c: 1
else:
depth_of = lambda c: c.branch_length or 0
depths = {}
def update_depths(node, curr_depth):
depths[node] = curr_depth
for child in node.clades:
new_depth = curr_depth + depth_of(child)
update_depths(child, new_depth)
update_depths(self.root, self.root.branch_length or 0)
return depths
def distance(self, target1, target2=None):
"""Calculate the sum of the branch lengths between two targets.
If only one target is specified, the other is the root of this tree.
"""
if target2 is None:
return sum(n.branch_length for n in self.get_path(target1)
if n.branch_length is not None)
mrca = self.common_ancestor(target1, target2)
return mrca.distance(target1) + mrca.distance(target2)
def is_bifurcating(self):
"""Return True if tree downstream of node is strictly bifurcating.
I.e., all nodes have either 2 or 0 children (internal or external,
respectively). The root may have 3 descendents and still be considered
part of a bifurcating tree, because it has no ancestor.
"""
# Root can be trifurcating
if isinstance(self, Tree) and len(self.root) == 3:
return (self.root.clades[0].is_bifurcating() and
self.root.clades[1].is_bifurcating() and
self.root.clades[2].is_bifurcating())
if len(self.root) == 2:
return (self.root.clades[0].is_bifurcating() and
self.root.clades[1].is_bifurcating())
if len(self.root) == 0:
return True
return False
def is_monophyletic(self, terminals, *more_terminals):
"""MRCA of terminals if they comprise a complete subclade, or False.
I.e., there exists a clade such that its terminals are the same set as
the given targets.
The given targets must be terminals of the tree.
To match both `Bio.Nexus.Trees` and the other multi-target methods in
Bio.Phylo, arguments to this method can be specified either of two ways:
(i) as a single list of targets, or (ii) separately specified targets,
e.g. is_monophyletic(t1, t2, t3) -- but not both.
        For convenience, this method returns the common ancestor (MRCA) of the
targets if they are monophyletic (instead of the value True), and False
otherwise.
:returns: common ancestor if terminals are monophyletic, otherwise False.
"""
target_set = set(_combine_args(terminals, *more_terminals))
current = self.root
while True:
if set(current.get_terminals()) == target_set:
return current
# Try a narrower subclade
for subclade in current.clades:
if set(subclade.get_terminals()).issuperset(target_set):
current = subclade
break
else:
return False
def is_parent_of(self, target=None, **kwargs):
"""True if target is a descendent of this tree.
Not required to be a direct descendent.
To check only direct descendents of a clade, simply use list membership
testing: ``if subclade in clade: ...``
"""
return self.get_path(target, **kwargs) is not None
def is_preterminal(self):
"""True if all direct descendents are terminal."""
if self.root.is_terminal():
return False
for clade in self.root.clades:
if not clade.is_terminal():
return False
return True
def total_branch_length(self):
"""Calculate the sum of all the branch lengths in this tree."""
return sum(node.branch_length
for node in self.find_clades(branch_length=True))
# Tree manipulation methods
def collapse(self, target=None, **kwargs):
"""Deletes target from the tree, relinking its children to its parent.
:returns: the parent clade.
"""
path = self.get_path(target, **kwargs)
if not path:
raise ValueError("couldn't collapse %s in this tree"
% (target or kwargs))
if len(path) == 1:
parent = self.root
else:
parent = path[-2]
popped = parent.clades.pop(parent.clades.index(path[-1]))
extra_length = popped.branch_length or 0
for child in popped:
child.branch_length += extra_length
parent.clades.extend(popped.clades)
return parent
def collapse_all(self, target=None, **kwargs):
"""Collapse all the descendents of this tree, leaving only terminals.
Total branch lengths are preserved, i.e. the distance to each terminal
stays the same.
For example, this will safely collapse nodes with poor bootstrap
support:
>>> tree.collapse_all(lambda c: c.confidence is not None and
... c.confidence < 70)
This implementation avoids strange side-effects by using level-order
traversal and testing all clade properties (versus the target
specification) up front. In particular, if a clade meets the target
specification in the original tree, it will be collapsed. For example,
if the condition is:
>>> tree.collapse_all(lambda c: c.branch_length < 0.1)
Collapsing a clade's parent node adds the parent's branch length to the
child, so during the execution of collapse_all, a clade's branch_length
may increase. In this implementation, clades are collapsed according to
their properties in the original tree, not the properties when tree
traversal reaches the clade. (It's easier to debug.) If you want the
other behavior (incremental testing), modifying the source code of this
function is straightforward.
"""
# Read the iterable into a list to protect against in-place changes
matches = list(self.find_clades(target, False, 'level', **kwargs))
if not matches:
# No matching nodes to collapse
return
# Skip the root node -- it can't be collapsed
if matches[0] == self.root:
matches.pop(0)
for clade in matches:
self.collapse(clade)
def ladderize(self, reverse=False):
"""Sort clades in-place according to the number of terminal nodes.
Deepest clades are last by default. Use ``reverse=True`` to sort clades
deepest-to-shallowest.
"""
self.root.clades.sort(key=lambda c: c.count_terminals(),
reverse=reverse)
for subclade in self.root.clades:
subclade.ladderize(reverse=reverse)
def prune(self, target=None, **kwargs):
"""Prunes a terminal clade from the tree.
        If the taxon is from a bifurcation, the connecting node will be collapsed
        and its branch length added to the remaining terminal node. This might no
        longer be a meaningful value.
:returns: parent clade of the pruned target
"""
if 'terminal' in kwargs and kwargs['terminal']:
raise ValueError("target must be terminal")
path = self.get_path(target, terminal=True, **kwargs)
if not path:
raise ValueError("can't find a matching target below this root")
if len(path) == 1:
parent = self.root
else:
parent = path[-2]
parent.clades.remove(path[-1])
if len(parent) == 1:
# We deleted a branch from a bifurcation
if parent == self.root:
# If we're at the root, move the root upwards
# NB: This loses the length of the original branch
newroot = parent.clades[0]
newroot.branch_length = None
parent = self.root = newroot
else:
# If we're not at the root, collapse this parent
child = parent.clades[0]
if child.branch_length is not None:
child.branch_length += (parent.branch_length or 0.0)
if len(path) < 3:
grandparent = self.root
else:
grandparent = path[-3]
# Replace parent with child at the same place in grandparent
index = grandparent.clades.index(parent)
grandparent.clades.pop(index)
grandparent.clades.insert(index, child)
parent = grandparent
return parent
def split(self, n=2, branch_length=1.0):
"""Generate n (default 2) new descendants.
In a species tree, this is a speciation event.
New clades have the given branch_length and the same name as this
clade's root plus an integer suffix (counting from 0). For example,
splitting a clade named "A" produces sub-clades named "A0" and "A1".
If the clade has no name, the prefix "n" is used for child nodes, e.g.
"n0" and "n1".
"""
clade_cls = type(self.root)
base_name = self.root.name or 'n'
for i in range(n):
clade = clade_cls(name=base_name + str(i),
branch_length=branch_length)
self.root.clades.append(clade)
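def _split_demo():
    """Illustrative only (not part of Biopython): split() numbers the children.
    >>> _split_demo()
    ['A0', 'A1']
    """
    clade = Clade(name='A')
    clade.split(n=2, branch_length=0.5)
    return [child.name for child in clade.clades]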
class Tree(TreeElement, TreeMixin):
"""A phylogenetic tree, containing global info for the phylogeny.
The structure and node-specific data is accessible through the 'root'
clade attached to the Tree instance.
:Parameters:
root : Clade
The starting node of the tree. If the tree is rooted, this will
usually be the root node.
rooted : bool
Whether or not the tree is rooted. By default, a tree is assumed to
be rooted.
id : str
The identifier of the tree, if there is one.
name : str
The name of the tree, in essence a label.
"""
def __init__(self, root=None, rooted=True, id=None, name=None):
self.root = root or Clade()
self.rooted = rooted
self.id = id
self.name = name
@classmethod
def from_clade(cls, clade, **kwargs):
"""Create a new Tree object given a clade.
Keyword arguments are the usual `Tree` constructor parameters.
"""
root = copy.deepcopy(clade)
return cls(root, **kwargs)
@classmethod
def randomized(cls, taxa, branch_length=1.0, branch_stdev=None):
"""Create a randomized bifurcating tree given a list of taxa.
:param taxa: Either an integer specifying the number of taxa to create
(automatically named taxon#), or an iterable of taxon names, as
strings.
:returns: a tree of the same type as this class.
"""
if isinstance(taxa, int):
taxa = ['taxon%s' % (i + 1) for i in range(taxa)]
elif hasattr(taxa, '__iter__'):
taxa = list(taxa)
else:
raise TypeError("taxa argument must be integer (# taxa) or "
"iterable of taxon names.")
rtree = cls()
terminals = [rtree.root]
while len(terminals) < len(taxa):
newsplit = random.choice(terminals)
newsplit.split(branch_length=branch_length)
newterms = newsplit.clades
if branch_stdev:
# Add some noise to the branch lengths
for nt in newterms:
nt.branch_length = max(0,
random.gauss(branch_length, branch_stdev))
terminals.remove(newsplit)
terminals.extend(newterms)
# Distribute taxon labels randomly
random.shuffle(taxa)
for node, name in zip(terminals, taxa):
node.name = name
return rtree
@property
def clade(self):
"""The first clade in this tree (not itself)."""
return self.root
def as_phyloxml(self, **kwargs):
"""Convert this tree to a PhyloXML-compatible Phylogeny.
This lets you use the additional annotation types PhyloXML defines, and
save this information when you write this tree as 'phyloxml'.
"""
from Bio.Phylo.PhyloXML import Phylogeny
return Phylogeny.from_tree(self, **kwargs)
# XXX Py3 Compatibility: In Python 3.0+, **kwargs can be replaced with the
# named keyword argument outgroup_branch_length=None
def root_with_outgroup(self, outgroup_targets, *more_targets, **kwargs):
"""Reroot this tree with the outgroup clade containing outgroup_targets.
Operates in-place.
Edge cases:
- If ``outgroup == self.root``, no change
- If outgroup is terminal, create new bifurcating root node with a
0-length branch to the outgroup
- If outgroup is internal, use the given outgroup node as the new
trifurcating root, keeping branches the same
- If the original root was bifurcating, drop it from the tree,
preserving total branch lengths
:param outgroup_branch_length: length of the branch leading to the
outgroup after rerooting. If not specified (None), then:
- If the outgroup is an internal node (not a single terminal taxon),
then use that node as the new root.
- Otherwise, create a new root node as the parent of the outgroup.
"""
# This raises a ValueError if any target is not in this tree
# Otherwise, common_ancestor guarantees outgroup is in this tree
outgroup = self.common_ancestor(outgroup_targets, *more_targets)
outgroup_path = self.get_path(outgroup)
if len(outgroup_path) == 0:
# Outgroup is the current root -- no change
return
prev_blen = outgroup.branch_length or 0.0
# Hideous kludge because Py2.x doesn't allow keyword args after *args
outgroup_branch_length = kwargs.get('outgroup_branch_length')
if outgroup_branch_length is not None:
assert 0 <= outgroup_branch_length <= prev_blen, \
"outgroup_branch_length must be between 0 and the " \
"original length of the branch leading to the outgroup."
if outgroup.is_terminal() or outgroup_branch_length is not None:
# Create a new root with a 0-length branch to the outgroup
outgroup.branch_length = outgroup_branch_length or 0.0
new_root = self.root.__class__(
branch_length=self.root.branch_length, clades=[outgroup])
# The first branch reversal (see the upcoming loop) is modified
if len(outgroup_path) == 1:
# No nodes between the original root and outgroup to rearrange.
# Most of the code below will be skipped, but we still need
# 'new_parent' pointing at the new root.
new_parent = new_root
else:
parent = outgroup_path.pop(-2)
# First iteration of reversing the path to the outgroup
parent.clades.pop(parent.clades.index(outgroup))
(prev_blen, parent.branch_length) = (parent.branch_length,
prev_blen - outgroup.branch_length)
new_root.clades.insert(0, parent)
new_parent = parent
else:
# Use the given outgroup node as the new (trifurcating) root
new_root = outgroup
new_root.branch_length = self.root.branch_length
new_parent = new_root
# Tracing the outgroup lineage backwards, reattach the subclades under a
# new root clade. Reverse the branches directly above the outgroup in
# the tree, but keep the descendants of those clades as they are.
for parent in outgroup_path[-2::-1]:
parent.clades.pop(parent.clades.index(new_parent))
prev_blen, parent.branch_length = parent.branch_length, prev_blen
new_parent.clades.insert(0, parent)
new_parent = parent
# Finally, handle the original root according to number of descendents
old_root = self.root
if outgroup in old_root.clades:
assert len(outgroup_path) == 1
old_root.clades.pop(old_root.clades.index(outgroup))
else:
old_root.clades.pop(old_root.clades.index(new_parent))
if len(old_root) == 1:
# Delete the old bifurcating root & add branch lengths
ingroup = old_root.clades[0]
if ingroup.branch_length:
ingroup.branch_length += prev_blen
else:
ingroup.branch_length = prev_blen
new_parent.clades.insert(0, ingroup)
# ENH: If annotations are attached to old_root, do... something.
else:
# Keep the old trifurcating/polytomous root as an internal node
old_root.branch_length = prev_blen
new_parent.clades.insert(0, old_root)
self.root = new_root
self.rooted = True
return
def root_at_midpoint(self):
"""Root the tree at the midpoint of the two most distant taxa.
This operates in-place, leaving a bifurcating root. The topology of the
tree is otherwise retained, though no guarantees are made about the
stability of clade/node/taxon ordering.
"""
# Identify the largest pairwise distance
max_distance = 0.0
tips = self.get_terminals()
for tip in tips:
self.root_with_outgroup(tip)
new_max = max(self.depths().items(), key=lambda nd: nd[1])
if new_max[1] > max_distance:
tip1 = tip
tip2 = new_max[0]
max_distance = new_max[1]
self.root_with_outgroup(tip1)
# Depth to go from the ingroup tip toward the outgroup tip
root_remainder = 0.5 * (max_distance - (self.root.branch_length or 0))
assert root_remainder >= 0
# Identify the midpoint and reroot there.
# Trace the path to the outgroup tip until all of the root depth has
# been traveled/accounted for.
for node in self.get_path(tip2):
root_remainder -= node.branch_length
if root_remainder < 0:
outgroup_node = node
outgroup_branch_length = -root_remainder
break
else:
raise ValueError("Somehow, failed to find the midpoint!")
self.root_with_outgroup(outgroup_node,
outgroup_branch_length=outgroup_branch_length)
# Method assumed by TreeMixin
def is_terminal(self):
"""True if the root of this tree is terminal."""
return (not self.root.clades)
# Convention from SeqRecord and Alignment classes
def __format__(self, format_spec):
"""Serialize the tree as a string in the specified file format.
This method supports the ``format`` built-in function added in Python
2.6/3.0.
:param format_spec: a lower-case string supported by `Bio.Phylo.write`
as an output file format.
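        Example (illustrative; ``tree`` is assumed to be a ``Tree`` instance,
        and ``'newick'`` is one of the formats handled by `Bio.Phylo.write`)::
            newick_string = format(tree, 'newick')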
"""
if format_spec:
from Bio._py3k import StringIO
from Bio.Phylo import _io
handle = StringIO()
_io.write([self], handle, format_spec)
return handle.getvalue()
else:
# Follow python convention and default to using __str__
return str(self)
def format(self, format):
"""Serialize the tree as a string in the specified file format.
This duplicates the __format__ magic method for pre-2.6 Pythons.
"""
return self.__format__(format)
# Pretty-printer for the entire tree hierarchy
def __str__(self):
"""String representation of the entire tree.
Serializes each sub-clade recursively using ``repr`` to create a summary
of the object structure.
"""
TAB = ' '
textlines = []
def print_tree(obj, indent):
"""Recursively serialize sub-elements.
This closes over textlines and modifies it in-place.
"""
textlines.append(TAB * indent + repr(obj))
indent += 1
for attr in obj.__dict__:
child = getattr(obj, attr)
if isinstance(child, TreeElement):
print_tree(child, indent)
elif isinstance(child, list):
for elem in child:
if isinstance(elem, TreeElement):
print_tree(elem, indent)
print_tree(self, 0)
return '\n'.join(textlines)
class Clade(TreeElement, TreeMixin):
"""A recursively defined sub-tree.
:Parameters:
        branch_length : float
The length of the branch leading to the root node of this clade.
name : str
The clade's name (a label).
clades : list
Sub-trees rooted directly under this tree's root.
confidence : number
Support.
color : BranchColor
            The display color of the branch and descendants.
width : number
            The display width of the branch and descendants.
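    Example (illustrative)::
        inner = Clade(branch_length=0.5, name="inner",
                      clades=[Clade(0.1, "A"), Clade(0.2, "B")])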
"""
def __init__(self, branch_length=None, name=None, clades=None,
confidence=None, color=None, width=None):
self.branch_length = branch_length
self.name = name
self.clades = clades or []
self.confidence = confidence
self.color = color
self.width = width
@property
def root(self):
"""Allow TreeMixin methods to traverse clades properly."""
return self
def is_terminal(self):
"""True if this is a terminal (leaf) node."""
return (not self.clades)
# Sequence-type behavior methods
def __getitem__(self, index):
"""Get clades by index (integer or slice)."""
if isinstance(index, int) or isinstance(index, slice):
return self.clades[index]
ref = self
for idx in index:
ref = ref[idx]
return ref
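    # Illustrative indexing sketch (not part of the original source):
    #   clade[0]      -> first direct sub-clade
    #   clade[1:3]    -> list of direct sub-clades (slice)
    #   clade[0, 1]   -> second sub-clade of the first sub-clade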
def __iter__(self):
"""Iterate through this tree's direct descendent clades (sub-trees)."""
return iter(self.clades)
def __len__(self):
"""Number of clades directy under the root."""
return len(self.clades)
# Python 3:
def __bool__(self):
"""Boolean value of an instance of this class (True).
NB: If this method is not defined, but ``__len__`` is, then the object
is considered true if the result of ``__len__()`` is nonzero. We want
Clade instances to always be considered True.
"""
return True
# Python 2:
__nonzero__ = __bool__
def __str__(self):
if self.name:
return _utils.trim_str(self.name, 40, '...')
return self.__class__.__name__
# Syntax sugar for setting the branch color
def _get_color(self):
return self._color
def _set_color(self, arg):
if arg is None or isinstance(arg, BranchColor):
self._color = arg
elif isinstance(arg, basestring):
if arg in BranchColor.color_names:
# Known color name
self._color = BranchColor.from_name(arg)
elif arg.startswith('#') and len(arg) == 7:
# HTML-style hex string
self._color = BranchColor.from_hex(arg)
else:
raise ValueError("invalid color string %s" % arg)
elif hasattr(arg, '__iter__') and len(arg) == 3:
# RGB triplet
self._color = BranchColor(*arg)
else:
raise ValueError("invalid color value %s" % arg)
color = property(_get_color, _set_color, doc="Branch color.")
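    # Illustrative sketch of the accepted color forms (not in the original
    # source):
    #   clade.color = "red"            # known color name
    #   clade.color = "#ff8000"        # HTML-style hex string
    #   clade.color = (255, 128, 0)    # RGB triplet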
class BranchColor(object):
"""Indicates the color of a clade when rendered graphically.
The color should be interpreted by client code (e.g. visualization
programs) as applying to the whole clade, unless overwritten by the
color(s) of sub-clades.
Color values must be integers from 0 to 255.
"""
color_names = {
'red': (255, 0, 0),
'r': (255, 0, 0),
'yellow': (255, 255, 0),
'y': (255, 255, 0),
'green': ( 0, 128, 0),
'g': ( 0, 128, 0),
'cyan': ( 0, 255, 255),
'c': ( 0, 255, 255),
'blue': ( 0, 0, 255),
'b': ( 0, 0, 255),
'magenta': (255, 0, 255),
'm': (255, 0, 255),
'black': ( 0, 0, 0),
'k': ( 0, 0, 0),
'white': (255, 255, 255),
'w': (255, 255, 255),
# Names standardized in HTML/CSS spec
# http://w3schools.com/html/html_colornames.asp
'maroon': (128, 0, 0),
'olive': (128, 128, 0),
'lime': ( 0, 255, 0),
'aqua': ( 0, 255, 255),
'teal': ( 0, 128, 128),
'navy': ( 0, 0, 128),
'fuchsia': (255, 0, 255),
'purple': (128, 0, 128),
'silver': (192, 192, 192),
'gray': (128, 128, 128),
# More definitions from matplotlib/gcolor2
'grey': (128, 128, 128),
'pink': (255, 192, 203),
'salmon': (250, 128, 114),
'orange': (255, 165, 0),
'gold': (255, 215, 0),
'tan': (210, 180, 140),
'brown': (165, 42, 42),
}
def __init__(self, red, green, blue):
for color in (red, green, blue):
assert (isinstance(color, int) and
0 <= color <= 255
), "Color values must be integers between 0 and 255."
self.red = red
self.green = green
self.blue = blue
@classmethod
def from_hex(cls, hexstr):
"""Construct a BranchColor object from a hexadecimal string.
The string format is the same style used in HTML and CSS, such as
'#FF8000' for an RGB value of (255, 128, 0).
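        Example:
        >>> BranchColor.from_hex('#FF8000').to_rgb()
        (255, 128, 0)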
"""
assert (isinstance(hexstr, basestring) and
hexstr.startswith('#') and
len(hexstr) == 7
), "need a 24-bit hexadecimal string, e.g. #000000"
RGB = hexstr[1:3], hexstr[3:5], hexstr[5:]
return cls(*[int('0x' + cc, base=16) for cc in RGB])
@classmethod
def from_name(cls, colorname):
"""Construct a BranchColor object by the color's name."""
return cls(*cls.color_names[colorname])
def to_hex(self):
"""Return a 24-bit hexadecimal RGB representation of this color.
The returned string is suitable for use in HTML/CSS, as a color
parameter in matplotlib, and perhaps other situations.
Example:
>>> bc = BranchColor(12, 200, 100)
>>> bc.to_hex()
'#0cc864'
"""
return "#%02x%02x%02x" % (self.red, self.green, self.blue)
def to_rgb(self):
"""Return a tuple of RGB values (0 to 255) representing this color.
Example:
>>> bc = BranchColor(255, 165, 0)
>>> bc.to_rgb()
(255, 165, 0)
"""
return (self.red, self.green, self.blue)
def __repr__(self):
"""Preserve the standard RGB order when representing this object."""
return (u'%s(red=%d, green=%d, blue=%d)'
% (self.__class__.__name__, self.red, self.green, self.blue))
def __str__(self):
"""Show the color's RGB values."""
return "(%d, %d, %d)" % (self.red, self.green, self.blue)
|
gpl-2.0
|
empeeu/numpy
|
doc/example.py
|
81
|
3581
|
"""This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead, or import individual functions as needed, e.g.:
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
    And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
    a
    <BLANKLINE>
    b
"""
pass
|
bsd-3-clause
|
kastman/lyman
|
conftest.py
|
1
|
10615
|
import numpy as np
import pandas as pd
import nibabel as nib
import pytest
from moss import Bunch # TODO change to lyman version when implemented
@pytest.fixture()
def execdir(tmpdir):
origdir = tmpdir.chdir()
yield tmpdir
origdir.chdir()
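# Illustrative sketch (not part of lyman): a test that requests ``execdir``
# runs with the pytest tmpdir as its working directory, e.g.
#
#     def test_example(execdir):
#         import os
#         assert os.getcwd() == str(execdir)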
@pytest.fixture()
def lyman_info(tmpdir):
data_dir = tmpdir.mkdir("data")
proc_dir = tmpdir.mkdir("analysis")
cache_dir = tmpdir.mkdir("cache")
# TODO probably get these from default info functions
scan_info = {
"subj01": {
"sess01":
{"exp_alpha": ["run01", "run02"]},
"sess02":
{"exp_alpha": ["run01"],
"exp_beta": ["run01", "run02", "run03"]},
},
"subj02": {
"sess01":
{"exp_alpha": ["run01", "run02", "run03"]}
},
}
proj_info = Bunch(
data_dir=str(data_dir),
proc_dir=str(proc_dir),
cache_dir=str(cache_dir),
scan_info=scan_info,
phase_encoding="ap",
fm_template="{session}_{encoding}.nii.gz",
ts_template="{session}_{experiment}_{run}.nii.gz",
sb_template="{session}_{experiment}_{run}_sbref.nii.gz",
)
exp_info = Bunch(
name="exp_alpha",
tr=1.5,
)
model_info = Bunch(
name="model_a",
smooth_fwhm=4,
surface_smoothing=True,
hpf_cutoff=10,
save_residuals=True,
# TODO FIX
contrasts=["a", "b", "a-b"]
)
subjects = ["subj01", "subj02"]
sessions = None
design = pd.DataFrame(dict(
onset=[0, 6, 12, 18, 24],
condition=["a", "b", "c", "b", "a"],
session="sess01",
run="run01",
))
for subject in subjects:
subject_dir = data_dir.mkdir(subject)
subject_dir.mkdir("mri")
subject_dir.mkdir("surf")
subject_dir.mkdir("func")
design_dir = subject_dir.mkdir("design")
design.to_csv(design_dir.join("model_a.csv"))
vol_shape = 12, 8, 4
n_tp = 20
n_params = len(design["condition"].unique())
return dict(
proj_info=proj_info,
subjects=subjects,
sessions=sessions,
exp_info=exp_info,
model_info=model_info,
proc_dir=proc_dir,
data_dir=data_dir,
vol_shape=vol_shape,
n_tp=n_tp,
n_params=n_params,
)
@pytest.fixture()
def freesurfer(lyman_info):
subject = "subj01"
mri_dir = lyman_info["data_dir"].join(subject).join("mri")
seed = sum(map(ord, "freesurfer"))
rs = np.random.RandomState(seed)
affine = np.eye(4)
vol_shape = lyman_info["vol_shape"]
mask = rs.choice([0, 1], vol_shape, p=[.2, .8])
norm_data = rs.randint(0, 110, vol_shape) * mask
norm_file = str(mri_dir.join("norm.mgz"))
nib.save(nib.MGHImage(norm_data.astype("uint8"), affine), norm_file)
wmparc_vals = [1000, 10, 11, 16, 8, 3000, 5001, 7, 46, 4]
wmparc_data = rs.choice(wmparc_vals, vol_shape) * mask
wmparc_file = str(mri_dir.join("wmparc.mgz"))
nib.save(nib.MGHImage(wmparc_data.astype("int16"), affine), wmparc_file)
lyman_info.update(
subject=subject,
norm_file=norm_file,
wmparc_file=wmparc_file,
)
return lyman_info
@pytest.fixture()
def template(lyman_info):
subject = "subj01"
template_dir = (lyman_info["proc_dir"]
.mkdir(subject)
.mkdir("template"))
seed = sum(map(ord, "template"))
rs = np.random.RandomState(seed)
vol_shape = lyman_info["vol_shape"]
affine = np.array([[-2, 0, 0, 10],
[0, -2, -1, 10],
[0, 1, 2, 5],
[0, 0, 0, 1]])
reg_file = str(template_dir.join("anat2func.mat"))
np.savetxt(reg_file, np.random.randn(4, 4))
seg_data = rs.randint(0, 7, vol_shape)
seg_file = str(template_dir.join("seg.nii.gz"))
nib.save(nib.Nifti1Image(seg_data, affine), seg_file)
anat_data = rs.randint(0, 100, vol_shape)
anat_file = str(template_dir.join("anat.nii.gz"))
nib.save(nib.Nifti1Image(anat_data, affine), anat_file)
mask_data = (seg_data > 0).astype(np.uint8)
mask_file = str(template_dir.join("mask.nii.gz"))
nib.save(nib.Nifti1Image(mask_data, affine), mask_file)
n_verts = (seg_data == 1).sum()
surf_ids = np.arange(n_verts)
surf_data = np.full(vol_shape + (2,), -1, np.int)
surf_data[seg_data == 1, 0] = surf_ids
surf_data[seg_data == 1, 1] = surf_ids
surf_file = str(template_dir.join("surf.nii.gz"))
nib.save(nib.Nifti1Image(surf_data, affine), surf_file)
verts = rs.uniform(-1, 1, (n_verts, 3))
faces = np.array([(i, i + 1, i + 2) for i in range(n_verts - 2)])
surf_dir = lyman_info["data_dir"].join(subject).join("surf")
mesh_files = (str(surf_dir.join("lh.graymid")),
str(surf_dir.join("rh.graymid")))
for fname in mesh_files:
nib.freesurfer.write_geometry(fname, verts, faces)
lyman_info.update(
vol_shape=vol_shape,
subject=subject,
reg_file=reg_file,
seg_file=seg_file,
anat_file=anat_file,
mask_file=mask_file,
surf_file=surf_file,
mesh_files=mesh_files,
)
return lyman_info
@pytest.fixture()
def timeseries(template):
seed = sum(map(ord, "timeseries"))
rs = np.random.RandomState(seed)
session = "sess01"
run = "run01"
exp_name = template["exp_info"].name
model_name = template["model_info"].name
vol_shape = template["vol_shape"]
n_tp = template["n_tp"]
affine = np.eye(4)
affine[:3, :3] *= 2
timeseries_dir = (template["proc_dir"]
.join(template["subject"])
.mkdir(exp_name)
.mkdir("timeseries")
.mkdir("{}_{}".format(session, run)))
model_dir = (template["proc_dir"]
.join(template["subject"])
.join(exp_name)
.mkdir(model_name)
.mkdir("{}_{}".format(session, run)))
mask_data = nib.load(template["seg_file"]).get_data() > 0
mask_data &= rs.uniform(0, 1, vol_shape) > .05
mask_file = str(timeseries_dir.join("mask.nii.gz"))
nib.save(nib.Nifti1Image(mask_data.astype(np.int), affine), mask_file)
noise_data = mask_data & rs.choice([False, True], vol_shape, p=[.95, .05])
noise_file = str(timeseries_dir.join("noise.nii.gz"))
nib.save(nib.Nifti1Image(noise_data.astype(np.int), affine), noise_file)
ts_shape = vol_shape + (n_tp,)
ts_data = rs.normal(100, 5, ts_shape) * mask_data[..., np.newaxis]
ts_file = str(timeseries_dir.join("func.nii.gz"))
nib.save(nib.Nifti1Image(ts_data, affine), ts_file)
mc_data = rs.normal(0, 1, (n_tp, 6))
mc_file = str(timeseries_dir.join("mc.csv"))
cols = ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]
pd.DataFrame(mc_data, columns=cols).to_csv(mc_file)
template.update(
n_tp=n_tp,
affine=affine,
session=session,
run=run,
mask_file=mask_file,
noise_file=noise_file,
ts_file=ts_file,
mc_file=mc_file,
timeseries_dir=timeseries_dir,
model_dir=model_dir,
)
return template
@pytest.fixture()
def modelfit(timeseries):
seed = sum(map(ord, "modelfit"))
rs = np.random.RandomState(seed)
vol_shape = timeseries["vol_shape"]
affine = timeseries["affine"]
n_params = timeseries["n_params"]
model_dir = timeseries["model_dir"]
seg_data = nib.load(timeseries["seg_file"]).get_data()
mask_data = nib.load(timeseries["mask_file"]).get_data()
mask_data = ((seg_data == 1) & (mask_data == 1)).astype(np.int)
mask_file = str(model_dir.join("mask.nii.gz"))
nib.save(nib.Nifti1Image(mask_data, affine), mask_file)
beta_data = rs.normal(0, 1, vol_shape + (n_params,))
beta_file = str(model_dir.join("beta.nii.gz"))
nib.save(nib.Nifti1Image(beta_data, affine), beta_file)
ols_data = rs.uniform(0, 1, vol_shape + (n_params, n_params))
ols_data += ols_data.transpose(0, 1, 2, 4, 3)
ols_data = ols_data.reshape(vol_shape + (n_params ** 2,))
ols_file = str(model_dir.join("ols.nii.gz"))
nib.save(nib.Nifti1Image(ols_data, affine), ols_file)
error_data = rs.uniform(0, 5, vol_shape)
error_file = str(model_dir.join("error.nii.gz"))
nib.save(nib.Nifti1Image(error_data, affine), error_file)
timeseries.update(
n_params=n_params,
mask_file=mask_file,
beta_file=beta_file,
ols_file=ols_file,
error_file=error_file,
)
return timeseries
@pytest.fixture()
def modelres(modelfit):
seed = sum(map(ord, "modelfit"))
rs = np.random.RandomState(seed)
vol_shape = modelfit["vol_shape"]
affine = modelfit["affine"]
n_params = modelfit["n_params"]
    # TODO Fix this when contrast definition is done
n_contrasts = n_params
model_dir = modelfit["model_dir"]
contrast_data = rs.normal(0, 5, vol_shape + (n_contrasts,))
contrast_file = str(model_dir.join("contrast.nii.gz"))
nib.save(nib.Nifti1Image(contrast_data, affine), contrast_file)
variance_data = rs.uniform(0, 5, vol_shape + (n_contrasts,))
variance_file = str(model_dir.join("variance.nii.gz"))
nib.save(nib.Nifti1Image(variance_data, affine), variance_file)
tstat_data = rs.normal(0, 2, vol_shape + (n_contrasts,))
tstat_file = str(model_dir.join("tstat.nii.gz"))
nib.save(nib.Nifti1Image(tstat_data, affine), tstat_file)
modelfit.update(
contrast_file=contrast_file,
variance_file=variance_file,
tstat_file=tstat_file,
)
return modelfit
@pytest.fixture
def meshdata(execdir):
verts = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 1],
[2, 0, 0],
[2, 2, 2]], np.float)
faces = np.array([[0, 1, 2],
[0, 2, 3],
[2, 3, 4]], np.int)
sqrt2 = np.sqrt(2)
sqrt3 = np.sqrt(3)
sqrt8 = np.sqrt(8)
neighbors = {0: {1: 1.0, 2: sqrt3, 3: 2.0},
1: {0: 1.0, 2: sqrt2},
2: {0: sqrt3, 1: sqrt2, 3: sqrt3, 4: sqrt3},
3: {0: 2.0, 2: sqrt3, 4: sqrt8},
4: {2: sqrt3, 3: sqrt8}}
fname = execdir.join("test.mesh")
nib.freesurfer.write_geometry(fname, verts, faces)
meshdata = dict(
verts=verts,
faces=faces,
neighbors=neighbors,
fname=fname,
)
return meshdata
|
bsd-3-clause
|
tinghuiz/learn-reflectance
|
caffe/python/detect.py
|
23
|
5743
|
#!/usr/bin/env python
"""
detect.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserves order / keeps a unique ID
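Example (illustrative; the input/output paths are hypothetical and all other
arguments fall back to the defaults defined below):
    python detect.py --crop_mode=selective_search --gpu _temp/det_input.txt _temp/det_output.h5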
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
        default=16,
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities, one column per output class
        # (the class count is taken from the length of the first prediction).
        num_output = len(df['feat'].iloc[0])
        class_cols = ['class{}'.format(x) for x in range(num_output)]
df[class_cols] = pd.DataFrame(
data=np.vstack(df['feat']), index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
|
mit
|