text stringlengths 81 112k |
|---|
Remove the layer artist for good
def remove(self):
    """Permanently remove this layer artist and release its resources."""
    # Free the multi-volume slot held by this layer, then drop any
    # cached data keyed by this layer's id.
    self._multivol.deallocate(self.id)
    for cache in (ARRAY_CACHE, PIXEL_CACHE):
        cache.pop(self.id, None)
Inject functions and constants from PyOpenGL but leave out the
names that are deprecated or that we provide in our API.
def _inject():
    """ Inject functions and constants from PyOpenGL but leave out the
    names that are deprecated or that we provide in our API.

    Mutates this module's globals(): every ``GL_*`` constant and ``gl*``
    function found on ``_GL`` is copied in, unless the name is deprecated
    or already provided by our GL ES 2.0 API. The list of API function
    names is stored under the module global ``_used_names``.
    """
    # Get namespaces
    NS = globals()
    GLNS = _GL.__dict__
    # Get names that we use in our API
    used_names = []
    used_names.extend([names[0] for names in _pyopengl2._functions_to_import])
    used_names.extend([name for name in _pyopengl2._used_functions])
    NS['_used_names'] = used_names
    # Constants already provided by our own constants module
    used_constants = set(_constants.__dict__)
    # Count how many names we add (kept for debugging/inspection)
    injected_constants = 0
    injected_functions = 0
    for name in dir(_GL):
        if name.startswith('GL_'):
            # todo: find list of deprecated constants
            if name not in used_constants:
                NS[name] = GLNS[name]
                injected_constants += 1
        elif name.startswith('gl'):
            # Functions
            # NOTE(review): the appended comma suggests _deprecated_functions
            # is a single comma-delimited string -- verify against its
            # definition elsewhere in this module.
            if (name + ',') in _deprecated_functions:
                pass  # Function is deprecated
            elif name in used_names:
                pass  # Function is in our GL ES 2.0 API
            else:
                NS[name] = GLNS[name]
                injected_functions += 1
Alternative to `imp.find_module` that can also search in subpackages.
def _find_module(name, path=None):
    """
    Alternative to `imp.find_module` that can also search in subpackages.

    *name* may be dotted (e.g. ``pkg.sub.mod``); each component is resolved
    relative to the directory found for the previous one. Returns the
    ``(file, pathname, description)`` tuple of the final component, as
    `imp.find_module` does; the caller owns the returned file handle.

    NOTE(review): the `imp` module was removed in Python 3.12 -- this
    helper will eventually need porting to `importlib`.
    """
    parts = name.split('.')
    for part in parts:
        # imp.find_module takes a *list* of directories (or None for the
        # default search path) but returns a single directory string, so
        # re-wrap it on every iteration.
        if path is not None:
            path = [path]
        fh, path, descr = imp.find_module(part, path)
        # Close file handles of intermediate packages; only the final
        # module's handle is returned open.
        if fh is not None and part != parts[-1]:
            fh.close()
    return fh, path, descr
Triangulate a set of vertices
Parameters
----------
vertices : array-like
The vertices.
Returns
-------
vertices : array-like
The vertices.
triangles : array-like
The triangles.
def triangulate(vertices):
    """Triangulate a set of vertices.

    The polygon is assumed planar; triangulation is performed on the x/y
    coordinates and the mean z value is re-applied to the result.

    Parameters
    ----------
    vertices : array-like
        The (N, 3) vertices describing the polygon outline, in order.

    Returns
    -------
    vertices : array-like
        The vertices of the triangulated mesh (z set to the input mean).
    triangles : array-like
        The triangles, as indexes into the returned vertices.
    """
    n = len(vertices)
    vertices = np.asarray(vertices)
    zmean = vertices[:, 2].mean()
    vertices_2d = vertices[:, :2]
    # Closed outline: segments (0,1), (1,2), ..., (n-2, n-1), (n-1, 0)
    segments = np.repeat(np.arange(n + 1), 2)[1:-1]
    segments[-2:] = n - 1, 0
    # Prefer the compiled 'triangle' implementation when available
    if _TRIANGLE_AVAILABLE:
        vertices_2d, triangles = _triangulate_cpp(vertices_2d, segments)
    else:
        vertices_2d, triangles = _triangulate_python(vertices_2d, segments)
    vertices = np.empty((len(vertices_2d), 3))
    vertices[:, :2] = vertices_2d
    vertices[:, 2] = zmean
    return vertices, triangles
Do the triangulation
def triangulate(self):
    """Do the triangulation.

    Sweep-line triangulation: for each point (assumed sorted by x by
    ``_initialize``), the advancing front is extended, then smoothed,
    and completed input edges trigger edge events that cut and re-fill
    the mesh. The result is stored in ``self.tris`` as an (n, 3) int
    array. Section references are to the paper this implements
    (presumably Domiter & Zalik -- confirm against module header).
    """
    self._initialize()
    pts = self.pts
    front = self._front
    ## Begin sweep (sec. 3.4)
    for i in range(3, pts.shape[0]):
        pi = pts[i]
        #debug("========== New point %d: %s ==========" % (i, pi))
        # First, triangulate from front to new point
        # This applies to both "point events" (3.4.1)
        # and "edge events" (3.4.2).
        # get index along front that intersects pts[i]
        l = 0
        while pts[front[l+1], 0] <= pi[0]:
            l += 1
        pl = pts[front[l]]
        # "(i) middle case"
        if pi[0] > pl[0]:
            #debug("  mid case")
            # Add a single triangle connecting pi,pl,pr
            self._add_tri(front[l], front[l+1], i)
            front.insert(l+1, i)
        # "(ii) left case"
        else:
            #debug("  left case")
            # Add triangles connecting pi,pl,ps and pi,pl,pr
            self._add_tri(front[l], front[l+1], i)
            self._add_tri(front[l-1], front[l], i)
            front[l] = i
        #debug(front)
        # Continue adding triangles to smooth out front
        # (heuristics shown in figs. 9, 10)
        #debug("Smoothing front...")
        for direction in -1, 1:
            while True:
                # Find point connected to pi
                ind0 = front.index(i)
                ind1 = ind0 + direction
                ind2 = ind1 + direction
                if ind2 < 0 or ind2 >= len(front):
                    break
                # measure angle made with front
                p1 = pts[front[ind1]]
                p2 = pts[front[ind2]]
                # Ignore invalid-value warnings: arccos may see values a
                # hair outside [-1, 1] for (near-)collinear points; the
                # resulting nan is handled below.
                err = np.geterr()
                np.seterr(invalid='ignore')
                try:
                    angle = np.arccos(self._cosine(pi, p1, p2))
                finally:
                    np.seterr(**err)
                # if angle is < pi/2, make new triangle
                #debug("Smooth angle:", pi, p1, p2, angle)
                if angle > np.pi/2. or np.isnan(angle):
                    break
                assert (i != front[ind1] and
                        front[ind1] != front[ind2] and
                        front[ind2] != i)
                self._add_tri(i, front[ind1], front[ind2],
                              source='smooth1')
                front.pop(ind1)
        #debug("Finished smoothing front.")
        # "edge event" (sec. 3.4.2)
        # remove any triangles cut by completed edges and re-fill
        # the holes.
        if i in self._tops:
            for j in self._bottoms[self._tops == i]:
                # Make sure edge (j, i) is present in mesh
                # because edge event may have created a new front list
                self._edge_event(i, j)
                front = self._front
    self._finalize()
    # Expose the triangle set as a plain (n, 3) int array for callers.
    self.tris = np.array(list(self.tris.keys()), dtype=int)
Force edge (i, j) to be present in mesh.
This works by removing intersected triangles and filling holes up to
the cutting edge.
def _edge_event(self, i, j):
    """
    Force edge (i, j) to be present in mesh.

    This works by removing intersected triangles and filling holes up to
    the cutting edge. Traversal alternates between two modes: mode 1
    walks through triangles below the front (removing them), mode 2
    walks along the front itself; the resulting upper/lower polygons are
    re-triangulated at the end and cut sections of the front removed.
    """
    front_index = self._front.index(i)
    #debug("  == edge event ==")
    front = self._front
    # First just see whether this edge is already present
    # (this is not in the published algorithm)
    if (i, j) in self._edges_lookup or (j, i) in self._edges_lookup:
        #debug("    already added.")
        return
    #debug("    Edge (%d,%d) not added yet. Do edge event. (%s - %s)" %
    #      (i, j, pts[i], pts[j]))
    # traverse in two different modes:
    #  1. If cutting edge is below front, traverse through triangles. These
    #     must be removed and the resulting hole re-filled. (fig. 12)
    #  2. If cutting edge is above the front, then follow the front until
    #     crossing under again. (fig. 13)
    # We must be able to switch back and forth between these
    # modes (fig. 14)
    # Collect points that draw the open polygons on either side of the
    # cutting edge. Note that our use of 'upper' and 'lower' is not strict;
    # in some cases the two may be swapped.
    upper_polygon = [i]
    lower_polygon = [i]
    # Keep track of which section of the front must be replaced
    # and with what it should be replaced
    front_holes = []  # contains indexes for sections of front to remove
    next_tri = None   # next triangle to cut (already set if in mode 1)
    last_edge = None  # or last triangle edge crossed (if in mode 1)
    # Which direction to traverse front
    front_dir = 1 if self.pts[j][0] > self.pts[i][0] else -1
    # Initialize search state
    if self._edge_below_front((i, j), front_index):
        mode = 1  # follow triangles
        tri = self._find_cut_triangle((i, j))
        last_edge = self._edge_opposite_point(tri, i)
        next_tri = self._adjacent_tri(last_edge, i)
        assert next_tri is not None
        self._remove_tri(*tri)
        # todo: does this work? can we count on last_edge to be clockwise
        # around point i?
        lower_polygon.append(last_edge[1])
        upper_polygon.append(last_edge[0])
    else:
        mode = 2  # follow front
    # Loop until we reach point j
    while True:
        #debug("  == edge_event loop: mode %d ==" % mode)
        #debug("      front_holes:", front_holes, front)
        #debug("      front_index:", front_index)
        #debug("      next_tri:", next_tri)
        #debug("      last_edge:", last_edge)
        #debug("      upper_polygon:", upper_polygon)
        #debug("      lower_polygon:", lower_polygon)
        #debug("      =====")
        if mode == 1:
            # crossing from one triangle into another
            if j in next_tri:
                #debug("    -> hit endpoint!")
                # reached endpoint!
                # update front / polygons
                upper_polygon.append(j)
                lower_polygon.append(j)
                #debug("    Appended to upper_polygon:", upper_polygon)
                #debug("    Appended to lower_polygon:", lower_polygon)
                self._remove_tri(*next_tri)
                break
            else:
                # next triangle does not contain the end point; we will
                # cut one of the two far edges.
                tri_edges = self._edges_in_tri_except(next_tri, last_edge)
                # select the edge that is cut
                last_edge = self._intersected_edge(tri_edges, (i, j))
                #debug("    set last_edge to intersected edge:", last_edge)
                last_tri = next_tri
                next_tri = self._adjacent_tri(last_edge, last_tri)
                #debug("    set next_tri:", next_tri)
                self._remove_tri(*last_tri)
                # Crossing an edge adds one point to one of the polygons
                if lower_polygon[-1] == last_edge[0]:
                    upper_polygon.append(last_edge[1])
                    #debug("    Appended to upper_polygon:", upper_polygon)
                elif lower_polygon[-1] == last_edge[1]:
                    upper_polygon.append(last_edge[0])
                    #debug("    Appended to upper_polygon:", upper_polygon)
                elif upper_polygon[-1] == last_edge[0]:
                    lower_polygon.append(last_edge[1])
                    #debug("    Appended to lower_polygon:", lower_polygon)
                elif upper_polygon[-1] == last_edge[1]:
                    lower_polygon.append(last_edge[0])
                    #debug("    Appended to lower_polygon:", lower_polygon)
                else:
                    raise RuntimeError("Something went wrong..")
                # If we crossed the front, go to mode 2
                x = self._edge_in_front(last_edge)
                if x >= 0:  # crossing over front
                    #debug("    -> crossed over front, prepare for mode 2")
                    mode = 2
                    next_tri = None
                    #debug("    set next_tri: None")
                    # where did we cross the front?
                    # nearest to new point
                    front_index = x + (1 if front_dir == -1 else 0)
                    #debug("    set front_index:", front_index)
                    # Select the correct polygon to be lower_polygon
                    # (because mode 2 requires this).
                    # We know that last_edge is in the front, and
                    # front[front_index] is the point _above_ the front.
                    # So if this point is currently the last element in
                    # lower_polygon, then the polys must be swapped.
                    if lower_polygon[-1] == front[front_index]:
                        tmp = lower_polygon, upper_polygon
                        upper_polygon, lower_polygon = tmp
                        #debug('    Swap upper/lower polygons')
                    else:
                        assert upper_polygon[-1] == front[front_index]
                else:
                    assert next_tri is not None
        else:  # mode == 2
            # At each iteration, we require:
            #   * front_index is the starting index of the edge _preceding_
            #     the edge that will be handled in this iteration
            #   * lower_polygon is the polygon to which points should be
            #     added while traversing the front
            front_index += front_dir
            #debug("    Increment front_index: %d" % front_index)
            next_edge = (front[front_index], front[front_index+front_dir])
            #debug("    Set next_edge: %s" % repr(next_edge))
            assert front_index >= 0
            if front[front_index] == j:
                # found endpoint!
                #debug("    -> hit endpoint!")
                lower_polygon.append(j)
                upper_polygon.append(j)
                #debug("    Appended to upper_polygon:", upper_polygon)
                #debug("    Appended to lower_polygon:", lower_polygon)
                break
            # Add point to lower_polygon.
            # The conditional is because there are cases where the
            # point was already added if we just crossed from mode 1.
            if lower_polygon[-1] != front[front_index]:
                lower_polygon.append(front[front_index])
                #debug("    Appended to lower_polygon:", lower_polygon)
            front_holes.append(front_index)
            #debug("    Append to front_holes:", front_holes)
            if self._edges_intersect((i, j), next_edge):
                # crossing over front into triangle
                #debug("    -> crossed over front, prepare for mode 1")
                mode = 1
                last_edge = next_edge
                #debug("    Set last_edge:", last_edge)
                # we are crossing the front, so this edge only has one
                # triangle.
                next_tri = self._tri_from_edge(last_edge)
                #debug("    Set next_tri:", next_tri)
                upper_polygon.append(front[front_index+front_dir])
                #debug("    Appended to upper_polygon:", upper_polygon)
            #else:
                #debug("    -> did not cross front..")
    #debug("Finished edge_event:")
    #debug("  front_holes:", front_holes)
    #debug("  upper_polygon:", upper_polygon)
    #debug("  lower_polygon:", lower_polygon)
    # (iii) triangluate empty areas
    #debug("Filling edge_event polygons...")
    for polygon in [lower_polygon, upper_polygon]:
        dist = self._distances_from_line((i, j), polygon)
        #debug("Distances:", dist)
        # Repeatedly clip off the vertex farthest from the cut edge.
        while len(polygon) > 2:
            ind = np.argmax(dist)
            #debug("Next index: %d" % ind)
            self._add_tri(polygon[ind], polygon[ind-1],
                          polygon[ind+1], legal=False,
                          source='edge_event')
            polygon.pop(ind)
            dist.pop(ind)
    #debug("Finished filling edge_event polygons.")
    # update front by removing points in the holes (places where front
    # passes below the cut edge)
    # NOTE: this loop reuses (shadows) the parameter name *i*, which is
    # no longer needed at this point.
    front_holes.sort(reverse=True)
    for i in front_holes:
        front.pop(i)
Return the triangle that has edge[0] as one of its vertices and is
bisected by edge.
Return None if no triangle is found.
def _find_cut_triangle(self, edge):
    """Return the triangle that has edge[0] as one of its vertices and
    is bisected by *edge*, or None if no such triangle exists.
    """
    start = edge[0]
    # Opposite edge for each triangle attached to *start*.
    opposite_edges = [self._edge_opposite_point(tri, start)
                      for tri in self.tris if start in tri]
    for a, b in opposite_edges:
        # The cut triangle is the one whose far edge straddles *edge*
        # (its endpoints lie on opposite sides).
        if self._orientation(edge, a) != self._orientation(edge, b):
            return (start, a, b)
    return None
Return the index where *edge* appears in the current front.
If the edge is not in the front, return -1
def _edge_in_front(self, edge):
    """Return the index where *edge* appears in the current front
    (either orientation), or -1 if it is not part of the front.
    """
    forward = list(edge)
    backward = forward[::-1]
    for idx in range(len(self._front) - 1):
        pair = self._front[idx:idx + 2]
        if pair == forward or pair == backward:
            return idx
    return -1
Given a triangle, return the edge that is opposite point i.
Vertexes are returned in the same orientation as in tri.
def _edge_opposite_point(self, tri, i):
""" Given a triangle, return the edge that is opposite point i.
Vertexes are returned in the same orientation as in tri.
"""
ind = tri.index(i)
return (tri[(ind+1) % 3], tri[(ind+2) % 3]) |
Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle.
def _adjacent_tri(self, edge, i):
    """
    Return the triangle that shares *edge* with the triangle formed by
    *edge* and *i*. *i* may be either a point or the entire triangle.
    Returns None when no second triangle is attached to *edge*.
    """
    if not np.isscalar(i):
        # *i* is a whole triangle: keep only the vertex not on *edge*.
        i = [x for x in i if x not in edge][0]
    try:
        cand_a = self._edges_lookup[edge]
        cand_b = self._edges_lookup[(edge[1], edge[0])]
    except KeyError:
        return None
    if cand_a == i:
        return (edge[1], edge[0], cand_b)
    if cand_b == i:
        return (edge[1], edge[0], cand_a)
    raise RuntimeError("Edge %s and point %d do not form a triangle "
                       "in this mesh." % (edge, i))
Return the only tri that contains *edge*. If two tris share this
edge, raise an exception.
def _tri_from_edge(self, edge):
    """Return the only tri that contains *edge*; raise if the edge has
    no triangle or is shared by two triangles.
    """
    edge = tuple(edge)
    fwd = self._edges_lookup.get(edge, None)
    rev = self._edges_lookup.get(edge[::-1], None)
    if fwd is not None and rev is not None:
        raise RuntimeError("Two triangles connected to edge %r" % (edge,))
    if fwd is None and rev is None:
        raise RuntimeError("No tris connected to edge %r" % (edge,))
    third = fwd if rev is None else rev
    return edge + (third,)
Return the edges in *tri*, excluding *edge*.
def _edges_in_tri_except(self, tri, edge):
"""Return the edges in *tri*, excluding *edge*.
"""
edges = [(tri[i], tri[(i+1) % 3]) for i in range(3)]
try:
edges.remove(tuple(edge))
except ValueError:
edges.remove(tuple(edge[::-1]))
return edges |
Return True if *edge* is below the current front.
One of the points in *edge* must be _on_ the front, at *front_index*.
def _edge_below_front(self, edge, front_index):
    """Return True if *edge* is below the current front.

    One of the points in *edge* must be _on_ the front, at *front_index*.
    """
    prev_pt = self._front[front_index - 1]
    next_pt = self._front[front_index + 1]
    # Both neighbors of the front point must lie on the "above" side.
    return (self._orientation(edge, prev_pt) > 0 and
            self._orientation(edge, next_pt) < 0)
Given a list of *edges*, return the first that is intersected by
*cut_edge*.
def _intersected_edge(self, edges, cut_edge):
    """Return the first edge in *edges* that is intersected by
    *cut_edge* (None if no edge intersects).
    """
    hits = (candidate for candidate in edges
            if self._edges_intersect(candidate, cut_edge))
    return next(hits, None)
Return a dictionary containing, for each edge in self.edges, a list
of the positions at which the edge should be split.
def _find_edge_intersections(self):
    """
    Return a dictionary containing, for each edge in self.edges, a list
    of the positions at which the edge should be split.

    Keys are edge indexes; values are lists of ``(intercept, point)``
    pairs, sorted by intercept with duplicate intercepts removed.
    Intercepts at exactly 0 or 1 (shared endpoints) are not counted
    as cuts.
    """
    edges = self.pts[self.edges]
    cuts = {}  # { edge: [(intercept, point), ...], ... }
    for i in range(edges.shape[0]-1):
        # intersection of edge i onto all others
        int1 = self._intersect_edge_arrays(edges[i:i+1], edges[i+1:])
        # intersection of all edges onto edge i
        int2 = self._intersect_edge_arrays(edges[i+1:], edges[i:i+1])
        # select for pairs that intersect
        # (silence divide/invalid: parallel edges yield inf/nan
        # intercepts, which the masks simply reject)
        err = np.geterr()
        np.seterr(divide='ignore', invalid='ignore')
        try:
            mask1 = (int1 >= 0) & (int1 <= 1)
            mask2 = (int2 >= 0) & (int2 <= 1)
            mask3 = mask1 & mask2  # all intersections
        finally:
            np.seterr(**err)
        # compute points of intersection
        inds = np.argwhere(mask3)[:, 0]
        if len(inds) == 0:
            continue
        h = int2[inds][:, np.newaxis]
        # Linear interpolation along edge i at each intercept h.
        pts = (edges[i, 0][np.newaxis, :] * (1.0 - h) +
               edges[i, 1][np.newaxis, :] * h)
        # record for all edges the location of cut points
        edge_cuts = cuts.setdefault(i, [])
        for j, ind in enumerate(inds):
            if 0 < int2[ind] < 1:
                edge_cuts.append((int2[ind], pts[j]))
            if 0 < int1[ind] < 1:
                # the other edge of the pair is at offset ind+i+1
                other_cuts = cuts.setdefault(ind+i+1, [])
                other_cuts.append((int1[ind], pts[j]))
    # sort all cut lists by intercept, remove duplicates
    for k, v in cuts.items():
        v.sort(key=lambda x: x[0])
        for i in range(len(v)-2, -1, -1):
            if v[i][0] == v[i+1][0]:
                v.pop(i+1)
    return cuts
Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
def _projection(self, a, b, c):
"""Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
"""
ab = b - a
ac = c - a
return a + ((ab*ac).sum() / (ac*ac).sum()) * ac |
Return 1 if edges intersect completely (endpoints excluded)
def _edges_intersect(self, edge1, edge2):
    """
    Return 1 (truthy) if the edges intersect completely, i.e. strictly
    between their endpoints (shared endpoints do not count).
    """
    seg1 = self.pts[np.array(edge1)]
    seg2 = self.pts[np.array(edge2)]
    h12 = self._intersect_edge_arrays(seg1, seg2)
    h21 = self._intersect_edge_arrays(seg2, seg1)
    # Parallel segments produce inf/nan intercepts; suppress the
    # warnings and let the comparisons reject them.
    with np.errstate(divide='ignore', invalid='ignore'):
        return (0 < h12 < 1) and (0 < h21 < 1)
Return a 2D array of intercepts such that
intercepts[i, j] is the intercept of lines[i] onto lines[j].
*lines* must be an array of point locations with shape (N, 2, 2), where
the axes are (lines, points_per_line, xy_per_point).
The intercept is described in intersect_edge_arrays().
def _intersection_matrix(self, lines):
    """
    Return a 2D array of intercepts such that intercepts[i, j] is the
    intercept of lines[i] onto lines[j].

    *lines* must be an array of point locations with shape (N, 2, 2),
    where the axes are (lines, points_per_line, xy_per_point). The
    intercept is described in _intersect_edge_arrays().
    """
    # Broadcast all-pairs: rows vary over lines1, columns over lines2.
    rows = lines[:, np.newaxis, ...]
    cols = lines[np.newaxis, ...]
    return self._intersect_edge_arrays(rows, cols)
Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
def _intersect_edge_arrays(self, lines1, lines2):
"""Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 1 and 0 are on the
segment, whereas values outside 1 and 0 are off of the segment.
"""
# vector for each line in lines1
l1 = lines1[..., 1, :] - lines1[..., 0, :]
# vector for each line in lines2
l2 = lines2[..., 1, :] - lines2[..., 0, :]
# vector between first point of each line
diff = lines1[..., 0, :] - lines2[..., 0, :]
p = l1.copy()[..., ::-1] # vectors perpendicular to l1
p[..., 0] *= -1
f = (l2 * p).sum(axis=-1) # l2 dot p
# tempting, but bad idea!
#f = np.where(f==0, 1, f)
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
h = (diff * p).sum(axis=-1) / f # diff dot p / f
finally:
np.seterr(**err)
return h |
Returns +1 if edge[0]->point is clockwise from edge[0]->edge[1],
-1 if counterclockwise, and 0 if parallel.
def _orientation(self, edge, point):
    """ Returns +1 if edge[0]->point is clockwise from edge[0]->edge[1],
    -1 if counterclockwise, and 0 if parallel.
    """
    base = self.pts[edge[0]]
    v1 = self.pts[point] - base
    v2 = self.pts[edge[1]] - base
    cross = np.cross(v1, v2)  # positive if v1 is CW from v2
    if cross > 0:
        return 1
    if cross < 0:
        return -1
    return 0
Entry point of the IPython extension
Parameters
----------
IPython : IPython interpreter
An instance of the IPython interpreter that is handed
over to the extension
def load_ipython_extension(ipython):
    """ Entry point of the IPython extension.

    Parameters
    ----------
    ipython : IPython interpreter
        An instance of the IPython interpreter that is handed
        over to the extension.
    """
    import IPython
    # don't continue if IPython version is < 3.0
    ipy_version = LooseVersion(IPython.__version__)
    if ipy_version < LooseVersion("3.0.0"):
        # BUG FIX: the concatenated string fragments were missing the
        # separating spaces ("Vispy'sIPython", "IPythonversion.").
        ipython.write_err("Your IPython version is older than "
                          "version 3.0.0, the minimum for Vispy's "
                          "IPython backend. Please upgrade your IPython "
                          "version.")
        return
    _load_webgl_backend(ipython)
Load the webgl backend for the IPython notebook
def _load_webgl_backend(ipython):
    """ Load the webgl backend for the IPython notebook"""
    from .. import app
    backend = app.use_app("ipynb_webgl")
    if backend.backend_name != "ipynb_webgl":
        # TODO: Improve this error message
        ipython.write_err("Unable to load webgl backend of Vispy")
    else:
        ipython.write("Vispy IPython module has loaded successfully")
Draw collection
def draw(self, mode=None):
    """ Draw collection """
    if self._need_update:
        self._update()
    program = self._programs[0]
    # Falsy *mode* falls back to the collection's default draw mode.
    mode = mode or self._mode
    if self._indices_list is None:
        program.draw(mode)
    else:
        program.draw(mode, self._indices_buffer)
Drop-in replacement for scipy.ndimage.gaussian_filter.
(note: results are only approximately equal to the output of
gaussian_filter)
def gaussian_filter(data, sigma):
    """
    Drop-in replacement for scipy.ndimage.gaussian_filter.

    (note: results are only approximately equal to the output of
    gaussian_filter)

    Parameters
    ----------
    data : ndarray
        Input array to be smoothed.
    sigma : float or sequence of float
        Standard deviation per axis; a scalar is applied to all axes.
        Axes with sigma == 0 are left untouched.

    Returns
    -------
    ndarray
        Filtered array with the same shape as *data*.
    """
    if np.isscalar(sigma):
        sigma = (sigma,) * data.ndim
    # Filter around the mean so the zero padding implied by the FFT does
    # not pull the edges toward zero.
    baseline = data.mean()
    filtered = data - baseline
    for ax in range(data.ndim):
        s = float(sigma[ax])
        if s == 0:
            continue
        # generate 1D gaussian kernel
        ksize = int(s * 6)
        x = np.arange(-ksize, ksize)
        kernel = np.exp(-x**2 / (2*s**2))
        kshape = [1, ] * data.ndim
        kshape[ax] = len(kernel)
        kernel = kernel.reshape(kshape)
        # convolve as product of FFTs
        shape = data.shape[ax] + ksize
        scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
        filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) *
                                        np.fft.rfft(kernel, shape, axis=ax),
                                        axis=ax)
        # clip off extra data
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(filtered.shape[ax]-data.shape[ax], None, None)
        # BUG FIX: indexing with a *list* of slices was deprecated and is
        # an error in modern NumPy; a tuple selects the intended slice.
        filtered = filtered[tuple(sl)]
    return filtered + baseline
Translate by an offset (x, y, z) .
Parameters
----------
offset : array-like, shape (3,)
Translation in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the translation.
def translate(offset, dtype=None):
    """Translate by an offset (x, y, z).

    Parameters
    ----------
    offset : array-like, shape (3,)
        Translation in x, y, z.
    dtype : dtype | None
        Output type (if None, don't cast).

    Returns
    -------
    M : ndarray
        Transformation matrix describing the translation (4x4, with the
        offset in the last row).
    """
    assert len(offset) == 3
    M = np.identity(4)
    M[3, :3] = offset
    return M if dtype is None else M.astype(dtype)
Non-uniform scaling along the x, y, and z axes
Parameters
----------
s : array-like, shape (3,)
Scaling in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the scaling.
def scale(s, dtype=None):
    """Non-uniform scaling along the x, y, and z axes.

    Parameters
    ----------
    s : array-like, shape (3,)
        Scaling in x, y, z.
    dtype : dtype | None
        Output type (if None, don't cast).

    Returns
    -------
    M : ndarray
        Transformation matrix describing the scaling (4x4 diagonal).
    """
    assert len(s) == 3
    sx, sy, sz = s
    M = np.diag([sx, sy, sz, 1.])
    return M if dtype is None else M.astype(dtype)
The 3x3 rotation matrix for rotation about a vector.
Parameters
----------
angle : float
The angle of rotation, in degrees.
axis : ndarray
The x, y, z coordinates of the axis direction vector.
def rotate(angle, axis, dtype=None):
    """The 4x4 rotation matrix for rotation about a vector.

    (The docstring previously said 3x3; the function has always
    returned a homogeneous 4x4 matrix.)

    Parameters
    ----------
    angle : float
        The angle of rotation, in degrees.
    axis : ndarray
        The x, y, z coordinates of the axis direction vector.
    dtype : dtype | None
        Output type (if None, don't cast).

    Returns
    -------
    M : ndarray
        Transformation matrix describing the rotation (4x4; note the
        final transpose, matching the row-vector convention of
        `translate` and `scale`).
    """
    angle = np.radians(angle)
    assert len(axis) == 3
    x, y, z = axis / np.linalg.norm(axis)
    c, s = math.cos(angle), math.sin(angle)
    cx, cy, cz = (1 - c) * x, (1 - c) * y, (1 - c) * z
    M = np.array([[cx * x + c, cy * x - z * s, cz * x + y * s, .0],
                  [cx * y + z * s, cy * y + c, cz * y - x * s, 0.],
                  [cx * z - y * s, cy * z + x * s, cz * z + c, 0.],
                  [0., 0., 0., 1.]], dtype).T
    return M
Create perspective projection matrix
Parameters
----------
fovy : float
The field of view along the y axis.
aspect : float
Aspect ratio of the view.
znear : float
Near coordinate of the field of view.
zfar : float
Far coordinate of the field of view.
Returns
-------
M : ndarray
Perspective projection matrix (4x4).
def perspective(fovy, aspect, znear, zfar):
    """Create perspective projection matrix.

    Parameters
    ----------
    fovy : float
        The field of view along the y axis.
    aspect : float
        Aspect ratio of the view.
    znear : float
        Near coordinate of the field of view.
    zfar : float
        Far coordinate of the field of view.

    Returns
    -------
    M : ndarray
        Perspective projection matrix (4x4).
    """
    assert znear != zfar
    # Half-extent of the near plane from the vertical field of view.
    half_height = math.tan(fovy / 360.0 * math.pi) * znear
    half_width = half_height * aspect
    return frustum(-half_width, half_width, -half_height, half_height,
                   znear, zfar)
Find a 3D transformation matrix that maps points1 onto points2.
Arguments are specified as arrays of four 3D coordinates, shape (4, 3).
def affine_map(points1, points2):
    """ Find a 3D transformation matrix that maps points1 onto points2.

    Arguments are specified as arrays of four 3D coordinates, shape (4, 3).
    """
    # Homogeneous coordinates: append a column of ones to both sets.
    src = np.ones((4, 4))
    src[:, :3] = points1
    dst = np.ones((4, 4))
    dst[:, :3] = points2
    # Solve src @ row_i = dst[:, i] for each of the first three rows of
    # the transform; the last row stays (0, 0, 0, 1).
    matrix = np.eye(4)
    for axis in range(3):
        matrix[axis] = np.linalg.solve(src, dst[:, axis])
    return matrix
Add a final message; flush the message list if no parent profiler.
def finish(self, msg=None):
    """Add a final message; flush the message list if no parent profiler."""
    if self._finished or self.disable:
        return
    self._finished = True
    if msg is not None:
        self(msg)
    elapsed_ms = (ptime.time() - self._firstTime) * 1000
    self._new_msg("< Exiting %s, total time: %0.4f ms",
                  self._name, elapsed_ms)
    # Depth is tracked on the class so nested profilers share it.
    type(self)._depth -= 1
    if self._depth < 1:
        self.flush()
Create global Config object, parse command flags
def _init():
    """ Create global Config object, parse command flags.

    Sets the module globals ``config``, ``_data_path`` and
    ``_allowed_config_keys``, loads any on-disk configuration, then
    applies command-line overrides.
    """
    global config, _data_path, _allowed_config_keys
    app_dir = _get_vispy_app_dir()
    if app_dir is not None:
        _data_path = op.join(app_dir, 'data')
        _test_data_path = op.join(app_dir, 'test_data')
    else:
        _data_path = _test_data_path = None
    # All allowed config keys and the types they may have
    _allowed_config_keys = {
        'data_path': string_types,
        'default_backend': string_types,
        'gl_backend': string_types,
        'gl_debug': (bool,),
        'glir_file': string_types+file_types,
        'include_path': list,
        'logging_level': string_types,
        'qt_lib': string_types,
        'dpi': (int, type(None)),
        'profile': string_types + (type(None),),
        'audit_tests': (bool,),
        'test_data_path': string_types + (type(None),),
    }
    # Default values for all config options
    default_config_options = {
        'data_path': _data_path,
        'default_backend': '',
        'gl_backend': 'gl2',
        'gl_debug': False,
        'glir_file': '',
        'include_path': [],
        'logging_level': 'info',
        'qt_lib': 'any',
        'dpi': None,
        'profile': None,
        'audit_tests': False,
        'test_data_path': _test_data_path,
    }
    config = Config(**default_config_options)
    try:
        config.update(**_load_config())
    except Exception as err:
        # BUG FIX: Python 3 exceptions have no ``.message`` attribute;
        # use str(err) to include the original error text.
        raise Exception('Error while reading vispy config file "%s":\n  %s' %
                        (_get_config_fname(), str(err)))
    set_log_level(config['logging_level'])
    _parse_command_line_arguments()
Transform vispy specific command line args to vispy config.
Put into a function so that any variables dont leak in the vispy namespace.
def _parse_command_line_arguments():
    """ Transform vispy specific command line args to vispy config.

    Put into a function so that any variables dont leak in the vispy
    namespace. Unknown getopt errors are ignored; unknown --vispy flags
    produce a warning.
    """
    global config
    # Get command line args for vispy
    argnames = ['vispy-backend=', 'vispy-gl-debug', 'vispy-glir-file=',
                'vispy-log=', 'vispy-help', 'vispy-profile=', 'vispy-cprofile',
                'vispy-dpi=', 'vispy-audit-tests']
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', argnames)
    except getopt.GetoptError:
        opts = []
    # Use them to set the config values
    for o, a in opts:
        if o.startswith('--vispy'):
            if o == '--vispy-backend':
                config['default_backend'] = a
                logger.info('vispy backend: %s', a)
            elif o == '--vispy-gl-debug':
                config['gl_debug'] = True
            elif o == '--vispy-glir-file':
                config['glir_file'] = a
            elif o == '--vispy-log':
                # "--vispy-log=level,match" filters messages by *match*.
                if ',' in a:
                    verbose, match = a.split(',')
                else:
                    verbose = a
                    match = None
                # BUG FIX: store only the level in the config; the raw
                # argument may still contain the ",match" suffix, which
                # is not a valid logging level.
                config['logging_level'] = verbose
                set_log_level(verbose, match)
            elif o == '--vispy-profile':
                config['profile'] = a
            elif o == '--vispy-cprofile':
                _enable_profiling()
            elif o == '--vispy-help':
                print(VISPY_HELP)
            elif o == '--vispy-dpi':
                config['dpi'] = int(a)
            elif o == '--vispy-audit-tests':
                config['audit_tests'] = True
            else:
                logger.warning("Unsupported vispy flag: %s" % o)
Helper to get the default directory for storing vispy data
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path |
Helper for the vispy config file
def _get_config_fname():
    """Helper for the vispy config file."""
    directory = _get_vispy_app_dir()
    if directory is None:
        return None
    # During test runs, redirect the config into a throwaway directory.
    if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:
        return op.join(_TempDir(), 'vispy.json')
    return op.join(directory, 'vispy.json')
Helper to load prefs from ~/.vispy/vispy.json
def _load_config():
    """Helper to load prefs from ~/.vispy/vispy.json."""
    fname = _get_config_fname()
    # Missing file (or undeterminable path) means no saved preferences.
    if fname is None or not op.isfile(fname):
        return {}
    with open(fname, 'r') as fid:
        return json.load(fid)
Save configuration keys to vispy config file
Parameters
----------
**kwargs : keyword arguments
Key/value pairs to save to the config file.
def save_config(**kwargs):
    """Save configuration keys to vispy config file

    Parameters
    ----------
    **kwargs : keyword arguments
        Key/value pairs to save to the config file. If no pairs are
        given, the complete current configuration is persisted.

    Raises
    ------
    RuntimeError
        If the config filename cannot be determined.
    """
    if not kwargs:
        # No explicit keys given: persist the full in-memory configuration
        kwargs = config._config
    current_config = _load_config()
    current_config.update(**kwargs)
    # write to disk
    fname = _get_config_fname()
    if fname is None:
        raise RuntimeError('config filename could not be determined')
    # Create the config directory if needed; makedirs (rather than the
    # original os.mkdir) also creates missing intermediate directories.
    if not op.isdir(op.dirname(fname)):
        os.makedirs(op.dirname(fname))
    with open(fname, 'w') as fid:
        json.dump(current_config, fid, sort_keys=True, indent=0)
Set vispy data download directory
Parameters
----------
directory : str | None
The directory to use.
create : bool
If True, create directory if it doesn't exist.
save : bool
If True, save the configuration to the vispy config.
def set_data_dir(directory=None, create=False, save=False):
    """Set vispy data download directory

    Parameters
    ----------
    directory : str | None
        The directory to use. If None, the module-level default data
        path is used.
    create : bool
        If True, create directory if it doesn't exist.
    save : bool
        If True, save the configuration to the vispy config.

    Raises
    ------
    IOError
        If no directory can be determined, or if the directory does not
        exist and ``create`` is False.
    """
    if directory is None:
        directory = _data_path
        if _data_path is None:
            raise IOError('default path cannot be determined, please '
                          'set it manually (directory != None)')
    if not op.isdir(directory):
        if not create:
            raise IOError('directory "%s" does not exist, perhaps try '
                          'create=True to create it?' % directory)
        # NOTE: only the leaf directory is created; parents must exist
        os.mkdir(directory)
    config.update(data_path=directory)
    if save:
        save_config(data_path=directory)
Start profiling and register callback to print stats when the program
exits.
def _enable_profiling():
    """Turn on cProfile-based profiling for the rest of the process.

    Statistics are printed at interpreter exit via ``_profile_atexit``.
    """
    import atexit
    import cProfile
    global _profiler
    # Keep the profiler in a module global so _profile_atexit can reach it
    _profiler = cProfile.Profile()
    _profiler.enable()
    atexit.register(_profile_atexit)
Get relevant system and debugging information
Parameters
----------
fname : str | None
Filename to dump info to. Use None to simply print.
overwrite : bool
If True, overwrite file (if it exists).
Returns
-------
out : str
The system information as a string.
def sys_info(fname=None, overwrite=False):
    """Get relevant system and debugging information

    Parameters
    ----------
    fname : str | None
        Filename to dump info to. Use None to simply print.
    overwrite : bool
        If True, overwrite file (if it exists).

    Returns
    -------
    out : str
        The system information as a string.

    Raises
    ------
    IOError
        If ``fname`` exists and ``overwrite`` is False.
    """
    if fname is not None and op.isfile(fname) and not overwrite:
        raise IOError('file exists, use overwrite=True to overwrite')
    out = ''
    try:
        # Nest all imports here to avoid any circular imports
        from ..app import use_app, Canvas
        from ..app.backends import BACKEND_NAMES
        from ..gloo import gl
        from ..testing import has_backend
        # get default app
        with use_log_level('warning'):
            app = use_app(call_reuse=False)  # suppress messages
        out += 'Platform: %s\n' % platform.platform()
        out += 'Python: %s\n' % str(sys.version).replace('\n', ' ')
        out += 'Backend: %s\n' % app.backend_name
        for backend in BACKEND_NAMES:
            if backend.startswith('ipynb_'):
                # Notebook backends cannot be probed this way
                continue
            with use_log_level('warning', print_msg=False):
                which = has_backend(backend, out=['which'])[1]
            out += '{0:<9} {1}\n'.format(backend + ':', which)
        out += '\n'
        # We need an OpenGL context to get GL info
        canvas = Canvas('Test', (10, 10), show=False, app=app)
        canvas._backend._vispy_set_current()
        out += 'GL version: %r\n' % (gl.glGetParameter(gl.GL_VERSION),)
        x_ = gl.GL_MAX_TEXTURE_SIZE
        out += 'MAX_TEXTURE_SIZE: %r\n' % (gl.glGetParameter(x_),)
        out += 'Extensions: %r\n' % (gl.glGetParameter(gl.GL_EXTENSIONS),)
        canvas.close()
    except Exception:  # don't stop printing info
        # Record the failure in the output but still return what we have
        out += '\nInfo-gathering error:\n%s' % traceback.format_exc()
        pass
    if fname is not None:
        with open(fname, 'w') as fid:
            fid.write(out)
    return out
Compact vertices and indices within given tolerance
def compact(vertices, indices, tolerance=1e-3):
    """Compact vertices and indices within given tolerance.

    Vertices whose rounded coordinates coincide (per the tolerance) are
    merged into a single vertex and the triangle indices are remapped.

    Parameters
    ----------
    vertices : ndarray (n, 3)
        Vertex positions.
    indices : ndarray (p, 3)
        Triangle indices into ``vertices``.
    tolerance : float
        Per-coordinate distance under which vertices are considered
        identical.  (The original implementation ignored this parameter
        and always used 1e-3.)

    Returns
    -------
    vertices : ndarray (m, 3) of float32
        The reduced (rounded) unique vertex set.
    indices : ndarray (p, 3)
        Triangle indices translated into the reduced vertex set.
    mapping : ndarray (n,)
        For each original vertex, the index of its representative in the
        reduced set (usable as ``reduced[mapping]`` to expand back).
    """
    V = np.asarray(vertices, dtype=np.float32)[:, :3]
    # Number of decimals corresponding to the requested tolerance
    decimals = int(np.log(tolerance) / np.log(1 / 10.))
    # Round all vertices within given decimals and snap near-zero values
    # to exactly 0 so that -0.0 and +0.0 compare equal
    V_ = V.round(decimals=decimals)
    V_[np.abs(V_) < tolerance] = 0
    # Find the unique (rounded) vertices AND the original->unique mapping.
    # np.unique(..., axis=0) replaces the old structured-array trick.
    U, RI = np.unique(V_, axis=0, return_inverse=True)
    RI = np.asarray(RI).ravel()
    # Translate indices into the reduced set in one vectorized step
    # (the original looped in Python and used `len(indices)/3`, which is
    # a float under Python 3 and made reshape() raise TypeError)
    I_ = RI[np.asarray(indices).ravel()].reshape(-1, 3)
    # Return reduced vertex set, translated indices and the mapping that
    # allows going from U back to V
    return U.astype(np.float32), I_, RI
Compute normals over a triangulated surface
Parameters
----------
vertices : ndarray (n,3)
triangles vertices
indices : ndarray (p,3)
triangles indices
def normals(vertices, indices):
    """Compute per-vertex normals over a triangulated surface.

    Parameters
    ----------
    vertices : ndarray (n,3)
        triangles vertices
    indices : ndarray (p,3)
        triangles indices

    Returns
    -------
    normals : ndarray (n, 3)
        Unit normal for each original vertex.
    """
    # Merge near-duplicate vertices so normals average across shared edges
    vertices, indices, mapping = compact(vertices, indices)
    tris = vertices[indices]
    # One (unnormalized) face normal per triangle
    face_n = np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[:, 0])
    lengths = np.sqrt(np.sum(face_n * face_n, axis=1))
    lengths[lengths == 0] = 1.0  # prevent divide-by-zero
    face_n /= lengths[:, np.newaxis]
    # Accumulate each face normal onto its three corner vertices
    vertex_n = np.zeros_like(vertices)
    for corner in range(3):
        vertex_n[indices[:, corner]] += face_n
    norms = np.sqrt(np.sum(vertex_n * vertex_n, axis=1))
    norms[norms == 0] = 1.0
    vertex_n /= norms[:, np.newaxis]
    # Expand back to the original (uncompacted) vertex ordering
    return vertex_n[mapping]
Create the native widget if not already done so. If the widget
is already created, this function does nothing.
def create_native(self):
    """Create the native widget if not already done so.

    If the widget is already created, this function does nothing.
    """
    if self._backend is not None:
        return
    # Make sure that the app is active
    assert self._app.native
    # Instantiate the backend with the right class; the backend
    # registers itself on this canvas during construction
    self._app.backend_module.CanvasBackend(self, **self._backend_kwargs)
    # self._backend = set by BaseCanvasBackend
    self._backend_kwargs = None  # Clean up
    # Connect to draw event (append to the end)
    # Process GLIR commands at each paint event
    self.events.draw.connect(self.context.flush_commands, position='last')
    if self._autoswap:
        # Swap buffers automatically after each draw; connected with
        # position='last' so it runs after all other draw handlers
        self.events.draw.connect((self, 'swap_buffers'),
                                 ref=True, position='last')
Connect a function to an event
The name of the function
should be on_X, with X the name of the event (e.g. 'on_draw').
This method is typically used as a decorator on a function
definition for an event handler.
Parameters
----------
fun : callable
The function.
def connect(self, fun):
    """Connect a function to an event based on its name.

    The function must be named ``on_X``, with ``X`` the name of the
    event (e.g. ``'on_draw'``).  Typically used as a decorator on an
    event handler definition.

    Parameters
    ----------
    fun : callable
        The handler function; its name determines which event it is
        connected to.
    """
    # Derive the event name from the handler's name
    handler_name = fun.__name__
    if not handler_name.startswith('on_'):
        raise ValueError('When connecting a function based on its name, '
                         'the name should start with "on_"')
    event_name = handler_name[3:]
    # Look up the emitter for that event
    try:
        emitter = self.events[event_name]
    except KeyError:
        raise ValueError(
            'Event "%s" not available on this canvas.' %
            event_name)
    emitter.connect(fun)
The size of canvas/window
def size(self):
    """The size of canvas/window in logical (scale-corrected) pixels."""
    w, h = self._backend._vispy_get_size()
    # Convert from physical pixels to logical pixels
    return (w // self._px_scale, h // self._px_scale)
Show or hide the canvas
Parameters
----------
visible : bool
Make the canvas visible.
run : bool
Run the backend event loop.
def show(self, visible=True, run=False):
    """Show or hide the canvas.

    Parameters
    ----------
    visible : bool
        Make the canvas visible.
    run : bool
        If True, also start the backend event loop.
    """
    self._backend._vispy_set_visible(visible)
    if not run:
        return
    self.app.run()
Close the canvas
Notes
-----
This will usually destroy the GL context. For Qt, the context
(and widget) will be destroyed only if the widget is top-level.
To avoid having the widget destroyed (more like standard Qt
behavior), consider making the widget a sub-widget.
def close(self):
    """Close the canvas.

    Notes
    -----
    This will usually destroy the GL context. For Qt, the context
    (and widget) will be destroyed only if the widget is top-level.
    To avoid having the widget destroyed (more like standard Qt
    behavior), consider making the widget a sub-widget.
    """
    # Ignore repeated closes and canvases without a native backend
    if self._backend is None or self._closed:
        return
    self._closed = True
    self.events.close()
    self._backend._vispy_close()
    forget_canvas(self)
Update the fps after every window
def _update_fps(self, event):
    """Draw-event callback that tracks and reports frames per second."""
    self._frame_count += 1
    elapsed = time() - self._basetime
    if elapsed > self._fps_window:
        # Window elapsed: compute the FPS, reset the counters and
        # notify the user-provided callback
        self._fps = self._frame_count / elapsed
        self._basetime = time()
        self._frame_count = 0
        self._fps_callback(self.fps)
Measure the current FPS
Sets the update window, connects the draw event to update_fps
and sets the callback function.
Parameters
----------
window : float
The time-window (in seconds) to calculate FPS. Default 1.0.
callback : function | str
The function to call with the float FPS value, or the string
to be formatted with the fps value and then printed. The
default is ``'%1.1f FPS'``. If callback evaluates to False, the
FPS measurement is stopped.
def measure_fps(self, window=1, callback='%1.1f FPS'):
    """Measure the current FPS

    Sets the update window, connects the draw event to update_fps
    and sets the callback function.

    Parameters
    ----------
    window : float
        The time-window (in seconds) to calculate FPS. Default 1.0.
    callback : function | str
        The function to call with the float FPS value, or the string
        to be formatted with the fps value and then printed. The
        default is ``'%1.1f FPS'``. If callback evaluates to False, the
        FPS measurement is stopped.
    """
    # Always detach first so repeated calls never stack handlers
    self.events.draw.disconnect(self._update_fps)
    if not callback:
        self._fps_callback = None
        return
    if isinstance(callback, string_types):
        fmt = callback  # keep the format string; `callback` is rebound

        def callback(x):
            print(fmt % x)
    self._fps_window = window
    self.events.draw.connect(self._update_fps)
    self._fps_callback = callback
Render the canvas to an offscreen buffer and return the image
array.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
def render(self):
    """Render the canvas to an offscreen buffer and return the image
    array.

    Returns
    -------
    image : array
        Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
        upper-left corner of the rendered region.
    """
    self.set_current()
    size = self.physical_size
    # Buffer dimensions are (h, w), hence the reversed size
    fbo = FrameBuffer(color=RenderBuffer(size[::-1]),
                      depth=RenderBuffer(size[::-1]))
    try:
        fbo.activate()
        self.events.draw()
        return fbo.read()
    finally:
        # Always restore the default framebuffer, even if drawing failed
        fbo.deactivate()
Return a list of all mouse events in the current drag operation.
Returns None if there is no current drag operation.
def drag_events(self):
    """Return the list of mouse events in the current drag operation.

    Returns None if there is no drag operation in progress.  Events are
    ordered oldest-first; the initiating press event is not included.
    """
    if not self.is_dragging:
        return None
    chain = []
    ev = self
    # Walk backwards through the event trail until the mouse_press that
    # started the drag (or the start of recorded history)
    while ev is not None and ev.type != 'mouse_press':
        chain.append(ev)
        ev = ev.last_event
    chain.reverse()
    return chain
Return an (N, 2) array of mouse coordinates for every event in the
current mouse drag operation.
Returns None if there is no current drag operation.
def trail(self):
    """Return an (N, 2) int array of mouse positions for the current drag.

    Returns None if there is no drag operation in progress.
    """
    events = self.drag_events()
    if events is None:
        return None
    positions = [ev.pos for ev in events]
    coords = np.zeros((len(positions), 2), dtype=int)
    for row, pos in enumerate(positions):
        coords[row] = pos
    return coords
Set the minimum height of the widget
Parameters
----------
height_min: float
the minimum height of the widget
def width_min(self, width_min):
    """Set the minimum width of the widget.

    Parameters
    ----------
    width_min : None | float
        The minimum width of the widget; None resets it to 0.
    """
    if width_min is None:
        # Reset without triggering a relayout (matches historical behavior)
        self._width_limits[0] = 0
        return
    width_min = float(width_min)
    assert 0 <= width_min
    self._width_limits[0] = width_min
    self._update_layout()
Set the maximum width of the widget.
Parameters
----------
width_max: None | float
the maximum width of the widget. if None, maximum width
is unbounded
def width_max(self, width_max):
    """Set the maximum width of the widget.

    Parameters
    ----------
    width_max : None | float
        The maximum width of the widget; None makes it unbounded.
    """
    if width_max is None:
        self._width_limits[1] = None
        return
    width_max = float(width_max)
    # The maximum may never undercut the configured minimum
    assert self.width_min <= width_max
    self._width_limits[1] = width_max
    self._update_layout()
Set the minimum height of the widget
Parameters
----------
height_min: float
the minimum height of the widget
def height_min(self, height_min):
    """Set the minimum height of the widget.

    Parameters
    ----------
    height_min : None | float
        The minimum height of the widget; None resets it to 0.
    """
    if height_min is None:
        # Reset without triggering a relayout (matches historical behavior)
        self._height_limits[0] = 0
        return
    height_min = float(height_min)
    assert height_min >= 0
    self._height_limits[0] = height_min
    self._update_layout()
Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded
def height_max(self, height_max):
    """Set the maximum height of the widget.

    Parameters
    ----------
    height_max : None | float
        The maximum height of the widget; None makes it unbounded.
    """
    if height_max is None:
        self._height_limits[1] = None
        return
    height_max = float(height_max)
    # The maximum may never undercut the configured minimum
    assert 0 <= self.height_min <= height_max
    self._height_limits[1] = height_max
    self._update_layout()
The rectangular area inside the margin, border, and padding.
Generally widgets should avoid drawing or placing sub-widgets outside
this rectangle.
def inner_rect(self):
    """The rectangular area inside the margin, border, and padding.

    Generally widgets should avoid drawing or placing sub-widgets
    outside this rectangle.
    """
    inset = self.margin + self._border_width + self.padding
    if not self.border_color.is_blank:
        inset += 1  # leave room for the border line itself
    width = self.size[0] - 2 * inset
    height = self.size[1] - 2 * inset
    return Rect((inset, inset), (width, height))
Called whenever the clipper for this widget may need to be updated.
def _update_clipper(self):
    """Refresh this widget's clipper when clipping settings may have
    changed.
    """
    # Create or drop the clipper depending on the clip_children flag
    if not self.clip_children:
        self._clipper = None
    elif self._clipper is None:
        self._clipper = Clipper()
    if self._clipper is None:
        return
    # Keep the clipper in sync with our geometry and coordinate frame
    self._clipper.rect = self.inner_rect
    self._clipper.transform = self.get_transform('framebuffer', 'visual')
Update border line to match new shape
def _update_line(self):
    """Rebuild the border/background mesh after a size or style change."""
    bw = self._border_width
    m = self.margin
    # The border is drawn just inside the widget boundaries, inset by
    # the margin, e.g. for size=(8, 7) and margin=2:
    #
    #   ........
    #   ........
    #   ..BBBB..
    #   ..B  B..
    #   ..BBBB..
    #   ........
    #   ........
    #
    left = bottom = m
    right = self.size[0] - m
    top = self.size[1] - m
    # Outer/inner corner pairs, walking around the rectangle
    pos = np.array([
        [left, bottom], [left + bw, bottom + bw],
        [right, bottom], [right - bw, bottom + bw],
        [right, top], [right - bw, top - bw],
        [left, top], [left + bw, top - bw],
    ], dtype=np.float32)
    # First 8 triangles form the border ring; the last 2 fill the interior
    faces = np.array([
        [0, 2, 1], [1, 2, 3],
        [2, 4, 3], [3, 5, 4],
        [4, 5, 6], [5, 7, 6],
        [6, 0, 7], [7, 0, 1],
        [5, 3, 1], [1, 5, 7],
    ], dtype=np.int32)
    # Skip the border triangles when the border color is blank and the
    # interior triangles when the background is blank
    start = 8 if self._border_color.is_blank else 0
    stop = 8 if self._bgcolor.is_blank else 10
    face_colors = None
    if self._face_colors is not None:
        face_colors = self._face_colors[start:stop]
    self._mesh.set_data(vertices=pos, faces=faces[start:stop],
                        face_colors=face_colors)
    # picking mesh covers the entire area (outer corners only)
    self._picking_mesh.set_data(vertices=pos[::2])
Add a Widget as a managed child of this Widget.
The child will be
automatically positioned and sized to fill the entire space inside
this Widget (unless _update_child_widgets is redefined).
Parameters
----------
widget : instance of Widget
The widget to add.
Returns
-------
widget : instance of Widget
The widget.
def add_widget(self, widget):
    """Add a Widget as a managed child of this Widget.

    The child will be automatically positioned and sized to fill the
    entire space inside this Widget (unless _update_child_widgets is
    redefined).

    Parameters
    ----------
    widget : instance of Widget
        The widget to add.

    Returns
    -------
    widget : instance of Widget
        The added widget (returned to allow chaining).
    """
    self._widgets.append(widget)
    # Reparenting hooks the child into the scenegraph
    widget.parent = self
    self._update_child_widgets()
    return widget
Create a new Grid and add it as a child widget.
All arguments are given to Grid().
def add_grid(self, *args, **kwargs):
    """Create a new Grid, add it as a child widget, and return it.

    All arguments are forwarded to ``Grid()``.
    """
    from .grid import Grid
    return self.add_widget(Grid(*args, **kwargs))
Create a new ViewBox and add it as a child widget.
All arguments are given to ViewBox().
def add_view(self, *args, **kwargs):
    """Create a new ViewBox, add it as a child widget, and return it.

    All arguments are forwarded to ``ViewBox()``.
    """
    from .viewbox import ViewBox
    return self.add_widget(ViewBox(*args, **kwargs))
Remove a Widget as a managed child of this Widget.
Parameters
----------
widget : instance of Widget
The widget to remove.
def remove_widget(self, widget):
    """Remove a managed child Widget from this Widget.

    Parameters
    ----------
    widget : instance of Widget
        The widget to remove; a ValueError is raised if it is not a
        child of this widget.
    """
    idx = self._widgets.index(widget)  # raises ValueError if absent
    del self._widgets[idx]
    # Detach from the scenegraph
    widget.parent = None
    self._update_child_widgets()
Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
def pack_unit(value):
    """Pack floats in [0, 1] into 4 unsigned int8 values.

    Each float is decomposed into four successive base-256 digits of its
    fractional part.

    Returns
    -------
    pack : ndarray
        ubyte array of shape ``value.shape + (4,)`` holding the packed
        interpolation kernel.
    """
    packed = np.zeros(value.shape + (4,), dtype=np.ubyte)
    frac = value
    for byte in range(4):
        # Peel off the next base-256 digit of the fractional value
        frac, packed[..., byte] = np.modf(frac * 256.)
    return packed
Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
def pack_ieee(value):
    """Pack the IEEE-754 binary representation of floats into uint8 values.

    Parameters
    ----------
    value : ndarray
        float32 array to reinterpret (native byte order).

    Returns
    -------
    pack : ndarray
        ubyte array of shape ``value.shape + (4,)`` with the raw bytes of
        each float.
    """
    # np.frombuffer / ndarray.tobytes replace the long-deprecated (and in
    # NumPy 2.0 removed) np.fromstring / ndarray.tostring pair; the bytes
    # produced are identical.
    packed = np.frombuffer(value.tobytes(), np.ubyte)
    # Copy: frombuffer returns a read-only view, while the original
    # fromstring returned a writable copy.
    return packed.reshape(value.shape + (4,)).copy()
Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
def load_spatial_filters(packed=True):
    """Load spatial-filters kernel

    Parameters
    ----------
    packed : bool
        Whether or not the data should be in "packed" representation
        for use in GLSL code.

    Returns
    -------
    kernel : array
        16x1024x4 (packed float in rgba) or
        16x1024 (unpacked float)
        16 interpolation kernel with length 1024 each.
    names : tuple of strings
        Respective interpolation names, plus "Nearest" which does
        not require a filter but can still be used
    """
    names = ("Bilinear", "Hanning", "Hamming", "Hermite",
             "Kaiser", "Quadric", "Bicubic", "CatRom",
             "Mitchell", "Spline16", "Spline36", "Gaussian",
             "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
    # Kernels are shipped as a binary numpy file alongside the package
    kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
    if packed:
        # convert the kernel to a packed representation (4 ubytes per
        # float, see pack_unit) suitable for upload as an RGBA texture
        kernel = pack_unit(kernel)
    return kernel, names
List system fonts
Returns
-------
fonts : list of str
List of system fonts.
def list_fonts():
    """List system fonts

    Returns
    -------
    fonts : list of str
        Sorted list of system font names, always including the bundled
        vispy fonts.
    """
    fonts = _list_fonts()
    # Ensure the built-in vispy fonts are always present
    for name in _vispy_fonts:
        if name not in fonts:
            fonts += [name]
    return sorted(fonts, key=lambda s: s.lower())
A decorator ensuring that the decorated function's run time does not
exceed the argument limit.
:args limit: the time limit
:type limit: int
:args handler: the handler function called when the decorated
function times out.
:type handler: callable
Example:
>>>def timeout_handler(limit, f, *args, **kwargs):
... print "{func} call timed out after {lim}s.".format(
... func=f.__name__, lim=limit)
...
>>>@timeout(limit=5, handler=timeout_handler)
... def work(foo, bar, baz="spam")
... time.sleep(10)
>>>work("foo", "bar", "baz")
# time passes...
work call timed out after 5s.
>>>
def timeout(limit, handler):
    """A decorator ensuring that the decorated function's run time does
    not exceed the argument limit.

    :args limit: the time limit in seconds
    :type limit: int
    :args handler: the handler function called when the decorated
        function times out.
    :type handler: callable

    Example:

    >>>def timeout_handler(limit, f, *args, **kwargs):
    ...    print "{func} call timed out after {lim}s.".format(
    ...        func=f.__name__, lim=limit)
    ...
    >>>@timeout(limit=5, handler=timeout_handler)
    ... def work(foo, bar, baz="spam")
    ...     time.sleep(10)
    >>>work("foo", "bar", "baz")
    # time passes...
    work call timed out after 5s.
    >>>
    """
    import functools

    def wrapper(f):
        @functools.wraps(f)  # preserve the wrapped function's metadata
        def wrapped_f(*args, **kwargs):
            # Remember the previous SIGALRM handler so it can be restored
            old_handler = signal.getsignal(signal.SIGALRM)
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(limit)
            try:
                res = f(*args, **kwargs)
            except Timeout:
                # The alarm fired: delegate to the user-supplied handler
                handler(limit, f, args, kwargs)
            else:
                return res
            finally:
                # Always cancel the alarm and restore the old handler
                signal.signal(signal.SIGALRM, old_handler)
                signal.alarm(0)
        return wrapped_f
    return wrapper
Simple utility to retrieve kwargs in predetermined order.
Also checks whether the values of the backend arguments do not
violate the backend capabilities.
def _process_backend_kwargs(self, kwargs):
    """Simple utility to retrieve kwargs in predetermined order.

    Also checks whether the values of the backend arguments do not
    violate the backend capabilities.
    """
    # Verify given argument with capability of the backend
    app = self._vispy_canvas.app
    capability = app.backend_module.capability
    if kwargs['context'].shared.name:  # name already assigned: shared
        if not capability['context']:
            raise RuntimeError('Cannot share context with this backend')
    for key in [key for (key, val) in capability.items() if not val]:
        # These capabilities are handled elsewhere, not per-canvas
        if key in ['context', 'multi_window', 'scroll']:
            continue
        # For these flags the default is True, so the missing capability
        # is violated when the user passes False
        invert = key in ['resizable', 'decorate']
        if bool(kwargs[key]) - invert:
            raise RuntimeError('Config %s is not supported by backend %s'
                               % (key, app.backend_name))
    # Return items in sequence
    out = SimpleBunch()
    keys = ['title', 'size', 'position', 'show', 'vsync', 'resizable',
            'decorate', 'fullscreen', 'parent', 'context', 'always_on_top',
            ]
    for key in keys:
        out[key] = kwargs[key]
    return out
Reset the view.
def _set_range(self, init):
    """Reset the view so the scene's data range can be traversed.

    Positions the camera at a corner of the scene, chooses a movement
    scale based on the data extent, and sets an initial orientation.
    """
    # PerspectiveCamera._set_range(self, init)
    # Stop moving
    self._speed *= 0.0
    # Get window size (and store factor now to sync with resizing)
    w, h = self._viewbox.size
    w, h = float(w), float(h)
    # Get range and translation for x and y
    x1, y1, z1 = self._xlim[0], self._ylim[0], self._zlim[0]
    x2, y2, z2 = self._xlim[1], self._ylim[1], self._zlim[1]
    rx, ry, rz = (x2 - x1), (y2 - y1), (z2 - z1)
    # Correct ranges for window size. Note that the window width
    # influences the x and y data range, while the height influences
    # the z data range.
    if w / h > 1:
        rx /= w / h
        ry /= w / h
    else:
        rz /= h / w
    # Do not convert to screen coordinates. This camera does not need
    # to fit everything on screen, but we need to estimate the scale
    # of the data in the scene.
    # Set scale, depending on data range. Initial speed is such that
    # the scene can be traversed in about three seconds.
    self._scale_factor = max(rx, ry, rz) / 3.0
    # Set initial position to a corner of the scene
    margin = np.mean([rx, ry, rz]) * 0.1
    self._center = x1 - margin, y1 - margin, z1 + margin
    # Determine initial view direction based on flip axis
    yaw = 45 * self._flip_factors[0]
    pitch = -90 - 20 * self._flip_factors[2]
    if self._flip_factors[1] < 0:
        yaw += 90 * np.sign(self._flip_factors[0])
    # Set orientation as a product of rotations about x (pitch),
    # y (roll; none initially) and z (yaw)
    q1 = Quaternion.create_from_axis_angle(pitch*math.pi/180, 1, 0, 0)
    q2 = Quaternion.create_from_axis_angle(0*math.pi/180, 0, 1, 0)
    q3 = Quaternion.create_from_axis_angle(yaw*math.pi/180, 0, 0, 1)
    #
    self._rotation1 = (q1 * q2 * q3).normalize()
    self._rotation2 = Quaternion()
    # Update
    self.view_changed()
Timer event handler
Parameters
----------
event : instance of Event
The event.
def on_timer(self, event):
    """Timer event handler

    Integrates the current speed and acceleration into a new camera
    position and orientation on every tick.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    # Set relative speed and acceleration
    rel_speed = event.dt
    rel_acc = 0.1
    # Get what's forward
    pf, pr, pl, pu = self._get_directions()
    # Increase speed through acceleration
    # Note that self._speed is relative. We can balance rel_acc and
    # rel_speed to get a nice smooth or direct control
    self._speed += self._acc * rel_acc
    # Reduce speed. Simulate resistance. Using brakes slows down faster.
    # Note that the way that we reduce speed, allows for higher
    # speeds if keys are bound to higher acc values (i.e. turbo)
    reduce = np.array([0.05, 0.05, 0.05, 0.1, 0.1, 0.1])
    reduce[self._brake > 0] = 0.2
    self._speed -= self._speed * reduce
    if np.abs(self._speed).max() < 0.05:
        # Snap negligible speeds to zero so the scene can come to rest
        self._speed *= 0.0
    # --- Determine new position from translation speed
    if self._speed[:3].any():
        # Create speed vectors, use scale_factor as a reference
        dv = np.array([1.0/d for d in self._flip_factors])
        #
        vf = pf * dv * rel_speed * self._scale_factor
        vr = pr * dv * rel_speed * self._scale_factor
        vu = pu * dv * rel_speed * self._scale_factor
        direction = vf, vr, vu
        # Set position
        center_loc = np.array(self._center, dtype='float32')
        center_loc += (self._speed[0] * direction[0] +
                       self._speed[1] * direction[1] +
                       self._speed[2] * direction[2])
        self._center = tuple(center_loc)
    # --- Determine new orientation from rotation speed
    roll_angle = 0
    # Calculate manual roll (from speed)
    if self._speed[3:].any():
        angleGain = np.array([1.0, 1.5, 1.0]) * 3 * math.pi / 180
        angles = self._speed[3:] * angleGain
        q1 = Quaternion.create_from_axis_angle(angles[0], -1, 0, 0)
        q2 = Quaternion.create_from_axis_angle(angles[1], 0, 1, 0)
        q3 = Quaternion.create_from_axis_angle(angles[2], 0, 0, -1)
        q = q1 * q2 * q3
        self._rotation1 = (q * self._rotation1).normalize()
    # Calculate auto-roll (gradually level the camera when enabled)
    if self.auto_roll:
        up = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}[self.up[1]]
        up = np.array(up) * {'+': +1, '-': -1}[self.up[0]]

        def angle(p1, p2):
            # Angle between two unit vectors
            return np.arccos(p1.dot(p2))
        # au = angle(pu, (0, 0, 1))
        ar = angle(pr, up)
        al = angle(pl, up)
        af = angle(pf, up)
        # Roll angle that's off from being leveled (in unit strength)
        roll_angle = math.sin(0.5*(al - ar))
        # Correct for pitch
        roll_angle *= abs(math.sin(af))  # abs(math.sin(au))
        if abs(roll_angle) < 0.05:
            roll_angle = 0
        if roll_angle:
            # Correct to soften the force at 90 degree angle
            roll_angle = np.sign(roll_angle) * np.abs(roll_angle)**0.5
            # Get correction for this iteration and apply
            angle_correction = 1.0 * roll_angle * math.pi / 180
            q = Quaternion.create_from_axis_angle(angle_correction,
                                                  0, 0, 1)
            self._rotation1 = (q * self._rotation1).normalize()
    # Update
    if self._speed.any() or roll_angle or self._update_from_mouse:
        self._update_from_mouse = False
        self.view_changed()
ViewBox key event handler
Parameters
----------
event : instance of Event
The event.
def viewbox_key_event(self, event):
    """ViewBox key event handler

    Translates key presses/releases into acceleration or brake values
    via the camera's keymap.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    PerspectiveCamera.viewbox_key_event(self, event)
    if event.handled or not self.interactive:
        return
    # Ensure the timer runs
    if not self._timer.running:
        self._timer.start()
    if event.key in self._keymap:
        # Keymap entry: (value, dim1[, dim2, ...]); value 0 means brake
        val_dims = self._keymap[event.key]
        val = val_dims[0]
        # Brake or accelerate?
        if val == 0:
            vec = self._brake
            val = 1
        else:
            vec = self._acc
        # Set (releasing the key resets the value back to zero)
        if event.type == 'key_release':
            val = 0
        for dim in val_dims[1:]:
            factor = 1.0
            vec[dim-1] = val * factor
ViewBox mouse event handler
Parameters
----------
event : instance of Event
The event.
def viewbox_mouse_event(self, event):
    """ViewBox mouse event handler

    Wheel moves forward/backward (or adjusts speed with SHIFT); a
    left-button drag rotates; a right-button drag with CTRL zooms (FOV).

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    PerspectiveCamera.viewbox_mouse_event(self, event)
    if event.handled or not self.interactive:
        return
    if event.type == 'mouse_wheel':
        if not event.mouse_event.modifiers:
            # Move forward / backward
            self._speed[0] += 0.5 * event.delta[1]
        elif keys.SHIFT in event.mouse_event.modifiers:
            # Speed
            s = 1.1 ** - event.delta[1]
            self.scale_factor /= s  # divide instead of multiply
            print('scale factor: %1.1f units/s' % self.scale_factor)
        return
    if event.type == 'mouse_press':
        event.handled = True
    if event.type == 'mouse_release':
        # Reset
        self._event_value = None
        # Apply rotation
        self._rotation1 = (self._rotation2 * self._rotation1).normalize()
        self._rotation2 = Quaternion()
    elif not self._timer.running:
        # Ensure the timer runs
        self._timer.start()
    if event.type == 'mouse_move':
        if event.press_event is None:
            return
        if not event.buttons:
            return
        # Prepare
        modifiers = event.mouse_event.modifiers
        pos1 = event.mouse_event.press_event.pos
        pos2 = event.mouse_event.pos
        w, h = self._viewbox.size
        if 1 in event.buttons and not modifiers:
            # rotate
            # get normalized delta values
            d_az = -float(pos2[0] - pos1[0]) / w
            d_el = +float(pos2[1] - pos1[1]) / h
            # Apply gain
            d_az *= - 0.5 * math.pi  # * self._speed_rot
            d_el *= + 0.5 * math.pi  # * self._speed_rot
            # Create temporary quaternions
            q_az = Quaternion.create_from_axis_angle(d_az, 0, 1, 0)
            q_el = Quaternion.create_from_axis_angle(d_el, 1, 0, 0)
            # Apply to global quaternion
            self._rotation2 = (q_el.normalize() * q_az).normalize()
        elif 2 in event.buttons and keys.CONTROL in modifiers:
            # zoom --> fov
            if self._event_value is None:
                self._event_value = self._fov
            p1 = np.array(event.press_event.pos)[:2]
            p2 = np.array(event.pos)[:2]
            p1c = event.map_to_canvas(p1)[:2]
            p2c = event.map_to_canvas(p2)[:2]
            d = p2c - p1c
            # Exponential mapping keeps zoom speed proportional; clamp
            # the field of view to [10, 90] degrees
            fov = self._event_value * math.exp(-0.01*d[1])
            self._fov = min(90.0, max(10, fov))
    # Make transform be updated on the next timer tick.
    # By doing it at timer tick, we avoid shaky behavior
    self._update_from_mouse = True
Return True if there's something to read on stdin (posix version).
def _stdin_ready_posix():
    """Return True if there's something to read on stdin (posix version)."""
    # Zero timeout: poll without blocking
    readable, _, _ = select.select([sys.stdin], [], [], 0)
    return bool(readable)
Set PyOS_InputHook to callback and return the previous one.
def set_inputhook(self, callback):
    """Set PyOS_InputHook to callback and return the previous one.

    Parameters
    ----------
    callback : callable
        The Python callable to install as the C-level input hook.

    Returns
    -------
    original : ctypes function
        The previously installed input hook.
    """
    # On platforms with 'readline' support, it's all too likely to
    # have a KeyboardInterrupt signal delivered *even before* an
    # initial ``try:`` clause in the callback can be executed, so
    # we need to disable CTRL+C in this situation.
    ignore_CTRL_C()
    self._callback = callback
    # Keep a reference to the ctypes wrapper so it is not garbage
    # collected while installed as the hook
    self._callback_pyfunctype = self.PYFUNC(callback)
    pyos_inputhook_ptr = self.get_pyos_inputhook()
    original = self.get_pyos_inputhook_as_func()
    # Write the new function pointer into PyOS_InputHook
    pyos_inputhook_ptr.value = \
        ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
    self._installed = True
    return original
Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`.
def clear_inputhook(self, app=None):
    """Set PyOS_InputHook to NULL and return the previous one.

    Parameters
    ----------
    app : optional, ignored
        This parameter is allowed only so that clear_inputhook() can be
        called with a similar interface as all the ``enable_*`` methods. But
        the actual value of the parameter is ignored. This uniform interface
        makes it easier to have user-level entry points in the main IPython
        app like :meth:`enable_gui`."""
    pyos_inputhook_ptr = self.get_pyos_inputhook()
    original = self.get_pyos_inputhook_as_func()
    # Write NULL into PyOS_InputHook to uninstall the hook
    pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
    # With the hook removed, CTRL+C can be handled normally again
    allow_CTRL_C()
    self._reset()
    return original
Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If ('wx', 'qt4') clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
def clear_app_refs(self, gui=None):
    """Clear IPython's internal reference to an application instance.

    Whenever we create an app for a user on qt4 or wx, we hold a
    reference to the app. This is needed because in some cases bad things
    can happen if a user doesn't hold a reference themselves. This
    method is provided to clear the references we are holding.

    Parameters
    ----------
    gui : None or str
        If None, clear all app references. If ('wx', 'qt4') clear
        the app for that toolkit. References are not held for gtk or tk
        as those toolkits don't have the notion of an app.
    """
    if gui is None:
        # Drop every held app reference at once.
        self.apps = {}
    else:
        # Drop only the requested toolkit's app; no-op if absent.
        self.apps.pop(gui, None)
Register a class to provide the event loop for a given GUI.
This is intended to be used as a class decorator. It should be passed
the names with which to register this GUI integration. The classes
themselves should subclass :class:`InputHookBase`.
::
@inputhook_manager.register('qt')
class QtInputHook(InputHookBase):
def enable(self, app=None):
...
def register(self, toolkitname, *aliases):
    """Register a class to provide the event loop for a given GUI.

    This is intended to be used as a class decorator. It should be passed
    the names with which to register this GUI integration. The classes
    themselves should subclass :class:`InputHookBase`.

    ::

        @inputhook_manager.register('qt')
        class QtInputHook(InputHookBase):
            def enable(self, app=None):
                ...
    """
    def decorator(hook_cls):
        # Instantiate the hook with a back-reference to this manager and
        # file it under its primary toolkit name.
        self.guihooks[toolkitname] = hook_cls(self)
        # Every alias resolves to the primary toolkit name.
        for alias in aliases:
            self.aliases[alias] = toolkitname
        return hook_cls
    return decorator
Switch amongst GUI input hooks by name.
This is a higher level method than :meth:`set_inputhook` - it uses the
GUI name to look up a registered object which enables the input hook
for that GUI.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
def enable_gui(self, gui=None, app=None):
    """Switch amongst GUI input hooks by name.

    This is a higher level method than :meth:`set_inputhook` - it uses the
    GUI name to look up a registered object which enables the input hook
    for that GUI.

    Parameters
    ----------
    gui : optional, string or None
        If None (or 'none'), clears input hook, otherwise it must be one
        of the recognized GUI names (see ``GUI_*`` constants in module).
    app : optional, existing application object.
        For toolkits that have the concept of a global app, you can supply an
        existing one. If not given, the toolkit will be probed for one, and if
        none is found, a new one will be created. Note that GTK does not have
        this concept, and passing an app if ``gui=="GTK"`` will raise an error.

    Returns
    -------
    The output of the underlying gui switch routine, typically the actual
    PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
    one.
    """
    # None / GUI_NONE both mean "turn integration off".
    if gui in (None, GUI_NONE):
        return self.disable_gui()
    # Resolve aliases (e.g. an alternative toolkit name) with one
    # recursion step into the canonical name.
    if gui in self.aliases:
        return self.enable_gui(self.aliases[gui], app)
    try:
        gui_hook = self.guihooks[gui]
    except KeyError:
        e = "Invalid GUI request {!r}, valid ones are: {}"
        raise ValueError(e.format(gui, ', '.join(self.guihooks)))
    self._current_gui = gui
    # Delegate to the registered hook; it may create and return an app.
    app = gui_hook.enable(app)
    if app is not None:
        # Hold a reference so the app stays alive, and flag it as being
        # inside the integrated event loop.
        app._in_event_loop = True
        self.apps[gui] = app
    return app
Disable GUI event loop integration.
If an application was registered, this sets its ``_in_event_loop``
attribute to False. It then calls :meth:`clear_inputhook`.
def disable_gui(self):
    """Disable GUI event loop integration.

    If an application was registered for the current GUI, set its
    ``_in_event_loop`` attribute to False, then call
    :meth:`clear_inputhook` and return its result.
    """
    active = self._current_gui
    if active in self.apps:
        # Mark the held app as no longer running inside the event loop.
        self.apps[active]._in_event_loop = False
    return self.clear_inputhook()
Make a canvas active. Used primarily by the canvas itself.
def set_current_canvas(canvas):
    """ Make a canvas active. Used primarily by the canvas itself.
    """
    # Tell the glir context to emit a CURRENT command on the next flush.
    canvas.context._do_CURRENT_command = True
    # Fast path: nothing to do if this canvas is already the active one
    # (the last entry of ``canvasses`` is considered current).
    if canvasses and canvasses[-1]() is canvas:
        return
    # Rebuild the list of live canvases with this one moved to the end.
    alive = []
    for wr in canvasses:
        c = wr()
        if c is not None and c is not canvas:
            alive.append(c)
    alive.append(canvas)
    canvasses[:] = [weakref.ref(c) for c in alive]
Forget about the given canvas. Used by the canvas when closed.
def forget_canvas(canvas):
    """ Forget about the given canvas. Used by the canvas when closed.
    """
    # Keep only live canvases that are not the one being forgotten.
    survivors = []
    for wr in canvasses:
        c = wr()
        if c is not None and c is not canvas:
            survivors.append(c)
    canvasses[:] = [weakref.ref(c) for c in survivors]
For the app backends to create the GLShared object.
Parameters
----------
name : str
The name.
ref : object
The reference.
def create_shared(self, name, ref):
    """ For the app backends to create the GLShared object.

    May only be called once per context; a second call raises
    RuntimeError.

    Parameters
    ----------
    name : str
        The name.
    ref : object
        The reference.
    """
    if self._shared is not None:
        raise RuntimeError('Can only set_shared once.')
    self._shared = GLShared(name, ref)
Flush
Parameters
----------
event : instance of Event
The event.
def flush_commands(self, event=None):
    """ Flush pending GLIR commands to the parser.

    Parameters
    ----------
    event : instance of Event
        The event. Ignored here; accepted so this method can be used
        directly as an event callback.
    """
    if self._do_CURRENT_command:
        # A canvas was made current since the last flush: emit a CURRENT
        # command first so subsequent commands target the right canvas.
        self._do_CURRENT_command = False
        canvas = get_current_canvas()
        if canvas and hasattr(canvas, '_backend'):
            fbo = canvas._backend._vispy_get_fb_bind_location()
        else:
            fbo = 0  # fall back to the default framebuffer
        self.shared.parser.parse([('CURRENT', 0, fbo)])
    self.glir.flush(self.shared.parser)
Add a reference for the backend object that gives access
to the low level context. Used in vispy.app.canvas.backends.
The given name must match with that of previously added
references.
def add_ref(self, name, ref):
    """ Add a reference for the backend object that gives access
    to the low level context. Used in vispy.app.canvas.backends.
    The given name must match with that of previously added
    references; a RuntimeError is raised otherwise.
    """
    # The first registration fixes the backend name; later ones must agree.
    if self._name is not None:
        if name != self._name:
            raise RuntimeError('Contexts can only share between backends of '
                               'the same type')
    else:
        self._name = name
    # Store weakly so we do not keep the backend object alive.
    self._refs.append(weakref.ref(ref))
A reference (stored internally via a weakref) to an object
that the backend system can use to obtain the low-level
information of the "reference context". In Vispy this will
typically be the CanvasBackend object.
def ref(self):
    """ A reference (stored internally via a weakref) to an object
    that the backend system can use to obtain the low-level
    information of the "reference context". In Vispy this will
    typically be the CanvasBackend object.

    Raises
    ------
    RuntimeError
        If no reference was ever added, or every referenced object has
        been garbage collected.
    """
    # Prune weakrefs whose targets have been garbage collected.
    self._refs = [r for r in self._refs if (r() is not None)]
    # Dereference the first surviving weakref, if any.
    ref = self._refs[0]() if self._refs else None
    if ref is not None:
        return ref
    else:
        # Fixed garbled message (was "No reference for available for ...").
        raise RuntimeError('No reference available for GLShared')
Get screen DPI from the OS
Parameters
----------
raise_error : bool
If True, raise an error if DPI could not be determined.
Returns
-------
dpi : float
Dots per inch of the primary screen.
def get_dpi(raise_error=True):
    """Get screen DPI from the OS

    Parameters
    ----------
    raise_error : bool
        If True, raise an error if DPI could not be determined.
        NOTE(review): currently unused in this implementation -- the
        Quartz calls below are assumed to succeed; confirm intent.

    Returns
    -------
    dpi : float
        Dots per inch of the primary screen.
    """
    display = quartz.CGMainDisplayID()
    # Physical size of the main display (``mm`` suggests millimetres,
    # consistent with the 25.4 mm/inch factor below).
    mm = quartz.CGDisplayScreenSize(display)
    # Display bounds in pixels.
    px = quartz.CGDisplayBounds(display).size
    # Average horizontal and vertical pixels-per-mm, then convert to
    # pixels-per-inch (25.4 mm per inch).
    return (px.width/mm.width + px.height/mm.height) * 0.5 * 25.4
Link this Varying to another object from which it will derive its
dtype. This method is used internally when assigning an attribute to
a varying using syntax ``Function[varying] = attr``.
def link(self, var):
    """ Link this Varying to another object from which it will derive its
    dtype. This method is used internally when assigning an attribute to
    a varying using syntax ``Function[varying] = attr``.
    """
    # Either our dtype is already known, or the linked object must carry
    # one. NOTE: ``assert`` is stripped under ``python -O``, so this
    # validation silently disappears in optimized mode.
    assert self._dtype is not None or hasattr(var, 'dtype')
    self._link = var
    # Notify listeners that this varying's definition changed.
    self.changed()
Two Dimensional Shubert Function
def obj(x):
    """Two Dimensional Shubert Function"""
    # f(x) = [sum_{j=1..5} j*cos((j+1)*x0 + j)] * [same sum over x1]
    orders = np.arange(1, 6)
    factor_a = np.dot(orders, np.cos((orders + 1) * x[0] + orders))
    factor_b = np.dot(orders, np.cos((orders + 1) * x[1] + orders))
    return factor_a * factor_b
Function BESJ calculates Bessel function of first kind of order n
Arguments:
n - an integer (>=0), the order
x - value at which the Bessel function is required
--------------------
C++ Mathematical Library
Converted from equivalent FORTRAN library
Converted by Gareth Walker for use by course 392 computational project
All functions tested and yield the same results as the corresponding
FORTRAN versions.
If you have any problems using these functions please report them to
M.Muldoon@UMIST.ac.uk
Documentation available on the web
http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
Version 1.0 8/98
29 October, 1999
--------------------
Adapted for use in AGG library by
Andy Wilk (castor.vulgaris@gmail.com)
Adapted for use in vispy library by
Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
-----------------------------------------------------------------------
def besj(self, x, n):
    '''Calculate the Bessel function of the first kind J_n(x).

    Uses Miller's downward recurrence: J_{k-1}(x) = (2k/x)*J_k(x) - J_{k+1}(x)
    is run downward from a high starting order with a tiny arbitrary seed,
    and the result is normalized using the identity
    1 = J_0(x) + 2*(J_2(x) + J_4(x) + ...).

    Arguments:
        n - an integer (>=0), the order; returns 0.0 for n < 0
        x - value at which the Bessel function is required

    --------------------
    C++ Mathematical Library
    Converted from equivalent FORTRAN library
    Converted by Gareth Walker for use by course 392 computational project
    All functions tested and yield the same results as the corresponding
    FORTRAN versions.
    If you have any problems using these functions please report them to
    M.Muldoon@UMIST.ac.uk
    Documentation available on the web
    http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
    Version 1.0   8/98
    29 October, 1999
    --------------------
    Adapted for use in AGG library by
    Andy Wilk (castor.vulgaris@gmail.com)
    Adapted for use in vispy library by
    Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
    -----------------------------------------------------------------------
    '''
    if n < 0:
        return 0.0
    d = 1e-6  # convergence tolerance between successive refinements
    b = 0
    # Near x == 0: J_0(0) = 1 and J_n(0) = 0 for n > 0.
    if math.fabs(x) <= d:
        if n != 0:
            return 0
        return 1
    b1 = 0  # b1 is the value from the previous iteration
    # Set up a starting order for recurrence
    m1 = int(math.fabs(x)) + 6
    if math.fabs(x) > 5:
        m1 = int(math.fabs(1.4 * x + 60 / x))
    m2 = int(n + 2 + math.fabs(x) / 4)
    if m1 > m2:
        m2 = m1
    # Apply recurrence down from current max order
    while True:
        c3 = 0       # value at order k+1
        c2 = 1e-30   # arbitrary tiny seed at order k; normalized away below
        c4 = 0       # accumulates the normalization sum J_0 + 2*sum(J_even)
        m8 = 1
        # BUGFIX: the original evenness test ``m2 / 2 * 2 == m2`` is a
        # Python-2 idiom; under Python 3 true division it is ALWAYS true,
        # which corrupted the even/odd-order bookkeeping of the
        # normalization sum whenever m2 was odd. Use modulo instead.
        if m2 % 2 == 0:
            m8 = -1
        imax = m2 - 2
        for i in range(1, imax+1):
            c6 = 2 * (m2 - i) * c2 / x - c3
            c3 = c2
            c2 = c6
            if m2 - i - 1 == n:
                b = c6  # unnormalized value at the requested order
            m8 = -1 * m8  # toggle parity of the order just produced
            if m8 > 0:
                c4 = c4 + 2 * c6  # even-order term of normalization sum
        # Final recurrence step produces order 0.
        c6 = 2 * c2 / x - c3
        if n == 0:
            b = c6
        c4 += c6  # J_0 enters the normalization sum with coefficient 1
        b /= c4   # scale so that J_0 + 2*sum(J_even) == 1
        if math.fabs(b - b1) < d:
            return b
        b1 = b
        # Not converged yet: raise the starting order and try again.
        m2 += 3
Create an exact copy of this quaternion.
def copy(self):
    """ Create an exact copy of this quaternion.
    """
    # The trailing False presumably suppresses normalization in the
    # constructor (cf. rotate_point's "Do not normalize!" usage), so the
    # component values are preserved exactly.
    return Quaternion(self.w, self.x, self.y, self.z, False)
Returns the norm of the quaternion
norm = w**2 + x**2 + y**2 + z**2
def norm(self):
    """ Return the Euclidean norm of the quaternion:
    sqrt(w**2 + x**2 + y**2 + z**2).
    """
    sum_sq = self.w**2 + self.x**2 + self.y**2 + self.z**2
    return sum_sq**0.5
Make the quaternion unit length.
def _normalize(self):
""" Make the quaternion unit length.
"""
# Get length
L = self.norm()
if not L:
raise ValueError('Quaternion cannot have 0-length.')
# Correct
self.w /= L
self.x /= L
self.y /= L
self.z /= L |
Obtain the conjugate of the quaternion.
This is simply the same quaternion but with the sign of the
imaginary (vector) parts reversed.
def conjugate(self):
    """ Return the conjugate of the quaternion: the same scalar (real)
    part, with the sign of the imaginary (vector) parts reversed.
    """
    flipped = self.copy()
    flipped.x = -flipped.x
    flipped.y = -flipped.y
    flipped.z = -flipped.z
    return flipped
returns q.conjugate()/q.norm()**2
So if the quaternion is unit length, it is the same
as the conjugate.
def inverse(self):
    """ Return q.conjugate() / q.norm()**2.

    For a unit-length quaternion this equals the conjugate.
    """
    result = self.conjugate()
    norm_sq = self.norm()**2
    result.w /= norm_sq
    result.x /= norm_sq
    result.y /= norm_sq
    result.z /= norm_sq
    return result
Returns the exponent of the quaternion.
(not tested)
def exp(self):
    """ Returns the exponent of the quaternion.
    (not tested)
    """
    # Init
    # NOTE(review): vecNorm is the *squared* length of the vector part
    # (no sqrt is taken), while the textbook formula uses the length
    # itself; flagged "not tested" upstream -- verify before relying on
    # this method.
    vecNorm = self.x**2 + self.y**2 + self.z**2
    wPart = np.exp(self.w)
    q = Quaternion()
    # Calculate
    # NOTE(review): divides by vecNorm, which is zero for a pure-real
    # quaternion (x == y == z == 0) -- confirm intended behavior.
    q.w = wPart * np.cos(vecNorm)
    q.x = wPart * self.x * np.sin(vecNorm) / vecNorm
    q.y = wPart * self.y * np.sin(vecNorm) / vecNorm
    q.z = wPart * self.z * np.sin(vecNorm) / vecNorm
    return q
Returns the natural logarithm of the quaternion.
(not tested)
def log(self):
    """ Returns the natural logarithm of the quaternion.
    (not tested)
    """
    # Init
    norm = self.norm()
    # NOTE(review): vecNorm is the *squared* length of the vector part
    # (no sqrt is taken), while the textbook formula uses the length
    # itself; flagged "not tested" upstream -- verify before relying on
    # this method.
    vecNorm = self.x**2 + self.y**2 + self.z**2
    tmp = self.w / norm
    q = Quaternion()
    # Calculate
    # NOTE(review): divides by vecNorm, which is zero for a pure-real
    # quaternion -- confirm intended behavior for that case.
    q.w = np.log(norm)
    q.x = np.log(norm) * self.x * np.arccos(tmp) / vecNorm
    q.y = np.log(norm) * self.y * np.arccos(tmp) / vecNorm
    q.z = np.log(norm) * self.z * np.arccos(tmp) / vecNorm
    return q
Rotate a Point instance using this quaternion.
def rotate_point(self, p):
    """ Rotate a Point instance (any 3-sequence) using this quaternion;
    returns the rotated point as an (x, y, z) tuple.
    """
    # Prepare
    # Embed the point as a pure quaternion (scalar part 0).
    p = Quaternion(0, p[0], p[1], p[2], False)  # Do not normalize!
    # NOTE(review): calls self.normalize(); only the in-place
    # _normalize() is visible in this chunk -- confirm a copy-returning
    # normalize() exists on this class.
    q1 = self.normalize()
    q2 = self.inverse()
    # Apply rotation: r = q * p * q^-1
    r = (q1*p)*q2
    # Make point and return
    return r.x, r.y, r.z
Create a 4x4 homography matrix that represents the rotation
of the quaternion.
def get_matrix(self):
    """ Create a 4x4 homography matrix that represents the rotation
    of the quaternion.
    """
    w, x, y, z = self.w, self.x, self.y, self.z
    # Standard quaternion-to-rotation-matrix mapping, padded with a
    # homogeneous last row/column.
    return np.array([
        [1.0 - 2.0 * (y * y + z * z),
         2.0 * (x * y - z * w),
         2.0 * (x * z + y * w),
         0.0],
        [2.0 * (x * y + z * w),
         1.0 - 2.0 * (x * x + z * z),
         2.0 * (y * z - x * w),
         0.0],
        [2.0 * (x * z - y * w),
         2.0 * (z * y + x * w),
         1.0 - 2.0 * (x * x + y * y),
         0.0],
        [0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
Get the axis-angle representation of the quaternion.
(The angle is in radians)
def get_axis_angle(self):
    """ Get the axis-angle representation of the quaternion.
    (The angle is in radians)
    """
    # Clamp w into [-1, 1] so arccos never sees a value pushed out of
    # range by rounding error.
    clamped_w = max(min(self.w, 1.), -1.)
    angle = 2 * np.arccos(clamped_w)
    # Length of the vector (imaginary) part.
    vec_len = (self.x**2 + self.y**2 + self.z**2)**0.5
    if vec_len:
        ax = self.x / vec_len
        ay = self.y / vec_len
        az = self.z / vec_len
    else:
        # No rotation, so arbitrary axis
        ax, ay, az = 1, 0, 0
    return angle, ax, ay, az
Classmethod to create a quaternion from an axis-angle representation.
(angle should be in radians).
def create_from_axis_angle(cls, angle, ax, ay, az, degrees=False):
    """ Classmethod to create a quaternion from an axis-angle representation.
    (angle should be in radians, unless ``degrees`` is True).
    """
    if degrees:
        # Convert once; all math below is in radians.
        angle = np.radians(angle)
    # Shift negative angles into [0, 2*pi) so the half-angle is non-negative.
    while angle < 0:
        angle += np.pi*2
    # Standard axis-angle to quaternion mapping:
    # q = (cos(a/2), sin(a/2)*ax, sin(a/2)*ay, sin(a/2)*az)
    angle2 = angle/2.0
    sinang2 = np.sin(angle2)
    return Quaternion(np.cos(angle2), ax*sinang2, ay*sinang2, az*sinang2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.