text
stringlengths
81
112k
def image_to_base64(image):
    """Convert a matplotlib image to a base64 png representation

    Parameters
    ----------
    image : matplotlib image object
        The image to be converted.

    Returns
    -------
    image_base64 : string
        The UTF8-encoded base64 string representation of the png image.
    """
    axes = image.axes
    png_buffer = io.BytesIO()
    # The png is written in axes coordinates: temporarily swap in the
    # image extent as the axis limits, then restore the previous view.
    saved_limits = axes.axis()
    axes.axis(image.get_extent())
    image.write_png(png_buffer)
    axes.axis(saved_limits)
    png_buffer.seek(0)
    return base64.b64encode(png_buffer.read()).decode('utf-8')
def set_interactive(enabled=True, app=None):
    """Activate the IPython hook for VisPy.

    If the app is not specified, the default is used.
    """
    if not enabled:
        inputhook_manager.disable_gui()
    else:
        inputhook_manager.enable_gui('vispy', app)
def _resize_buffers(self, font_scale):
    """Resize buffers only if necessary

    Rebuilds the glyph byte buffers and the vertex position buffer when
    either the widget size or the font scale has changed, then re-renders
    the stored text lines into the new buffers.
    """
    new_sizes = (font_scale,) + self.size
    if new_sizes == self._current_sizes:  # don't need resize
        return
    # Character grid dimensions that fit in the widget (at least 1x1).
    self._n_rows = int(max(self.size[1] / (self._char_height * font_scale),
                           1))
    self._n_cols = int(max(self.size[0] / (self._char_width * font_scale),
                           1))
    # Two float32 RGB buffers together hold 6 bytes of font data per cell
    # (3 channels each) -- presumably one byte per glyph column; confirm
    # against the 6x8 font layout.
    self._bytes_012 = np.zeros((self._n_rows, self._n_cols, 3), np.float32)
    self._bytes_345 = np.zeros((self._n_rows, self._n_cols, 3), np.float32)
    pos = np.empty((self._n_rows, self._n_cols, 2), np.float32)
    C, R = np.meshgrid(np.arange(self._n_cols), np.arange(self._n_rows))
    # We are in left, top orientation
    x_off = 4.
    y_off = 4 - self.size[1] / font_scale
    pos[..., 0] = x_off + self._char_width * C
    pos[..., 1] = y_off + self._char_height * R
    self._position = VertexBuffer(pos)
    # Restore lines (anything beyond the new row count is dropped)
    for ii, line in enumerate(self._text_lines[:self._n_rows]):
        self._insert_text_buf(line, ii)
    self._current_sizes = new_sizes
def clear(self):
    """Clear the console.

    Zeroes the glyph byte buffers (if they have been allocated yet) and
    drops all stored text lines and pending writes.
    """
    if hasattr(self, '_bytes_012'):
        self._bytes_012.fill(0)
        self._bytes_345.fill(0)
    # BUG FIX: the original used ``[] * self._n_rows`` which is always
    # just ``[]`` -- the multiplication was a misleading no-op.
    self._text_lines = []
    self._pending_writes = []
def write(self, text='', wrap=True):
    """Write text and scroll

    Parameters
    ----------
    text : str
        Text to write. ``''`` can be used for a blank line, as a newline
        is automatically added to the end of each line.
    wrap : str
        If True, long messages will be wrapped to span multiple lines.
    """
    if not isinstance(text, string_types):
        raise TypeError('text must be a string')
    # ensure we only have ASCII chars
    sanitized = text.encode('utf-8').decode('ascii', errors='replace')
    self._pending_writes.append((sanitized, wrap))
    self.update()
def _do_pending_writes(self):
    """Do any pending text writes

    Flushes every queued (text, wrap) pair into the text-line list and
    the glyph buffers, scrolling existing rows down by one for each new
    line inserted at the top.
    """
    for text, wrap in self._pending_writes:
        # truncate in case of *really* long messages: anything beyond a
        # full screen of characters can never be shown
        text = text[-self._n_cols*self._n_rows:]
        text = text.split('\n')
        text = [t if len(t) > 0 else '' for t in text]
        nr, nc = self._n_rows, self._n_cols
        for para in text:
            # without wrapping, hard-crop each paragraph to one row
            para = para[:nc] if not wrap else para
            # with wrapping, chop the paragraph into row-sized chunks
            lines = [para[ii:(ii+nc)] for ii in range(0, len(para), nc)]
            lines = [''] if len(lines) == 0 else lines
            for line in lines:
                # Update row and scroll if necessary: new line goes on
                # top, buffers shift down one row
                self._text_lines.insert(0, line)
                self._text_lines = self._text_lines[:nr]
                self._bytes_012[1:] = self._bytes_012[:-1]
                self._bytes_345[1:] = self._bytes_345[:-1]
                self._insert_text_buf(line, 0)
    # queue fully drained
    self._pending_writes = []
def _insert_text_buf(self, line, idx):
    """Render one line of text into the glyph byte buffers at row *idx*."""
    # Wipe the target row first
    self._bytes_012[idx] = 0
    self._bytes_345[idx] = 0
    # Crop to the column count, then map characters to font-table rows
    # (the table starts at ASCII 32, i.e. space).
    codes = np.array([ord(ch) - 32 for ch in line[:self._n_cols]])
    codes = np.clip(codes, 0, len(__font_6x8__) - 1)
    n = len(codes)
    if n > 0:
        glyphs = __font_6x8__[codes]
        self._bytes_012[idx, :n] = glyphs[:, :3]
        self._bytes_345[idx, :n] = glyphs[:, 3:]
def replace(self, str1, str2):
    """Set verbatim code replacement

    It is strongly recommended to use function['$foo'] = 'bar' where
    possible because template variables are less likely to changed
    than the code itself in future versions of vispy.

    Parameters
    ----------
    str1 : str
        String to replace
    str2 : str
        String to replace str1 with
    """
    current = self._replacements.get(str1, None)
    if str2 == current:
        return  # nothing to do; avoid a spurious change notification
    self._replacements[str1] = str2
    self.changed(code_changed=True)
def _parse_template_vars(self):
    """Find all template variables in self._code, excluding the
    function name.
    """
    template_vars = set()
    for raw in parsing.find_template_variables(self._code):
        name = raw.lstrip('$')
        if name == self.name:
            continue  # the function's own name is not a template var
        if name in ('pre', 'post'):
            raise ValueError('GLSL uses reserved template variable $%s'
                             % name)
        template_vars.add(name)
    return template_vars
def _get_replaced_code(self, names):
    """Return code, with new name, expressions, and replacements applied.

    *names* maps ShaderObjects (including self) to their compiled names.
    """
    code = self._code
    # Modify name: rewrite calls/definition of the original function name
    fname = names[self]
    code = code.replace(" " + self.name + "(", " " + fname + "(")
    # Apply string replacements first -- these may contain $placeholders
    for key, val in self._replacements.items():
        code = code.replace(key, val)
    # Apply assignments to the end of the function
    # Collect post lines
    post_lines = []
    for key, val in self._assignments.items():
        if isinstance(key, Variable):
            key = names[key]
        if isinstance(val, ShaderObject):
            val = val.expression(names)
        line = ' %s = %s;' % (key, val)
        post_lines.append(line)
    # Add a default $post placeholder if needed
    if 'post' in self._expressions:
        post_lines.append(' $post')
    # Apply placeholders for hooks: splice the post block in just before
    # the function's closing brace (the last '}' in the code)
    post_text = '\n'.join(post_lines)
    if post_text:
        post_text = '\n' + post_text + '\n'
    code = code.rpartition('}')
    code = code[0] + post_text + code[1] + code[2]
    # Add a default $pre placeholder if needed: inject right after the
    # function's opening brace
    if 'pre' in self._expressions:
        m = re.search(fname + r'\s*\([^{]*\)\s*{', code)
        if m is None:
            # NOTE: "Cound" is a typo in the original message; left
            # untouched here since it is runtime behavior
            raise RuntimeError("Cound not find beginning of function '%s'"
                               % fname)
        ind = m.span()[1]
        code = code[:ind] + "\n $pre\n" + code[ind:]
    # Apply template variables; the trailing group keeps the delimiter
    # character that terminated the variable name
    for key, val in self._expressions.items():
        val = val.expression(names)
        search = r'\$' + key + r'($|[^a-zA-Z0-9_])'
        code = re.sub(search, val+r'\1', code)
    # Done -- warn about any placeholders that were never substituted
    if '$' in code:
        v = parsing.find_template_variables(code)
        logger.warning('Unsubstituted placeholders in code: %s\n'
                       ' replacements made: %s',
                       v, list(self._expressions.keys()))
    return code + '\n'
def _clean_code(self, code):
    """Return *code* with the common leading indentation removed.

    Blank lines are ignored when measuring the indent.
    """
    lines = code.split("\n")
    # Smallest indentation across all non-blank lines (100 = sentinel)
    min_indent = 100
    for line in lines:
        if line.strip() != "":
            min_indent = min(len(line) - len(line.lstrip()), min_indent)
    if min_indent > 0:
        code = "\n".join(line[min_indent:] for line in lines)
    return code
def add_chain(self, var):
    """Create a new FunctionChain and attach it to $var."""
    new_chain = FunctionChain(var, [])
    # remember the chain by its hook name, then bind it as the template
    # variable's expression
    self._chains[var] = new_chain
    self[var] = new_chain
def append(self, function, update=True):
    """Append a new function to the end of this chain.

    Set *update* to False to defer recompilation.
    """
    self._funcs.append(function)
    self._add_dep(function)
    if not update:
        return
    self._update()
def insert(self, index, function, update=True):
    """Insert a new function into the chain at *index*.

    Set *update* to False to defer recompilation.
    """
    self._funcs.insert(index, function)
    self._add_dep(function)
    if not update:
        return
    self._update()
def remove(self, function, update=True):
    """Remove a function from the chain.

    Set *update* to False to defer recompilation.
    """
    self._funcs.remove(function)
    self._remove_dep(function)
    if not update:
        return
    self._update()
def add(self, item, position=5):
    """Add an item to the list unless it is already present.

    If the item is an expression, then a semicolon will be appended to it
    in the final compiled code.
    """
    if item in self.items:
        return  # already present; keep its existing position
    self.items[item] = position
    self._add_dep(item)
    # invalidate the cached ordering and notify listeners
    self.order = None
    self.changed(code_changed=True)
def remove(self, item):
    """Remove an item from the list.

    Raises KeyError if the item is not present.
    """
    self.items.pop(item)
    self._remove_dep(item)
    # invalidate the cached ordering and notify listeners
    self.order = None
    self.changed(code_changed=True)
def faces(self):
    """Return an array (Nf, 3) of vertex indexes, three per triangular
    face in the mesh.

    If faces have not been computed for this mesh, the function computes
    them. If no vertices or faces are specified, the function returns
    None.
    """
    if self._faces is not None:
        return self._faces
    if self._vertices is None:
        return None
    self.triangulate()
    return self._faces
def vertices(self):
    """Return an array (Nf, 3) of vertices.

    If only faces exist, the function computes the vertices and returns
    them. If no vertices or faces are specified, the function returns
    None.
    """
    if self._faces is not None:
        return self._vertices
    if self._vertices is None:
        return None
    self.triangulate()
    return self._vertices
def convex_hull(self):
    """Return an array of vertex indexes representing the convex hull.

    If faces have not been computed for this mesh, the function computes
    them. If no vertices or faces are specified, the function returns
    None.
    """
    if self._faces is not None:
        return self._convex_hull
    if self._vertices is None:
        return None
    self.triangulate()
    return self._convex_hull
def triangulate(self):
    """Triangulate the set of vertices.

    Builds the edge list for the (open or closed) polyline and runs the
    Triangulation; returns ``(tri.pts, tri.tris)``.

    NOTE(review): the open/closed test compares vertices[0] with
    vertices[1]; the comments suggest first-vs-last was intended --
    confirm upstream before changing.
    """
    npts = self._vertices.shape[0]
    if np.any(self._vertices[0] != self._vertices[1]):
        # start != end, so edges must wrap around to beginning.
        edges = np.empty((npts, 2), dtype=np.uint32)
        edges[:, 0] = np.arange(npts)
        edges[:, 1] = edges[:, 0] + 1
        edges[-1, 1] = 0
    else:
        # start == end; no wrapping required.
        # BUG FIX: np.arange(npts) has npts values but the array has
        # npts-1 rows, which raised a broadcast ValueError; use npts-1.
        edges = np.empty((npts-1, 2), dtype=np.uint32)
        edges[:, 0] = np.arange(npts-1)
        edges[:, 1] = edges[:, 0] + 1
    tri = Triangulation(self._vertices, edges)
    tri.triangulate()
    return tri.pts, tri.tris
def find(name):
    """Locate a filename into the shader library."""
    if op.exists(name):
        return name
    base = op.dirname(__file__) or '.'
    search_paths = [base] + config['include_path']
    for directory in search_paths:
        candidate = op.abspath(op.join(directory, name))
        if op.exists(candidate):
            return candidate
        # also look one level down, in immediate subdirectories
        for entry in os.listdir(directory):
            subdir = op.abspath(op.join(directory, entry))
            if op.isdir(subdir):
                candidate = op.abspath(op.join(subdir, name))
                if op.exists(candidate):
                    return candidate
    return None
def get(name):
    """Retrieve code from the given filename."""
    filename = find(name)
    if filename is None:
        raise RuntimeError('Could not find %s' % name)
    with open(filename) as handle:
        return handle.read()
def expect(func, args, times=7, sleep_t=0.5):
    """Call ``func(*args)`` up to *times* times, sleeping *sleep_t*
    seconds between attempts; raise BaseExc wrapping the last exception
    when all attempts fail.
    """
    last_exc = None
    while times > 0:
        try:
            return func(*args)
        except Exception as e:
            times -= 1
            # BUG FIX: Python 3 unbinds ``e`` when the except block ends,
            # so the final ``raise`` below hit NameError; keep a reference.
            last_exc = e
            logger.debug("expect failed - attempts left: %d" % times)
            time.sleep(sleep_t)
    if times == 0:
        raise exceptions.BaseExc(last_exc)
def num(string):
    """Convert a string to a float.

    Strips characters other than alphanumerics, '.' and '-', then pulls
    the first number found. Returns None (and logs) when no number can be
    extracted; raises ValueError when the input is not a string.
    """
    if not isinstance(string, str):
        raise ValueError(str)
    try:
        # BUG FIX: raw strings -- the old plain strings relied on the
        # invalid escape sequences '\.' and '\-' (SyntaxWarning on
        # modern Python).
        string = re.sub(r'[^a-zA-Z0-9\.\-]', '', string)
        number = re.findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)
        return float(number[0])
    except Exception as e:
        logger = logging.getLogger('tradingAPI.utils.num')
        logger.debug("number not found in %s" % string)
        logger.debug(e)
        return None
def get_number_unit(number):
    """Get the unit (smallest decimal place) of *number*.

    E.g. 1.5 -> 0.1, 12.345 -> 0.001, integers -> 1.0.
    """
    as_text = str(float(number))
    _, _, fractional = as_text.partition('.')
    if float(fractional) == 0:
        return float(1)
    # one leading zero per fractional digit except the last
    return float('0.' + '0' * (len(fractional) - 1) + '1')
def get_pip(mov=None, api=None, name=None):
    """get value of pip

    Either *mov* (an open movement) or *api* plus *name* must be given
    -- not both. The pip is looked up in the global collection first;
    otherwise it is derived by sampling live prices.
    """
    # ~ check args: mov and api are mutually exclusive, one is required
    if mov is None and api is None:
        logger.error("need at least one of those")
        raise ValueError()
    elif mov is not None and api is not None:
        logger.error("mov and api are exclusive")
        raise ValueError()
    if api is not None:
        if name is None:
            logger.error("need a name")
            raise ValueError()
        # create and open a movement for the named product
        mov = api.new_mov(name)
        mov.open()
    if mov is not None:
        mov._check_open()
    # find in the collection (cache of previously computed pips)
    try:
        logger.debug(len(Glob().theCollector.collection))
        pip = Glob().theCollector.collection['pip']
        if name is not None:
            pip_res = pip[name]
        elif mov is not None:
            pip_res = pip[mov.product]
        logger.debug("pip found in the collection")
        return pip_res
    except KeyError:
        logger.debug("pip not found in the collection")
    # ~ vars
    records = []
    intervals = [10, 20, 30]

    def _check_price(interval=10):
        # sample the live price every 0.5s for *interval* seconds
        timeout = time.time() + interval
        while time.time() < timeout:
            records.append(mov.get_price())
            time.sleep(0.5)
    # find variation: keep sampling over growing windows until the price
    # moves; give up after the last interval
    for interval in intervals:
        _check_price(interval)
        if min(records) == max(records):
            logger.debug("no variation in %d seconds" % interval)
            if interval == intervals[-1]:
                raise TimeoutError("no variation")
        else:
            break
    # find longer price: the sample with the most digits carries the most
    # decimal places, hence the finest unit
    for price in records:
        if 'best_price' not in locals():
            best_price = price
        if len(str(price)) > len(str(best_price)):
            logger.debug("found new best_price %f" % price)
            best_price = price
    # get pip and cache it for later lookups
    pip = get_number_unit(best_price)
    Glob().pipHandler.add_val({mov.product: pip})
    return pip
def itemsize(self):
    """Individual item sizes (stop - start for each stored item)."""
    used = self._items[:self._count]
    return used[:, 1] - used[:, 0]
def reserve(self, capacity):
    """Set current capacity of the underlying array.

    Capacity is rounded up to the next power of two; shrinking below the
    current size is a no-op.
    """
    if capacity < self._data.size:
        return
    rounded = int(2 ** np.ceil(np.log2(capacity)))
    self._data = np.resize(self._data, rounded)
def insert(self, index, data, itemsize=None):
    """Insert data before index

    Parameters
    ----------
    index : int
        Index before which data will be inserted.
    data : array_like
        An array, any object exposing the array interface, an object
        whose __array__ method returns an array, or any (nested)
        sequence.
    itemsize : int or 1-D array
        If `itemsize` is an integer, N, the array will be divided into
        elements of size N. If such partition is not possible, an error
        is raised.

        If `itemsize` is 1-D array, the array will be divided into
        elements whose succesive sizes will be picked from itemsize.
        If the sum of itemsize values is different from array size, an
        error is raised.
    """
    if not self._sizeable:
        raise AttributeError("List is not sizeable")
    # Nested lists/tuples: derive per-item sizes, then flatten
    if isinstance(data, (list, tuple)) and isinstance(data[0], (list, tuple)):  # noqa
        itemsize = [len(l) for l in data]
        data = [item for sublist in data for item in sublist]
    data = np.array(data, copy=False).ravel()
    size = data.size
    # Check item size and get item number
    if itemsize is not None:
        if isinstance(itemsize, int):
            if (size % itemsize) != 0:
                raise ValueError("Cannot partition data as requested")
            _count = size // itemsize
            _itemsize = np.ones(_count, dtype=int) * (size // _count)
        else:
            _itemsize = np.array(itemsize, copy=False)
            _count = len(itemsize)
            if _itemsize.sum() != size:
                raise ValueError("Cannot partition data as requested")
    else:
        _count = 1
    # Check if data array is big enough and resize it if necessary
    # (capacity grows to the next power of two)
    if self._size + size >= self._data.size:
        capacity = int(2 ** np.ceil(np.log2(self._size + size)))
        self._data = np.resize(self._data, capacity)
    # Check if item array is big enough and resize it if necessary
    if self._count + _count >= len(self._items):
        capacity = int(2 ** np.ceil(np.log2(self._count + _count)))
        self._items = np.resize(self._items, (capacity, 2))
    # Check index (negative indices count from the end)
    if index < 0:
        index += len(self)
    if index < 0 or index > len(self):
        raise IndexError("List insertion index out of range")
    # Inserting: shift existing data/items right to make room; the new
    # data itself is stored by the common code below
    if index < self._count:
        istart = index
        dstart = self._items[istart][0]
        # NOTE(review): dstop is unused in this branch -- confirm it is
        # intentional dead code
        dstop = self._items[istart][1]
        # Move data
        Z = self._data[dstart:self._size]
        self._data[dstart + size:self._size + size] = Z
        # Update moved items
        I = self._items[istart:self._count] + size
        self._items[istart + _count:self._count + _count] = I
    # Appending
    else:
        dstart = self._size
        istart = self._count
    # Only one item (faster)
    if _count == 1:
        # Store data
        self._data[dstart:dstart + size] = data
        self._size += size
        # Store data location (= item)
        self._items[istart][0] = dstart
        self._items[istart][1] = dstart + size
        self._count += 1
    # Several items
    else:
        # Store data
        dstop = dstart + size
        self._data[dstart:dstop] = data
        self._size += size
        # Store items: each row is (start, stop) derived from the
        # cumulative sum of the individual item sizes
        items = np.ones((_count, 2), int) * dstart
        C = _itemsize.cumsum()
        items[1:, 0] += C[:-1]
        items[0:, 1] += C
        istop = istart + _count
        self._items[istart:istop] = items
        self._count += _count
def append(self, data, itemsize=None):
    """Append data to the end.

    Parameters
    ----------
    data : array_like
        An array, any object exposing the array interface, an object
        whose __array__ method returns an array, or any (nested)
        sequence.
    itemsize : int or 1-D array
        If `itemsize` is an integer, N, the array will be divided into
        elements of size N. If such partition is not possible, an error
        is raised.

        If `itemsize` is 1-D array, the array will be divided into
        elements whose succesive sizes will be picked from itemsize.
        If the sum of itemsize values is different from array size, an
        error is raised.
    """
    # Appending is simply inserting at the end of the list.
    self.insert(len(self), data, itemsize)
def minimize(func, bounds=None, nvar=None, args=(), disp=False,
             eps=1e-4, maxf=20000, maxT=6000, algmethod=0,
             fglobal=-1e100, fglper=0.01, volper=-1.0, sigmaper=-1.0,
             **kwargs):
    r"""Solve an optimization problem using the DIRECT
    (Dividing Rectangles) algorithm.

    It can be used to solve general nonlinear programming problems of
    the form:

    .. math::

           \min_ {x \in R^n} f(x)

    subject to

    .. math::

           x_L \leq  x  \leq x_U

    Where :math:`x` are the optimization variables (with upper and lower
    bounds), :math:`f(x)` is the objective function.

    Parameters
    ----------
    func : objective function
        called as `func(x, *args)`; does not need to be defined
        everywhere, raise an Exception where function is not defined
    bounds : array-like
        ``(min, max)`` pairs for each element in ``x``, defining the
        bounds on that parameter.
    nvar : integer
        Dimensionality of x (only needed if `bounds` is not defined)
    eps : float
        Ensures sufficient decrease in function value when a new
        potentially optimal interval is chosen.
    maxf : integer
        Approximate upper bound on objective function evaluations.
        Maximal allowed value is 90000, see documentation of the
        Fortran library.
    maxT : integer
        Maximum number of iterations. Maximal allowed value is 6000,
        see documentation of the Fortran library.
    algmethod : integer
        Whether to use the original or modified DIRECT algorithm:
        ``algmethod=0`` - original DIRECT, ``algmethod=1`` - DIRECT-l.
    fglobal : float
        Function value of the global optimum. If this value is not
        known set this to a very large negative value.
    fglper : float
        Terminate the optimization when the percent error satisfies
        :math:`100(f_{min}-f_{global})/\max(1,|f_{global}|) \leq fglper`.
    volper : float
        Terminate once a hyperrectangle's volume is less than volper
        percent of the original hyperrectangle.
    sigmaper : float
        Terminate once the measure of a hyperrectangle is less than
        sigmaper.

    Returns
    -------
    res : OptimizeResult
        The optimization result with attributes ``x``, ``fun``,
        ``status``, ``success`` and ``message``.
    """
    # Default domain is the unit hypercube when no bounds are given.
    if bounds is None:
        lower = np.zeros(nvar, dtype=np.float64)
        upper = np.ones(nvar, dtype=np.float64)
    else:
        bounds = np.asarray(bounds)
        lower = bounds[:, 0]
        upper = bounds[:, 1]

    def _objective_wrap(x, iidata, ddata, cdata, n, iisize, idsize, icsize):
        """Adapt the Python objective to the signature required by the
        Fortran library: return (value, flag), flag=1 marking a point
        where the objective is undefined (value = np.nan)."""
        try:
            return func(x, *args), 0
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only ordinary exceptions mark
        # the point as undefined.
        except Exception:
            return np.nan, 1

    # Dummy values so that the python wrapper will comply with the
    # required signature of the fortran library.
    iidata = np.ones(0, dtype=np.int32)
    ddata = np.ones(0, dtype=np.float64)
    cdata = np.ones([0, 40], dtype=np.uint8)

    # Call the DIRECT algorithm
    x, fun, ierror = direct(_objective_wrap, eps, maxf, maxT, lower, upper,
                            algmethod, 'dummylogfile', fglobal, fglper,
                            volper, sigmaper, iidata, ddata, cdata, disp)

    return OptimizeResult(x=x, fun=fun, status=ierror, success=ierror > 0,
                          message=SUCCESS_MESSAGES[ierror - 1] if ierror > 0
                          else ERROR_MESSAGES[abs(ierror) - 1])
def get_dpi(raise_error=True):
    """Get screen DPI from the OS

    Parameters
    ----------
    raise_error : bool
        If True, raise an error if DPI could not be determined.
        (Unused in this code path -- kept for interface compatibility.)

    Returns
    -------
    dpi : float
        Dots per inch of the primary screen.
    """
    try:
        # Make the process DPI-aware so queries return physical values
        user32.SetProcessDPIAware()
    except AttributeError:
        pass  # not present on XP
    dc = user32.GetDC(0)
    h_size = gdi32.GetDeviceCaps(dc, HORZSIZE)
    v_size = gdi32.GetDeviceCaps(dc, VERTSIZE)
    h_res = gdi32.GetDeviceCaps(dc, HORZRES)
    v_res = gdi32.GetDeviceCaps(dc, VERTRES)
    user32.ReleaseDC(None, dc)
    # average of horizontal and vertical DPI; 25.4 mm per inch
    return (h_res / float(h_size) + v_res / float(v_size)) * 0.5 * 25.4
def build_if_needed(self):
    """Rebuild the shader source if it is marked dirty, then refresh
    the variables.
    """
    if self._need_build:
        self._build()
        self._need_build = False
    # variables are refreshed regardless of whether a rebuild happened
    self.update_variables()
def nmap(a, b, c, d, curvefn=None, normfn=None):
    """Return a function mapping a number n from range (a, b) onto
    range (c, d).

    Linear mapping is used unless *curvefn* is given; *normfn*, if
    provided, post-processes the output.
    """
    if not curvefn:
        curvefn = lambda x: x

    def _mapped(n):
        ratio = 1.0 * (n - a) / (b - a)
        result = curvefn(ratio) * (d - c) + c
        if normfn:
            return normfn(result)
        return result

    return _mapped
def link_view(self, view):
    """Link this axis to a ViewBox

    This makes it so that the axis's domain always matches the visible
    range in the ViewBox.

    Parameters
    ----------
    view : instance of ViewBox
        The ViewBox to link.
    """
    if view is self._linked_view:
        return  # already linked to this view
    old_view = self._linked_view
    if old_view is not None:
        # detach from the previously linked view first
        old_view.scene.transform.changed.disconnect(self._view_changed)
    self._linked_view = view
    view.scene.transform.changed.connect(self._view_changed)
    self._view_changed()
def _view_changed(self, event=None):
    """Linked view transform has changed; update ticks."""
    tr = self.node_transform(self._linked_view.scene)
    p1, p2 = tr.map(self._axis_ends())
    # vertical axes track the y component, horizontal axes the x one
    dim = 1 if self.orientation in ('left', 'right') else 0
    self.axis.domain = (p1[dim], p2[dim])
def viewbox_mouse_event(self, event):
    """ViewBox mouse event handler

    Parameters
    ----------
    event : instance of Event
        The mouse event.
    """
    # When the attached ViewBox receives a mouse event, it is sent to
    # the camera here.
    self.mouse_pos = event.pos[:2]
    if event.type == 'mouse_wheel':
        # wheel rolled; adjust the magnification factor and hide the
        # event from the superclass
        m = self.mag_target
        m *= 1.2 ** event.delta[1]
        # magnification never drops below 1 (no shrinking)
        m = m if m > 1 else 1
        self.mag_target = m
    else:
        # send everything _except_ wheel events to the superclass
        super(MagnifyCamera, self).viewbox_mouse_event(event)
    # start the timer to smoothly modify the transform properties.
    if not self.timer.running:
        self.timer.start()
    self._update_transform()
def on_timer(self, event=None):
    """Timer event handler

    Parameters
    ----------
    event : instance of Event
        The timer event.
    """
    # Smoothly update center and magnification properties of the
    # transform: an exponential decay toward the targets, with the decay
    # rate k growing as the current magnification shrinks.
    k = np.clip(100. / self.mag.mag, 10, 100)
    s = 10**(-k * event.dt)
    c = np.array(self.mag.center)
    c1 = c * s + self.mouse_pos * (1-s)
    m = self.mag.mag * s + self.mag_target * (1-s)
    # If changes are very small, then it is safe to stop the timer.
    if (np.all(np.abs((c - c1) / c1) < 1e-5) and
            (np.abs(np.log(m / self.mag.mag)) < 1e-3)):
        self.timer.stop()
    self.mag.center = c1
    self.mag.mag = m
    self._update_transform()
def glBufferData(target, data, usage):
    """ Data can be numpy array or the size of data to allocate.

    When *data* is an int, that many bytes are allocated uninitialized
    (NULL data pointer); otherwise the numpy array's bytes are uploaded.
    """
    if isinstance(data, int):
        size = data
        # NULL pointer: allocate the buffer without filling it
        data = ctypes.c_voidp(0)
    else:
        # GL needs a C-contiguous, aligned block; copy only if required
        if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
            data = data.copy('C')
        # keep a reference to the array while we pass its raw address
        data_ = data
        size = data_.nbytes
        data = data_.ctypes.data
    # lazily resolve and cache the native GL entry point on first use
    try:
        nativefunc = glBufferData._native
    except AttributeError:
        nativefunc = glBufferData._native = _get_gl_func(
            "glBufferData", None,
            (ctypes.c_uint, ctypes.c_int, ctypes.c_void_p, ctypes.c_uint,))
    res = nativefunc(target, size, data, usage)
def next_power_of_2(n):
    """Return the next power of 2 greater than or equal to n.

    The result is never smaller than 4 (inputs <= 4, including
    non-positive values, all yield 4).
    """
    if n <= 4:
        return 4
    # smallest power of two >= n, via the bit length of n - 1
    return 1 << (n - 1).bit_length()
def append(self, vertices, uniforms=None, indices=None, itemsize=None):
    """ Append vertices (and optional uniforms / indices) to the
    collection.

    Parameters
    ----------
    vertices : numpy array
        An array whose dtype is compatible with self.vdtype
    uniforms : numpy array
        An array whose dtype is compatible with self.utype
    indices : numpy array
        An array whose dtype is compatible with self.idtype
        All index values must be between 0 and len(vertices)
    itemsize : int, tuple or 1-D array
        If `itemsize` is an integer, N, the array will be divided
        into elements of size N. If such partition is not possible,
        an error is raised.

        If `itemsize` is 1-D array, the array will be divided into
        elements whose succesive sizes will be picked from itemsize.
        If the sum of itemsize values is different from array size,
        an error is raised.
    """
    # Vertices
    # -----------------------------
    vertices = np.array(vertices).astype(self.vtype).ravel()
    vsize = self._vertices_list.size
    # No itemsize given
    # -----------------
    if itemsize is None:
        index = 0
        count = 1
    # Uniform itemsize (int)
    # ----------------------
    elif isinstance(itemsize, int):
        # NOTE(review): true division makes ``count`` a float on
        # Python 3 (np.arange accepts it, so this still runs) --
        # ``//`` looks intended; confirm upstream.
        count = len(vertices) / itemsize
        index = np.repeat(np.arange(count), itemsize)
    # Individual itemsize (array)
    # ---------------------------
    elif isinstance(itemsize, (np.ndarray, list)):
        count = len(itemsize)
        index = np.repeat(np.arange(count), itemsize)
    else:
        raise ValueError("Itemsize not understood")
    if self.utype:
        # each vertex records its item index so the vertex shader can
        # fetch the matching row of the uniforms texture
        vertices["collection_index"] = index + len(self)
    self._vertices_list.append(vertices, itemsize)
    # Indices
    # -----------------------------
    if self.itype is not None:
        # No indices given (-> automatic generation)
        if indices is None:
            indices = vsize + np.arange(len(vertices))
            self._indices_list.append(indices, itemsize)
        # Indices given
        # FIXME: variables indices (list of list or ArrayList)
        else:
            if itemsize is None:
                I = np.array(indices) + vsize
            elif isinstance(itemsize, int):
                # replicate the index pattern once per item, offsetting
                # each copy by its item's position in the vertex store
                I = vsize + (np.tile(indices, count) + itemsize * np.repeat(np.arange(count), len(indices)))  # noqa
            else:
                raise ValueError("Indices not compatible with items")
            self._indices_list.append(I, len(indices))
    # Uniforms
    # -----------------------------
    if self.utype:
        if uniforms is None:
            # default: zero-filled uniforms, one record per item
            uniforms = np.zeros(count, dtype=self.utype)
        else:
            uniforms = np.array(uniforms).astype(self.utype).ravel()
        self._uniforms_list.append(uniforms, itemsize=1)
    self._need_update = True
Compute uniform texture shape def _compute_texture_shape(self, size=1): """ Compute uniform texture shape """ # We should use this line but we may not have a GL context yet # linesize = gl.glGetInteger(gl.GL_MAX_TEXTURE_SIZE) linesize = 1024 count = self._uniforms_float_count cols = 4 * linesize // int(count) rows = max(1, int(math.ceil(size / float(cols)))) shape = rows, cols * (count // 4), count self._ushape = shape return shape
Update vertex buffers & texture def _update(self): """ Update vertex buffers & texture """ if self._vertices_buffer is not None: self._vertices_buffer.delete() self._vertices_buffer = VertexBuffer(self._vertices_list.data) if self.itype is not None: if self._indices_buffer is not None: self._indices_buffer.delete() self._indices_buffer = IndexBuffer(self._indices_list.data) if self.utype is not None: if self._uniforms_texture is not None: self._uniforms_texture.delete() # We take the whole array (_data), not the data one texture = self._uniforms_list._data.view(np.float32) size = len(texture) / self._uniforms_float_count shape = self._compute_texture_shape(size) # shape[2] = float count is only used in vertex shader code texture = texture.reshape(shape[0], shape[1], 4) self._uniforms_texture = Texture2D(texture) self._uniforms_texture.data = texture self._uniforms_texture.interpolation = 'nearest' if len(self._programs): for program in self._programs: program.bind(self._vertices_buffer) if self._uniforms_list is not None: program["uniforms"] = self._uniforms_texture program["uniforms_shape"] = self._ushape
Retrieve a graph layout Some graph layouts accept extra options. Please refer to their documentation for more information. Parameters ---------- name : string The name of the layout. The variable `AVAILABLE_LAYOUTS` contains all available layouts. *args Positional arguments which are passed to the layout. **kwargs Keyword arguments which are passed to the layout. Returns ------- layout : callable The callable generator which will calculate the graph layout def get_layout(name, *args, **kwargs): """ Retrieve a graph layout Some graph layouts accept extra options. Please refer to their documentation for more information. Parameters ---------- name : string The name of the layout. The variable `AVAILABLE_LAYOUTS` contains all available layouts. *args Positional arguments which are passed to the layout. **kwargs Keyword arguments which are passed to the layout. Returns ------- layout : callable The callable generator which will calculate the graph layout """ if name not in _layout_map: raise KeyError("Graph layout '%s' not found. Should be one of %s" % (name, AVAILABLE_LAYOUTS)) layout = _layout_map[name] if inspect.isclass(layout): layout = layout(*args, **kwargs) return layout
Given viewer session information, make sure the session information is compatible with the current version of the viewers, and if not, update the session information in-place. def update_viewer_state(rec, context): """ Given viewer session information, make sure the session information is compatible with the current version of the viewers, and if not, update the session information in-place. """ if '_protocol' not in rec: rec.pop('properties') rec['state'] = {} rec['state']['values'] = rec.pop('options') layer_states = [] for layer in rec['layers']: state_id = str(uuid.uuid4()) state_cls = STATE_CLASS[layer['_type'].split('.')[-1]] state = state_cls(layer=context.object(layer.pop('layer'))) properties = set(layer.keys()) - set(['_type']) for prop in sorted(properties, key=state.update_priority, reverse=True): value = layer.pop(prop) value = context.object(value) if isinstance(value, six.string_types) and value == 'fixed': value = 'Fixed' if isinstance(value, six.string_types) and value == 'linear': value = 'Linear' setattr(state, prop, value) context.register_object(state_id, state) layer['state'] = state_id layer_states.append(state) list_id = str(uuid.uuid4()) context.register_object(list_id, layer_states) rec['state']['values']['layers'] = list_id rec['state']['values']['visible_axes'] = rec['state']['values'].pop('visible_box')
Remove C-style comment from GLSL code string. def remove_comments(code): """Remove C-style comment from GLSL code string.""" pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*\n)" # first group captures quoted strings (double or single) # second group captures comments (//single-line or /* multi-line */) regex = re.compile(pattern, re.MULTILINE | re.DOTALL) def do_replace(match): # if the 2nd group (capturing comments) is not None, # it means we have captured a non-quoted (real) comment string. if match.group(2) is not None: return "" # so we will return empty to remove the comment else: # otherwise, we will return the 1st group return match.group(1) # captured quoted-string return regex.sub(do_replace, code)
Merge all includes recursively. def merge_includes(code): """Merge all includes recursively.""" pattern = '\#\s*include\s*"(?P<filename>[a-zA-Z0-9\_\-\.\/]+)"' regex = re.compile(pattern) includes = [] def replace(match): filename = match.group("filename") if filename not in includes: includes.append(filename) path = glsl.find(filename) if not path: logger.critical('"%s" not found' % filename) raise RuntimeError("File not found", filename) text = '\n// --- start of "%s" ---\n' % filename with open(path) as fh: text += fh.read() text += '// --- end of "%s" ---\n' % filename return text return '' # Limit recursion to depth 10 for i in range(10): if re.search(regex, code): code = re.sub(regex, replace, code) else: break return code
def add_widget(self, widget=None, row=None, col=None, row_span=1,
               col_span=1, **kwargs):
    """Add a new widget to this grid.

    Existing widgets are resized to make room for the new one; this can
    also be used to replace a widget.

    Parameters
    ----------
    widget : Widget | None
        The Widget to add. New widget is constructed if widget is None.
    row : int
        The row in which to add the widget (0 is the topmost row)
    col : int
        The column in which to add the widget (0 is the leftmost column)
    row_span : int
        The number of rows to be occupied by this widget. Default is 1.
    col_span : int
        The number of columns to be occupied by this widget. Default is 1.
    **kwargs : dict
        parameters sent to the new Widget that is constructed if
        widget is None

    Notes
    -----
    The widget's parent is automatically set to this grid, and all
    other parent(s) are removed.
    """
    row = self._next_cell[0] if row is None else row
    col = self._next_cell[1] if col is None else col

    if widget is None:
        widget = Widget(**kwargs)
    elif kwargs:
        raise ValueError("cannot send kwargs if widget is given")

    self._cells.setdefault(row, {})[col] = widget
    self._grid_widgets[self._n_added] = (row, col, row_span, col_span,
                                         widget)
    self._n_added += 1
    widget.parent = self

    # The next auto-placed widget goes just after this one's columns
    self._next_cell = [row, col + col_span]

    widget._var_w = Variable("w-(row: %s | col: %s)" % (row, col))
    widget._var_h = Variable("h-(row: %s | col: %s)" % (row, col))

    # A widget spanning several rows/columns should, by default, claim a
    # proportional share of the space; otherwise the extra rows/columns
    # would collapse to zero size.
    stretch = list(widget.stretch)
    if stretch[0] is None:
        stretch[0] = col_span
    if stretch[1] is None:
        stretch[1] = row_span
    widget.stretch = stretch

    self._need_solver_recreate = True

    return widget
Remove a widget from this grid Parameters ---------- widget : Widget The Widget to remove def remove_widget(self, widget): """Remove a widget from this grid Parameters ---------- widget : Widget The Widget to remove """ self._grid_widgets = dict((key, val) for (key, val) in self._grid_widgets.items() if val[-1] != widget) self._need_solver_recreate = True
Resize a widget in the grid to new dimensions. Parameters ---------- widget : Widget The widget to resize row_span : int The number of rows to be occupied by this widget. col_span : int The number of columns to be occupied by this widget. def resize_widget(self, widget, row_span, col_span): """Resize a widget in the grid to new dimensions. Parameters ---------- widget : Widget The widget to resize row_span : int The number of rows to be occupied by this widget. col_span : int The number of columns to be occupied by this widget. """ row = None col = None for (r, c, rspan, cspan, w) in self._grid_widgets.values(): if w == widget: row = r col = c break if row is None or col is None: raise ValueError("%s not found in grid" % widget) self.remove_widget(widget) self.add_widget(widget, row, col, row_span, col_span) self._need_solver_recreate = True
Create a new Grid and add it as a child widget. Parameters ---------- row : int The row in which to add the widget (0 is the topmost row) col : int The column in which to add the widget (0 is the leftmost column) row_span : int The number of rows to be occupied by this widget. Default is 1. col_span : int The number of columns to be occupied by this widget. Default is 1. **kwargs : dict Keyword arguments to pass to the new `Grid`. def add_grid(self, row=None, col=None, row_span=1, col_span=1, **kwargs): """ Create a new Grid and add it as a child widget. Parameters ---------- row : int The row in which to add the widget (0 is the topmost row) col : int The column in which to add the widget (0 is the leftmost column) row_span : int The number of rows to be occupied by this widget. Default is 1. col_span : int The number of columns to be occupied by this widget. Default is 1. **kwargs : dict Keyword arguments to pass to the new `Grid`. """ from .grid import Grid grid = Grid(**kwargs) return self.add_widget(grid, row, col, row_span, col_span)
Create a new ViewBox and add it as a child widget. Parameters ---------- row : int The row in which to add the widget (0 is the topmost row) col : int The column in which to add the widget (0 is the leftmost column) row_span : int The number of rows to be occupied by this widget. Default is 1. col_span : int The number of columns to be occupied by this widget. Default is 1. **kwargs : dict Keyword arguments to pass to `ViewBox`. def add_view(self, row=None, col=None, row_span=1, col_span=1, **kwargs): """ Create a new ViewBox and add it as a child widget. Parameters ---------- row : int The row in which to add the widget (0 is the topmost row) col : int The column in which to add the widget (0 is the leftmost column) row_span : int The number of rows to be occupied by this widget. Default is 1. col_span : int The number of columns to be occupied by this widget. Default is 1. **kwargs : dict Keyword arguments to pass to `ViewBox`. """ from .viewbox import ViewBox view = ViewBox(**kwargs) return self.add_widget(view, row, col, row_span, col_span)
Find font def find_font(face, bold, italic): """Find font""" bold = FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR italic = FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN face = face.encode('utf8') fontconfig.FcInit() pattern = fontconfig.FcPatternCreate() fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold) fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic) fontconfig.FcPatternAddString(pattern, FC_FAMILY, face) fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern) fontconfig.FcDefaultSubstitute(pattern) result = FcType() match = fontconfig.FcFontMatch(0, pattern, byref(result)) fontconfig.FcPatternDestroy(pattern) if not match: raise RuntimeError('Could not match font "%s"' % face) value = FcValue() fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value)) if(value.u.s != face): warnings.warn('Could not find face match "%s", falling back to "%s"' % (face, value.u.s)) result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value)) if result != 0: raise RuntimeError('No filename or FT face for "%s"' % face) fname = value.u.s return fname.decode('utf-8')
List system fonts def _list_fonts(): """List system fonts""" stdout_, stderr = run_subprocess(['fc-list', ':scalable=true', 'family']) vals = [v.split(',')[0] for v in stdout_.strip().splitlines(False)] return vals
Helper to get vispy calling function from the stack def _get_vispy_caller(): """Helper to get vispy calling function from the stack""" records = inspect.stack() # first few records are vispy-based logging calls for record in records[5:]: module = record[0].f_globals['__name__'] if module.startswith('vispy'): line = str(record[0].f_lineno) func = record[3] cls = record[0].f_locals.get('self', None) clsname = "" if cls is None else cls.__class__.__name__ + '.' caller = "{0}:{1}{2}({3}): ".format(module, clsname, func, line) return caller return 'unknown'
def set_log_level(verbose, match=None, return_old=False):
    """Convenience function for setting the logging level

    Parameters
    ----------
    verbose : bool, str, int, or None
        The verbosity of messages to print. If a str, it can be either
        DEBUG, INFO, WARNING, ERROR, or CRITICAL. Note that these are
        for convenience and are equivalent to passing in logging.DEBUG,
        etc. For bool, True is the same as 'INFO', False is the same
        as 'WARNING'.
    match : str | None
        String to match. Only those messages that both contain a substring
        that regexp matches ``'match'`` (and the ``verbose`` level) will be
        displayed.
    return_old : bool
        If True, return the old verbosity level and old match.

    Notes
    -----
    If ``verbose=='debug'``, then the ``vispy`` method emitting the log
    message will be prepended to each log message, which is useful for
    debugging. If ``verbose=='debug'`` or ``match is not None``, then a
    small performance overhead is added. Thus it is suggested to only use
    these options when performance is not crucial.

    See also
    --------
    vispy.util.use_log_level
    """
    # Normalize `verbose` to a numeric logging level. Storing log
    # messages is only available via the context handler
    # (use_log_level), which configures that itself.
    if isinstance(verbose, bool):
        verbose = 'info' if verbose else 'warning'
    if isinstance(verbose, string_types):
        verbose = verbose.lower()
        if verbose not in logging_types:
            raise ValueError('Invalid argument "%s"' % verbose)
        verbose = logging_types[verbose]
    else:
        raise TypeError('verbose must be a bool or string')

    logger = logging.getLogger('vispy')
    old_verbose = logger.level
    old_match = _lh._vispy_set_match(match)
    logger.setLevel(verbose)
    # Prepend the calling function only at DEBUG level (it is costly)
    _lf._vispy_set_prepend(verbose <= logging.DEBUG)
    return (old_verbose, old_match) if return_old else None
Send an exception and traceback to the logger. This function is used in cases where an exception is handled safely but nevertheless should generate a descriptive error message. An extra line is inserted into the stack trace indicating where the exception was caught. Parameters ---------- level : str See ``set_log_level`` for options. tb_skip : int The number of traceback entries to ignore, prior to the point where the exception was caught. The default is 2. def log_exception(level='warning', tb_skip=2): """ Send an exception and traceback to the logger. This function is used in cases where an exception is handled safely but nevertheless should generate a descriptive error message. An extra line is inserted into the stack trace indicating where the exception was caught. Parameters ---------- level : str See ``set_log_level`` for options. tb_skip : int The number of traceback entries to ignore, prior to the point where the exception was caught. The default is 2. """ stack = "".join(traceback.format_stack()[:-tb_skip]) tb = traceback.format_exception(*sys.exc_info()) msg = tb[0] # "Traceback (most recent call last):" msg += stack msg += " << caught exception here: >>\n" msg += "".join(tb[1:]).rstrip() logger.log(logging_types[level], msg)
Helper for prining errors in callbacks See EventEmitter._invoke_callback for a use example. def _handle_exception(ignore_callback_errors, print_callback_errors, obj, cb_event=None, node=None): """Helper for prining errors in callbacks See EventEmitter._invoke_callback for a use example. """ if not hasattr(obj, '_vispy_err_registry'): obj._vispy_err_registry = {} registry = obj._vispy_err_registry if cb_event is not None: cb, event = cb_event exp_type = 'callback' else: exp_type = 'node' type_, value, tb = sys.exc_info() tb = tb.tb_next # Skip *this* frame sys.last_type = type_ sys.last_value = value sys.last_traceback = tb del tb # Get rid of it in this namespace # Handle if not ignore_callback_errors: raise if print_callback_errors != "never": this_print = 'full' if print_callback_errors in ('first', 'reminders'): # need to check to see if we've hit this yet if exp_type == 'callback': key = repr(cb) + repr(event) else: key = repr(node) if key in registry: registry[key] += 1 if print_callback_errors == 'first': this_print = None else: # reminders ii = registry[key] # Use logarithmic selection # (1, 2, ..., 10, 20, ..., 100, 200, ...) if ii == (2 ** int(np.log2(ii))): this_print = ii else: this_print = None else: registry[key] = 1 if this_print == 'full': logger.log_exception() if exp_type == 'callback': logger.error("Invoking %s for %s" % (cb, event)) else: # == 'node': logger.error("Drawing node %s" % node) elif this_print is not None: if exp_type == 'callback': logger.error("Invoking %s repeat %s" % (cb, this_print)) else: # == 'node': logger.error("Drawing node %s repeat %s" % (node, this_print))
Serialize a NumPy array. def _serialize_buffer(buffer, array_serialization=None): """Serialize a NumPy array.""" if array_serialization == 'binary': # WARNING: in NumPy 1.9, tostring() has been renamed to tobytes() # but tostring() is still here for now for backward compatibility. return buffer.ravel().tostring() elif array_serialization == 'base64': return {'storage_type': 'base64', 'buffer': base64.b64encode(buffer).decode('ascii') } raise ValueError("The array serialization method should be 'binary' or " "'base64'.")
Log message emitter that optionally matches and/or records def _vispy_emit_match_andor_record(self, record): """Log message emitter that optionally matches and/or records""" test = record.getMessage() match = self._vispy_match if (match is None or re.search(match, test) or re.search(match, _get_vispy_caller())): if self._vispy_emit_record: fmt_rec = self._vispy_formatter.format(record) self._vispy_emit_list.append(fmt_rec) if self._vispy_print_msg: return logging.StreamHandler.emit(self, record) else: return
Convert *obj* to a new ShaderObject. If the output is a Variable with no name, then set its name using *ref*. def create(self, obj, ref=None): """ Convert *obj* to a new ShaderObject. If the output is a Variable with no name, then set its name using *ref*. """ if isinstance(ref, Variable): ref = ref.name elif isinstance(ref, string_types) and ref.startswith('gl_'): # gl_ names not allowed for variables ref = ref[3:].lower() # Allow any type of object to be converted to ShaderObject if it # provides a magic method: if hasattr(obj, '_shader_object'): obj = obj._shader_object() if isinstance(obj, ShaderObject): if isinstance(obj, Variable) and obj.name is None: obj.name = ref elif isinstance(obj, string_types): obj = TextExpression(obj) else: obj = Variable(ref, obj) # Try prepending the name to indicate attribute, uniform, varying if obj.vtype and obj.vtype[0] in 'auv': obj.name = obj.vtype[0] + '_' + obj.name return obj
Return all dependencies required to use this object. The last item in the list is *self*. def dependencies(self, sort=False): """ Return all dependencies required to use this object. The last item in the list is *self*. """ alldeps = [] if sort: def key(obj): # sort deps such that we get functions, variables, self. if not isinstance(obj, Variable): return (0, 0) else: return (1, obj.vtype) deps = sorted(self._deps, key=key) else: deps = self._deps for dep in deps: alldeps.extend(dep.dependencies(sort=sort)) alldeps.append(self) return alldeps
Increment the reference count for *dep*. If this is a new dependency, then connect to its *changed* event. def _add_dep(self, dep): """ Increment the reference count for *dep*. If this is a new dependency, then connect to its *changed* event. """ if dep in self._deps: self._deps[dep] += 1 else: self._deps[dep] = 1 dep._dependents[self] = None
Decrement the reference count for *dep*. If the reference count reaches 0, then the dependency is removed and its *changed* event is disconnected. def _remove_dep(self, dep): """ Decrement the reference count for *dep*. If the reference count reaches 0, then the dependency is removed and its *changed* event is disconnected. """ refcount = self._deps[dep] if refcount == 1: self._deps.pop(dep) dep._dependents.pop(self) else: self._deps[dep] -= 1
Called when a dependency's expression has changed. def _dep_changed(self, dep, code_changed=False, value_changed=False): """ Called when a dependency's expression has changed. """ self.changed(code_changed, value_changed)
Inform dependents that this shaderobject has changed. def changed(self, code_changed=False, value_changed=False): """Inform dependents that this shaderobject has changed. """ for d in self._dependents: d._dep_changed(self, code_changed=code_changed, value_changed=value_changed)
The great missing equivalence function: Guaranteed evaluation to a single bool value. def eq(a, b): """ The great missing equivalence function: Guaranteed evaluation to a single bool value. """ if a is b: return True if a is None or b is None: return True if a is None and b is None else False try: e = a == b except ValueError: return False except AttributeError: return False except Exception: print("a:", str(type(a)), str(a)) print("b:", str(type(b)), str(b)) raise t = type(e) if t is bool: return e elif t is bool_: return bool(e) elif isinstance(e, ndarray): try: # disaster: if a is empty and b is not, then e.all() is True if a.shape != b.shape: return False except Exception: return False if (hasattr(e, 'implements') and e.implements('MetaArray')): return e.asarray().all() else: return e.all() else: raise Exception("== operator returned type %s" % str(type(e)))
Zoom in (or out) at the given center Parameters ---------- factor : float or tuple Fraction by which the scene should be zoomed (e.g. a factor of 2 causes the scene to appear twice as large). center : tuple of 2-4 elements The center of the view. If not given or None, use the current center. def zoom(self, factor, center=None): """ Zoom in (or out) at the given center Parameters ---------- factor : float or tuple Fraction by which the scene should be zoomed (e.g. a factor of 2 causes the scene to appear twice as large). center : tuple of 2-4 elements The center of the view. If not given or None, use the current center. """ assert len(center) in (2, 3, 4) # Get scale factor, take scale ratio into account if np.isscalar(factor): scale = [factor, factor] else: if len(factor) != 2: raise TypeError("factor must be scalar or length-2 sequence.") scale = list(factor) if self.aspect is not None: scale[0] = scale[1] # Init some variables center = center if (center is not None) else self.center # Make a new object (copy), so that allocation will # trigger view_changed: rect = Rect(self.rect) # Get space from given center to edges left_space = center[0] - rect.left right_space = rect.right - center[0] bottom_space = center[1] - rect.bottom top_space = rect.top - center[1] # Scale these spaces rect.left = center[0] - left_space * scale[0] rect.right = center[0] + right_space * scale[0] rect.bottom = center[1] - bottom_space * scale[1] rect.top = center[1] + top_space * scale[1] self.rect = rect
Pan the view. Parameters ---------- *pan : length-2 sequence The distance to pan the view, in the coordinate system of the scene. def pan(self, *pan): """Pan the view. Parameters ---------- *pan : length-2 sequence The distance to pan the view, in the coordinate system of the scene. """ if len(pan) == 1: pan = pan[0] self.rect = self.rect + pan
The SubScene received a mouse event; update transform accordingly. Parameters ---------- event : instance of Event The event. def viewbox_mouse_event(self, event): """ The SubScene received a mouse event; update transform accordingly. Parameters ---------- event : instance of Event The event. """ if event.handled or not self.interactive: return # Scrolling BaseCamera.viewbox_mouse_event(self, event) if event.type == 'mouse_wheel': center = self._scene_transform.imap(event.pos) self.zoom((1 + self.zoom_factor) ** (-event.delta[1] * 30), center) event.handled = True elif event.type == 'mouse_move': if event.press_event is None: return modifiers = event.mouse_event.modifiers p1 = event.mouse_event.press_event.pos p2 = event.mouse_event.pos if 1 in event.buttons and not modifiers: # Translate p1 = np.array(event.last_event.pos)[:2] p2 = np.array(event.pos)[:2] p1s = self._transform.imap(p1) p2s = self._transform.imap(p2) self.pan(p1s-p2s) event.handled = True elif 2 in event.buttons and not modifiers: # Zoom p1c = np.array(event.last_event.pos)[:2] p2c = np.array(event.pos)[:2] scale = ((1 + self.zoom_factor) ** ((p1c-p2c) * np.array([1, -1]))) center = self._transform.imap(event.press_event.pos[:2]) self.zoom(scale, center) event.handled = True else: event.handled = False elif event.type == 'mouse_press': # accept the event if it is button 1 or 2. # This is required in order to receive future events event.handled = event.button in [1, 2] else: event.handled = False
Set the volume data. Parameters ---------- vol : ndarray The 3D volume. clim : tuple | None Colormap limits to use. None will use the min and max values. def set_data(self, vol, clim=None): """ Set the volume data. Parameters ---------- vol : ndarray The 3D volume. clim : tuple | None Colormap limits to use. None will use the min and max values. """ # Check volume if not isinstance(vol, np.ndarray): raise ValueError('Volume visual needs a numpy array.') if not ((vol.ndim == 3) or (vol.ndim == 4 and vol.shape[-1] <= 4)): raise ValueError('Volume visual needs a 3D image.') # Handle clim if clim is not None: clim = np.array(clim, float) if not (clim.ndim == 1 and clim.size == 2): raise ValueError('clim must be a 2-element array-like') self._clim = tuple(clim) if self._clim is None: self._clim = vol.min(), vol.max() # Apply clim vol = np.array(vol, dtype='float32', copy=False) if self._clim[1] == self._clim[0]: if self._clim[0] != 0.: vol *= 1.0 / self._clim[0] else: vol -= self._clim[0] vol /= self._clim[1] - self._clim[0] # Apply to texture self._tex.set_data(vol) # will be efficient if vol is same shape self.shared_program['u_shape'] = (vol.shape[2], vol.shape[1], vol.shape[0]) shape = vol.shape[:3] if self._vol_shape != shape: self._vol_shape = shape self._need_vertex_update = True self._vol_shape = shape # Get some stats self._kb_for_texture = np.prod(self._vol_shape) / 1024
Create and set positions and texture coords from the given shape We have six faces with 1 quad (2 triangles) each, resulting in 6*2*3 = 36 vertices in total. def _create_vertex_data(self): """ Create and set positions and texture coords from the given shape We have six faces with 1 quad (2 triangles) each, resulting in 6*2*3 = 36 vertices in total. """ shape = self._vol_shape # Get corner coordinates. The -0.5 offset is to center # pixels/voxels. This works correctly for anisotropic data. x0, x1 = -0.5, shape[2] - 0.5 y0, y1 = -0.5, shape[1] - 0.5 z0, z1 = -0.5, shape[0] - 0.5 pos = np.array([ [x0, y0, z0], [x1, y0, z0], [x0, y1, z0], [x1, y1, z0], [x0, y0, z1], [x1, y0, z1], [x0, y1, z1], [x1, y1, z1], ], dtype=np.float32) """ 6-------7 /| /| 4-------5 | | | | | | 2-----|-3 |/ |/ 0-------1 """ # Order is chosen such that normals face outward; front faces will be # culled. indices = np.array([2, 6, 0, 4, 5, 6, 7, 2, 3, 0, 1, 5, 3, 7], dtype=np.uint32) # Apply self._vertices.set_data(pos) self._index_buffer.set_data(indices)
Set the data Parameters ---------- pos : list, tuple or numpy array Bounds of the region along the axis. len(pos) must be >=2. color : list, tuple, or array The color to use when drawing the line. It must have a shape of (1, 4) for a single color region or (len(pos), 4) for a multicolor region. def set_data(self, pos=None, color=None): """Set the data Parameters ---------- pos : list, tuple or numpy array Bounds of the region along the axis. len(pos) must be >=2. color : list, tuple, or array The color to use when drawing the line. It must have a shape of (1, 4) for a single color region or (len(pos), 4) for a multicolor region. """ new_pos = self._pos new_color = self._color if pos is not None: num_elements = len(pos) pos = np.array(pos, dtype=np.float32) if pos.ndim != 1: raise ValueError('Expected 1D array') vertex = np.empty((num_elements * 2, 2), dtype=np.float32) if self._is_vertical: vertex[:, 0] = np.repeat(pos, 2) vertex[:, 1] = np.tile([-1, 1], num_elements) else: vertex[:, 1] = np.repeat(pos, 2) vertex[:, 0] = np.tile([1, -1], num_elements) new_pos = vertex self._changed['pos'] = True if color is not None: color = np.array(color, dtype=np.float32) num_elements = new_pos.shape[0] / 2 if color.ndim == 2: if color.shape[0] != num_elements: raise ValueError('Expected a color for each pos') if color.shape[1] != 4: raise ValueError('Each color must be a RGBA array') color = np.repeat(color, 2, axis=0).astype(np.float32) elif color.ndim == 1: if color.shape[0] != 4: raise ValueError('Each color must be a RGBA array') color = np.repeat([color], new_pos.shape[0], axis=0) color = color.astype(np.float32) else: raise ValueError('Expected a numpy array of shape ' '(%d, 4) or (1, 4)' % num_elements) new_color = color self._changed['color'] = True # Ensure pos and color have the same size if new_pos.shape[0] != new_color.shape[0]: raise ValueError('pos and color does must have the same size') self._color = new_color self._pos = new_pos
This method is called immediately before each draw. The *view* argument indicates which view is about to be drawn. def _prepare_draw(self, view=None): """This method is called immediately before each draw. The *view* argument indicates which view is about to be drawn. """ if self._changed['pos']: self.pos_buf.set_data(self._pos) self._changed['pos'] = False if self._changed['color']: self.color_buf.set_data(self._color) self._program.vert['color'] = self.color_buf self._changed['color'] = False return True
def refresh_cache(self, cat_id):
    """Repopulate the cache entry for one category.

    Fetches the 25 most recent posts for *cat_id*, stores them in the
    cache, and records the refresh timestamp.
    """
    posts = most_recent_25_posts_by_category(cat_id)
    stamp = datetime.now()
    self.cache[cat_id] = posts
    self.last_refresh[cat_id] = stamp
    print('Cache refresh at...', str(stamp))
def _merge_intervals(self, min_depth):
    """
    Merge overlapping intervals.

    This method is called only once in the constructor.

    Parameters
    ----------
    min_depth : int or None
        When given, merged intervals are re-split into chunks aligned on
        depth-``min_depth`` cell boundaries (29 matches HPY_MAX_ORDER
        used elsewhere in this file).
    """
    def add_interval(ret, start, stop):
        # Append [start, stop) to ``ret``, chunked on min_depth-aligned
        # boundaries when min_depth is requested.
        if min_depth is not None:
            # Number of index bits below order ``min_depth``.
            shift = 2 * (29 - min_depth)
            mask = (int(1) << shift) - 1
            if stop - start < mask:
                # Smaller than one aligned chunk: keep as-is.
                ret.append((start, stop))
            else:
                ofs = start & mask
                st = start
                if ofs > 0:
                    # Leading fragment up to the next aligned boundary.
                    st = (start - ofs) + (mask + 1)
                    ret.append((start, st))
                # Full aligned chunks.
                while st + mask + 1 < stop:
                    ret.append((st, st + mask + 1))
                    st = st + mask + 1
                # Trailing fragment.
                ret.append((st, stop))
        else:
            ret.append((start, stop))

    ret = []
    start = stop = None
    # Use numpy sort method
    # NOTE(review): axis=0 sorts the start and stop columns
    # independently — this only preserves interval pairing if the input
    # intervals never interleave; confirm against callers.
    self._intervals.sort(axis=0)
    for itv in self._intervals:
        if start is None:
            start, stop = itv
            continue
        # gap between intervals
        if itv[0] > stop:
            add_interval(ret, start, stop)
            start, stop = itv
        else:
            # merge intervals
            if itv[1] > stop:
                stop = itv[1]
    if start is not None and stop is not None:
        add_interval(ret, start, stop)
    self._intervals = np.asarray(ret)
def union(self, another_is):
    """
    Return the union between self and ``another_is``.

    Parameters
    ----------
    another_is : `IntervalSet`
        an IntervalSet object.

    Returns
    -------
    interval : `IntervalSet`
        the union of self with ``another_is``.
    """
    res = IntervalSet()
    # Trivial cases first: union with an empty set is the other operand.
    if another_is.empty():
        res._intervals = self._intervals
    elif self.empty():
        res._intervals = another_is._intervals
    else:
        # merge() guarantees a consistent (non-overlapping) result.
        union_op = lambda in_a, in_b: in_a or in_b
        res._intervals = IntervalSet.merge(self._intervals,
                                           another_is._intervals,
                                           union_op)
    return res
def to_nuniq_interval_set(cls, nested_is):
    """
    Convert an IntervalSet using the NESTED numbering scheme to an
    IntervalSet containing UNIQ numbers for HEALPix cells.

    Parameters
    ----------
    nested_is : `IntervalSet`
        IntervalSet object storing HEALPix cells as
        [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.

    Returns
    -------
    interval : `IntervalSet`
        IntervalSet object storing HEALPix cells as
        [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals.
    """
    r2 = nested_is.copy()
    res = []
    if r2.empty():
        return IntervalSet()
    # Peel off cells order by order: at each order, extract the portions
    # of the remaining intervals that are aligned on that order's cell
    # boundaries, record their UNIQ-encoded range, and subtract them.
    order = 0
    while not r2.empty():
        # Number of pixel-index bits below the current order.
        shift = int(2 * (IntervalSet.HPY_MAX_ORDER - order))
        ofs = (int(1) << shift) - 1
        # UNIQ offset for this order: 4 * 4^order.
        ofs2 = int(1) << (2 * order + 2)
        r4 = []
        for iv in r2._intervals:
            # Round start up / stop down to cell boundaries at this order.
            a = (int(iv[0]) + ofs) >> shift
            b = int(iv[1]) >> shift
            c = a << shift
            d = b << shift
            if d > c:
                # [c, d) is the aligned part handled at this order.
                r4.append((c, d))
                res.append((a + ofs2, b + ofs2))
        if len(r4) > 0:
            # Remove the handled part; the rest is processed at deeper
            # orders.
            r4_is = IntervalSet(np.asarray(r4))
            r2 = r2.difference(r4_is)
        order += 1
    return IntervalSet(np.asarray(res))
def from_nuniq_interval_set(cls, nuniq_is):
    """
    Convert an IntervalSet containing NUNIQ intervals to an IntervalSet
    representing HEALPix cells following the NESTED numbering scheme.

    Parameters
    ----------
    nuniq_is : `IntervalSet`
        IntervalSet object storing HEALPix cells as
        [ipix + 4*4^(order), ipix+1 + 4*4^(order)[ intervals.

    Returns
    -------
    interval : `IntervalSet`
        IntervalSet object storing HEALPix cells as
        [ipix*4^(29-order), (ipix+1)*4^(29-order)[ intervals.
    """
    nested_is = IntervalSet()
    # Appending a list is faster than appending a numpy array
    # For these algorithms we append a list and create the interval set
    # from the finished list
    rtmp = []
    last_order = 0
    intervals = nuniq_is._intervals
    diff_order = IntervalSet.HPY_MAX_ORDER
    shift_order = 2 * diff_order
    for interval in intervals:
        # Walk every UNIQ number in the interval individually.
        for j in range(interval[0], interval[1]):
            order, i_pix = uniq2orderipix(j)
            if order != last_order:
                # Order changed: flush the accumulated cells and
                # recompute the shift for the new order.
                nested_is = nested_is.union(IntervalSet(np.asarray(rtmp)))
                rtmp = []
                last_order = order
                diff_order = IntervalSet.HPY_MAX_ORDER - order
                shift_order = 2 * diff_order
            # Expand the cell to its NESTED pixel range at max order.
            rtmp.append((i_pix << shift_order, (i_pix + 1) << shift_order))
    # Flush whatever remains after the last interval.
    nested_is = nested_is.union(IntervalSet(np.asarray(rtmp)))
    return nested_is
def merge(a_intervals, b_intervals, op):
    """
    Merge two lists of intervals according to the boolean function op

    ``a_intervals`` and ``b_intervals`` need to be sorted and consistent
    (no overlapping intervals). This operation keeps the resulting
    interval set consistent.

    Parameters
    ----------
    a_intervals : `~numpy.ndarray`
        A sorted merged list of intervals represented as a N x 2 numpy array
    b_intervals : `~numpy.ndarray`
        A sorted merged list of intervals represented as a N x 2 numpy array
    op : `function`
        Lambda function taking two params and returning the result of the
        operation between these two params.
        Exemple : lambda in_a, in_b: in_a and in_b describes the
        intersection of ``a_intervals`` and ``b_intervals`` whereas
        lambda in_a, in_b: in_a or in_b describes the union of
        ``a_intervals`` and ``b_intervals``.

    Returns
    -------
    array : `numpy.ndarray`
        a N x 2 numpy containing intervals resulting from the op between
        ``a_intervals`` and ``b_intervals``.
    """
    # Sweep-line over the combined endpoint sequences: every start/stop
    # is an event, and ``op`` decides at each event whether the result
    # is currently "inside" an interval.
    a_endpoints = a_intervals.flatten().tolist()
    b_endpoints = b_intervals.flatten().tolist()

    # Sentinel greater than every endpoint, so both lists can be scanned
    # to exhaustion without bounds checks.
    sentinel = max(a_endpoints[-1], b_endpoints[-1]) + 1
    a_endpoints += [sentinel]
    b_endpoints += [sentinel]

    a_index = 0
    b_index = 0
    res = []

    scan = min(a_endpoints[0], b_endpoints[0])
    while scan < sentinel:
        # Even index => next endpoint is a start (currently outside);
        # odd => next is a stop (currently inside). The xor with the
        # comparison tells whether ``scan`` lies inside a / b.
        in_a = not ((scan < a_endpoints[a_index]) ^ (a_index % 2))
        in_b = not ((scan < b_endpoints[b_index]) ^ (b_index % 2))
        in_res = op(in_a, in_b)

        # len(res) parity tracks whether the result interval is open;
        # record ``scan`` exactly when the open/closed state flips.
        if in_res ^ (len(res) % 2):
            res += [scan]
        if scan == a_endpoints[a_index]:
            a_index += 1
        if scan == b_endpoints[b_index]:
            b_index += 1
        scan = min(a_endpoints[a_index], b_endpoints[b_index])

    # Flat [start, stop, start, stop, ...] list back to N x 2.
    return np.asarray(res).reshape((-1, 2))
def delete(self):
    """Delete the object from GPU memory.

    The GPU object is deleted automatically when this gloo object is
    about to be garbage-collected, but this method allows deleting it
    explicitly and earlier.
    """
    # Deleting twice could free a GL id that has since been recycled by
    # another object, so only objects still holding a GLIR queue may be
    # deleted (e.g. DataBufferView has no _glir attribute at all).
    if hasattr(self, '_glir'):
        queue = self._glir
        # Final command for this object.
        queue.command('DELETE', self._id)
        # Let the master GLIR queue know this queue is no longer used.
        queue._deletable = True
        # Detach the queue so a second delete() becomes a no-op.
        del self._glir
def set_data(self, image):
    """Set the data

    Parameters
    ----------
    image : array-like
        The image data.
    """
    new_data = np.asarray(image)
    previous = self._data
    # Vertex geometry depends only on the data shape, so it needs
    # rebuilding only on the first assignment or when the shape changes.
    if previous is None or previous.shape != new_data.shape:
        self._need_vertex_update = True
    self._data = new_data
    # The texture contents always need re-uploading.
    self._need_texture_upload = True
def _build_interpolation(self):
    """Rebuild the _data_lookup_fn using different interpolations within
    the shader.
    """
    interpolation = self._interpolation
    # Pick the shader snippet implementing the requested interpolation
    # and plug it into the fragment shader's data lookup hook.
    self._data_lookup_fn = self._interpolation_fun[interpolation]
    self.shared_program.frag['get_data'] = self._data_lookup_fn

    # only 'bilinear' uses 'linear' texture interpolation
    if interpolation == 'bilinear':
        texture_interpolation = 'linear'
    else:
        # 'nearest' (and also 'bilinear') doesn't use spatial_filters.frag
        # so u_kernel and shape setting is skipped
        texture_interpolation = 'nearest'
        if interpolation != 'nearest':
            self.shared_program['u_kernel'] = self._kerneltex
            # Texture shape passed as (width, height), i.e. reversed
            # from numpy's (rows, cols).
            self._data_lookup_fn['shape'] = self._data.shape[:2][::-1]

    # Only touch the texture when its interpolation actually changes.
    if self._texture.interpolation != texture_interpolation:
        self._texture.interpolation = texture_interpolation
    self._data_lookup_fn['texture'] = self._texture
    self._need_interpolation_update = False
def _build_vertex_data(self):
    """Rebuild the vertex buffers used for rendering the image when using
    the subdivide method.
    """
    grid = self._grid
    # Size of one grid cell in texture coordinates ([0, 1] range).
    # grid is indexed (rows, cols) here.
    w = 1.0 / grid[1]
    h = 1.0 / grid[0]

    # One unit quad = two triangles = 6 vertices (z kept at 0).
    quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
                     [0, 0, 0], [w, h, 0], [0, h, 0]],
                    dtype=np.float32)
    # One copy of the quad per grid cell.
    quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
    quads[:] = quad

    # Per-cell (x, y) offsets that translate each quad to its cell.
    mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
    mgrid = mgrid[:, :, np.newaxis, :]
    mgrid[..., 0] *= w
    mgrid[..., 1] *= h

    quads[..., :2] += mgrid
    tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
    tex_coords = np.ascontiguousarray(tex_coords[:, :2])
    # Vertex positions are the texture coordinates scaled to image size.
    vertices = tex_coords * self.size
    self._subdiv_position.set_data(vertices.astype('float32'))
    self._subdiv_texcoord.set_data(tex_coords.astype('float32'))
Decide which method to use for *view* and configure it accordingly. def _update_method(self, view): """Decide which method to use for *view* and configure it accordingly. """ method = self._method if method == 'auto': if view.transforms.get_transform().Linear: method = 'subdivide' else: method = 'impostor' view._method_used = method if method == 'subdivide': view.view_program['method'] = 0 view.view_program['a_position'] = self._subdiv_position view.view_program['a_texcoord'] = self._subdiv_texcoord elif method == 'impostor': view.view_program['method'] = 1 view.view_program['a_position'] = self._impostor_coords view.view_program['a_texcoord'] = self._impostor_coords else: raise ValueError("Unknown image draw method '%s'" % method) self.shared_program['image_size'] = self.size view._need_method_update = False self._prepare_transforms(view)
def append(self, P, closed=False, itemsize=None, **kwargs):
    """Append a new set of vertices to the collection.

    For kwargs argument, n is the number of vertices (local) or the
    number of item (shared)

    Parameters
    ----------
    P : np.array
        Vertices positions of the path(s) to be added
    closed : bool
        Whether path(s) is/are closed
    itemsize : int or None
        Size of an individual path
    caps : list, array or 2-tuple
        Path start /end cap
    color : list, array or 4-tuple
        Path color
    linewidth : list, array or float
        Path linewidth
    antialias : list, array or float
        Path antialias area
    """
    itemsize = itemsize or len(P)
    # Integer division: the Python-2 '/' produced a float, which breaks
    # reshape()/tile() under Python 3.
    itemcount = len(P) // itemsize
    P = P.reshape(itemcount, itemsize, 3)

    if closed:
        V = np.empty((itemcount, itemsize + 3), dtype=self.vtype)
        # Apply default values on vertices
        for name in self.vtype.names:
            if name not in ['collection_index', 'prev', 'curr', 'next']:
                V[name][1:-2] = kwargs.get(name, self._defaults[name])
        # prev/curr/next hold the same points shifted by one vertex,
        # wrapping around for closed paths.
        V['prev'][:, 2:-1] = P
        V['prev'][:, 1] = V['prev'][:, -2]
        V['curr'][:, 1:-2] = P
        V['curr'][:, -2] = V['curr'][:, 1]
        V['next'][:, 0:-3] = P
        V['next'][:, -3] = V['next'][:, 0]
        V['next'][:, -2] = V['next'][:, 1]
    else:
        V = np.empty((itemcount, itemsize + 2), dtype=self.vtype)
        # Apply default values on vertices
        for name in self.vtype.names:
            if name not in ['collection_index', 'prev', 'curr', 'next']:
                V[name][1:-1] = kwargs.get(name, self._defaults[name])
        # prev/curr/next hold the same points shifted by one vertex,
        # with the end vertices repeated for open paths.
        V['prev'][:, 2:] = P
        V['prev'][:, 1] = V['prev'][:, 2]
        V['curr'][:, 1:-1] = P
        V['next'][:, :-2] = P
        V['next'][:, -2] = V['next'][:, -3]

    # Duplicate the first/last vertex of each item (sentinels).
    V[:, 0] = V[:, 1]
    V[:, -1] = V[:, -2]
    V = V.ravel()
    # Each vertex is emitted twice, once per side of the thick line;
    # 'id' distinguishes the two sides.
    V = np.repeat(V, 2, axis=0)
    V['id'] = np.tile([1, -1], len(V) // 2)
    if closed:
        V = V.reshape(itemcount, 2 * (itemsize + 3))
    else:
        V = V.reshape(itemcount, 2 * (itemsize + 2))
    # NOTE(review): |id| == 2 appears to flag the sentinel (cap)
    # vertices for the shader — confirm against the shader code.
    V["id"][:, :2] = 2, -2
    V["id"][:, -2:] = 2, -2
    V = V.ravel()

    # Uniforms
    if self.utype:
        U = np.zeros(itemcount, dtype=self.utype)
        for name in self.utype.names:
            if name not in ["__unused__"]:
                U[name] = kwargs.get(name, self._defaults[name])
    else:
        U = None

    Collection.append(self, vertices=V, uniforms=U,
                      itemsize=2 * (itemsize + 2 + closed))
def bake(self, P, key='curr', closed=False, itemsize=None):
    """
    Given a path P, return the baked vertices as they should be copied in
    the collection if the path has already been appended.

    Example:
    --------
    paths.append(P)
    P *= 2
    paths['prev'][0] = bake(P,'prev')
    paths['curr'][0] = bake(P,'curr')
    paths['next'][0] = bake(P,'next')
    """
    n = itemsize or len(P)
    # Build the index map matching the vertex layout produced by
    # append(): n+3 slots for closed paths, n+2 for open ones.
    size = n + 3 if closed else n + 2
    idx = np.arange(size)
    if closed:
        if key == 'prev':
            idx = idx - 2
            idx[0] = idx[1] = idx[-1] = n - 1
        elif key == 'next':
            idx[0], idx[-3], idx[-2], idx[-1] = 1, 0, 1, 1
        else:
            idx = idx - 1
            idx[0] = idx[-1] = idx[n + 1] = 0
    else:
        if key == 'prev':
            idx = idx - 2
            idx[0] = idx[1] = 0
            idx[-1] = n - 2
        elif key == 'next':
            idx[0] = 1
            idx[-1] = idx[-2] = n - 1
        else:
            idx = idx - 1
            idx[0] = 0
            idx[-1] = n - 1
    # Each slot is doubled (one vertex per side of the thick line).
    return P[np.repeat(idx, 2)]
def draw(self, mode="triangle_strip"):
    """Draw collection.

    Parameters
    ----------
    mode : str
        GL primitive mode passed to the base-class draw.
    """
    # Depth writes are disabled around the draw and restored afterwards
    # — presumably so translucent paths don't occlude each other via the
    # depth buffer; confirm against the rendering setup.
    gl.glDepthMask(gl.GL_FALSE)
    Collection.draw(self, mode)
    gl.glDepthMask(gl.GL_TRUE)
def _stop_timers(canvas):
    """Stop all timers in a canvas."""
    for name in dir(canvas):
        try:
            candidate = getattr(canvas, name)
        except NotImplementedError:
            # Some backends raise for unimplemented properties
            # (e.g. canvas.position); treat those as "no timer here".
            candidate = None
        if isinstance(candidate, Timer):
            candidate.stop()
Print stack trace from call that didn't originate from here def _last_stack_str(): """Print stack trace from call that didn't originate from here""" stack = extract_stack() for s in stack[::-1]: if op.join('vispy', 'gloo', 'buffer.py') not in __file__: break return format_list([s])[0]
def set_subdata(self, data, offset=0, copy=False):
    """Set a sub-region of the buffer (deferred operation).

    Parameters
    ----------
    data : ndarray
        Data to be uploaded
    offset : int
        Offset in buffer where to start copying data (in bytes)
    copy : bool
        Since the operation is deferred, data may change before data is
        actually uploaded to GPU memory. Asking explicitly for a copy
        will prevent this behavior.
    """
    data = np.array(data, copy=copy)
    nbytes = data.nbytes

    if offset < 0:
        raise ValueError("Offset must be positive")
    if offset + nbytes > self._nbytes:
        raise ValueError("Data does not fit into buffer")

    # A full-buffer write makes any pending partial uploads redundant;
    # emitting SIZE first lets the GLIR queue discard them.
    if nbytes == self._nbytes and offset == 0:
        self._glir.command('SIZE', self._id, nbytes)
    self._glir.command('DATA', self._id, offset, data)
def set_data(self, data, copy=False):
    """Set data in the buffer (deferred operation).

    This completely resets the size and contents of the buffer.

    Parameters
    ----------
    data : ndarray
        Data to be uploaded
    copy : bool
        Since the operation is deferred, data may change before data is
        actually uploaded to GPU memory. Asking explicitly for a copy
        will prevent this behavior.
    """
    data = np.array(data, copy=copy)
    nbytes = data.nbytes

    if nbytes != self._nbytes:
        self.resize_bytes(nbytes)
    else:
        # Same size: emit SIZE anyway so any pending sub-data uploads
        # are discarded before the full upload.
        self._glir.command('SIZE', self._id, nbytes)

    # Only send data if there *is* data.
    if nbytes:
        self._glir.command('DATA', self._id, 0, data)
def resize_bytes(self, size):
    """Resize this buffer (deferred operation).

    Parameters
    ----------
    size : int
        New buffer size in bytes.
    """
    self._nbytes = size
    self._glir.command('SIZE', self._id, size)
    # Any view into this buffer is now stale: invalidate the live ones
    # (views are held as weak references) and drop the list.
    for view_ref in self._views:
        view = view_ref()
        if view is not None:
            view._valid = False
    self._views = []
def set_subdata(self, data, offset=0, copy=False, **kwargs):
    """Set a sub-region of the buffer (deferred operation).

    Parameters
    ----------
    data : ndarray
        Data to be uploaded
    offset : int
        Offset in buffer where to start copying data (in bytes)
    copy : bool
        Since the operation is deferred, data may change before data is
        actually uploaded to GPU memory. Asking explicitly for a copy
        will prevent this behavior.
    **kwargs : dict
        Additional keyword arguments (forwarded to _prepare_data).
    """
    prepared = self._prepare_data(data, **kwargs)
    # The caller gives an element offset; the base class wants bytes.
    byte_offset = offset * self.itemsize
    Buffer.set_subdata(self, data=prepared, offset=byte_offset, copy=copy)
def set_data(self, data, copy=False, **kwargs):
    """Set data (deferred operation).

    Parameters
    ----------
    data : ndarray
        Data to be uploaded
    copy : bool
        Since the operation is deferred, data may change before data is
        actually uploaded to GPU memory. Asking explicitly for a copy
        will prevent this behavior.
    **kwargs : dict
        Additional arguments (forwarded to _prepare_data).
    """
    prepared = self._prepare_data(data, **kwargs)
    # Cache the layout of the incoming array before handing it off.
    self._dtype = prepared.dtype
    self._stride = prepared.strides[-1]
    self._itemsize = self._dtype.itemsize
    Buffer.set_data(self, data=prepared, copy=copy)
def glsl_type(self):
    """GLSL declaration strings required for a variable to hold this
    data.

    Returns ('attribute', <glsl type>) or None when no dtype is set.
    """
    if self.dtype is None:
        return None
    # The first (and only relevant) field of the structured dtype
    # determines the number of components per vertex.
    field_shape = self.dtype[0].shape
    n_components = field_shape[0] if field_shape else 1
    if n_components > 1:
        glsl = 'vec%d' % n_components
    elif 'f' in self.dtype[0].base.kind:
        glsl = 'float'
    else:
        glsl = 'int'
    return 'attribute', glsl
def resize_bytes(self, size):
    """Resize the buffer (in-place, deferred operation).

    Parameters
    ----------
    size : integer
        New buffer size in bytes

    Notes
    -----
    This clears any pending operations.
    """
    Buffer.resize_bytes(self, size)
    # Keep the element count consistent with the new byte size.
    self._size = size // self.itemsize
def compile(self, pretty=True):
    """
    Compile all code and return a dict {name: code} where the keys are
    determined by the keyword arguments passed to __init__().

    Parameters
    ----------
    pretty : bool
        If True, use a slower method to mangle object names. This
        produces GLSL that is more readable. If False, then the output
        is mostly unreadable GLSL, but is about 10x faster to compile.

    Raises
    ------
    RuntimeError
        If a dependency declares a GLSL ``#version`` other than 120.
    """
    # Authoritative mapping of {obj: name}
    self._object_names = {}

    #
    # 1. collect list of dependencies for each shader
    #
    # maps {shader_name: [deps]}
    self._shader_deps = {}

    for shader_name, shader in self.shaders.items():
        this_shader_deps = []
        self._shader_deps[shader_name] = this_shader_deps
        dep_set = set()

        for dep in shader.dependencies(sort=True):
            # visit each object no more than once per shader
            if dep.name is None or dep in dep_set:
                continue
            this_shader_deps.append(dep)
            dep_set.add(dep)

    #
    # 2. Assign names to all objects.
    #
    if pretty:
        self._rename_objects_pretty()
    else:
        self._rename_objects_fast()

    #
    # 3. Now we have a complete namespace; concatenate all definitions
    # together in topological order.
    #
    compiled = {}
    obj_names = self._object_names

    for shader_name, shader in self.shaders.items():
        code = []
        for dep in self._shader_deps[shader_name]:
            dep_code = dep.definition(obj_names)
            if dep_code is not None:
                # strip out version pragma if present; the final shader
                # carries at most one version declaration
                regex = r'#version (\d+)'
                m = re.search(regex, dep_code)
                if m is not None:
                    # check requested version
                    if m.group(1) != '120':
                        raise RuntimeError("Currently only GLSL #version "
                                           "120 is supported.")
                    dep_code = re.sub(regex, '', dep_code)
                code.append(dep_code)
        compiled[shader_name] = '\n'.join(code)

    # Cache the result on the instance as well as returning it.
    self.code = compiled
    return compiled
Rename all objects quickly to guaranteed-unique names using the id() of each object. This produces mostly unreadable GLSL, but is about 10x faster to compile. def _rename_objects_fast(self): """ Rename all objects quickly to guaranteed-unique names using the id() of each object. This produces mostly unreadable GLSL, but is about 10x faster to compile. """ for shader_name, deps in self._shader_deps.items(): for dep in deps: name = dep.name if name != 'main': ext = '_%x' % id(dep) name = name[:32-len(ext)] + ext self._object_names[dep] = name