repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bokeh/bokeh | src/bokeh/plotting/graph.py | 1 | 5921 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..models.graphs import StaticLayoutProvider
from ..models.renderers import GraphRenderer
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'from_networkx'
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def from_networkx(graph, layout_function, **kwargs):
    '''
    Generate a ``GraphRenderer`` from a ``networkx.Graph`` object and networkx
    layout function. Any keyword arguments will be passed to the
    layout function.
    Only two dimensional layouts are supported.
    Args:
        graph (networkx.Graph) : a networkx graph to render
        layout_function (function or dict) : a networkx layout function or mapping of node keys to positions.
            The position is a two element sequence containing the x and y coordinate.
    Returns:
        instance (GraphRenderer)
    .. note::
        Node and edge attributes may be lists or tuples. However, a given
        attribute must either have *all* lists or tuple values, or *all*
        scalar values, for nodes or edges it is defined on.
    .. warning::
        Node attributes labeled 'index' and edge attributes labeled 'start' or 'end' are ignored.
        If you want to convert these attributes, please re-label them to other names.
    Raises:
        ValueError
    '''
    # Handles nx 1.x vs 2.x data structure change
    # Convert node attributes: collect the union of attribute keys across all
    # nodes; nodes missing an attribute contribute None for that column.
    node_dict = dict()
    node_attr_keys = [attr_key for node in list(graph.nodes(data=True))
                      for attr_key in node[1].keys()]
    node_attr_keys = list(set(node_attr_keys))
    for attr_key in node_attr_keys:
        values = [node_attr[attr_key] if attr_key in node_attr.keys() else None
                  for _, node_attr in graph.nodes(data=True)]
        # Sequence-valued attributes become list columns (see docstring note).
        values = _handle_sublists(values)
        node_dict[attr_key] = values
    if 'index' in node_attr_keys:
        from warnings import warn
        warn("Converting node attributes labeled 'index' are skipped. "
             "If you want to convert these attributes, please re-label with other names.")
    # 'index' is reserved: it always holds the node keys themselves.
    node_dict['index'] = list(graph.nodes())
    # Convert edge attributes (same union-of-keys scheme as for nodes).
    edge_dict = dict()
    edge_attr_keys = [attr_key for edge in graph.edges(data=True)
                      for attr_key in edge[2].keys()]
    edge_attr_keys = list(set(edge_attr_keys))
    for attr_key in edge_attr_keys:
        values = [edge_attr[attr_key] if attr_key in edge_attr.keys() else None
                  for _, _, edge_attr in graph.edges(data=True)]
        values = _handle_sublists(values)
        edge_dict[attr_key] = values
    if 'start' in edge_attr_keys or 'end' in edge_attr_keys:
        from warnings import warn
        warn("Converting edge attributes labeled 'start' or 'end' are skipped. "
             "If you want to convert these attributes, please re-label them with other names.")
    # 'start'/'end' are reserved: they hold the edge endpoints.
    edge_dict['start'] = [x[0] for x in graph.edges()]
    edge_dict['end'] = [x[1] for x in graph.edges()]
    graph_renderer = GraphRenderer()
    graph_renderer.node_renderer.data_source.data = node_dict
    graph_renderer.edge_renderer.data_source.data = edge_dict
    if callable(layout_function):
        graph_layout = layout_function(graph, **kwargs)
    else:
        graph_layout = layout_function
        # BUG FIX: the key-consistency check must only run for mapping
        # layouts; a callable layout function has no .keys() and previously
        # raised AttributeError here.
        node_keys = graph_renderer.node_renderer.data_source.data['index']
        if set(node_keys) != set(layout_function.keys()):
            from warnings import warn
            warn("Node keys in 'layout_function' don't match node keys in the graph. "
                 "These nodes may not be displayed correctly.")
    graph_renderer.layout_provider = StaticLayoutProvider(graph_layout=graph_layout)
    return graph_renderer
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _handle_sublists(values):
# if any of the items is non-scalar, they all must be
if any(isinstance(x, (list, tuple)) for x in values):
if not all(isinstance(x, (list, tuple)) for x in values if x is not None):
raise ValueError("Can't mix scalar and non-scalar values for graph attributes")
return [[] if x is None else list(x) for x in values]
return values
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | af86ea99aa84c0c33313522792753909 | 39.278912 | 113 | 0.485222 | 5.148696 | false | false | false | false |
bokeh/bokeh | src/bokeh/core/property/serialized.py | 1 | 2536 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Provide ``NotSerialized`` property. """
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, TypeVar
# Bokeh imports
from ._sphinx import property_link, register_type_link, type_link
from .bases import (
Init,
Property,
SingleParameterizedProperty,
TypeOrInst,
)
from .singletons import Intrinsic
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"NotSerialized",
)
T = TypeVar("T")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class NotSerialized(SingleParameterizedProperty[T]):
    """
    A property whose state won't be synced with the browser.
    """
    # Excluded from property serialization, so values never reach BokehJS.
    _serialized = False
    def __init__(self, type_param: TypeOrInst[Property[T]], *,
            default: Init[T] = Intrinsic, help: str | None = None) -> None:
        """ Wrap ``type_param`` so its values are kept server-side only. """
        super().__init__(type_param, default=default, help=help)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@register_type_link(NotSerialized)
def _sphinx_type_link(obj: SingleParameterizedProperty[Any]) -> str:
    """ Render ``NotSerialized(<inner type>)`` links for the Sphinx docs. """
    wrapped = type_link(obj.type_param)
    return f"{property_link(obj)}({wrapped})"
| bsd-3-clause | 21d68f8e9ae222c866591fc00c8fdcf1 | 33.739726 | 78 | 0.344637 | 6.40404 | false | false | false | false |
joerick/pyinstrument | pyinstrument/frame_info.py | 1 | 1038 | from typing import List, Tuple
# pyright: strict
IDENTIFIER_SEP = "\x00"
ATTRIBUTES_SEP = "\x01"
ATTRIBUTE_MARKER_CLASS_NAME = "c"
ATTRIBUTE_MARKER_LINE_NUMBER = "l"
ATTRIBUTE_MARKER_TRACEBACKHIDE = "h"
def parse_frame_info(frame_info: str) -> Tuple[str, List[str]]:
    """
    Split a frame_info string into ``(identifier, attributes)``.

    ``identifier`` uniquely identifies the code object (e.g. a function or
    method); ``attributes`` holds the invocation-specific attribute strings
    captured at profile time (empty when none were recorded).
    """
    head, _, tail = frame_info.partition(ATTRIBUTES_SEP)
    if tail:
        return head, tail.split(ATTRIBUTES_SEP)
    return head, []
def frame_info_get_identifier(frame_info: str) -> str:
    """
    Equivalent to ``parse_frame_info(frame_info)[0]``, but faster.
    """
    # The identifier is everything before the first attribute separator.
    sep_at = frame_info.find(ATTRIBUTES_SEP)
    return frame_info if sep_at == -1 else frame_info[:sep_at]
| bsd-3-clause | 0627a10444fccc1ffa7f28fc9ba19438 | 24.95 | 76 | 0.675337 | 3.616725 | false | false | false | false |
airspeed-velocity/asv | asv/benchmarks.py | 1 | 11558 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
import re
import tempfile
import itertools
from .console import log
from . import util, runner
from .repo import NoSuchNameError
class Benchmarks(dict):
    """
    Manages and runs the set of benchmarks in the project.
    Maps benchmark name -> benchmark metadata dict for the *selected*
    benchmarks; the full discovered set is kept in ``_all_benchmarks``.
    """
    # Version stamp written to (and required from) the cached benchmarks.json.
    api_version = 2
    def __init__(self, conf, benchmarks, regex=None):
        """
        Initialize a list of benchmarks.
        Parameters
        ----------
        conf : Config object
            The project's configuration
        benchmarks : list
            Benchmarks as from Benchmarks._disc_benchmarks
            or loaded from a file.
        regex : str or list of str, optional
            `regex` is a list of regular expressions matching the
            benchmarks to run. If none are provided, all benchmarks
            are run.
            For parameterized benchmarks, the regex match against
            `funcname(param0, param1, ...)` to include the parameter
            combination in regex filtering.
        """
        self._conf = conf
        self._benchmark_dir = conf.benchmark_dir
        if not regex:
            regex = []
        if isinstance(regex, str):
            regex = [regex]
        self._all_benchmarks = {}
        self._benchmark_selection = {}
        for benchmark in benchmarks:
            self._all_benchmarks[benchmark['name']] = benchmark
            if benchmark['params']:
                # Parameterized benchmark: match the regexes against the
                # expanded "name(param0, param1, ...)" form and remember
                # which parameter combinations (by index) were selected.
                self._benchmark_selection[benchmark['name']] = []
                for idx, param_set in enumerate(
                        itertools.product(*benchmark['params'])):
                    name = '%s(%s)' % (
                        benchmark['name'],
                        ', '.join(param_set))
                    if not regex or any(re.search(reg, name) for reg in regex):
                        self[benchmark['name']] = benchmark
                        self._benchmark_selection[benchmark['name']].append(idx)
            else:
                # Non-parameterized: None means "run as-is", no selection.
                self._benchmark_selection[benchmark['name']] = None
                if not regex or any(re.search(reg, benchmark['name']) for reg in regex):
                    self[benchmark['name']] = benchmark
    @property
    def benchmark_selection(self):
        """
        Active sets of parameterized benchmarks.
        """
        return self._benchmark_selection
    @property
    def benchmark_dir(self):
        """
        Benchmark directory.
        """
        return self._benchmark_dir
    def filter_out(self, skip):
        """
        Return a new Benchmarks object, with some benchmarks filtered out.
        """
        # Bypass __init__ (via __new__): reuse the already-built metadata
        # instead of re-running regex matching/param expansion.
        benchmarks = super(Benchmarks, self).__new__(self.__class__)
        benchmarks._conf = self._conf
        benchmarks._benchmark_dir = self._benchmark_dir
        benchmarks._all_benchmarks = self._all_benchmarks
        selected_idx = {}
        for name, benchmark in self.items():
            if name not in skip:
                benchmarks[name] = benchmark
                if name in self._benchmark_selection:
                    selected_idx[name] = self._benchmark_selection[name]
        benchmarks._benchmark_selection = selected_idx
        return benchmarks
    @classmethod
    def discover(cls, conf, repo, environments, commit_hash, regex=None,
                 check=False):
        """
        Discover benchmarks in the given `benchmark_dir`.
        Parameters
        ----------
        conf : Config object
            The project's configuration
        repo : Repo object
            The project's repository
        environments : list of Environment
            List of environments available for benchmark discovery.
        commit_hash : list of str
            Commit hashes to use for benchmark discovery.
        regex : str or list of str, optional
            `regex` is a list of regular expressions matching the
            benchmarks to run. If none are provided, all benchmarks
            are run.
        check : bool
            Run additional checks after discovery.
        """
        benchmarks = cls._disc_benchmarks(conf, repo, environments, commit_hash, check)
        return cls(conf, benchmarks, regex=regex)
    @classmethod
    def _disc_benchmarks(cls, conf, repo, environments, commit_hashes, check):
        """
        Discover all benchmarks in a directory tree.
        """
        root = conf.benchmark_dir
        cls.check_tree(root)
        if len(environments) == 0:
            raise util.UserError("No available environments")
        # Try several different commits:
        #
        # - First of commit_hashes provided
        # - Tips of branches from configuration file
        # - Rest of the commit_hashes
        #
        def iter_hashes():
            for h in commit_hashes[:1]:
                yield h
            for branch in conf.branches:
                try:
                    yield repo.get_hash_from_name(branch)
                except NoSuchNameError:
                    continue
            for h in commit_hashes[1:]:
                yield h
        def iter_unique(iter):
            # Drop duplicate hashes while preserving order.
            seen = set()
            for item in iter:
                if item not in seen:
                    seen.add(item)
                    yield item
        try_hashes = iter_unique(iter_hashes())
        log.info("Discovering benchmarks")
        with log.indent():
            last_err = None
            for env, commit_hash in itertools.product(environments, try_hashes):
                env.create()
                if last_err is not None:
                    log.warning("Failed: trying different commit/environment")
                result_dir = tempfile.mkdtemp()
                try:
                    # Discovery runs inside the benchmark environment; the
                    # helper script writes the benchmark list as JSON.
                    env.install_project(conf, repo, commit_hash)
                    env_vars = dict(os.environ)
                    env_vars.update(env.env_vars)
                    result_file = os.path.join(result_dir, 'result.json')
                    env.run(
                        [runner.BENCHMARK_RUN_SCRIPT, 'discover',
                         os.path.abspath(root),
                         os.path.abspath(result_file)],
                        cwd=result_dir,
                        env=env_vars,
                        dots=False)
                    try:
                        with open(result_file, 'r') as fp:
                            benchmarks = json.load(fp)
                    except (IOError, ValueError):
                        log.error("Invalid discovery output")
                        raise util.UserError()
                    break
                except (util.UserError, util.ProcessError) as err:
                    last_err = err
                    continue
                except KeyboardInterrupt:
                    raise util.UserError("Interrupted.")
                finally:
                    util.long_path_rmtree(result_dir)
            else:
                # for/else: every commit/environment combination failed.
                raise util.UserError("Failed to build the project and import the benchmark suite.")
            if check:
                log.info("Checking benchmarks")
                with log.indent():
                    result_dir = tempfile.mkdtemp()
                    try:
                        out, err, retcode = env.run(
                            [runner.BENCHMARK_RUN_SCRIPT, 'check',
                             os.path.abspath(root)],
                            cwd=result_dir,
                            dots=False,
                            env=env_vars,
                            valid_return_codes=None,
                            return_stderr=True,
                            redirect_stderr=True)
                    finally:
                        util.long_path_rmtree(result_dir)
                    out = out.strip()
                    if retcode == 0:
                        if out:
                            log.info(out)
                        log.info("No problems found.")
                    else:
                        if out:
                            log.error(out)
                        raise util.UserError("Benchmark suite check failed.")
        return benchmarks
    @classmethod
    def check_tree(cls, root, require_init_py=True):
        """
        Check the benchmark tree for files with the same name as
        directories.
        Also, ensure that the top-level directory has an __init__.py file.
        Raises
        ------
        UserError
            A .py file and directory with the same name (excluding the
            extension) were found.
        """
        if os.path.basename(root) == '__pycache__':
            return
        if not os.path.isfile(os.path.join(root, '__init__.py')):
            # Not a Python package directory
            if require_init_py:
                raise util.UserError(
                    "No __init__.py file in '{0}'".format(root))
            else:
                return
        # First, check for the case where a .py file and a directory
        # have the same name (without the extension). This can't be
        # handled, so just raise an exception
        found = set()
        for filename in os.listdir(root):
            path = os.path.join(root, filename)
            if os.path.isfile(path):
                filename, ext = os.path.splitext(filename)
                if ext == '.py':
                    found.add(filename)
        for dirname in os.listdir(root):
            path = os.path.join(root, dirname)
            if os.path.isdir(path):
                if dirname in found:
                    raise util.UserError(
                        "Found a directory and python file with same name in "
                        "benchmark tree: '{0}'".format(path))
                # Recurse; nested directories need not be packages themselves.
                cls.check_tree(path, require_init_py=False)
    @classmethod
    def get_benchmark_file_path(cls, results_dir):
        """
        Get the path to the benchmarks.json file in the results dir.
        """
        return os.path.join(results_dir, "benchmarks.json")
    def save(self):
        """
        Save the ``benchmarks.json`` file, which is a cached set of the
        metadata about the discovered benchmarks, in the results dir.
        """
        path = self.get_benchmark_file_path(self._conf.results_dir)
        util.write_json(path, self._all_benchmarks, self.api_version)
    @classmethod
    def load(cls, conf, regex=None):
        """
        Load the benchmark descriptions from the `benchmarks.json` file.
        If the file is not found, one of the given `environments` will
        be used to discover benchmarks.
        Parameters
        ----------
        conf : Config object
            The project's configuration
        regex : str or list of str, optional
            `regex` is a list of regular expressions matching the
            benchmarks to load. See __init__ docstring.
        Returns
        -------
        benchmarks : Benchmarks object
        """
        try:
            path = cls.get_benchmark_file_path(conf.results_dir)
            if not os.path.isfile(path):
                raise util.UserError("Benchmark list file {} missing!".format(path))
            d = util.load_json(path, api_version=cls.api_version)
            benchmarks = d.values()
            return cls(conf, benchmarks, regex=regex)
        except util.UserError as err:
            if "asv update" in str(err):
                # Don't give conflicting instructions
                raise
            raise util.UserError("{}\nUse `asv run --bench just-discover` to "
                                 "regenerate benchmarks.json".format(str(err)))
| bsd-3-clause | 1ec7bf11e26377f3f2cbceaa8f0d73f8 | 33.195266 | 99 | 0.521284 | 4.858344 | false | false | false | false |
joblib/joblib | joblib/externals/loky/backend/resource_tracker.py | 2 | 14359 | ###############################################################################
# Server process to keep track of unlinked resources, like folders and
# semaphores and clean them.
#
# author: Thomas Moreau
#
# adapted from multiprocessing/semaphore_tracker.py (17/02/2017)
# * include custom spawnv_passfds to start the process
# * add some VERBOSE logging
#
# TODO: multiprocessing.resource_tracker was contributed to Python 3.8 so
# once loky drops support for Python 3.7 it might be possible to stop
# maintaining this loky-specific fork. As a consequence, it might also be
# possible to stop maintaining the loky.backend.synchronize fork of
# multiprocessing.synchronize.
#
# On Unix we run a server process which keeps track of unlinked
# resources. The server ignores SIGINT and SIGTERM and reads from a
# pipe. The resource_tracker implements a reference counting scheme: each time
# a Python process anticipates the shared usage of a resource by another
# process, it signals the resource_tracker of this shared usage, and in return,
# the resource_tracker increments the resource's reference count by 1.
# Similarly, when access to a resource is closed by a Python process, the
# process notifies the resource_tracker by asking it to decrement the
# resource's reference count by 1. When the reference count drops to 0, the
# resource_tracker attempts to clean up the underlying resource.
# Finally, every other process connected to the resource tracker has a copy of
# the writable end of the pipe used to communicate with it, so the resource
# tracker gets EOF when all other processes have exited. Then the
# resource_tracker process unlinks any remaining leaked resources (with
# reference count above 0)
# For semaphores, this is important because the system only supports a limited
# number of named semaphores, and they will not be automatically removed till
# the next reboot. Without this resource tracker process, "killall python"
# would probably leave unlinked semaphores.
# Note that this behavior differs from CPython's resource_tracker, which only
# implements list of shared resources, and not a proper refcounting scheme.
# Also, CPython's resource tracker will only attempt to cleanup those shared
# resources once all procsses connected to the resouce tracker have exited.
import os
import shutil
import sys
import signal
import warnings
import threading
from _multiprocessing import sem_unlink
from multiprocessing import util
from . import spawn
if sys.platform == "win32":
import _winapi
import msvcrt
from multiprocessing.reduction import duplicate
__all__ = ['ensure_running', 'register', 'unregister']
_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
_CLEANUP_FUNCS = {
'folder': shutil.rmtree,
'file': os.unlink
}
if os.name == "posix":
_CLEANUP_FUNCS['semlock'] = sem_unlink
VERBOSE = False
class ResourceTracker:
    """Client-side handle for the shared resource-tracker process.

    Lazily spawns the tracker (a separate Python process running ``main``)
    on first use, and talks to it over a one-way pipe using
    newline-delimited ASCII commands of the form ``CMD:name:rtype``.
    """
    def __init__(self):
        # _lock protects the (fd, pid) pair during lazy (re)launch.
        self._lock = threading.Lock()
        self._fd = None
        self._pid = None
    def getfd(self):
        # Writable end of the pipe to the tracker (launching it if needed).
        self.ensure_running()
        return self._fd
    def ensure_running(self):
        '''Make sure that resource tracker process is running.
        This can be run from any process. Usually a child process will use
        the resource created by its parent.'''
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)
                if os.name == "posix":
                    try:
                        # At this point, the resource_tracker process has been
                        # killed or crashed. Let's remove the process entry
                        # from the process table to avoid zombie processes.
                        os.waitpid(self._pid, 0)
                    except OSError:
                        # The process was terminated or is a child from an
                        # ancestor of the current process.
                        pass
                self._fd = None
                self._pid = None
                warnings.warn('resource_tracker: process died unexpectedly, '
                              'relaunching. Some folders/sempahores might '
                              'leak.')
            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass
            # r is inherited by the tracker (its read end); w stays here.
            r, w = os.pipe()
            if sys.platform == "win32":
                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
                os.close(r)
                r = _r
            cmd = f'from {main.__module__} import main; main({r}, {VERBOSE})'
            try:
                fds_to_pass.append(r)
                # process will out live us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe, *util._args_from_interpreter_flags(), '-c', cmd]
                util.debug(f"launching resource tracker: {args}")
                # bpo-33613: Register a signal mask that will block the
                # signals. This signal mask will be inherited by the child
                # that is going to be spawned and will protect the child from a
                # race condition that can make the child die before it
                # registers signal handlers for SIGINT and SIGTERM. The mask is
                # unregistered after spawning the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_BLOCK,
                                               _IGNORED_SIGNALS)
                    pid = spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_UNBLOCK,
                                               _IGNORED_SIGNALS)
            except BaseException:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                # Parent no longer needs the read end once the child has it.
                if sys.platform == "win32":
                    _winapi.CloseHandle(r)
                else:
                    os.close(r)
    def _check_alive(self):
        '''Check for the existence of the resource tracker process.'''
        try:
            # The probe write fails with BrokenPipeError if the tracker has
            # exited and closed the read end of the pipe.
            self._send('PROBE', '', '')
        except BrokenPipeError:
            return False
        else:
            return True
    def register(self, name, rtype):
        '''Register a named resource, and increment its refcount.'''
        self.ensure_running()
        self._send('REGISTER', name, rtype)
    def unregister(self, name, rtype):
        '''Unregister a named resource with resource tracker.'''
        self.ensure_running()
        self._send('UNREGISTER', name, rtype)
    def maybe_unlink(self, name, rtype):
        '''Decrement the refcount of a resource, and delete it if it hits 0'''
        self.ensure_running()
        self._send("MAYBE_UNLINK", name, rtype)
    def _send(self, cmd, name, rtype):
        if len(name) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('name too long')
        msg = f'{cmd}:{name}:{rtype}\n'.encode('ascii')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg)
# Process-wide singleton; the module-level helpers below are its bound
# methods, mirroring the CPython multiprocessing.resource_tracker API.
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd
def main(fd, verbose=0):
    '''Run resource tracker.'''
    # protect the process from ^C and "killall python" etc
    if verbose:
        util.log_to_stderr(level=util.DEBUG)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    if _HAVE_SIGMASK:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass
    if verbose:
        util.debug("Main resource tracker is running")
    # registry maps resource type -> {resource name -> refcount}
    registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        if sys.platform == "win32":
            fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
        with open(fd, 'rb') as f:
            # EOF on the pipe means every client process has exited.
            while True:
                line = f.readline()
                if line == b'':  # EOF
                    break
                try:
                    splitted = line.strip().decode('ascii').split(':')
                    # name can potentially contain separator symbols (for
                    # instance folders on Windows)
                    cmd, name, rtype = (
                        splitted[0], ':'.join(splitted[1:-1]), splitted[-1])
                    if cmd == 'PROBE':
                        continue
                    if rtype not in _CLEANUP_FUNCS:
                        raise ValueError(
                            f'Cannot register {name} for automatic cleanup: '
                            f'unknown resource type ({rtype}). Resource type '
                            'should be one of the following: '
                            f'{list(_CLEANUP_FUNCS.keys())}'
                        )
                    if cmd == 'REGISTER':
                        if name not in registry[rtype]:
                            registry[rtype][name] = 1
                        else:
                            registry[rtype][name] += 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] incremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )
                    elif cmd == 'UNREGISTER':
                        del registry[rtype][name]
                        if verbose:
                            util.debug(
                                f"[ResourceTracker] unregister {name} {rtype}: "
                                f"registry({len(registry)})"
                            )
                    elif cmd == 'MAYBE_UNLINK':
                        registry[rtype][name] -= 1
                        if verbose:
                            util.debug(
                                "[ResourceTracker] decremented refcount of "
                                f"{rtype} {name} "
                                f"(current {registry[rtype][name]})"
                            )
                        # Last reference gone: actually delete the resource.
                        if registry[rtype][name] == 0:
                            del registry[rtype][name]
                            try:
                                if verbose:
                                    util.debug(
                                        f"[ResourceTracker] unlink {name}"
                                    )
                                _CLEANUP_FUNCS[rtype](name)
                            except Exception as e:
                                warnings.warn(
                                    f'resource_tracker: {name}: {e!r}')
                    else:
                        raise RuntimeError(f'unrecognized command {cmd!r}')
                except BaseException:
                    # A malformed line must not kill the tracker; report and
                    # keep serving the remaining clients.
                    try:
                        sys.excepthook(*sys.exc_info())
                    except BaseException:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining resources
        def _unlink_resources(rtype_registry, rtype):
            if rtype_registry:
                try:
                    warnings.warn(
                        'resource_tracker: There appear to be '
                        f'{len(rtype_registry)} leaked {rtype} objects to '
                        'clean up at shutdown'
                    )
                except Exception:
                    pass
            for name in rtype_registry:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died. We therefore clean it up.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                    if verbose:
                        util.debug(f"[ResourceTracker] unlink {name}")
                except Exception as e:
                    warnings.warn(f'resource_tracker: {name}: {e!r}')
        for rtype, rtype_registry in registry.items():
            if rtype == "folder":
                continue
            else:
                _unlink_resources(rtype_registry, rtype)
        # The default cleanup routine for folders deletes everything inside
        # those folders recursively, which can include other resources tracked
        # by the resource tracker). To limit the risk of the resource tracker
        # attempting to delete twice a resource (once as part of a tracked
        # folder, and once as a resource), we delete the folders after all
        # other resource types.
        if "folder" in registry:
            _unlink_resources(registry["folder"], "folder")
    if verbose:
        util.debug("resource tracker shut down")
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
    """Spawn ``args`` as a detached process, keeping ``passfds`` inheritable.

    Returns the pid of the spawned process. On POSIX this forks and execs;
    on Windows it uses CreateProcess with inheritable handles.
    """
    passfds = sorted(passfds)
    if sys.platform != "win32":
        # NOTE(review): this error pipe is created and closed without ever
        # being read or passed to the child - it looks vestigial; confirm
        # before removing.
        errpipe_read, errpipe_write = os.pipe()
        try:
            from .reduction import _mk_inheritable
            from .fork_exec import fork_exec
            _pass = [_mk_inheritable(fd) for fd in passfds]
            return fork_exec(args, _pass)
        finally:
            os.close(errpipe_read)
            os.close(errpipe_write)
    else:
        cmd = ' '.join(f'"{x}"' for x in args)
        try:
            _, ht, pid, _ = _winapi.CreateProcess(
                path, cmd, None, None, True, 0, None, None, None)
            _winapi.CloseHandle(ht)
        except BaseException:
            # NOTE(review): swallowing the failure here leaves 'pid' unbound,
            # so the return below raises NameError instead of a meaningful
            # error - consider re-raising or initializing pid.
            pass
        return pid
| bsd-3-clause | 93fc7e8398b890b4a450735822577031 | 37.703504 | 80 | 0.532419 | 4.72025 | false | false | false | false |
joblib/joblib | benchmarks/bench_auto_batching.py | 6 | 4331 | """Benchmark batching="auto" on high number of fast tasks
The goal of this script is to study the behavior of the batch_size='auto'
and in particular the impact of the default value of the
joblib.parallel.MIN_IDEAL_BATCH_DURATION constant.
"""
# Author: Olivier Grisel
# License: BSD 3 clause
import numpy as np
import time
import tempfile
from pprint import pprint
from joblib import Parallel, delayed
from joblib._parallel_backends import AutoBatchingMixin
def sleep_noop(duration, input_data, output_data_size):
    """Emulate a real task without doing actual work.

    CPU time is simulated by sleeping for ``duration`` seconds. The
    ``input_data`` argument is accepted (and ignored) purely to induce
    data-transfer overhead; when ``output_data_size`` is non-zero an array
    of that many bytes is allocated and returned.
    """
    time.sleep(duration)
    if not output_data_size:
        return None
    return np.ones(output_data_size, dtype=np.byte)
def bench_short_tasks(task_times, n_jobs=2, batch_size="auto",
                      pre_dispatch="2*n_jobs", verbose=True,
                      input_data_size=0, output_data_size=0, backend=None,
                      memmap_input=False):
    """Run sleep_noop over ``task_times`` in parallel and report the timing.

    Returns ``(duration_seconds, effective_batch_size)``.
    ``input_data_size`` / ``output_data_size`` control the per-task
    data-transfer overhead in bytes; when ``memmap_input`` is True the input
    array is memory-mapped from disk instead of held in memory.
    """
    with tempfile.NamedTemporaryFile() as temp_file:
        if input_data_size:
            # Generate some input data with the required size
            if memmap_input:
                temp_file.close()
                input_data = np.memmap(temp_file.name, shape=input_data_size,
                                       dtype=np.byte, mode='w+')
                input_data[:] = 1
            else:
                input_data = np.ones(input_data_size, dtype=np.byte)
        else:
            input_data = None
        t0 = time.time()
        p = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch,
                     batch_size=batch_size, backend=backend)
        # Negative task times are clipped to 0 (sleep needs non-negative).
        p(delayed(sleep_noop)(max(t, 0), input_data, output_data_size)
          for t in task_times)
        duration = time.time() - t0
        # Auto-batching backends record the batch size they converged to.
        effective_batch_size = getattr(p._backend, '_effective_batch_size',
                                       p.batch_size)
        print('Completed {} tasks in {:3f}s, final batch_size={}\n'.format(
            len(task_times), duration, effective_batch_size))
        return duration, effective_batch_size
if __name__ == "__main__":
    bench_parameters = dict(
        # batch_size=200,  # batch_size='auto' by default
        # memmap_input=True,  # if True manually memmap input out of timing
        # backend='threading',  # backend='multiprocessing' by default
        # pre_dispatch='n_jobs',  # pre_dispatch="2*n_jobs" by default
        input_data_size=int(2e7),  # input data size in bytes
        output_data_size=int(1e5),  # output data size in bytes
        n_jobs=2,
        verbose=10,
    )
    print("Common benchmark parameters:")
    pprint(bench_parameters)
    # Shrink the batch-duration targets so the auto-batcher converges quickly
    # on these short benchmark runs.
    AutoBatchingMixin.MIN_IDEAL_BATCH_DURATION = 0.2
    AutoBatchingMixin.MAX_IDEAL_BATCH_DURATION = 2
    # First pair of benchmarks to check that the auto-batching strategy is
    # stable (do not change the batch size too often) in the presence of large
    # variance while still be comparable to the equivalent load without
    # variance
    print('# high variance, no trend')
    # censored gaussian distribution
    high_variance = np.random.normal(loc=0.000001, scale=0.001, size=5000)
    high_variance[high_variance < 0] = 0
    bench_short_tasks(high_variance, **bench_parameters)
    print('# low variance, no trend')
    low_variance = np.empty_like(high_variance)
    low_variance[:] = np.mean(high_variance)
    bench_short_tasks(low_variance, **bench_parameters)
    # Second pair of benchmarks: one has a cycling task duration pattern that
    # the auto batching feature should be able to roughly track. We use an even
    # power of cos to get only positive task durations with a majority close to
    # zero (only data transfer overhead). The shuffle variant should not
    # oscillate too much and still approximately have the same total run time.
    print('# cyclic trend')
    slow_time = 0.1
    positive_wave = np.cos(np.linspace(1, 4 * np.pi, 300)) ** 8
    cyclic = positive_wave * slow_time
    bench_short_tasks(cyclic, **bench_parameters)
    print("shuffling of the previous benchmark: same mean and variance")
    np.random.shuffle(cyclic)
    bench_short_tasks(cyclic, **bench_parameters)
| bsd-3-clause | 3e6694eb62265b82df7eb3ee305ab4f9 | 38.018018 | 79 | 0.650196 | 3.799123 | false | false | false | false |
joblib/joblib | joblib/externals/loky/backend/fork_exec.py | 2 | 1185 | ###############################################################################
# Launch a subprocess using forkexec and make sure only the needed fd are
# shared in the two process.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
def close_fds(keep_fds):  # pragma: no cover
    """Close every open file descriptor not listed in ``keep_fds``.

    File descriptors 1 (stdout) and 2 (stderr) are always preserved so
    that logging from the child process keeps working.
    """
    protected = set(keep_fds) | {1, 2}
    # Enumerate the process' open descriptors. /proc/self/fd is only
    # available on Linux-like systems; elsewhere, fall back to scanning
    # every possible fd number up to the soft RLIMIT_NOFILE bound.
    try:
        candidates = {int(name) for name in os.listdir('/proc/self/fd')}
    except FileNotFoundError:
        import resource
        soft_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        candidates = set(range(soft_limit))
    for fd in candidates - protected:
        try:
            os.close(fd)
        except OSError:
            # Already closed (e.g. the fd used to list /proc/self/fd) or
            # otherwise not closable -- this is best effort only.
            pass
def fork_exec(cmd, keep_fds, env=None):
    """Fork, then exec ``sys.executable`` with argv ``cmd`` in the child.

    Parameters
    ----------
    cmd : list
        Argument vector passed to ``os.execve``.
    keep_fds : iterable of int
        File descriptors to leave open in the child process.
    env : dict, optional
        Extra environment variables overriding the parent's environment.

    Returns the child's pid in the parent process.
    """
    # Build the child environment: parent environment plus overrides.
    overrides = env or {}
    child_env = dict(os.environ)
    child_env.update(overrides)
    pid = os.fork()
    if pid != 0:
        # Parent: hand the child's pid back to the caller.
        return pid
    # Child: drop all inherited fds except the requested ones, then
    # replace this process image with the target command.
    close_fds(keep_fds)  # pragma: no cover
    os.execve(sys.executable, cmd, child_env)
| bsd-3-clause | 203dc15b900650a8ac18e17668a627a5 | 27.214286 | 79 | 0.578059 | 3.726415 | false | false | false | false |
airspeed-velocity/asv | test/test_gh_pages.py | 1 | 2843 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import asv.util
from . import tools
@pytest.mark.parametrize("rewrite", [False, True], ids=["no-rewrite", "rewrite"])
def test_gh_pages(rewrite, tmpdir, generate_result_dir, monkeypatch):
    """End-to-end test of the ``asv gh-pages`` command.

    Exercises publishing to a local git repository, with and without the
    ``--rewrite`` flag (parametrized).  ``generate_result_dir`` is a
    project fixture -- it appears to build a config plus benchmark results
    for the given sample values; TODO confirm against conftest.
    """
    tmpdir = os.path.abspath(str(tmpdir))
    # git requires committer/author identity to create commits.
    monkeypatch.setenv(str('EMAIL'), str('test@asv'))
    monkeypatch.setenv(str('GIT_COMMITTER_NAME'), str('asv test'))
    monkeypatch.setenv(str('GIT_AUTHOR_NAME'), str('asv test'))
    conf, repo, commits = generate_result_dir([1, 2, 3, 4])
    # Scratch repositories: repo1 is the main working repo, repo2 is a
    # clone used later to check the push behavior.
    dvcs_dir = os.path.join(tmpdir, 'repo1')
    dvcs_dir2 = os.path.join(tmpdir, 'repo2')
    os.makedirs(dvcs_dir)
    os.chdir(dvcs_dir)
    dvcs = tools.Git(dvcs_dir)
    dvcs.init()
    # gh-pages needs at least one commit on the default branch to exist.
    open(os.path.join(dvcs_dir, 'dummy'), 'wb').close()
    dvcs.add('dummy')
    dvcs.commit('Initial commit')
    if rewrite:
        rewrite_args = ("--rewrite",)
    else:
        rewrite_args = ()
    # Check with no existing gh-pages branch, no push
    tools.run_asv_with_conf(conf, "gh-pages", "--no-push", *rewrite_args)
    dvcs.checkout('gh-pages')
    assert os.path.isfile(os.path.join(dvcs_dir, 'index.html'))
    assert len(dvcs.run_git(['rev-list', 'gh-pages']).splitlines()) == 1
    dvcs.checkout('master')
    assert not os.path.isfile(os.path.join(dvcs_dir, 'index.html'))
    # Check with existing (and checked out) gh-pages branch, with no changes
    tools.run_asv_with_conf(conf, "gh-pages", "--no-push", *rewrite_args)
    dvcs.checkout('gh-pages')
    assert os.path.isfile(os.path.join(dvcs_dir, 'index.html'))
    if rewrite:
        # --rewrite replaces the branch content, so history stays length 1.
        assert len(dvcs.run_git(['rev-list', 'gh-pages']).splitlines()) == 1
    else:
        # Timestamp may have changed
        assert len(dvcs.run_git(['rev-list', 'gh-pages']).splitlines()) <= 2
    dvcs.checkout('master')
    # Check with existing (not checked out) gh-pages branch, with some changes
    benchmarks_json = os.path.join(conf.results_dir, 'benchmarks.json')
    data = asv.util.load_json(benchmarks_json)
    data['time_func']['pretty_name'] = 'something changed'
    asv.util.write_json(benchmarks_json, data)
    prev_len = len(dvcs.run_git(['rev-list', 'gh-pages']).splitlines())
    tools.run_asv_with_conf(conf, "gh-pages", "--no-push", *rewrite_args)
    if not rewrite:
        # A content change appends one new commit to gh-pages history.
        assert len(dvcs.run_git(['rev-list', 'gh-pages']).splitlines()) == prev_len + 1
    else:
        assert len(dvcs.run_git(['rev-list', 'gh-pages']).splitlines()) == prev_len
    # Check that the push option works
    dvcs.run_git(['branch', '-D', 'gh-pages'])
    dvcs.run_git(['clone', dvcs_dir, dvcs_dir2])
    os.chdir(dvcs_dir2)
    tools.run_asv_with_conf(conf, "gh-pages", *rewrite_args)
    os.chdir(dvcs_dir)
    dvcs.checkout('gh-pages')
    # The push from the clone must have created gh-pages in the origin.
    assert os.path.isfile(os.path.join(dvcs_dir, 'index.html'))
| bsd-3-clause | c4b5a8c2cc9440ad9543e38174e11112 | 33.670732 | 87 | 0.641224 | 3.066882 | false | true | false | false |
joblib/joblib | joblib/_deprecated_format_stack.py | 4 | 14505 | """
Represent an exception with a lot of information.
Provides 2 useful functions:
format_exc: format an exception into a complete traceback, with full
debugging instruction.
format_outer_frames: format the current position in the stack call.
Adapted from IPython's VerboseTB.
"""
# Authors: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Nathaniel Gray <n8gray@caltech.edu>
# Fernando Perez <fperez@colorado.edu>
# Copyright: 2010, Gael Varoquaux
# 2001-2004, Fernando Perez
# 2001 Nathaniel Gray
# License: BSD 3 clause
# flake8: noqa
import inspect
import keyword
import linecache
import os
import pydoc
import sys
import time
import tokenize
import traceback
INDENT = ' ' * 8
###############################################################################
# some internal-use functions
def safe_repr(value):
    """Best-effort repr() that never raises (except KeyboardInterrupt).

    Tries, in order: pydoc's pretty repr, the builtin repr, the object's
    ``__name__`` / ``__class__`` and finally a fixed failure marker.
    """
    # First choice: pydoc's abbreviating repr.
    try:
        return pydoc.text.repr(value)
    except KeyboardInterrupt:
        raise
    except:
        pass
    # Second choice: the plain builtin repr.
    try:
        return repr(value)
    except KeyboardInterrupt:
        raise
    except:
        pass
    # Last resort: describe the object by its name or class. Everything
    # runs inside the except net because even getattr may raise here.
    try:
        name = getattr(value, '__name__', None)
        if name:
            # ick, recursion
            return safe_repr(name)
        klass = getattr(value, '__class__', None)
        if klass:
            return '%s instance' % safe_repr(klass)
    except KeyboardInterrupt:
        raise
    except:
        return 'UNRECOVERABLE REPR FAILURE'
def eq_repr(value, repr=safe_repr):
    """Render *value* as an ``=<repr>`` fragment for argument displays."""
    return f'={repr(value)}'
###############################################################################
def uniq_stable(elems):
    """uniq_stable(elems) -> list

    Return the unique elements of the iterable *elems*, preserving the
    order in which each element first appears.  All elements must be
    hashable.
    """
    # dict preserves insertion order (guaranteed since Python 3.7), so
    # this deduplicates while keeping first occurrences in place.
    return list(dict.fromkeys(elems))
###############################################################################
def fix_frame_records_filenames(records):
    """Try to fix the filenames in each record from inspect.getinnerframes().

    Modules loaded from zip archives carry useless filenames on their
    code objects; prefer the ``__file__`` found in the frame's globals
    whenever it is a plain string.
    """
    fixed = []
    for frame, filename, line_no, func_name, lines, index in records:
        candidate = frame.f_globals.get('__file__', None)
        # Only trust __file__ when it really is a string: it may be
        # missing, None (error during import), or something exotic.
        if isinstance(candidate, str):
            filename = candidate
        fixed.append((frame, filename, line_no, func_name, lines, index))
    return fixed
def _fixed_getframes(etb, context=1, tb_offset=0):
    """Frame records for traceback ``etb`` with fixed filenames and context.

    Returns the inner frame records (skipping the first ``tb_offset``
    entries), each patched so that its line number, its window of
    ``context`` source lines and the index of the error line within that
    window come from ``traceback.extract_tb``.
    """
    # Positions, inside each record tuple, of the fields patched below.
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
    # If the error is at the console, don't build any context, since it would
    # otherwise produce 5 blank lines printed out (there is no file at the
    # console)
    rec_check = records[tb_offset:]
    try:
        rname = rec_check[0][1]
        if rname == '<ipython console>' or rname.endswith('<string>'):
            return rec_check
    except IndexError:
        # No records after the offset: fall through to the generic path.
        pass
    aux = traceback.extract_tb(etb)
    assert len(records) == len(aux)
    for i, (file, lnum, _, _) in enumerate(aux):
        # Center a window of `context` lines around the error line,
        # clamped at the beginning of the file.
        maybe_start = lnum - 1 - context // 2
        start = max(maybe_start, 0)
        end = start + context
        lines = linecache.getlines(file)[start:end]
        # Records are tuples, so rebuild each one with the patched fields.
        buf = list(records[i])
        buf[LNUM_POS] = lnum
        buf[INDEX_POS] = lnum - 1 - start
        buf[LINES_POS] = lines
        records[i] = tuple(buf)
    return records[tb_offset:]
def _format_traceback_lines(lnum, index, lines, lvals=None):
numbers_width = 7
res = []
i = lnum - index
for line in lines:
if i == lnum:
# This is the line with the error
pad = numbers_width - len(str(i))
if pad >= 3:
marker = '-' * (pad - 3) + '-> '
elif pad == 2:
marker = '> '
elif pad == 1:
marker = '>'
else:
marker = ''
num = marker + str(i)
else:
num = '%*s' % (numbers_width, i)
line = '%s %s' % (num, line)
res.append(line)
if lvals and i == lnum:
res.append(lvals + '\n')
i = i + 1
return res
def format_records(records):  # , print_globals=False):
    """Format inspect-style frame records into a list of text blocks.

    For each record, renders a dotted separator, the absolute file path
    and formatted call signature, the surrounding source lines, and a
    dump of the local variables referenced on the error line.
    """
    # Loop over all records printing context and info
    frames = []
    abspath = os.path.abspath
    for frame, file, lnum, func, lines, index in records:
        try:
            file = file and abspath(file) or '?'
        except OSError:
            # if file is '<console>' or something not in the filesystem,
            # the abspath call will throw an OSError. Just ignore it and
            # keep the original file string.
            pass
        # Point at the source .py rather than the compiled .pyc file.
        if file.endswith('.pyc'):
            file = file[:-4] + '.py'
        link = file
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        if func == '?':
            call = ''
        else:
            # Decide whether to include variable details or not
            try:
                call = 'in %s%s' % (func, inspect.formatargvalues(args,
                                    varargs, varkw, locals,
                                    formatvalue=eq_repr))
            except KeyError:
                # Very odd crash from inspect.formatargvalues(). The
                # scenario under which it appeared was a call to
                # view(array,scale) in NumTut.view.view(), where scale had
                # been defined as a scalar (it should be a tuple). Somehow
                # inspect messes up resolving the argument list of view()
                # and barfs out. At some point I should dig into this one
                # and file a bug report about it.
                print("\nJoblib's exception reporting continues...\n")
                call = 'in %s(***failed resolving arguments***)' % func
        # Initialize a list of names on the current line, which the
        # tokenizer below will populate.
        names = []

        def tokeneater(token_type, token, start, end, line):
            """Stateful tokeneater which builds dotted names.

            The list of names it appends to (from the enclosing scope) can
            contain repeated composite names. This is unavoidable, since
            there is no way to disambiguate partial dotted structures until
            the full list is known. The caller is responsible for pruning
            the final list of duplicates before using it."""
            # build composite names
            if token == '.':
                try:
                    names[-1] += '.'
                    # store state so the next token is added for x.y.z names
                    tokeneater.name_cont = True
                    return
                except IndexError:
                    pass
            if token_type == tokenize.NAME and token not in keyword.kwlist:
                if tokeneater.name_cont:
                    # Dotted names
                    names[-1] += token
                    tokeneater.name_cont = False
                else:
                    # Regular new names. We append everything, the caller
                    # will be responsible for pruning the list later. It's
                    # very tricky to try to prune as we go, b/c composite
                    # names can fool us. The pruning at the end is easy
                    # to do (or the caller can print a list with repeated
                    # names if so desired.
                    names.append(token)
            elif token_type == tokenize.NEWLINE:
                raise IndexError
        # we need to store a bit of state in the tokenizer to build
        # dotted names
        tokeneater.name_cont = False

        def linereader(file=file, lnum=[lnum], getline=linecache.getline):
            # Feed the tokenizer one source line at a time, starting at
            # the error line (lnum is a one-element list used as a mutable
            # counter captured by this closure).
            line = getline(file, lnum[0])
            lnum[0] += 1
            return line
        # Build the list of names on this line of code where the exception
        # occurred.
        try:
            # This builds the names list in-place by capturing it from the
            # enclosing scope.
            for token in tokenize.generate_tokens(linereader):
                tokeneater(*token)
        except (IndexError, UnicodeDecodeError, SyntaxError):
            # signals exit of tokenizer
            # SyntaxError can happen when trying to tokenize
            # a compiled (e.g. .so or .pyd) extension
            pass
        except tokenize.TokenError as msg:
            _m = ("An unexpected error occurred while tokenizing input file %s\n"
                  "The following traceback may be corrupted or invalid\n"
                  "The error message is: %s\n" % (file, msg))
            print(_m)
        # prune names list of duplicates, but keep the right order
        unique_names = uniq_stable(names)
        # Start loop over vars
        lvals = []
        for name_full in unique_names:
            name_base = name_full.split('.', 1)[0]
            if name_base in frame.f_code.co_varnames:
                if name_base in locals.keys():
                    try:
                        value = safe_repr(eval(name_full, locals))
                    except:
                        value = "undefined"
                else:
                    value = "undefined"
                name = name_full
                lvals.append('%s = %s' % (name, value))
            #elif print_globals:
            #    if frame.f_globals.has_key(name_base):
            #        try:
            #            value = safe_repr(eval(name_full,frame.f_globals))
            #        except:
            #            value = "undefined"
            #        else:
            #            value = "undefined"
            #        name = 'global %s' % name_full
            #        lvals.append('%s = %s' % (name,value))
        if lvals:
            lvals = '%s%s' % (INDENT, ('\n%s' % INDENT).join(lvals))
        else:
            lvals = ''
        # Dotted separator line followed by "path in signature".
        level = '%s\n%s %s\n' % (75 * '.', link, call)
        if index is None:
            frames.append(level)
        else:
            frames.append('%s%s' % (level, ''.join(
                _format_traceback_lines(lnum, index, lines, lvals))))
    return frames
###############################################################################
def format_exc(etype, evalue, etb, context=5, tb_offset=0):
    """ Return a nice text document describing the traceback.

    Parameters
    -----------
    etype, evalue, etb: as returned by sys.exc_info
    context: number of lines of the source file to plot
    tb_offset: the number of stack frame not to use (0 = use all)
    """
    # some locals
    try:
        etype = etype.__name__
    except AttributeError:
        pass
    # Header with the exception type, python version, and date
    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    pid = 'PID: %i' % os.getpid()
    # Two 75-column header lines: exception type / date, then PID / python.
    head = '%s%s%s\n%s%s%s' % (
        etype, ' ' * (75 - len(str(etype)) - len(date)),
        date, pid, ' ' * (75 - len(str(pid)) - len(pyver)),
        pyver)
    # Drop topmost frames if requested
    records = _fixed_getframes(etb, context, tb_offset)
    # Get (safely) a string form of the exception info
    try:
        etype_str, evalue_str = map(str, (etype, evalue))
    except BaseException:
        # User exception is improperly defined.
        etype, evalue = str, sys.exc_info()[:2]
        etype_str, evalue_str = map(str, (etype, evalue))
    # ... and format it
    exception = ['%s: %s' % (etype_str, evalue_str)]
    frames = format_records(records)
    return '%s\n%s\n%s' % (head, '\n'.join(frames), ''.join(exception[0]))
###############################################################################
def format_outer_frames(context=5, stack_start=None, stack_end=None,
                        ignore_ipython=True):
    """Format the current call stack (outer frames) as a text document.

    ``stack_start``/``stack_end`` slice the collected frames (applied in
    reverse order); ``context`` is the number of source lines shown per
    frame; ``ignore_ipython`` stops the walk at IPython internals.
    """
    # Positions, inside each record tuple, of the fields patched below.
    LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
    records = inspect.getouterframes(inspect.currentframe())
    output = list()
    for i, (frame, filename, line_no, func_name, lines, index) \
            in enumerate(records):
        # Look inside the frame's globals dictionary for __file__, which should
        # be better.
        better_fn = frame.f_globals.get('__file__', None)
        if isinstance(better_fn, str):
            # Check the type just in case someone did something weird with
            # __file__. It might also be None if the error occurred during
            # import.
            filename = better_fn
        if filename.endswith('.pyc'):
            filename = filename[:-4] + '.py'
        if ignore_ipython:
            # Hack to avoid printing the internals of IPython
            if (os.path.basename(filename) in ('iplib.py', 'py3compat.py')
                    and func_name in ('execfile', 'safe_execfile', 'runcode')):
                break
        # Center a window of `context` lines around the current line.
        maybe_start = line_no - 1 - context // 2
        start = max(maybe_start, 0)
        end = start + context
        lines = linecache.getlines(filename)[start:end]
        buf = list(records[i])
        buf[LNUM_POS] = line_no
        buf[INDEX_POS] = line_no - 1 - start
        buf[LINES_POS] = lines
        output.append(tuple(buf))
    # Innermost-last ordering, honoring the requested slice bounds.
    return '\n'.join(format_records(output[stack_end:stack_start:-1]))
| bsd-3-clause | bdc67ad7ec447ba9a62cad411b25322e | 35.536524 | 83 | 0.53354 | 4.269944 | false | false | false | false |
airspeed-velocity/asv | asv/plugins/mercurial.py | 1 | 5818 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Supports mercurial repositories for the benchmarked project.
"""
import os
import sys
import re
try:
import hglib
except ImportError:
hglib = None
from ..console import log
from ..repo import Repo, NoSuchNameError
from .. import util
class Hg(Repo):
    """Mercurial repository backend for asv, implemented on top of hglib."""

    dvcs = "hg"
    _default_branch = "default"
    _encoding = "utf-8"

    def __init__(self, url, mirror_path):
        # TODO: shared repositories in Mercurial are only possible
        # through an extension, and it's not clear how to use those in
        # this context. So here, we always make full clones for
        # each of the environments.
        self._path = os.path.abspath(mirror_path)
        self._pulled = False
        if hglib is None:
            raise ImportError("hglib")
        if self.is_local_repo(url):
            # Local repository, no need for mirror
            self._path = os.path.abspath(url)
            self._pulled = True
        elif not self.is_local_repo(self._path):
            if os.path.exists(self._path):
                self._raise_bad_mirror_error(self._path)
            # Clone is missing
            log.info("Cloning project")
            if url.startswith("hg+"):
                url = url[3:]
            # Mercurial branches are global, so there is no need for
            # an analog of git --mirror
            hglib.clone(self._encode_filename(url),
                        dest=self._encode_filename(self._path),
                        noupdate=True)
        self._repo = hglib.open(self._encode_filename(self._path))

    def __del__(self):
        # Shut down the hglib command server when this object goes away.
        if self._repo is not None:
            self._repo.close()
            self._repo = None

    def _decode(self, item):
        # hglib returns bytes; decode with the repository text encoding.
        return item.decode(self._encoding)

    def _encode(self, item):
        # Encode revision specs / names to bytes for hglib.
        return item.encode(self._encoding)

    def _encode_filename(self, filename):
        # Paths use the OS filesystem encoding, not the repo encoding.
        return filename.encode(sys.getfilesystemencoding())

    @classmethod
    def is_local_repo(cls, path):
        # A local Mercurial checkout is a directory containing '.hg'.
        return (os.path.isdir(path) and
                os.path.isdir(os.path.join(path, '.hg')))

    @classmethod
    def url_match(cls, url):
        """Return True if *url* looks like a Mercurial repository."""
        regexes = [
            r'^hg\+https?://.*$',
            r'^https?://.*?\.hg$',
            r'^ssh://hg@.*$']
        for regex in regexes:
            if re.match(regex, url):
                return True
        # Check for a local path
        if cls.is_local_repo(url):
            return True
        return False

    def get_range_spec(self, commit_a, commit_b):
        # Revset: commits reachable from b but not from (or equal to) a.
        return '{0}::{1} and not {0}'.format(commit_a, commit_b)

    def get_new_range_spec(self, latest_result, branch=None):
        # Revset: commits from latest_result up to the branch head.
        return '{0}::{1}'.format(latest_result, self.get_branch_name(branch))

    def pull(self):
        # We assume the remote isn't updated during the run of asv
        # itself.
        if self._pulled:
            return
        log.info("Fetching recent changes")
        self._repo.pull()
        self._pulled = True

    def checkout(self, path, commit_hash):
        # Need to pull -- the copy is not updated automatically, since
        # the repository data is not shared

        def checkout_existing():
            # Sync the existing working copy to commit_hash and purge any
            # untracked files so the checkout is pristine.
            with hglib.open(self._encode_filename(path)) as subrepo:
                subrepo.pull()
                subrepo.update(self._encode(commit_hash), clean=True)
                subrepo.rawcommand([b"--config",
                                    b"extensions.purge=",
                                    b"purge",
                                    b"--all"])

        if os.path.isdir(path):
            try:
                checkout_existing()
            except (hglib.error.CommandError, hglib.error.ServerError):
                # Remove and re-clone
                util.long_path_rmtree(path)
        if not os.path.isdir(path):
            hglib.clone(self._encode_filename(self._path),
                        dest=self._encode_filename(path))
            checkout_existing()

    def get_date(self, hash):
        """Return the commit date of *hash* in milliseconds since epoch."""
        # TODO: This works on Linux, but should be extended for other platforms
        rev = self._repo.log(self._encode(hash))[0]
        return int(rev.date.strftime("%s")) * 1000

    def get_hashes_from_range(self, range_spec, **kwargs):
        # Newest-first ordering via the -rev sort.
        range_spec = self._encode("sort({0}, -rev)".format(range_spec))
        return [self._decode(rev.node) for rev in self._repo.log(range_spec, **kwargs)]

    def get_hash_from_name(self, name):
        """Resolve a branch/tag/revset *name* to a full commit hash."""
        if name is None:
            name = self.get_branch_name()
        try:
            return self._decode(self._repo.log(self._encode(name))[0].node)
        except hglib.error.CommandError as err:
            if b'unknown revision' in err.err:
                raise NoSuchNameError(name)
            raise

    def get_hash_from_parent(self, name):
        # p1() is the first parent of the given revision.
        return self.get_hash_from_name('p1({0})'.format(name))

    def get_name_from_hash(self, commit):
        # XXX: implement
        return None

    def get_tags(self):
        """Return a mapping of tag name -> commit hash."""
        tags = {}
        for item in self._repo.log(b"tag()"):
            tags[self._decode(item.tags)] = self._decode(item.node)
        return tags

    def get_date_from_name(self, name):
        return self.get_date(name)

    def get_branch_commits(self, branch):
        # branch() revsets exist since Mercurial 4.5; fall back to
        # ancestors() for older versions.
        if self._repo.version >= (4, 5):
            query = "branch({0})"
        else:
            query = "ancestors({0})"
        return self.get_hashes_from_range(query.format(self.get_branch_name(branch)),
                                          followfirst=True)

    def get_revisions(self, commits):
        """Map each hash in *commits* to its position in repo log order."""
        revisions = {}
        for i, item in enumerate(self._repo.log(b"all()")):
            node = self._decode(item.node)
            if node in commits:
                revisions[node] = i
        return revisions
| bsd-3-clause | 88e52be2808eefe7ca7d0a5e4d526e5a | 30.619565 | 87 | 0.55208 | 3.963215 | false | false | false | false |
joblib/joblib | joblib/pool.py | 4 | 14334 | """Custom implementation of multiprocessing.Pool with custom pickler.
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
import copyreg
import sys
import warnings
from time import sleep
try:
WindowsError
except NameError:
WindowsError = type(None)
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._memmapping_reducer import get_memmapping_reducers
from ._memmapping_reducer import TemporaryResourcesManager
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it, not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
except ImportError:
np = None
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
    """Pickler subclass that honours per-type reducer callables.

    ``reducers`` maps ``type -> callable`` where the callable, given an
    instance of that type, returns a ``(constructor, args_tuple)`` pair,
    exactly as a ``__reduce__`` method would.  HIGHEST_PROTOCOL is used
    by default: these pickles are ephemeral inter-process messages, so no
    backward compatibility is required.
    """

    # The pure-Python pickler exposes a class-level ``dispatch`` registry,
    # while the C implementation (Python 3.3+) supports the per-instance
    # ``dispatch_table`` from https://bugs.python.org/issue14166. Both are
    # handled so registration works on either implementation.

    def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
        Pickler.__init__(self, writer, protocol=protocol)
        if hasattr(Pickler, 'dispatch'):
            # Pure-Python pickler: copy the class-level registry onto the
            # instance so customizations do not leak across picklers.
            self.dispatch = Pickler.dispatch.copy()
        else:
            # C pickler: start from the default copyreg dispatch table.
            self.dispatch_table = copyreg.dispatch_table.copy()
        for type, reduce_func in (reducers or {}).items():
            self.register(type, reduce_func)

    def register(self, type, reduce_func):
        """Attach a reducer function to a given type in the dispatch table."""
        if hasattr(Pickler, 'dispatch'):
            # The pure-Python pickler has no explicit customization hook;
            # emulate one with a closure over reduce_func.
            def dispatcher(self, obj):
                reduced = reduce_func(obj)
                self.save_reduce(obj=obj, *reduced)
            self.dispatch[type] = dispatcher
        else:
            self.dispatch_table[type] = reduce_func
class CustomizablePicklingQueue(object):
    """Locked Pipe implementation that uses a customizable pickler.

    This class is an alternative to the multiprocessing implementation
    of SimpleQueue in order to make it possible to pass custom
    pickling reducers, for instance to avoid memory copy when passing
    memory mapped datastructures.

    `reducers` is expected to be a dict with key / values being
    `(type, callable)` pairs where `callable` is a function that, given an
    instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
    to rebuild an instance out of the pickled `tuple_of_objects` as would
    return a `__reduce__` method.

    See the standard library documentation on pickling for more details.
    """

    def __init__(self, context, reducers=None):
        self._reducers = reducers
        self._reader, self._writer = context.Pipe(duplex=False)
        self._rlock = context.Lock()
        if sys.platform == 'win32':
            # On win32, writes to a message-oriented pipe are atomic, so
            # no write lock is needed (see _make_methods).
            self._wlock = None
        else:
            self._wlock = context.Lock()
        self._make_methods()

    def __getstate__(self):
        # Only allow pickling while spawning a child process.
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock,
                self._reducers)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock,
         self._reducers) = state
        # Rebuild the bound get/put closures in the child process.
        self._make_methods()

    def empty(self):
        # True when no message is pending on the read end of the pipe.
        return not self._reader.poll()

    def _make_methods(self):
        # Build `get` and `put` as closures over the pipe endpoints and
        # locks, avoiding attribute lookups on the hot path.
        self._recv = recv = self._reader.recv
        racquire, rrelease = self._rlock.acquire, self._rlock.release

        def get():
            racquire()
            try:
                return recv()
            finally:
                rrelease()

        self.get = get
        if self._reducers:
            # Custom reducers: serialize with CustomizablePickler into a
            # buffer, then ship the raw bytes over the pipe.
            def send(obj):
                buffer = BytesIO()
                CustomizablePickler(buffer, self._reducers).dump(obj)
                self._writer.send_bytes(buffer.getvalue())
            self._send = send
        else:
            # No reducers: use the pipe's default pickling send.
            self._send = send = self._writer.send
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self.put = send
        else:
            wlock_acquire, wlock_release = (
                self._wlock.acquire, self._wlock.release)

            def put(obj):
                wlock_acquire()
                try:
                    return send(obj)
                finally:
                    wlock_release()

            self.put = put
class PicklingPool(Pool):
    """A ``multiprocessing.Pool`` whose queue pickling can be customized.

    Reducers control how objects are shipped between the parent and the
    worker processes, which makes it possible to share memory-mapped data
    instead of copying it through the default pickling machinery.

    ``forward_reducers`` (parent -> workers) and ``backward_reducers``
    (workers -> parent) are ``{type: callable}`` mappings; each callable
    takes an instance of ``type`` and returns a
    ``(constructor, args_tuple)`` pair, exactly like a ``__reduce__``
    method would.  See the standard library documentation about pickling
    for more details.
    """

    def __init__(self, processes=None, forward_reducers=None,
                 backward_reducers=None, **kwargs):
        # Default to fresh empty mappings (avoid mutable default args).
        self._forward_reducers = (
            dict() if forward_reducers is None else forward_reducers)
        self._backward_reducers = (
            dict() if backward_reducers is None else backward_reducers)
        poolargs = {'processes': processes}
        poolargs.update(kwargs)
        super(PicklingPool, self).__init__(**poolargs)

    def _setup_queues(self):
        # multiprocessing may store the spawn context on ``self._ctx``;
        # fall back to the module-level default context otherwise.
        context = getattr(self, '_ctx', mp)
        self._inqueue = CustomizablePicklingQueue(
            context, self._forward_reducers)
        self._outqueue = CustomizablePicklingQueue(
            context, self._backward_reducers)
        # Fast-path accessors used internally by multiprocessing.Pool.
        self._quick_put = self._inqueue._send
        self._quick_get = self._outqueue._recv
class MemmappingPool(PicklingPool):
    """Process pool that shares large arrays to avoid memory copy.

    This drop-in replacement for `multiprocessing.pool.Pool` makes
    it possible to work efficiently with shared memory in a numpy
    context.

    Existing instances of numpy.memmap are preserved: the child
    suprocesses will have access to the same shared memory in the
    original mode except for the 'w+' mode that is automatically
    transformed as 'r+' to avoid zeroing the original data upon
    instantiation.

    Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem such as child
    processes to access their content via memmapping (file system
    backed shared memory).

    Note: it is important to call the terminate method to collect
    the temporary folder used by the pool.

    Parameters
    ----------
    processes: int, optional
        Number of worker processes running concurrently in the pool.
    initializer: callable, optional
        Callable executed on worker process creation.
    initargs: tuple, optional
        Arguments passed to the initializer callable.
    temp_folder: (str, callable) optional
        If str:
          Folder to be used by the pool for memmapping large arrays
          for sharing memory with worker processes. If None, this will try in
          order:
          - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
          - /dev/shm if the folder exists and is writable: this is a RAMdisk
            filesystem available by default on modern Linux distributions,
          - the default system temporary folder that can be overridden
            with TMP, TMPDIR or TEMP environment variables, typically /tmp
            under Unix operating systems.
        if callable:
            An callable in charge of dynamically resolving a temporary folder
            for memmapping large arrays.
    max_nbytes int or None, optional, 1e6 by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder.
        Use None to disable memmapping of large arrays.
    mmap_mode: {'r+', 'r', 'w+', 'c'}
        Memmapping mode for numpy arrays passed to workers.
        See 'max_nbytes' parameter documentation for more details.
    forward_reducers: dictionary, optional
        Reducers used to pickle objects passed from master to worker
        processes: see below.
    backward_reducers: dictionary, optional
        Reducers used to pickle return values from workers back to the
        master process.
    verbose: int, optional
        Make it possible to monitor how the communication of numpy arrays
        with the subprocess is handled (pickling or memmapping)
    prewarm: bool or str, optional, "auto" by default.
        If True, force a read on newly memmapped array to make sure that OS
        pre-cache it in memory. This can be useful to avoid concurrent disk
        access when the same data array is passed to different worker
        processes. If "auto" (by default), prewarm is set to True, unless the
        Linux shared memory partition /dev/shm is available and used as temp
        folder.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that give an instance of `type` will return
    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
    of the pickled `tuple_of_objects` as would return a `__reduce__`
    method. See the standard library documentation on pickling for more
    details.
    """

    def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
                 mmap_mode='r', forward_reducers=None, backward_reducers=None,
                 verbose=0, context_id=None, prewarm=False, **kwargs):
        if context_id is not None:
            # Kept only for backward compatibility; the value is ignored.
            warnings.warn('context_id is deprecated and ignored in joblib'
                          ' 0.9.4 and will be removed in 0.11',
                          DeprecationWarning)
        manager = TemporaryResourcesManager(temp_folder)
        self._temp_folder_manager = manager
        # The usage of a temp_folder_resolver over a simple temp_folder is
        # superfluous for multiprocessing pools, as they don't get reused, see
        # get_memmapping_executor for more details. We still use it for code
        # simplicity.
        forward_reducers, backward_reducers = \
            get_memmapping_reducers(
                temp_folder_resolver=manager.resolve_temp_folder_name,
                max_nbytes=max_nbytes, mmap_mode=mmap_mode,
                forward_reducers=forward_reducers,
                backward_reducers=backward_reducers, verbose=verbose,
                unlink_on_gc_collect=False, prewarm=prewarm)
        poolargs = dict(
            processes=processes,
            forward_reducers=forward_reducers,
            backward_reducers=backward_reducers)
        poolargs.update(kwargs)
        super(MemmappingPool, self).__init__(**poolargs)

    def terminate(self):
        """Terminate the pool and delete its temporary memmap folder."""
        n_retries = 10
        for i in range(n_retries):
            try:
                super(MemmappingPool, self).terminate()
                break
            except OSError as e:
                if isinstance(e, WindowsError):
                    # Workaround occasional "[Error 5] Access is denied" issue
                    # when trying to terminate a process under windows.
                    sleep(0.1)
                    if i + 1 == n_retries:
                        warnings.warn("Failed to terminate worker processes in"
                                      " multiprocessing pool: %r" % e)
        # Clean up the temporary folder backing the memmapped arrays.
        self._temp_folder_manager._unlink_temporary_resources()

    @property
    def _temp_folder(self):
        # Legacy property in tests. could be removed if we refactored the
        # memmapping tests. SHOULD ONLY BE USED IN TESTS!
        # We cache this property because it is called late in the tests - at
        # this point, all context have been unregistered, and
        # resolve_temp_folder_name raises an error.
        if getattr(self, '_cached_temp_folder', None) is not None:
            return self._cached_temp_folder
        else:
            self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name()  # noqa
            return self._cached_temp_folder
| bsd-3-clause | 7dffce81efcc42e0fb1a247397723898 | 39.721591 | 99 | 0.64797 | 4.496236 | false | false | false | false |
airspeed-velocity/asv | asv/statistics.py | 1 | 15299 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Author: Pauli Virtanen, 2016
import math
from operator import index
def compute_stats(samples, number):
    """
    Statistical analysis of measured samples.
    Parameters
    ----------
    samples : list of float
        List of total times (y) of benchmarks.
    number : int
        Repeat number for each sample.
    Returns
    -------
    beta_hat : float
        Estimated time per iteration
    stats : dict
        Information on statistics of the estimator.
    """
    if len(samples) < 1:
        # Nothing measured: no estimate, no stats.
        return None, None
    Y = list(samples)
    # Median and quantiles
    y_50, ci_50 = quantile_ci(Y, 0.5, alpha_min=0.99)
    y_25 = quantile(Y, 0.25)
    y_75 = quantile(Y, 0.75)
    # If nonparametric CI estimation didn't give an estimate,
    # use the credible interval of a bayesian posterior distribution.
    a, b = ci_50
    if (math.isinf(a) or math.isinf(b)) and len(Y) > 1:
        # Compute posterior distribution for location, assuming
        # exponential noise. The MLE is equal to the median.
        c = LaplacePosterior(Y)
        # Use the CI from that distribution to extend beyond sample
        # bounds
        if math.isinf(a):
            a = min(c.ppf(0.01 / 2), min(Y))
        if math.isinf(b):
            b = max(c.ppf(1 - 0.01 / 2), max(Y))
        ci_50 = (a, b)
    # Produce results: the point estimate is the sample median, and the
    # stats dict carries the 99% CI, quartiles and sampling metadata.
    result = y_50
    stats = {'ci_99_a': ci_50[0],
             'ci_99_b': ci_50[1],
             'q_25': y_25,
             'q_75': y_75,
             'repeat': len(Y),
             'number': number}
    return result, stats
def get_err(result, stats):
    """
    Return an 'error measure' suitable for informing the user
    about the spread of the measurement results.

    The measure is half the interquartile range.  ``result`` is unused
    but kept for interface compatibility with callers.
    """
    return (stats['q_75'] - stats['q_25']) / 2
def get_weight(stats):
    """
    Return a data point weight for the result.

    The weight is the reciprocal of half the 99% CI width; ``None`` is
    returned when the interval is missing, infinite or degenerate.
    """
    if stats is None:
        return None
    if 'ci_99_a' not in stats or 'ci_99_b' not in stats:
        return None
    lo = stats['ci_99_a']
    hi = stats['ci_99_b']
    if math.isinf(lo) or math.isinf(hi):
        # Infinite interval is due to too few samples --- consider
        # weight as missing
        return None
    try:
        return 2 / abs(hi - lo)
    except ZeroDivisionError:
        return None
def is_different(samples_a, samples_b, stats_a, stats_b, p_threshold=0.002):
    """Check whether the samples are statistically different.
    If sample data is not provided, or the sample is too small, falls
    back to a pessimistic CI-based check. If it returns True, then the
    difference is statistically significant. If it returns False, it
    might or might not be statistically significant.
    Parameters
    ----------
    samples_a, samples_b
        Input samples
    stats_a, stats_b
        Input stats data
    """
    if samples_a is not None and samples_b is not None:
        # Raw data present: Mann-Whitney U test, but only if there's
        # enough data so that the test can return True
        a = [x for x in samples_a if not math.isnan(x)]
        b = [x for x in samples_b if not math.isnan(x)]
        # p_min is the smallest p-value achievable for these sample sizes;
        # if even that cannot clear the threshold, skip the test entirely.
        p_min = 1 / binom(len(a) + len(b), min(len(a), len(b)))
        if p_min < p_threshold:
            _, p = mann_whitney_u(a, b)
            return p < p_threshold
    # If confidence intervals overlap, reject.
    # Corresponds to a test with ill-specified threshold p-value,
    # which generally can be significantly smaller than p <= 0.01
    # depending on the actual data. For normal test (known variance),
    # 0.00027 <= p <= 0.01.
    ci_a = (stats_a['ci_99_a'], stats_a['ci_99_b'])
    ci_b = (stats_b['ci_99_a'], stats_b['ci_99_b'])
    if ci_a[1] >= ci_b[0] and ci_a[0] <= ci_b[1]:
        return False
    return True
def quantile_ci(x, q, alpha_min=0.01):
    """
    Compute a quantile and a confidence interval.
    Assumes independence, but otherwise nonparametric.
    Parameters
    ----------
    x : list of float
        Samples
    q : float
        Quantile to compute, in [0,1].
    alpha_min : float, optional
        Limit for coverage.
        The result has coverage >= 1 - alpha_min.
    Returns
    -------
    m : float
        Quantile of x
    ci : tuple of floats
        Confidence interval (a, b), of coverage >= alpha_min.
    """
    y = sorted(x)
    n = len(y)
    # Normalize so alpha_min is the tail mass to split across both sides.
    alpha_min = min(alpha_min, 1 - alpha_min)
    pa = alpha_min / 2
    pb = 1 - pa
    a = -math.inf
    b = math.inf
    # It's known that
    #
    # Pr[X_{(r)} < m < X_{(s)}] = Pr[r <= K <= s-1], K ~ Bin(n,p)
    #
    # where cdf(m) = p defines the quantile.
    #
    # Simplest median CI follows by picking r,s such that
    #
    # F(r;n,q) <= alpha/2
    # F(s;n,q) >= 1 - alpha/2
    #
    # F(k;n,q) = sum(binom_pmf(n, j, q) for j in range(k))
    #
    # Then (y[r-1], y[s-1]) is a CI.
    # If no such r or s exists, replace by +-inf.
    F = 0
    for k, yp in enumerate(y):
        F += binom_pmf(n, k, q)
        # F = F(k+1;n,q)
        if F <= pa:
            a = yp
        if F >= pb:
            b = yp
            break
    m = quantile(y, q)
    return m, (a, b)
def quantile(x, q):
    """
    Compute quantile/percentile of the data

    Uses linear interpolation between the two nearest order statistics.

    Parameters
    ----------
    x : list of float
        Data set
    q : float
        Quantile to compute, 0 <= q <= 1
    """
    if not 0 <= q <= 1:
        raise ValueError("Invalid quantile")
    data = sorted(x)
    count = len(data)
    # Fractional position of the quantile among the sorted values.
    pos = (count - 1) * q
    lo = int(math.floor(pos))
    frac = pos - lo
    if lo == count - 1:
        return data[-1]
    return (1 - frac) * data[lo] + frac * data[lo + 1]
# Module-level memo shared across mann_whitney_u calls; cleared when it
# grows too large (see below).
_mann_whitney_u_memo = {}
def mann_whitney_u(x, y, method='auto'):
    """
    Mann-Whitney U test
    Ties are handled conservatively, returning the least significant
    tie breaking.
    Parameters
    ----------
    x, y : list of float
        Samples to test
    method : {'auto', 'exact', 'normal'}
        Whether to compute p-value exactly of via normal approximation.
        The option 'auto' switches to approximation for sample size > 20.
    Returns
    -------
    u : int
        U-statistic
    p : float
        p-value for two-sided alternative
    References
    ----------
    .. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
    .. [2] Gibbons & Chakraborti, "Nonparametric statistical inference". (2003)
    """
    memo = _mann_whitney_u_memo
    # Bound the memo size so repeated calls cannot grow it without limit.
    if len(memo) > 100000:
        memo.clear()
    m = len(x)
    n = len(y)
    if method == 'auto':
        if max(m, n) > 20:
            method = 'normal'
        else:
            method = 'exact'
    u, ties = mann_whitney_u_u(x, y)
    # Conservative tie breaking
    if u <= m * n // 2 and u + ties >= m * n // 2:
        ties = m * n // 2 - u
    # Pick the tie assignment that moves the statistic closest to the
    # center of its range (least significant outcome).
    ux1 = min(u, m * n - u)
    ux2 = min(u + ties, m * n - (u + ties))
    if ux1 >= ux2:
        ux = ux1
    else:
        u = u + ties
        ux = ux2
    # Get p-value
    if method == 'exact':
        # Two-sided exact p-value: both tails of the discrete distribution.
        p1 = mann_whitney_u_cdf(m, n, ux, memo)
        p2 = 1.0 - mann_whitney_u_cdf(m, n, max(m * n // 2, m * n - ux - 1), memo)
        p = p1 + p2
    elif method == 'normal':
        # Normal approximation with the standard mean m*n/2 and variance.
        N = m + n
        var = m * n * (N + 1) / 12
        z = (ux - m * n / 2) / math.sqrt(var)
        cdf = 0.5 * math.erfc(-z / math.sqrt(2))
        p = 2 * cdf
    else:
        raise ValueError(f"Unknown method {repr(method)}")
    return u, p
def mann_whitney_u_u(x, y):
    """Return the raw Mann-Whitney U statistic for ``x`` over ``y`` and the
    number of tied pairs, counted over all (x, y) pairs."""
    wins = sum(1 for a in x for b in y if a > b)
    ties = sum(1 for a in x for b in y if a == b)
    return wins, ties
def mann_whitney_u_cdf(m, n, u, memo=None):
    """Cumulative distribution of the U statistic: Pr[U <= u] for sample
    sizes (m, n), computed by summing the exact pmf."""
    if memo is None:
        memo = {}
    cdf = 0
    for uu in range(u + 1):
        cdf += mann_whitney_u_pmf(m, n, uu, memo)
    return cdf
def mann_whitney_u_pmf(m, n, u, memo=None):
    """Exact pmf of the U statistic: r(m, n, u) / binom(m + n, m)."""
    if memo is None:
        memo = {}
    return mann_whitney_u_r(m, n, u, memo) / binom(m + n, m)
def mann_whitney_u_r(m, n, u, memo=None):
    """
    Number of orderings in Mann-Whitney U test.
    The PMF of U for samples of sizes (m, n) is given by
    p(u) = r(m, n, u) / binom(m + n, m).
    References
    ----------
    .. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
    """
    # Base cases of the Mann-Whitney recurrence.
    if u < 0:
        return 0
    if m == 0 or n == 0:
        return 1 if u == 0 else 0
    # Memoized recursion; the memo replaces an explicit table construction.
    if memo is None:
        memo = {}
    key = (m, n, u)
    if key in memo:
        return memo[key]
    count = (mann_whitney_u_r(m, n - 1, u, memo)
             + mann_whitney_u_r(m - 1, n, u - n, memo))
    memo[key] = count
    return count
def binom_pmf(n, k, p):
    """Binomial pmf = (n choose k) p**k (1 - p)**(n - k)

    Computed in log space via lgamma to stay stable for large n.
    """
    if not (0 <= k <= n):
        return 0
    # Degenerate success probabilities: all mass at k == 0 or k == n.
    if p == 0:
        return 1.0 * (k == 0)
    if p == 1.0:
        return 1.0 * (k == n)
    log_p = math.log(p)
    log_q = math.log(1 - p)
    return math.exp(math.lgamma(1 + n) - math.lgamma(1 + n - k) - math.lgamma(1 + k) +
                    k * log_p + (n - k) * log_q)
def binom(n, k):
    """
    Binomial coefficient (n over k)

    Exact integer computation via the multiplicative formula; returns 0
    when k is out of range.
    """
    n = index(n)
    k = index(k)
    if not 0 <= k <= n:
        return 0
    numerator, denominator = 1, 1
    # Use symmetry: iterate over the smaller of k and n - k.
    for j in range(1, min(k, n - k) + 1):
        numerator *= n + 1 - j
        denominator *= j
    return numerator // denominator
class LaplacePosterior:
    """
    Univariate distribution::
        p(beta|y) = N [sum(|y_j - beta|)]**(-nu-1)
    where N is the normalization factor.
    Parameters
    ----------
    y : list of float
        Samples
    nu : float, optional
        Degrees of freedom. Default: len(y)-1
    Notes
    -----
    This is the posterior distribution in the Bayesian model assuming
    Laplace distributed noise::
        p(y|beta,sigma) = N exp(- sum_j (1/sigma) |y_j - beta|)
        p(sigma) ~ 1/sigma
        nu = len(y) - 1
    The MLE for beta is median(y).
    Note that the same approach applied to a Gaussian model::
        p(y|beta,sigma) = N exp(- sum_j 1/(2 sigma^2) (y_j - beta)^2)
    results to::
        p(beta|y) = N T(t, m-1); t = (beta - mean(y)) / (sstd(y) / sqrt(m))
    where ``T(t, nu)`` is the Student t-distribution pdf, which then gives
    the standard textbook formulas for the mean.
    """
    def __init__(self, y, nu=None):
        if len(y) == 0:
            raise ValueError("empty input")
        if nu is None:
            self.nu = len(y) - 1
        else:
            self.nu = nu
        # Sort input
        y = sorted(y)
        # Get location and scale so that data is centered at MLE, and
        # the unnormalized PDF at MLE has amplitude ~ 1/nu.
        #
        # Proper scaling of inputs is important to avoid overflows
        # when computing the unnormalized CDF integrals below.
        self.mle = quantile(y, 0.5)
        self._y_scale = sum(abs(yp - self.mle) for yp in y)
        self._y_scale *= self.nu**(1 / (self.nu + 1))
        # Shift and scale
        if self._y_scale != 0:
            self.y = [(yp - self.mle) / self._y_scale for yp in y]
        else:
            # Degenerate data (all samples equal): work in a zero vector and
            # special-case pdf/cdf/ppf below.
            self.y = [0 for yp in y]
        self._cdf_norm = None
        self._cdf_memo = {}
    def _cdf_unnorm(self, beta):
        """
        Unnormalized CDF of this distribution::
            cdf_unnorm(b) = int_{-oo}^{b} 1/(sum_j |y - b'|)**(m+1) db'
        """
        # NaN input propagates (beta != beta is the NaN check).
        if beta != beta:
            return beta
        # k0 = index of the first sorted sample strictly above beta.
        for k, y in enumerate(self.y):
            if y > beta:
                k0 = k
                break
        else:
            k0 = len(self.y)
        cdf = 0
        nu = self.nu
        # Save some work by memoizing intermediate results
        if k0 - 1 in self._cdf_memo:
            k_start = k0
            cdf = self._cdf_memo[k0 - 1]
        else:
            k_start = 0
            cdf = 0
        # Do the integral piecewise, resolving the absolute values
        for k in range(k_start, k0 + 1):
            # On (y[k-1], y[k]): sum_j |y_j - b| = c*b + y with the
            # coefficients below.
            c = 2 * k - len(self.y)
            y = sum(self.y[k:]) - sum(self.y[:k])
            if k == 0:
                a = -math.inf
            else:
                a = self.y[k - 1]
            if k == k0:
                b = beta
            else:
                b = self.y[k]
            if c == 0:
                # Integrand constant in b on this interval.
                term = (b - a) / y**(nu + 1)
            else:
                term = 1 / (nu * c) * ((a * c + y)**(-nu) - (b * c + y)**(-nu))
            cdf += max(0, term)  # avoid rounding error
            if k != k0:
                self._cdf_memo[k] = cdf
        if beta == math.inf:
            # Cache the full normalization integral as well.
            self._cdf_memo[len(self.y)] = cdf
        return cdf
    def _ppf_unnorm(self, cdfx):
        """
        Inverse function for _cdf_unnorm
        """
        # Find interval
        for k in range(len(self.y) + 1):
            if cdfx <= self._cdf_memo[k]:
                break
        # Invert on interval
        c = 2 * k - len(self.y)
        y = sum(self.y[k:]) - sum(self.y[:k])
        nu = self.nu
        if k == 0:
            term = cdfx
        else:
            a = self.y[k - 1]
            term = cdfx - self._cdf_memo[k - 1]
        # Invert the closed-form piecewise integral from _cdf_unnorm.
        if k == 0:
            z = -nu * c * term
            if z > 0:
                beta = (z**(-1 / nu) - y) / c
            else:
                beta = -math.inf
        elif c == 0:
            beta = a + term * y**(nu + 1)
        else:
            z = (a * c + y)**(-nu) - nu * c * term
            if z > 0:
                beta = (z**(-1 / nu) - y) / c
            else:
                beta = math.inf
        if k < len(self.y):
            # Clamp to the interval's right endpoint (rounding protection).
            beta = min(beta, self.y[k])
        return beta
    def pdf(self, beta):
        """
        Probability distribution function
        """
        return math.exp(self.logpdf(beta))
    def logpdf(self, beta):
        """
        Logarithm of probability distribution function
        """
        if self._y_scale == 0:
            # Degenerate distribution: a point mass at the MLE.
            return math.inf if beta == self.mle else -math.inf
        beta = (beta - self.mle) / self._y_scale
        if self._cdf_norm is None:
            self._cdf_norm = self._cdf_unnorm(math.inf)
        ws = sum(abs(yp - beta) for yp in self.y)
        m = self.nu
        return -(m + 1) * math.log(ws) - math.log(self._cdf_norm) - math.log(self._y_scale)
    def cdf(self, beta):
        """
        Cumulative probability distribution function
        """
        if self._y_scale == 0:
            # Point mass at the MLE: step function.
            return 1.0 * (beta > self.mle)
        beta = (beta - self.mle) / self._y_scale
        if self._cdf_norm is None:
            self._cdf_norm = self._cdf_unnorm(math.inf)
        return self._cdf_unnorm(beta) / self._cdf_norm
    def ppf(self, cdf):
        """
        Percent point function (inverse function for cdf)
        """
        if cdf < 0 or cdf > 1.0:
            return math.nan
        if self._y_scale == 0:
            return self.mle
        if self._cdf_norm is None:
            self._cdf_norm = self._cdf_unnorm(math.inf)
        cdfx = min(cdf * self._cdf_norm, self._cdf_norm)
        beta = self._ppf_unnorm(cdfx)
        # Undo the shift/scale applied in __init__.
        return beta * self._y_scale + self.mle
| bsd-3-clause | df3740293b77da5d496f49442cb1ed26 | 23.439297 | 91 | 0.494869 | 3.248885 | false | false | false | false |
airspeed-velocity/asv | asv/commands/quickstart.py | 1 | 3555 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import shutil
from . import Command
from ..console import log, color_print
class Quickstart(Command):
    """Create a new benchmarking suite by copying the bundled template."""

    @classmethod
    def setup_arguments(cls, subparsers):
        """Register the ``quickstart`` subcommand and its options."""
        parser = subparsers.add_parser(
            "quickstart", help="Create a new benchmarking suite",
            description="Creates a new benchmarking suite")
        parser.add_argument(
            "--dest", "-d", default=".",
            help="The destination directory for the new benchmarking "
            "suite")
        grp = parser.add_mutually_exclusive_group()
        grp.add_argument(
            "--top-level", action="store_true", dest="top_level", default=None,
            help="Use layout suitable for putting the benchmark suite on "
            "the top level of the project's repository")
        grp.add_argument(
            "--no-top-level", action="store_false", dest="top_level", default=None,
            help="Use layout suitable for putting the benchmark suite in "
            "a separate repository")
        parser.set_defaults(func=cls.run_from_args)
        return parser
    @classmethod
    def run_from_args(cls, args):
        return cls.run(dest=args.dest, top_level=args.top_level)
    @classmethod
    def run(cls, dest=".", top_level=None):
        """Copy the template suite into *dest*.

        Parameters
        ----------
        dest : str
            Destination directory.
        top_level : bool or None
            Whether to configure the suite for the top level of the project
            repository; ``None`` prompts the user interactively.

        Returns
        -------
        int or None
            1 if template content already exists at *dest*, else None.
        """
        log.info("Setting up new Airspeed Velocity benchmark suite.")
        if top_level is None:
            # No layout specified on the command line: ask interactively.
            log.flush()
            color_print("")
            color_print("Which of the following template layouts to use:")
            color_print("(1) benchmark suite at the top level of the project repository")
            color_print("(2) benchmark suite in a separate repository")
            color_print("")
            while True:
                answer = input("Layout to use? [1/2] ")
                if answer.lower()[:1] == "1":
                    top_level = True
                    break
                elif answer.lower()[:1] == "2":
                    top_level = False
                    break
            color_print("")
        template_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), '..', 'template')
        # First pass: refuse to overwrite anything that already exists,
        # before any file is copied.
        for entry in os.listdir(template_path):
            dest_path = os.path.join(dest, entry)
            if os.path.exists(dest_path):
                log.info("Template content already exists.")
                log.info("Edit asv.conf.json to continue.")
                return 1
        # Second pass: copy the template files/directories over.
        for entry in os.listdir(template_path):
            path = os.path.join(template_path, entry)
            dest_path = os.path.join(dest, entry)
            if os.path.isdir(path):
                shutil.copytree(path, dest_path)
            elif os.path.isfile(path):
                shutil.copyfile(path, dest_path)
        if top_level:
            # Adjust the generated config so asv keeps its working files
            # under a hidden .asv/ directory inside the repository.
            conf_file = os.path.join(dest, 'asv.conf.json')
            with open(conf_file, 'r') as f:
                conf = f.read()
            reps = [('"repo": "",', '"repo": ".",'),
                    ('// "env_dir": "env",', '"env_dir": ".asv/env",'),
                    ('// "results_dir": "results",', '"results_dir": ".asv/results",'),
                    ('// "html_dir": "html",', '"html_dir": ".asv/html",')]
            for src, dst in reps:
                conf = conf.replace(src, dst)
            with open(conf_file, 'w') as f:
                f.write(conf)
        log.info("Edit asv.conf.json to get started.")
| bsd-3-clause | b12a4905af740a3cd73e13b94c9b16ea | 36.421053 | 89 | 0.535865 | 4.105081 | false | false | false | false |
airspeed-velocity/asv | asv/commands/rm.py | 1 | 3613 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
from fnmatch import fnmatchcase
from . import Command, util
from .. import console
from ..console import log
from ..results import iter_results
class Rm(Command):
    """Remove entries matching user-supplied patterns from the results DB."""

    @classmethod
    def setup_arguments(cls, subparsers):
        """Register the ``rm`` subcommand and its options."""
        parser = subparsers.add_parser(
            "rm", help="Remove results from the database",
            description="""
            Removes entries from the results database.
            """)
        parser.add_argument(
            'patterns', nargs='+',
            help="""Pattern(s) to match, each of the form X=Y. X may
            be one of "benchmark", "commit_hash", "python" or any of
            the machine or environment params. Y is a case-sensitive
            glob pattern.""")
        parser.add_argument(
            "-y", action="store_true",
            help="""Don't prompt for confirmation.""")
        parser.set_defaults(func=cls.run_from_args)
        return parser
    @classmethod
    def run_from_conf_args(cls, conf, args):
        return cls.run(conf, args.patterns, args.y)
    @classmethod
    def run(cls, conf, patterns, y=True):
        """Delete matching results.

        Parameters
        ----------
        conf
            asv configuration object (provides ``results_dir``).
        patterns : list of str
            ``X=Y`` glob patterns; ``benchmark=...`` selects individual
            benchmarks inside result files, all other keys filter whole files.
        y : bool
            When False, ask for confirmation before deleting.
        """
        # Split patterns into the (at most one) per-benchmark pattern and
        # the file-level filters.
        global_patterns = {}
        single_benchmark = None
        files_to_remove = set()
        count = 0
        for pattern in patterns:
            parts = pattern.split('=', 1)
            if len(parts) != 2:
                raise util.UserError(f"Invalid pattern '{pattern}'")
            if parts[0] == 'benchmark':
                if single_benchmark is not None:
                    raise util.UserError("'benchmark' appears more than once")
                single_benchmark = parts[1]
            else:
                if parts[0] in global_patterns:
                    raise util.UserError(
                        f"'{parts[0]}' appears more than once")
                global_patterns[parts[0]] = parts[1]
        for result in iter_results(conf.results_dir):
            # A result file matches only if *all* file-level filters match.
            found = True
            for key, val in global_patterns.items():
                if key == 'commit_hash':
                    if not util.hash_equal(result.commit_hash, val):
                        found = False
                        break
                elif key == 'python':
                    if not fnmatchcase(result.env.python, val):
                        found = False
                        break
                else:
                    if not fnmatchcase(result.params.get(key), val):
                        found = False
                        break
            if not found:
                continue
            if single_benchmark is not None:
                # Remove only the matching benchmarks; the file is kept and
                # re-saved below.
                for benchmark in list(result.get_all_result_keys()):
                    if fnmatchcase(benchmark, single_benchmark):
                        count += 1
                        files_to_remove.add(result)
                        result.remove_result(benchmark)
            else:
                files_to_remove.add(result)
        if single_benchmark is not None:
            log.info(f"Removing {count} benchmarks in {len(files_to_remove)} files")
        else:
            log.info(f"Removing {len(files_to_remove)} files")
        if not y:
            do = console.get_answer_default("Perform operations", "n")
            if len(do) and do.lower()[0] != 'y':
                sys.exit(0)
        if single_benchmark is not None:
            # Benchmarks were removed in-memory: write the files back.
            for result in files_to_remove:
                result.save(conf.results_dir)
        else:
            # Whole files matched: delete them.
            for result in files_to_remove:
                result.rm(conf.results_dir)
| bsd-3-clause | cfb3caa28d7562736de5860e43ce92e9 | 33.740385 | 84 | 0.515084 | 4.596692 | false | false | false | false |
joblib/joblib | setup.py | 1 | 1915 | #!/usr/bin/env python
from setuptools import setup
import pathlib
import joblib
# Get the long description from the README file
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / 'README.rst').read_text(encoding='utf-8')
# Package metadata; the version is taken from the joblib package itself so
# it is defined in exactly one place.
setup(
    name='joblib',
    version=joblib.__version__,
    author='Gael Varoquaux',
    author_email='gael.varoquaux@normalesup.org',
    url='https://joblib.readthedocs.io',
    project_urls={
        'Source': 'https://github.com/joblib/joblib',
    },
    license='BSD',
    description="Lightweight pipelining with Python functions",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Scientific/Engineering',
        'Topic :: Utilities',
        'Topic :: Software Development :: Libraries',
    ],
    platforms='any',
    # Test fixture data files shipped with the joblib.test package.
    package_data={
        'joblib.test': [
            'data/*.gz',
            'data/*.gzip',
            'data/*.bz2',
            'data/*.xz',
            'data/*.lzma',
            'data/*.pkl',
            'data/*.npy',
            'data/*.npy.z',
        ]
    },
    packages=[
        'joblib', 'joblib.test', 'joblib.test.data',
        'joblib.externals', 'joblib.externals.cloudpickle',
        'joblib.externals.loky', 'joblib.externals.loky.backend',
    ],
    python_requires='>=3.7',
)
| bsd-3-clause | ec7be12d76ab40d7e48e73a83dfebb35 | 30.393443 | 68 | 0.57859 | 3.948454 | false | false | true | false |
airspeed-velocity/asv | asv/commands/common_args.py | 1 | 10888 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import multiprocessing
import argparse
from .. import __version__, util
def add_global_arguments(parser, suppress_defaults=True):
    """Add the arguments shared by every asv (sub)command to *parser*.

    Suppressing defaults is needed in order to allow global arguments both
    before and after the subcommand; only the top-level parser should pass
    suppress_defaults=False.
    """
    common = {"default": argparse.SUPPRESS} if suppress_defaults else {}
    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="Increase verbosity",
        **common)
    parser.add_argument(
        "--config",
        help="Benchmark configuration file",
        default=(argparse.SUPPRESS if suppress_defaults else 'asv.conf.json'))
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__,
        help="Print program version",
        **common)
def add_compare(parser, only_changed_default=False, sort_default='name'):
    """Add the result-comparison arguments (factor, stats, filtering and
    sort order) to *parser*."""
    parser.add_argument(
        '--factor', "-f", type=float, default=1.1,
        help="""The factor above or below which a result is considered
        problematic. For example, with a factor of 1.1 (the default
        value), if a benchmark gets 10%% slower or faster, it will
        be displayed in the results list.""")
    parser.add_argument(
        '--no-stats', action="store_false", dest="use_stats", default=True,
        help="""Do not use result statistics in comparisons, only `factor`
        and the median result.""")
    parser.add_argument(
        '--split', '-s', action='store_true',
        help="""Split the output into a table of benchmarks that have
        improved, stayed the same, and gotten worse.""")
    parser.add_argument(
        '--only-changed', action='store_true', default=only_changed_default,
        help="""Whether to show only changed results.""")
    # Undocumented negation so callers with only_changed_default=True can
    # still switch the behavior off.
    parser.add_argument('--no-only-changed', dest='only_changed', action='store_false')
    parser.add_argument(
        '--sort', action='store', type=str, choices=('name', 'ratio'),
        default=sort_default, help="""Sort order""")
def add_show_stderr(parser):
    """Add the --show-stderr flag to *parser*."""
    parser.add_argument(
        "--show-stderr", "-e", action="store_true",
        help="""Display the stderr output from the benchmarks.""")
class DictionaryArgAction(argparse.Action):
    """
    Parses multiple key=value assignments into a dictionary.

    ``converters`` maps keys to callables (or ``(dest_key, callable)``
    pairs to rename the key), ``choices`` restricts the allowed keys, and
    ``dict_dest`` pins the key so the option value is taken verbatim.
    """

    def __init__(self, option_strings, dest, converters=None, choices=None,
                 dict_dest=None, **kwargs):
        self.converters = {} if converters is None else converters
        self.__choices = choices
        self.dict_dest = dict_dest
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Determine the key and raw value for this assignment.
        if self.dict_dest is not None:
            key = self.dict_dest
            value = values
        else:
            key, sep, value = values.partition("=")
            if not sep:
                raise argparse.ArgumentError(
                    self, "{!r} is not a key=value assignment".format(values))
        if self.__choices is not None and key not in self.__choices:
            raise argparse.ArgumentError(
                self, "{!r} cannot be set".format(key))
        # Apply the converter, which may also rename the destination key.
        dest_key = key
        conv = self.converters.get(key)
        if isinstance(conv, tuple):
            dest_key, conv = conv
        if conv is not None:
            try:
                value = conv(value)
            except ValueError as exc:
                raise argparse.ArgumentError(
                    self, "{!r}: {}".format(key, exc))
        # Accumulate assignments into a single dict on the namespace.
        table = getattr(namespace, self.dest, None)
        if table is None:
            table = {}
        table[dest_key] = value
        setattr(namespace, self.dest, table)
def add_bench(parser):
    """Add benchmark selection and attribute-override arguments to *parser*.

    The helper parsers and the ``converters`` table must live inside this
    function: the trailing ``parser.add_argument`` calls reference the
    ``parser`` parameter, so at module level they would be unresolved (the
    original text had them dedented out of the function).
    """
    parser.add_argument(
        "--bench", "-b", type=str, action="append",
        help="""Regular expression(s) for benchmark to run. When not
        provided, all benchmarks are run.""")

    def parse_repeat(value):
        # "repeat" is either a plain integer or a
        # "(min_repeat, max_repeat, max_time)" tuple.
        try:
            return int(value)
        except ValueError:
            pass
        min_repeat, max_repeat, max_time = value.lstrip('(').rstrip(')').split(',')
        value = (int(min_repeat), int(max_repeat), float(max_time))
        return value

    def parse_affinity(value):
        # Accept a comma-separated list of CPU ids and/or "a-b" ranges.
        if "," in value:
            value = value.split(",")
        else:
            value = [value]
        affinity_list = []
        for v in value:
            if "-" in v:
                a, b = v.split("-", 1)
                a = int(a)
                b = int(b)
                affinity_list.extend(range(a, b + 1))
            else:
                affinity_list.append(int(v))
        num_cpu = multiprocessing.cpu_count()
        for n in affinity_list:
            if not (0 <= n < num_cpu):
                raise ValueError("CPU {!r} not in range 0-{!r}".format(n, num_cpu - 1))
        return affinity_list

    # Converters for the benchmark attributes that may be overridden;
    # a (dest_key, conv) tuple renames the attribute.
    converters = {
        'timeout': float,
        'version': str,
        'warmup_time': float,
        'repeat': parse_repeat,
        'number': int,
        'rounds': int,
        'processes': ('rounds', int),  # backward compatibility
        'sample_time': float,
        'cpu_affinity': parse_affinity
    }

    parser.add_argument(
        "--attribute", "-a", action=DictionaryArgAction,
        choices=tuple(converters.keys()), converters=converters,
        help="""Override a benchmark attribute, e.g. `-a repeat=10`.""")
    parser.add_argument(
        "--cpu-affinity", action=DictionaryArgAction, dest="attribute",
        dict_dest="cpu_affinity",
        choices=tuple(converters.keys()), converters=converters,
        help=("Set CPU affinity for running the benchmark, in format: "
              "0 or 0,1,2 or 0-3. Default: not set"))
def add_machine(parser):
    """Add the --machine argument to *parser*."""
    parser.add_argument(
        "--machine", "-m", type=str, default=None,
        help="""Use the given name to retrieve machine information.
        If not provided, the hostname is used. If no entry with that
        name is found, and there is only one entry in
        ~/.asv-machine.json, that one entry will be used.""")
class PythonArgAction(argparse.Action):
    """
    Backward compatibility --python XYZ argument,
    will be interpreted as --environment :XYZ
    """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        if nargs is not None:
            raise ValueError("nargs not allowed")
        # nargs=1 means argparse always hands __call__ a one-element list.
        super().__init__(option_strings, dest, nargs=1, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        items = list(getattr(namespace, "env_spec", []))
        # BUG FIX: with nargs=1 ``values`` is a list, so the previous
        # ``values == "same"`` comparison never matched and ``--python same``
        # produced ":same" instead of the intended "existing:same".
        for value in values:
            if value == "same":
                items.append("existing:same")
            else:
                items.append(":" + value)
        setattr(namespace, "env_spec", items)
def add_environment(parser, default_same=False):
    """Add the -E/--environment and legacy --python arguments to *parser*.

    ``default_same`` only changes the help text; the default value itself
    is applied by the callers.
    """
    help = """Specify the environment and Python versions for running the
    benchmarks. String of the format 'environment_type:python_version',
    for example 'conda:2.7'. If the Python version is not specified,
    all those listed in the configuration file are run. The special
    environment type 'existing:/path/to/python' runs the benchmarks
    using the given Python interpreter; if the path is omitted,
    the Python running asv is used. For 'existing', the benchmarked
    project must be already installed, including all dependencies.
    """
    if default_same:
        help += "The default value is 'existing:same'"
    else:
        help += """By default, uses the values specified in the
        configuration file."""
    parser.add_argument(
        "-E", "--environment",
        dest="env_spec",
        action="append",
        default=[],
        help=help)
    # The --python argument exists for backward compatibility. It
    # will just set the part after ':' in the environment spec.
    parser.add_argument(
        "--python", action=PythonArgAction, metavar="PYTHON",
        help="Same as --environment=:PYTHON")
def add_launch_method(parser):
    """Add the --launch-method argument to *parser*."""
    parser.add_argument(
        "--launch-method",
        dest="launch_method",
        action="store",
        choices=("auto", "spawn", "forkserver"),
        default="auto",
        help="How to launch benchmarks. Choices: auto, spawn, forkserver")
def add_parallel(parser):
    """Add the --parallel argument to *parser*.

    A bare ``-j`` stores -1 (use all cores); the default of 1 means serial.
    """
    parser.add_argument(
        "--parallel", "-j", nargs='?', type=int, default=1, const=-1,
        help="""Build (but don't benchmark) in parallel. The value is
        the number of CPUs to use, or if no number provided, use the
        number of cores on this machine.""")
def add_record_samples(parser, record_samples_default=False, record_default=False):
    """Add the sample-recording arguments to *parser*.

    Whichever of --record-samples / --no-record-samples matches
    ``record_default`` is hidden from --help, since it is a no-op.
    """
    grp = parser.add_mutually_exclusive_group()
    grp.add_argument(
        "--record-samples", action="store_true", dest="record_samples",
        help=(argparse.SUPPRESS if record_default else
              """Store raw measurement samples, not only statistics"""),
        default=record_default)
    grp.add_argument(
        "--no-record-samples", action="store_false", dest="record_samples",
        help=(argparse.SUPPRESS if not record_default else
              """Do not store raw measurement samples, but only statistics"""),
        default=record_default)
    parser.add_argument(
        "--append-samples", action="store_true",
        help="""Combine new measurement samples with previous results,
        instead of discarding old results. Implies --record-samples.
        The previous run must also have been run with --record/append-samples.""")
def positive_int(string):
    """
    Parse a positive integer argument

    Raises argparse.ArgumentTypeError for non-integers and values <= 0.
    """
    try:
        parsed = int(string)
    except ValueError:
        parsed = None
    if parsed is None or parsed <= 0:
        raise argparse.ArgumentTypeError("%r is not a positive integer" % (string,))
    return parsed
def positive_int_or_inf(string):
    """
    Parse a positive integer argument

    The literal string 'all' is accepted and maps to math.inf.
    """
    if string == 'all':
        return math.inf
    try:
        parsed = int(string)
    except ValueError:
        parsed = None
    if parsed is None or parsed <= 0:
        raise argparse.ArgumentTypeError("%r is not a positive integer or 'all'" % (string,))
    return parsed
def time_period(string, base_period='d'):
    """
    Parse a time period argument with unit suffix

    Delegates to util.parse_human_time; its ValueError is converted to
    argparse.ArgumentTypeError so argparse reports it cleanly.
    """
    try:
        return util.parse_human_time(string, base_period)
    except ValueError as err:
        raise argparse.ArgumentTypeError(str(err))
| bsd-3-clause | f874393905f40ce892a98ec5c9878ece | 33.455696 | 97 | 0.594232 | 4.310372 | false | false | false | false |
joblib/joblib | joblib/externals/loky/backend/reduction.py | 2 | 7062 | ###############################################################################
# Customizable Pickler with some basic reducers
#
# author: Thomas Moreau
#
# adapted from multiprocessing/reduction.py (17/02/2017)
# * Replace the ForkingPickler with a similar _LokyPickler,
# * Add CustomizableLokyPickler to allow customizing pickling process
# on the fly.
#
import copyreg
import io
import functools
import types
import sys
import os
from multiprocessing import util
from pickle import loads, HIGHEST_PROTOCOL
###############################################################################
# Enable custom pickling in Loky.
# Mapping of ``type -> reduce function`` merged into the pickler's dispatch
# table (see CustomizablePickler in set_loky_pickler below).
_dispatch_table = {}
def register(type_, reduce_function):
    """Register a custom reducer for ``type_`` in loky's dispatch table."""
    _dispatch_table[type_] = reduce_function
###############################################################################
# Registers extra pickling routines to improve picklization for loky
# make methods picklable
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
    # Throwaway class used only to obtain the concrete runtime types of
    # plain methods and classmethods, so reducers can be registered for them.
    def f(self):
        pass
    @classmethod
    def h(cls):
        pass
# type(_C().f) is the bound-method type; type(_C.h) is the bound
# classmethod type.
register(type(_C().f), _reduce_method)
register(type(_C.h), _reduce_method)
if not hasattr(sys, "pypy_version_info"):
    # PyPy uses functions instead of method_descriptors and wrapper_descriptors
    def _reduce_method_descriptor(m):
        # Descriptors carry their owning class in __objclass__.
        return getattr, (m.__objclass__, m.__name__)
    # list.append is a method_descriptor, int.__add__ a wrapper_descriptor.
    register(type(list.append), _reduce_method_descriptor)
    register(type(int.__add__), _reduce_method_descriptor)
# Make functools.partial objects picklable
def _reduce_partial(p):
    """Reduce a functools.partial to its function, args and keywords."""
    return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
    """Inverse of _reduce_partial: recreate the partial object."""
    return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)
if sys.platform != "win32":
from ._posix_reduction import _mk_inheritable # noqa: F401
else:
from . import _win_reduction # noqa: F401
# Global state controlling which pickler implementation loky uses; it is
# (re)configured by set_loky_pickler below.
try:
    from joblib.externals import cloudpickle  # noqa: F401
    DEFAULT_ENV = "cloudpickle"
except ImportError:
    # If cloudpickle is not present, fallback to pickle
    DEFAULT_ENV = "pickle"
# The LOKY_PICKLER env variable overrides the default pickler module name.
ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV)
_LokyPickler = None
_loky_pickler_name = None
def set_loky_pickler(loky_pickler=None):
    """Select the pickler class loky uses for inter-process serialization.

    ``loky_pickler`` is a module name exposing a ``Pickler`` attribute, or
    "cloudpickle" (the default, also selected for None/""). The resulting
    class is stored in the module-global ``_LokyPickler``.
    """
    global _LokyPickler, _loky_pickler_name
    if loky_pickler is None:
        loky_pickler = ENV_LOKY_PICKLER
    loky_pickler_cls = None
    # The default loky_pickler is cloudpickle
    if loky_pickler in ["", None]:
        loky_pickler = "cloudpickle"
    # Already configured with this pickler: nothing to do.
    if loky_pickler == _loky_pickler_name:
        return
    if loky_pickler == "cloudpickle":
        from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls
    else:
        try:
            from importlib import import_module
            module_pickle = import_module(loky_pickler)
            loky_pickler_cls = module_pickle.Pickler
        except (ImportError, AttributeError) as e:
            extra_info = ("\nThis error occurred while setting loky_pickler to"
                          f" '{loky_pickler}', as required by the env variable "
                          "LOKY_PICKLER or the function set_loky_pickler.")
            e.args = (e.args[0] + extra_info,) + e.args[1:]
            e.msg = e.args[0]
            raise e
    util.debug(
        f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for "
        "serialization."
    )
    # Subclass the selected pickler so loky's reducers (and any per-call
    # reducers) can be injected into its dispatch table.
    class CustomizablePickler(loky_pickler_cls):
        _loky_pickler_cls = loky_pickler_cls
        def _set_dispatch_table(self, dispatch_table):
            for ancestor_class in self._loky_pickler_cls.mro():
                dt_attribute = getattr(ancestor_class, "dispatch_table", None)
                if isinstance(dt_attribute, types.MemberDescriptorType):
                    # Ancestor class (typically _pickle.Pickler) has a
                    # member_descriptor for its "dispatch_table" attribute. Use
                    # it to set the dispatch_table as a member instead of a
                    # dynamic attribute in the __dict__ of the instance,
                    # otherwise it will not be taken into account by the C
                    # implementation of the dump method if a subclass defines a
                    # class-level dispatch_table attribute as was done in
                    # cloudpickle 1.6.0:
                    # https://github.com/joblib/loky/pull/260
                    dt_attribute.__set__(self, dispatch_table)
                    break
            # On top of member descriptor set, also use setattr such that code
            # that directly access self.dispatch_table gets a consistent view
            # of the same table.
            self.dispatch_table = dispatch_table
        def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
            loky_pickler_cls.__init__(self, writer, protocol=protocol)
            if reducers is None:
                reducers = {}
            if hasattr(self, "dispatch_table"):
                # Force a copy that we will update without mutating the
                # any class level defined dispatch_table.
                loky_dt = dict(self.dispatch_table)
            else:
                # Use standard reducers as bases
                loky_dt = copyreg.dispatch_table.copy()
            # Register loky specific reducers
            loky_dt.update(_dispatch_table)
            # Set the new dispatch table, taking care of the fact that we
            # need to use the member_descriptor when we inherit from a
            # subclass of the C implementation of the Pickler base class
            # with an class level dispatch_table attribute.
            self._set_dispatch_table(loky_dt)
            # Register the reducers
            for type, reduce_func in reducers.items():
                self.register(type, reduce_func)
        def register(self, type, reduce_func):
            """Attach a reducer function to a given type in the dispatch table.
            """
            self.dispatch_table[type] = reduce_func
    _LokyPickler = CustomizablePickler
    _loky_pickler_name = loky_pickler
def get_loky_pickler_name():
    """Return the name of the pickler backend currently selected."""
    # Reading a module-level variable requires no ``global`` declaration.
    return _loky_pickler_name
def get_loky_pickler():
    """Return the pickler class currently installed by set_loky_pickler."""
    # Reading a module-level variable requires no ``global`` declaration.
    return _LokyPickler
# Install the default pickler at import time so that _LokyPickler and
# _loky_pickler_name are ready to use.
set_loky_pickler()
def dump(obj, file, reducers=None, protocol=None):
    """Replacement for pickle.dump() using _LokyPickler."""
    pickler = _LokyPickler(file, reducers=reducers, protocol=protocol)
    pickler.dump(obj)
def dumps(obj, reducers=None, protocol=None):
    """Serialize *obj* to a bytes-like object with the loky pickler."""
    buffer = io.BytesIO()
    dump(obj, buffer, reducers=reducers, protocol=protocol)
    return buffer.getbuffer()
# Public API of this module ("loads" and "register" are presumably defined
# earlier in the file — not visible here).
__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"]
if sys.platform == "win32":
    # On Windows, also re-export the handle duplication helper.
    from multiprocessing.reduction import duplicate
    __all__ += ["duplicate"]
| bsd-3-clause | 27f45e5519e25cab0e38feed0bceae45 | 31.246575 | 81 | 0.609176 | 3.954087 | false | false | false | false |
joblib/joblib | joblib/externals/loky/process_executor.py | 1 | 48885 | ###############################################################################
# Re-implementation of the ProcessPoolExecutor more robust to faults
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from concurrent/futures/process_pool_executor.py (17/02/2017)
# * Add an extra management thread to detect executor_manager_thread failures,
# * Improve the shutdown process to avoid deadlocks,
# * Add timeout for workers,
# * More robust pickling process.
#
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The follow diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | | | | Call Q | | Process |
| | +----------+ | | +-----------+ | Pool |
| | | ... | | | | ... | +---------+
| | | 6 | => | | => | 5, call() | => | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | +--------+ | 4, result | | |
| | | ... | | 3, except | | |
+----------+ +------------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Thomas Moreau (thomas.moreau.2010@gmail.com)'
import os
import gc
import sys
import queue
import struct
import weakref
import warnings
import itertools
import traceback
import threading
from time import time, sleep
import multiprocessing as mp
from functools import partial
from pickle import PicklingError
from concurrent.futures import Executor
from concurrent.futures._base import LOGGER
from concurrent.futures.process import BrokenProcessPool as _BPPException
from multiprocessing.connection import wait
from ._base import Future
from .backend import get_context
from .backend.context import cpu_count
from .backend.queues import Queue, SimpleQueue
from .backend.reduction import set_loky_pickler, get_loky_pickler_name
from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker
from .initializers import _prepare_initializer
# Mechanism to prevent infinite process spawning. When a worker of a
# ProcessPoolExecutor nested in MAX_DEPTH Executor tries to create a new
# Executor, a LokyRecursionError is raised
MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10))
# Nesting level of the current process; updated in each worker by
# _process_worker.
_CURRENT_DEPTH = 0
# Minimum time interval between two consecutive memory leak protection checks.
_MEMORY_LEAK_CHECK_DELAY = 1.
# Number of bytes of memory usage allowed over the reference process size.
_MAX_MEMORY_LEAK_SIZE = int(3e8)
try:
    from psutil import Process
    _USE_PSUTIL = True
    def _get_memory_usage(pid, force_gc=False):
        """Return the RSS memory usage (bytes) of process *pid* via psutil.

        When *force_gc* is True, run a garbage collection in the calling
        process first so collectable cycles do not inflate the measurement.
        """
        if force_gc:
            gc.collect()
        mem_size = Process(pid).memory_info().rss
        mp.util.debug(f'psutil return memory size: {mem_size}')
        return mem_size
except ImportError:
    # Without psutil the memory-leak protection in _process_worker falls
    # back to periodic gc.collect() calls only.
    _USE_PSUTIL = False
class _ThreadWakeup:
def __init__(self):
self._closed = False
self._reader, self._writer = mp.Pipe(duplex=False)
def close(self):
if not self._closed:
self._closed = True
self._writer.close()
self._reader.close()
def wakeup(self):
if not self._closed:
self._writer.send_bytes(b"")
def clear(self):
if not self._closed:
while self._reader.poll():
self._reader.recv_bytes()
class _ExecutorFlags:
"""necessary references to maintain executor states without preventing gc
It permits to keep the information needed by executor_manager_thread
and crash_detection_thread to maintain the pool without preventing the
garbage collection of unreferenced executors.
"""
def __init__(self, shutdown_lock):
self.shutdown = False
self.broken = None
self.kill_workers = False
self.shutdown_lock = shutdown_lock
def flag_as_shutting_down(self, kill_workers=None):
with self.shutdown_lock:
self.shutdown = True
if kill_workers is not None:
self.kill_workers = kill_workers
def flag_as_broken(self, broken):
with self.shutdown_lock:
self.shutdown = True
self.broken = broken
# Prior to 3.9, executor_manager_thread is created as daemon thread. This means
# that it is not joined automatically when the interpreter is shutting down.
# To work around this problem, an exit handler is installed to tell the
# thread to exit when the interpreter is shutting down and then waits until
# it finishes. The thread needs to be daemonized because the atexit hooks are
# called after all non daemonized threads are joined.
#
# Starting 3.9, there exists a specific atexit hook to be called before joining
# the threads so the executor_manager_thread does not need to be daemonized
# anymore.
#
# The atexit hooks are registered when starting the first ProcessPoolExecutor
# to avoid import having an effect on the interpreter.
# Maps each executor manager thread to its (shutdown_lock, _ThreadWakeup)
# pair; keys are weak so finished threads drop out automatically.
_threads_wakeups = weakref.WeakKeyDictionary()
# Set to True by _python_exit() when the interpreter starts shutting down.
_global_shutdown = False
def _python_exit():
    """Interpreter-shutdown hook: wake up and join all manager threads.

    Sets the module-wide ``_global_shutdown`` flag (observed by
    ``_ExecutorManagerThread.is_shutting_down``), wakes every registered
    manager thread, then joins them all.
    """
    global _global_shutdown
    _global_shutdown = True
    items = list(_threads_wakeups.items())
    if len(items) > 0:
        mp.util.debug("Interpreter shutting down. Waking up "
                      f"executor_manager_thread {items}")
        # Wake all threads first, then join them, so no thread is left
        # blocked while we wait on another one.
        for _, (shutdown_lock, thread_wakeup) in items:
            with shutdown_lock:
                thread_wakeup.wakeup()
        for thread, _ in items:
            thread.join()
# With the fork context, _thread_wakeups is propagated to children.
# Clear it after fork to avoid situations that can cause a
# freeze when joining the workers.
mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear())
# Module variable holding the atexit registration for the executors; set
# lazily when the first ProcessPoolExecutor is started.
process_pool_executor_at_exit = None
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _RemoteTraceback(Exception):
"""Embed stringification of remote traceback in local traceback
"""
def __init__(self, tb=None):
self.tb = f'\n"""\n{tb}"""'
def __str__(self):
return self.tb
class _ExceptionWithTraceback(BaseException):
def __init__(self, exc):
tb = getattr(exc, "__traceback__", None)
if tb is None:
_, _, tb = sys.exc_info()
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = tb
def __reduce__(self):
return _rebuild_exc, (self.exc, self.tb)
def _rebuild_exc(exc, tb):
exc.__cause__ = _RemoteTraceback(tb)
return exc
class _WorkItem:
__slots__ = ["future", "fn", "args", "kwargs"]
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem:
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem:
    """Unit of work shipped to a worker process through the call queue."""

    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        # Store the current loky_pickler so it is correctly set in the worker
        self.loky_pickler = get_loky_pickler_name()

    def __call__(self):
        # Mirror the parent's pickler configuration before running the task.
        set_loky_pickler(self.loky_pickler)
        return self.fn(*self.args, **self.kwargs)

    def __repr__(self):
        return f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})"
class _SafeQueue(Queue):
    """Safe Queue set exception to the future object linked to a job"""
    def __init__(self, max_size=0, ctx=None, pending_work_items=None,
                 running_work_items=None, thread_wakeup=None, reducers=None):
        # Shared executor state used to fail the matching future when a
        # _CallItem cannot be serialized by the feeder thread.
        self.thread_wakeup = thread_wakeup
        self.pending_work_items = pending_work_items
        self.running_work_items = running_work_items
        super().__init__(max_size, reducers=reducers, ctx=ctx)
    def _on_queue_feeder_error(self, e, obj):
        """Fail the future tied to *obj* when the feeder cannot pickle it.

        Non-_CallItem objects are delegated to the base class handler.
        """
        if isinstance(obj, _CallItem):
            # format traceback only works on python3
            if isinstance(e, struct.error):
                raised_error = RuntimeError(
                    "The task could not be sent to the workers as it is too "
                    "large for `send_bytes`.")
            else:
                raised_error = PicklingError(
                    "Could not pickle the task to send it to the workers.")
            tb = traceback.format_exception(
                type(e), e, getattr(e, "__traceback__", None))
            raised_error.__cause__ = _RemoteTraceback(''.join(tb))
            work_item = self.pending_work_items.pop(obj.work_id, None)
            self.running_work_items.remove(obj.work_id)
            # work_item can be None if another process terminated. In this
            # case, the executor_manager_thread fails all work_items with
            # BrokenProcessPool
            if work_item is not None:
                work_item.future.set_exception(raised_error)
            del work_item
            # Wake the manager thread so it notices the failed item.
            self.thread_wakeup.wakeup()
        else:
            super()._on_queue_feeder_error(e, obj)
def _get_chunks(chunksize, *iterables):
"""Iterates over zip()ed iterables in chunks. """
it = zip(*iterables)
while True:
chunk = tuple(itertools.islice(it, chunksize))
if not chunk:
return
yield chunk
def _process_chunk(fn, chunk):
"""Processes a chunk of an iterable passed to map.
Runs the function passed to map() on a chunk of the
iterable passed to map.
This function is run in a separate process.
"""
return [fn(*args) for args in chunk]
def _sendback_result(result_queue, work_id, result=None, exception=None):
    """Safely send back the given result or exception"""
    try:
        item = _ResultItem(work_id, result=result, exception=exception)
        result_queue.put(item)
    except BaseException as e:
        # The result itself may be unpicklable: report that failure instead.
        wrapped = _ExceptionWithTraceback(e)
        result_queue.put(_ResultItem(work_id, exception=wrapped))
def _process_worker(call_queue, result_queue, initializer, initargs,
                    processes_management_lock, timeout, worker_exit_lock,
                    current_depth):
    """Evaluates calls from call_queue and places the results in result_queue.
    This worker is run in a separate process.
    Args:
        call_queue: A ctx.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A ctx.Queue of _ResultItems that will written
            to by the worker.
        initializer: A callable initializer, or None
        initargs: A tuple of args for the initializer
        processes_management_lock: A ctx.Lock avoiding worker timeout while
            some workers are being spawned.
        timeout: maximum time to wait for a new item in the call_queue. If that
            time is expired, the worker will shutdown.
        worker_exit_lock: Lock to avoid flagging the executor as broken on
            workers timeout.
        current_depth: Nested parallelism level, to avoid infinite spawning.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            LOGGER.critical('Exception in initializer:', exc_info=True)
            # The parent will notice that the process stopped and
            # mark the pool broken
            return
    # set the global _CURRENT_DEPTH mechanism to limit recursive call
    global _CURRENT_DEPTH
    _CURRENT_DEPTH = current_depth
    # Baseline memory usage and last leak-check timestamp, filled lazily
    # after the first completed task.
    _process_reference_size = None
    _last_memory_leak_check = None
    pid = os.getpid()
    mp.util.debug(f'Worker started with timeout={timeout}')
    while True:
        try:
            call_item = call_queue.get(block=True, timeout=timeout)
            if call_item is None:
                mp.util.info("Shutting down worker on sentinel")
        except queue.Empty:
            mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s")
            # Only time out when no new worker is currently being spawned;
            # otherwise keep waiting for work.
            if processes_management_lock.acquire(block=False):
                processes_management_lock.release()
                call_item = None
            else:
                mp.util.info("Could not acquire processes_management_lock")
                continue
        except BaseException:
            previous_tb = traceback.format_exc()
            try:
                result_queue.put(_RemoteTraceback(previous_tb))
            except BaseException:
                # If we cannot format correctly the exception, at least print
                # the traceback.
                print(previous_tb)
            mp.util.debug('Exiting with code 1')
            sys.exit(1)
        if call_item is None:
            # Notify queue management thread about worker shutdown
            result_queue.put(pid)
            is_clean = worker_exit_lock.acquire(True, timeout=30)
            # Early notify any loky executor running in this worker process
            # (nested parallelism) that this process is about to shutdown to
            # avoid a deadlock waiting indefinitely for the worker to finish.
            _python_exit()
            if is_clean:
                mp.util.debug('Exited cleanly')
            else:
                mp.util.info('Main process did not release worker_exit')
            return
        try:
            r = call_item()
        except BaseException as e:
            exc = _ExceptionWithTraceback(e)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            _sendback_result(result_queue, call_item.work_id, result=r)
            del r
        # Free the resource as soon as possible, to avoid holding onto
        # open files or shared memory that is not needed anymore
        del call_item
        if _USE_PSUTIL:
            if _process_reference_size is None:
                # Make reference measurement after the first call
                _process_reference_size = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                continue
            if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY:
                mem_usage = _get_memory_usage(pid)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # Memory usage stays within bounds: everything is fine.
                    continue
                # Check again memory usage; this time take the measurement
                # after a forced garbage collection to break any reference
                # cycles.
                mem_usage = _get_memory_usage(pid, force_gc=True)
                _last_memory_leak_check = time()
                if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE:
                    # The GC managed to free the memory: everything is fine.
                    continue
                # The process is leaking memory: let the master process
                # know that we need to start a new worker.
                mp.util.info("Memory leak detected: shutting down worker")
                result_queue.put(pid)
                with worker_exit_lock:
                    mp.util.debug('Exit due to memory leak')
                return
        else:
            # if psutil is not installed, trigger gc.collect events
            # regularly to limit potential memory leaks due to reference cycles
            if (_last_memory_leak_check is None or
                    (time() - _last_memory_leak_check >
                     _MEMORY_LEAK_CHECK_DELAY)):
                gc.collect()
                _last_memory_leak_check = time()
class _ExecutorManagerThread(threading.Thread):
    """Manages the communication between this process and the worker processes.
    The manager is run in a local thread.
    Args:
        executor: A reference to the ProcessPoolExecutor that owns
            this thread. A weakref will be own by the manager as well as
            references to internal objects used to introspect the state of
            the executor.
    """
    def __init__(self, executor):
        # Store references to necessary internals of the executor.
        # A _ThreadWakeup to allow waking up the executor_manager_thread from
        # the main Thread and avoid deadlocks caused by permanently
        # locked queues.
        self.thread_wakeup = executor._executor_manager_thread_wakeup
        self.shutdown_lock = executor._shutdown_lock
        # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used
        # to determine if the ProcessPoolExecutor has been garbage collected
        # and that the manager can exit.
        # When the executor gets garbage collected, the weakref callback
        # will wake up the queue management thread so that it can terminate
        # if there is no pending work item.
        def weakref_cb(_,
                       thread_wakeup=self.thread_wakeup,
                       shutdown_lock=self.shutdown_lock):
            if mp is not None:
                # At this point, the multiprocessing module can already be
                # garbage collected. We only log debug info when still
                # possible.
                mp.util.debug('Executor collected: triggering callback for'
                              ' QueueManager wakeup')
            with shutdown_lock:
                thread_wakeup.wakeup()
        self.executor_reference = weakref.ref(executor, weakref_cb)
        # The flags of the executor
        self.executor_flags = executor._flags
        # A list of the ctx.Process instances used as workers.
        self.processes = executor._processes
        # A ctx.Queue that will be filled with _CallItems derived from
        # _WorkItems for processing by the process workers.
        self.call_queue = executor._call_queue
        # A ctx.SimpleQueue of _ResultItems generated by the process workers.
        self.result_queue = executor._result_queue
        # A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        self.work_ids_queue = executor._work_ids
        # A dict mapping work ids to _WorkItems e.g.
        #     {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        self.pending_work_items = executor._pending_work_items
        # A list of the work_ids that are currently running
        self.running_work_items = executor._running_work_items
        # A lock to avoid concurrent shutdown of workers on timeout and spawn
        # of new processes or shut down
        self.processes_management_lock = executor._processes_management_lock
        super().__init__(name="ExecutorManagerThread")
        if sys.version_info < (3, 9):
            self.daemon = True
    def run(self):
        """Main loop: feed the call queue, collect results, handle shutdown."""
        # Main loop for the executor manager thread.
        while True:
            self.add_call_item_to_queue()
            result_item, is_broken, bpe = self.wait_result_broken_or_wakeup()
            if is_broken:
                self.terminate_broken(bpe)
                return
            if result_item is not None:
                self.process_result_item(result_item)
                # Delete reference to result_item to avoid keeping references
                # while waiting on new results.
                del result_item
            if self.is_shutting_down():
                self.flag_executor_shutting_down()
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not self.pending_work_items:
                    self.join_executor_internals()
                    return
    def add_call_item_to_queue(self):
        """Move runnable _WorkItems into the call queue without blocking."""
        # Fills call_queue with _WorkItems from pending_work_items.
        # This function never blocks.
        while True:
            if self.call_queue.full():
                return
            try:
                work_id = self.work_ids_queue.get(block=False)
            except queue.Empty:
                return
            else:
                work_item = self.pending_work_items[work_id]
                if work_item.future.set_running_or_notify_cancel():
                    self.running_work_items += [work_id]
                    self.call_queue.put(_CallItem(work_id,
                                                  work_item.fn,
                                                  work_item.args,
                                                  work_item.kwargs),
                                        block=True)
                else:
                    # The future was cancelled: drop the work item entirely.
                    del self.pending_work_items[work_id]
                    continue
    def wait_result_broken_or_wakeup(self):
        """Block until a result arrives, a worker dies, or a wakeup is sent.

        Returns a ``(result_item, is_broken, bpe)`` triple where *bpe* is a
        BrokenProcessPool/TerminatedWorkerError instance when broken.
        """
        # Wait for a result to be ready in the result_queue while checking
        # that all worker processes are still running, or for a wake up
        # signal send. The wake up signals come either from new tasks being
        # submitted, from the executor being shutdown/gc-ed, or from the
        # shutdown of the python interpreter.
        result_reader = self.result_queue._reader
        wakeup_reader = self.thread_wakeup._reader
        readers = [result_reader, wakeup_reader]
        worker_sentinels = [p.sentinel for p in list(self.processes.values())]
        ready = wait(readers + worker_sentinels)
        bpe = None
        is_broken = True
        result_item = None
        if result_reader in ready:
            try:
                result_item = result_reader.recv()
                if isinstance(result_item, _RemoteTraceback):
                    bpe = BrokenProcessPool(
                        "A task has failed to un-serialize. Please ensure that"
                        " the arguments of the function are all picklable."
                    )
                    bpe.__cause__ = result_item
                else:
                    is_broken = False
            except BaseException as e:
                bpe = BrokenProcessPool(
                    "A result has failed to un-serialize. Please ensure that "
                    "the objects returned by the function are always "
                    "picklable."
                )
                tb = traceback.format_exception(
                    type(e), e, getattr(e, "__traceback__", None))
                bpe.__cause__ = _RemoteTraceback(''.join(tb))
        elif wakeup_reader in ready:
            # This is simply a wake-up event that might either trigger putting
            # more tasks in the queue or trigger the clean up of resources.
            is_broken = False
        else:
            # A worker has terminated and we don't know why, set the state of
            # the executor as broken
            exit_codes = ''
            if sys.platform != "win32":
                # In Windows, introspecting terminated workers exitcodes seems
                # unstable, therefore they are not appended in the exception
                # message.
                exit_codes = (
                    "\nThe exit codes of the workers are "
                    f"{get_exitcodes_terminated_worker(self.processes)}"
                )
            mp.util.debug('A worker unexpectedly terminated. Workers that '
                          'might have caused the breakage: '
                          + str({p.name: p.exitcode
                                 for p in list(self.processes.values())
                                 if p is not None and p.sentinel in ready}))
            bpe = TerminatedWorkerError(
                "A worker process managed by the executor was unexpectedly "
                "terminated. This could be caused by a segmentation fault "
                "while calling the function or by an excessive memory usage "
                "causing the Operating System to kill the worker.\n"
                f"{exit_codes}"
            )
        self.thread_wakeup.clear()
        return result_item, is_broken, bpe
    def process_result_item(self, result_item):
        """Handle one received item: a worker PID exit or a _ResultItem."""
        # Process the received a result_item. This can be either the PID of a
        # worker that exited gracefully or a _ResultItem
        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID, either on request
            # by the executor.shutdown method or by the timeout of the worker
            # itself: we should not mark the executor as broken.
            with self.processes_management_lock:
                p = self.processes.pop(result_item, None)
            # p can be None if the executor is concurrently shutting down.
            if p is not None:
                p._worker_exit_lock.release()
                mp.util.debug(
                    f"joining {p.name} when processing {p.pid} as result_item"
                )
                p.join()
                del p
            # Make sure the executor have the right number of worker, even if a
            # worker timeout while some jobs were submitted. If some work is
            # pending or there is less processes than running items, we need to
            # start a new Process and raise a warning.
            n_pending = len(self.pending_work_items)
            n_running = len(self.running_work_items)
            if (n_pending - n_running > 0 or n_running > len(self.processes)):
                executor = self.executor_reference()
                if (executor is not None
                        and len(self.processes) < executor._max_workers):
                    warnings.warn(
                        "A worker stopped while some jobs were given to the "
                        "executor. This can be caused by a too short worker "
                        "timeout or by a memory leak.", UserWarning
                    )
                    with executor._processes_management_lock:
                        executor._adjust_process_count()
                executor = None
        else:
            # Received a _ResultItem so mark the future as completed.
            work_item = self.pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                self.running_work_items.remove(result_item.work_id)
    def is_shutting_down(self):
        """Return True when the manager thread should stop accepting work."""
        # Check whether we should start shutting down the executor.
        executor = self.executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this thread is not broken AND
        #        * The executor that owns this worker has been collected OR
        #        * The executor that owns this worker has been shutdown.
        # If the executor is broken, it should be detected in the next loop.
        return (_global_shutdown or
                ((executor is None or self.executor_flags.shutdown)
                 and not self.executor_flags.broken))
    def terminate_broken(self, bpe):
        """Fail all pending futures with *bpe*, kill workers and clean up."""
        # Terminate the executor because it is in a broken state. The bpe
        # argument can be used to display more information on the error that
        # lead the executor into becoming broken.
        # Mark the process pool broken so that submits fail right now.
        self.executor_flags.flag_as_broken(bpe)
        # Mark pending tasks as failed.
        for work_item in self.pending_work_items.values():
            work_item.future.set_exception(bpe)
            # Delete references to object. See issue16284
            del work_item
        self.pending_work_items.clear()
        # Terminate remaining workers forcibly: the queues or their
        # locks may be in a dirty state and block forever.
        self.kill_workers(reason="broken executor")
        # clean up resources
        self.join_executor_internals()
    def flag_executor_shutting_down(self):
        """Propagate the shutdown request, cancelling tasks when asked to."""
        # Flag the executor as shutting down and cancel remaining tasks if
        # requested as early as possible if it is not gc-ed yet.
        self.executor_flags.flag_as_shutting_down()
        # Cancel pending work items if requested.
        if self.executor_flags.kill_workers:
            while self.pending_work_items:
                _, work_item = self.pending_work_items.popitem()
                work_item.future.set_exception(ShutdownExecutorError(
                    "The Executor was shutdown with `kill_workers=True` "
                    "before this job could complete."))
                del work_item
            # Kill the remaining worker forcibly to no waste time joining them
            self.kill_workers(reason="executor shutting down")
    def kill_workers(self, reason=''):
        """Forcibly terminate every remaining worker (and its descendants)."""
        # Terminate the remaining workers using SIGKILL. This function also
        # terminates descendant workers of the children in case there is some
        # nested parallelism.
        while self.processes:
            _, p = self.processes.popitem()
            mp.util.debug(f"terminate process {p.name}, reason: {reason}")
            try:
                kill_process_tree(p)
            except ProcessLookupError:  # pragma: no cover
                pass
    def shutdown_workers(self):
        """Ask every worker to exit by sending one sentinel per worker."""
        # shutdown all workers in self.processes
        # Create a list to avoid RuntimeError due to concurrent modification of
        # processes. nb_children_alive is thus an upper bound. Also release the
        # processes' _worker_exit_lock to accelerate the shutdown procedure, as
        # there is no need for hand-shake here.
        with self.processes_management_lock:
            n_children_to_stop = 0
            for p in list(self.processes.values()):
                mp.util.debug(f"releasing worker exit lock on {p.name}")
                p._worker_exit_lock.release()
                n_children_to_stop += 1
        mp.util.debug(f"found {n_children_to_stop} processes to stop")
        # Send the right number of sentinels, to make sure all children are
        # properly terminated. Do it with a mechanism that avoid hanging on
        # Full queue when all workers have already been shutdown.
        n_sentinels_sent = 0
        cooldown_time = 0.001
        while (n_sentinels_sent < n_children_to_stop
                and self.get_n_children_alive() > 0):
            for _ in range(n_children_to_stop - n_sentinels_sent):
                try:
                    self.call_queue.put_nowait(None)
                    n_sentinels_sent += 1
                except queue.Full as e:
                    # Exponential backoff, capped: give up after the wait
                    # grows beyond 10 seconds.
                    if cooldown_time > 10.0:
                        raise e
                    mp.util.info(
                        "full call_queue prevented to send all sentinels at "
                        "once, waiting..."
                    )
                    sleep(cooldown_time)
                    cooldown_time *= 2
                    break
        mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue")
    def join_executor_internals(self):
        """Shut workers down, close queues/wakeup pipe and join processes."""
        self.shutdown_workers()
        # Release the queue's resources as soon as possible. Flag the feeder
        # thread for clean exit to avoid having the crash detection thread flag
        # the Executor as broken during the shutdown. This is safe as either:
        # * We don't need to communicate with the workers anymore
        # * There is nothing left in the Queue buffer except None sentinels
        mp.util.debug("closing call_queue")
        self.call_queue.close()
        self.call_queue.join_thread()
        # Closing result_queue
        mp.util.debug("closing result_queue")
        self.result_queue.close()
        mp.util.debug("closing thread_wakeup")
        with self.shutdown_lock:
            self.thread_wakeup.close()
        # If .join() is not called on the created processes then
        # some ctx.Queue methods may deadlock on macOS.
        with self.processes_management_lock:
            mp.util.debug(f"joining {len(self.processes)} processes")
            n_joined_processes = 0
            while True:
                try:
                    pid, p = self.processes.popitem()
                    mp.util.debug(f"joining process {p.name} with pid {pid}")
                    p.join()
                    n_joined_processes += 1
                except KeyError:
                    break
            mp.util.debug(
                "executor management thread clean shutdown of "
                f"{n_joined_processes} workers"
            )
    def get_n_children_alive(self):
        """Return an upper bound on the number of live worker processes."""
        # This is an upper bound on the number of children alive.
        with self.processes_management_lock:
            return sum(p.is_alive() for p in list(self.processes.values()))
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked and _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# undetermined limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = (
f"system provides too few semaphores ({nsems_max} available, "
"256 necessary)"
)
raise NotImplementedError(_system_limited)
def _chain_from_iterable_of_lists(iterable):
"""
Specialized implementation of itertools.chain.from_iterable.
Each item in *iterable* should be a list. This function is
careful not to keep references to yielded objects.
"""
for element in iterable:
element.reverse()
while element:
yield element.pop()
def _check_max_depth(context):
    """Guard against unbounded recursion of nested executors.

    Parameters
    ----------
    context : multiprocessing context
        Used to detect the 'fork' start method, which cannot nest beyond
        depth 1.

    Raises
    ------
    LokyRecursionError
        If creating one more nested executor would exceed the allowed depth.

    Fixes a typo in the user-facing error message ("intendend" ->
    "intended") and in the comment ("maxmal" -> "maximal").
    """
    # Limit the maximal recursion level
    global _CURRENT_DEPTH
    if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0:
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            "MAX_DEPTH=1. It is not possible to increase this limit when "
            "using the 'fork' start method.")
    if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH:
        raise LokyRecursionError(
            "Could not spawn extra nested processes at depth superior to "
            f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change "
            "this limit with the LOKY_MAX_DEPTH environment variable.")
class LokyRecursionError(RuntimeError):
    """Raised when nested process spawning exceeds the allowed depth."""
class BrokenProcessPool(_BPPException):
    """
    Raised when the executor is broken while a future was in the running state.

    The cause can be an error raised when unpickling the task in the worker
    process or when unpickling the result value in the parent process. It can
    also be caused by a worker process being terminated unexpectedly.
    """
class TerminatedWorkerError(BrokenProcessPool):
    """
    Raised when a process in a ProcessPoolExecutor terminated abruptly
    while a future was in the running state.

    A specialization of :class:`BrokenProcessPool` for the
    worker-killed case.
    """
# Alias for backward compatibility (for code written for loky 1.1.4 and
# earlier). Do not use in new code; catch BrokenProcessPool instead.
BrokenExecutor = BrokenProcessPool
class ShutdownExecutorError(RuntimeError):
    """Raised when work is scheduled on a ProcessPoolExecutor that was
    shut down while a future was still in the running or pending state."""
class ProcessPoolExecutor(Executor):

    # Lazily-installed interpreter-exit hook; see
    # _start_executor_manager_thread.
    _at_exit = None

    def __init__(self, max_workers=None, job_reducers=None,
                 result_reducers=None, timeout=None, context=None,
                 initializer=None, initargs=(), env=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: int, optional (default: cpu_count())
                The maximum number of processes that can be used to execute the
                given calls. If None or not given then as many worker processes
                will be created as the number of CPUs the current process
                can use.
            job_reducers, result_reducers: dict(type: reducer_func)
                Custom reducer for pickling the jobs and the results from the
                Executor. If only `job_reducers` is provided, `result_reducer`
                will use the same reducers
            timeout: int, optional (default: None)
                Idle workers exit after timeout seconds. If a new job is
                submitted after the timeout, the executor will start enough
                new Python processes to make sure the pool of workers is full.
            context: A multiprocessing context to launch the workers. This
                object should provide SimpleQueue, Queue and Process.
            initializer: A callable used to initialize worker processes.
            initargs: A tuple of arguments to pass to the initializer.
            env: A dict of environment variable to overwrite in the child
                process. The environment variables are set before any module is
                loaded. Note that this only works with the loky context.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = cpu_count()
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")
            self._max_workers = max_workers

        if context is None:
            context = get_context()
        self._context = context
        self._env = env

        self._initializer, self._initargs = _prepare_initializer(
            initializer, initargs
        )
        _check_max_depth(self._context)

        if result_reducers is None:
            result_reducers = job_reducers

        # Timeout
        self._timeout = timeout

        # Management thread (started lazily on first submit)
        self._executor_manager_thread = None

        # Internal variables of the ProcessPoolExecutor.
        # NOTE: the duplicate assignments of _processes and
        # _executor_manager_thread present in the original code were
        # removed; each attribute is now initialized exactly once.
        # Map of pids to processes
        self._processes = {}
        self._queue_count = 0
        self._pending_work_items = {}
        self._running_work_items = []
        self._work_ids = queue.Queue()
        self._processes_management_lock = self._context.Lock()
        self._shutdown_lock = threading.Lock()

        # _ThreadWakeup is a communication channel used to interrupt the wait
        # of the main loop of executor_manager_thread from another thread (e.g.
        # when calling executor.submit or executor.shutdown). We do not use the
        # _result_queue to send wakeup signals to the executor_manager_thread
        # as it could result in a deadlock if a worker process dies with the
        # _result_queue write lock still acquired.
        #
        # _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
        self._executor_manager_thread_wakeup = _ThreadWakeup()

        # Flag to hold the state of the Executor. This permits to introspect
        # the Executor state even once it has been garbage collected.
        self._flags = _ExecutorFlags(self._shutdown_lock)

        # Finally setup the queues for interprocess communication
        self._setup_queues(job_reducers, result_reducers)

        mp.util.debug('ProcessPoolExecutor is setup')
def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
    """Create the inter-process call and result queues.

    Parameters
    ----------
    job_reducers, result_reducers : dict(type: reducer_func) or None
        Custom pickling reducers forwarded to the queues.
    queue_size : int, optional
        Capacity of the call queue; defaults to slightly more than the
        number of workers.
    """
    # Make the call queue slightly larger than the number of processes to
    # prevent the worker processes from idling. But don't make it too big
    # because futures in the call queue cannot be cancelled.
    if queue_size is None:
        queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
    self._call_queue = _SafeQueue(
        max_size=queue_size, pending_work_items=self._pending_work_items,
        running_work_items=self._running_work_items,
        thread_wakeup=self._executor_manager_thread_wakeup,
        reducers=job_reducers, ctx=self._context)
    # Killed worker processes can produce spurious "broken pipe"
    # tracebacks in the queue's own worker thread. But we detect killed
    # processes anyway, so silence the tracebacks.
    self._call_queue._ignore_epipe = True
    self._result_queue = SimpleQueue(reducers=result_reducers,
                                     ctx=self._context)
def _start_executor_manager_thread(self):
    """Start the manager thread that feeds workers and collects results.

    Idempotent: does nothing when the manager thread already exists.
    Also installs a module-level interpreter-exit hook the first time a
    manager thread is started.
    """
    if self._executor_manager_thread is None:
        mp.util.debug('_start_executor_manager_thread called')

        # Start the processes so that their sentinels are known.
        self._executor_manager_thread = _ExecutorManagerThread(self)
        self._executor_manager_thread.start()

        # register this executor in a mechanism that ensures it will wakeup
        # when the interpreter is exiting.
        _threads_wakeups[self._executor_manager_thread] = \
            (self._shutdown_lock,
             self._executor_manager_thread_wakeup)

        global process_pool_executor_at_exit
        if process_pool_executor_at_exit is None:
            # Ensure that the _python_exit function will be called before
            # the multiprocessing.Queue._close finalizers which have an
            # exitpriority of 10.
            if sys.version_info < (3, 9):
                process_pool_executor_at_exit = mp.util.Finalize(
                    None, _python_exit, exitpriority=20)
            else:
                # threading._register_atexit runs callbacks before
                # non-daemon thread joins on 3.9+.
                process_pool_executor_at_exit = threading._register_atexit(
                    _python_exit)
def _adjust_process_count(self):
    """Spawn worker processes until the pool holds ``_max_workers``.

    NOTE(review): the only caller visible here
    (_ensure_executor_running) holds _processes_management_lock around
    this call; confirm other callers do the same.
    """
    while len(self._processes) < self._max_workers:
        # The worker holds this semaphore until it has fully exited; it
        # is passed to the worker so shutdown can wait on it.
        worker_exit_lock = self._context.BoundedSemaphore(1)
        args = (self._call_queue, self._result_queue, self._initializer,
                self._initargs, self._processes_management_lock,
                self._timeout, worker_exit_lock, _CURRENT_DEPTH + 1)
        worker_exit_lock.acquire()
        try:
            # Try to spawn the process with some environment variable to
            # overwrite but it only works with the loky context for now.
            p = self._context.Process(target=_process_worker, args=args,
                                      env=self._env)
        except TypeError:
            # Other contexts do not accept the `env` keyword.
            p = self._context.Process(target=_process_worker, args=args)
        p._worker_exit_lock = worker_exit_lock
        p.start()
        self._processes[p.pid] = p
    mp.util.debug(
        f"Adjusted process count to {self._max_workers}: "
        f"{[(p.name, pid) for pid, p in self._processes.items()]}"
    )
def _ensure_executor_running(self):
    """Make sure the full set of workers and the manager thread run."""
    with self._processes_management_lock:
        # Top up the worker pool if any worker slot is unfilled, then
        # (idempotently) start the management thread.
        n_alive = len(self._processes)
        if n_alive != self._max_workers:
            self._adjust_process_count()
        self._start_executor_manager_thread()
def submit(self, fn, *args, **kwargs):
    # Docstring is inherited from Executor.submit via the assignment
    # below, so only comments are added here.
    # All state checks and bookkeeping happen under the shutdown lock so
    # submit cannot race with shutdown() flagging the executor.
    with self._flags.shutdown_lock:
        if self._flags.broken is not None:
            raise self._flags.broken
        if self._flags.shutdown:
            raise ShutdownExecutorError(
                'cannot schedule new futures after shutdown')

        # Cannot submit a new calls once the interpreter is shutting down.
        # This check avoids spawning new processes at exit.
        if _global_shutdown:
            raise RuntimeError('cannot schedule new futures after '
                               'interpreter shutdown')

        # Register the work item under a fresh id and notify the
        # manager thread that there is something to dispatch.
        f = Future()
        w = _WorkItem(f, fn, args, kwargs)
        self._pending_work_items[self._queue_count] = w
        self._work_ids.put(self._queue_count)
        self._queue_count += 1
        # Wake up queue management thread
        self._executor_manager_thread_wakeup.wakeup()

        self._ensure_executor_running()
        return f
submit.__doc__ = Executor.submit.__doc__
def map(self, fn, *iterables, **kwargs):
    """Returns an iterator equivalent to map(fn, iter).

    Args:
        fn: A callable that will take as many arguments as there are
            passed iterables.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        chunksize: If greater than one, the iterables will be chopped into
            chunks of size chunksize and submitted to the process pool.
            If set to one, the items in the list will be sent one at a
            time.

    Returns:
        An iterator equivalent to: map(func, *iterables) but the calls may
        be evaluated out-of-order.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
        Exception: If fn(*args) raises for any values.
    """
    # timeout/chunksize are accepted through **kwargs so the positional
    # signature stays open-ended for *iterables.
    timeout = kwargs.get('timeout', None)
    chunksize = kwargs.get('chunksize', 1)
    if chunksize < 1:
        raise ValueError("chunksize must be >= 1.")

    # Submit batches of chunksize items per task; results are flattened
    # back into single items below.
    results = super().map(
        partial(_process_chunk, fn), _get_chunks(chunksize, *iterables),
        timeout=timeout
    )
    return _chain_from_iterable_of_lists(results)
def shutdown(self, wait=True, kill_workers=False):
    # Docstring inherited from Executor.shutdown via the assignment
    # below; only comments are added here.
    mp.util.debug(f'shutting down executor {self}')

    # Flag the new state first, then wake the manager thread so it
    # notices and performs the actual teardown.
    self._flags.flag_as_shutting_down(kill_workers)
    executor_manager_thread = self._executor_manager_thread
    executor_manager_thread_wakeup = self._executor_manager_thread_wakeup

    if executor_manager_thread_wakeup is not None:
        # Wake up queue management thread
        with self._shutdown_lock:
            self._executor_manager_thread_wakeup.wakeup()

    if executor_manager_thread is not None and wait:
        executor_manager_thread.join()

    # To reduce the risk of opening too many files, remove references to
    # objects that use file descriptors.
    self._executor_manager_thread = None
    self._executor_manager_thread_wakeup = None
    self._call_queue = None
    self._result_queue = None
    self._processes_management_lock = None
shutdown.__doc__ = Executor.shutdown.__doc__
| bsd-3-clause | 772e6ea41411a67113f9e1df497ac9d8 | 39.434243 | 79 | 0.590897 | 4.398902 | false | false | false | false |
joblib/joblib | joblib/numpy_pickle.py | 1 | 26896 | """Utilities for fast persistence of big data, with optional compression."""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import pickle
import os
import warnings
import io
from pathlib import Path
from .compressor import lz4, LZ4_NOT_INSTALLED_ERROR
from .compressor import _COMPRESSORS, register_compressor, BinaryZlibFile
from .compressor import (ZlibCompressorWrapper, GzipCompressorWrapper,
BZ2CompressorWrapper, LZMACompressorWrapper,
XZCompressorWrapper, LZ4CompressorWrapper)
from .numpy_pickle_utils import Unpickler, Pickler
from .numpy_pickle_utils import _read_fileobject, _write_fileobject
from .numpy_pickle_utils import _read_bytes, BUFFER_SIZE
from .numpy_pickle_utils import _ensure_native_byte_order
from .numpy_pickle_compat import load_compatibility
from .numpy_pickle_compat import NDArrayWrapper
# For compatibility with old versions of joblib, we need ZNDArrayWrapper
# to be visible in the current namespace.
# Explicitly skipping next line from flake8 as it triggers an F401 warning
# which we don't care.
from .numpy_pickle_compat import ZNDArrayWrapper # noqa
from .backports import make_memmap
# Register supported compressors so dump()/load() can resolve them by
# name and by filename extension.
register_compressor('zlib', ZlibCompressorWrapper())
register_compressor('gzip', GzipCompressorWrapper())
register_compressor('bz2', BZ2CompressorWrapper())
register_compressor('lzma', LZMACompressorWrapper())
register_compressor('xz', XZCompressorWrapper())
register_compressor('lz4', LZ4CompressorWrapper())

###############################################################################
# Utility objects for persistence.

# For convenience, 16 bytes are used to be sure to cover all the possible
# dtypes' alignments. For reference, see:
# https://numpy.org/devdocs/dev/alignment.html
NUMPY_ARRAY_ALIGNMENT_BYTES = 16
class NumpyArrayWrapper(object):
    """An object to be persisted instead of numpy arrays.

    This object is used to hack into the pickle machinery and read numpy
    array data from our custom persistence format.
    More precisely, this object is used for:
    * carrying the information of the persisted array: subclass, shape, order,
      dtype. Those ndarray metadata are used to correctly reconstruct the
      array with low level numpy functions.
    * determining if memmap is allowed on the array.
    * reading the array bytes from a file.
    * reading the array using memorymap from a file.
    * writing the array bytes to a file.

    Attributes
    ----------
    subclass: numpy.ndarray subclass
        Determine the subclass of the wrapped array.
    shape: numpy.ndarray shape
        Determine the shape of the wrapped array.
    order: {'C', 'F'}
        Determine the order of wrapped array data. 'C' is for C order, 'F' is
        for fortran order.
    dtype: numpy.ndarray dtype
        Determine the data type of the wrapped array.
    allow_mmap: bool
        Determine if memory mapping is allowed on the wrapped array.
        Default: False.
    numpy_array_alignment_bytes: int or None
        Alignment of the raw array data in the output stream; None when
        the output stream is not seekable.
    """

    def __init__(self, subclass, shape, order, dtype, allow_mmap=False,
                 numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES):
        """Constructor. Store the useful information for later."""
        self.subclass = subclass
        self.shape = shape
        self.order = order
        self.dtype = dtype
        self.allow_mmap = allow_mmap
        # We make numpy_array_alignment_bytes an instance attribute to allow us
        # to change our mind about the default alignment and still load the old
        # pickles (with the previous alignment) correctly
        self.numpy_array_alignment_bytes = numpy_array_alignment_bytes
def safe_get_numpy_array_alignment_bytes(self):
    """Return the data alignment in bytes, or None for old pickles."""
    # NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't
    # have a numpy_array_alignment_bytes attribute
    return getattr(self, 'numpy_array_alignment_bytes', None)
def write_array(self, array, pickler):
    """Write array bytes to pickler file handle.

    This function is an adaptation of the numpy write_array function
    available in version 1.10.1 in numpy/lib/format.py.
    """
    # Set buffer size to 16 MiB to hide the Python loop overhead.
    buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
    if array.dtype.hasobject:
        # We contain Python objects so we cannot write out the data
        # directly. Instead, we will pickle it out with version 2 of the
        # pickle protocol.
        pickle.dump(array, pickler.file_handle, protocol=2)
    else:
        numpy_array_alignment_bytes = \
            self.safe_get_numpy_array_alignment_bytes()
        if numpy_array_alignment_bytes is not None:
            # Pad the stream so the raw array data starts at an aligned
            # offset; read_array/read_mmap skip this padding on load.
            current_pos = pickler.file_handle.tell()
            pos_after_padding_byte = current_pos + 1
            padding_length = numpy_array_alignment_bytes - (
                pos_after_padding_byte % numpy_array_alignment_bytes)
            # A single byte is written that contains the padding length in
            # bytes
            padding_length_byte = int.to_bytes(
                padding_length, length=1, byteorder='little')
            pickler.file_handle.write(padding_length_byte)

            if padding_length != 0:
                padding = b'\xff' * padding_length
                pickler.file_handle.write(padding)

        # Stream the array data chunk by chunk in the stored memory
        # order, converting each chunk to raw bytes.
        for chunk in pickler.np.nditer(array,
                                       flags=['external_loop',
                                              'buffered',
                                              'zerosize_ok'],
                                       buffersize=buffersize,
                                       order=self.order):
            pickler.file_handle.write(chunk.tobytes('C'))
def read_array(self, unpickler):
    """Read array from unpickler file handle.

    This function is an adaptation of the numpy read_array function
    available in version 1.10.1 in numpy/lib/format.py.
    """
    if len(self.shape) == 0:
        # 0-d array: a single element.
        count = 1
    else:
        # joblib issue #859: we cast the elements of self.shape to int64 to
        # prevent a potential overflow when computing their product.
        shape_int64 = [unpickler.np.int64(x) for x in self.shape]
        count = unpickler.np.multiply.reduce(shape_int64)
    # Now read the actual data.
    if self.dtype.hasobject:
        # The array contained Python objects. We need to unpickle the data.
        array = pickle.load(unpickler.file_handle)
    else:
        numpy_array_alignment_bytes = \
            self.safe_get_numpy_array_alignment_bytes()
        if numpy_array_alignment_bytes is not None:
            # Skip the alignment padding inserted by write_array: one
            # length byte followed by that many padding bytes.
            padding_byte = unpickler.file_handle.read(1)
            padding_length = int.from_bytes(
                padding_byte, byteorder='little')
            if padding_length != 0:
                unpickler.file_handle.read(padding_length)

        # This is not a real file. We have to read it the
        # memory-intensive way.
        # crc32 module fails on reads greater than 2 ** 32 bytes,
        # breaking large reads from gzip streams. Chunk reads to
        # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
        # of the read. In non-chunked case count < max_read_count, so
        # only one read is performed.
        max_read_count = BUFFER_SIZE // min(BUFFER_SIZE,
                                            self.dtype.itemsize)

        array = unpickler.np.empty(count, dtype=self.dtype)
        for i in range(0, count, max_read_count):
            read_count = min(max_read_count, count - i)
            read_size = int(read_count * self.dtype.itemsize)
            data = _read_bytes(unpickler.file_handle,
                               read_size, "array data")
            array[i:i + read_count] = \
                unpickler.np.frombuffer(data, dtype=self.dtype,
                                        count=read_count)
            del data

        if self.order == 'F':
            # Data was written in Fortran order: build with the reversed
            # shape then transpose to recover the right strides.
            array.shape = self.shape[::-1]
            array = array.transpose()
        else:
            array.shape = self.shape

    # Detect byte order mismatch and swap as needed.
    return _ensure_native_byte_order(array)
def read_mmap(self, unpickler):
    """Read an array using numpy memmap.

    Parameters
    ----------
    unpickler: NumpyUnpickler
        Provides the file handle, filename and mmap_mode.

    Returns
    -------
    numpy.memmap
        A memory-mapped view over the array data stored in the file,
        byte-swapped to native order if needed.
    """
    current_pos = unpickler.file_handle.tell()
    offset = current_pos
    numpy_array_alignment_bytes = \
        self.safe_get_numpy_array_alignment_bytes()

    if numpy_array_alignment_bytes is not None:
        # Skip the alignment padding written by write_array.
        padding_byte = unpickler.file_handle.read(1)
        padding_length = int.from_bytes(padding_byte, byteorder='little')
        # + 1 is for the padding byte
        offset += padding_length + 1

    # 'w+' would truncate the existing file; downgrade to read/write.
    if unpickler.mmap_mode == 'w+':
        unpickler.mmap_mode = 'r+'

    marray = make_memmap(unpickler.filename,
                         dtype=self.dtype,
                         shape=self.shape,
                         order=self.order,
                         mode=unpickler.mmap_mode,
                         offset=offset)
    # update the offset so that it corresponds to the end of the read array
    unpickler.file_handle.seek(offset + marray.nbytes)

    if (numpy_array_alignment_bytes is None and
            current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0):
        # Fixed doubled "not not" in the original warning message.
        message = (
            f'The memmapped array {marray} loaded from the file '
            f'{unpickler.file_handle.name} is not bytes aligned. '
            'This may cause segmentation faults if this memmapped array '
            'is used in some libraries like BLAS or PyTorch. '
            'To get rid of this warning, regenerate your pickle file '
            'with joblib >= 1.2.0. '
            'See https://github.com/joblib/joblib/issues/563 '
            'for more details'
        )
        warnings.warn(message)

    return _ensure_native_byte_order(marray)
def read(self, unpickler):
    """Read the array corresponding to this wrapper.

    Use the unpickler to get all information to correctly read the array.

    Parameters
    ----------
    unpickler: NumpyUnpickler

    Returns
    -------
    array: numpy.ndarray
    """
    # When requested, only use memmap mode if allowed.
    if unpickler.mmap_mode is not None and self.allow_mmap:
        array = self.read_mmap(unpickler)
    else:
        array = self.read_array(unpickler)

    # Manage array subclass case
    # NOTE(review): __array_prepare__ is deprecated in recent numpy
    # releases; confirm this branch still works with supported versions.
    if (hasattr(array, '__array_prepare__') and
            self.subclass not in (unpickler.np.ndarray,
                                  unpickler.np.memmap)):
        # We need to reconstruct another subclass
        new_array = unpickler.np.core.multiarray._reconstruct(
            self.subclass, (0,), 'b')
        return new_array.__array_prepare__(array)
    else:
        return array
###############################################################################
# Pickler classes
class NumpyPickler(Pickler):
    """A pickler to persist big data efficiently.

    The main features of this object are:
    * persistence of numpy arrays in a single file.
    * optional compression with a special care on avoiding memory copies.

    Attributes
    ----------
    fp: file
        File object handle used for serializing the input object.
    protocol: int, optional
        Pickle protocol used. Default is pickle.DEFAULT_PROTOCOL.
    """

    dispatch = Pickler.dispatch.copy()

    def __init__(self, fp, protocol=None):
        self.file_handle = fp
        # True when writing into a compressed stream; used later to
        # disallow memmapping of arrays (see _create_array_wrapper).
        self.buffered = isinstance(self.file_handle, BinaryZlibFile)

        # By default we want a pickle protocol that only changes with
        # the major python version and not the minor one
        if protocol is None:
            protocol = pickle.DEFAULT_PROTOCOL

        Pickler.__init__(self, self.file_handle, protocol=protocol)
        # delayed import of numpy, to avoid tight coupling
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np
def _create_array_wrapper(self, array):
    """Create and returns a numpy array wrapper from a numpy array."""
    # Record Fortran order only for arrays that are F- but not
    # C-contiguous; everything else is serialized in C order.
    order = 'F' if (array.flags.f_contiguous and
                    not array.flags.c_contiguous) else 'C'
    # Memmapping is only possible for uncompressed output and
    # non-object dtypes.
    allow_mmap = not self.buffered and not array.dtype.hasobject

    kwargs = {}
    try:
        self.file_handle.tell()
    except io.UnsupportedOperation:
        # Non-seekable stream: alignment padding cannot be computed.
        kwargs = {'numpy_array_alignment_bytes': None}

    wrapper = NumpyArrayWrapper(type(array),
                                array.shape, order, array.dtype,
                                allow_mmap=allow_mmap,
                                **kwargs)

    return wrapper
def save(self, obj):
    """Subclass the Pickler `save` method.

    This is a total abuse of the Pickler class in order to use the numpy
    persistence function `save` instead of the default pickle
    implementation. The numpy array is replaced by a custom wrapper in the
    pickle persistence stack and the serialized array is written right
    after in the file. Warning: the file produced does not follow the
    pickle format. As such it can not be read with `pickle.load`.
    """
    if self.np is not None and type(obj) in (self.np.ndarray,
                                             self.np.matrix,
                                             self.np.memmap):
        if type(obj) is self.np.memmap:
            # Pickling doesn't work with memmapped arrays
            obj = self.np.asanyarray(obj)

        # The array wrapper is pickled instead of the real array.
        wrapper = self._create_array_wrapper(obj)
        Pickler.save(self, wrapper)

        # A framer was introduced with pickle protocol 4 and we want to
        # ensure the wrapper object is written before the numpy array
        # buffer in the pickle file.
        # See https://www.python.org/dev/peps/pep-3154/#framing to get
        # more information on the framer behavior.
        if self.proto >= 4:
            self.framer.commit_frame(force=True)

        # And then array bytes are written right after the wrapper.
        wrapper.write_array(obj, self)
        return

    # Not a plain numpy array: fall back to the default behavior.
    return Pickler.save(self, obj)
class NumpyUnpickler(Unpickler):
    """A subclass of the Unpickler to unpickle our numpy pickles.

    Attributes
    ----------
    mmap_mode: str
        The memorymap mode to use for reading numpy arrays.
    file_handle: file_like
        File object to unpickle from.
    filename: str
        Name of the file to unpickle from. It should correspond to
        file_handle. This parameter is required when using mmap_mode.
    np: module
        Reference to numpy module if numpy is installed else None.
    """

    dispatch = Unpickler.dispatch.copy()

    def __init__(self, filename, file_handle, mmap_mode=None):
        # The next line is for backward compatibility with pickle generated
        # with joblib versions less than 0.10.
        self._dirname = os.path.dirname(filename)

        self.mmap_mode = mmap_mode
        self.file_handle = file_handle
        # filename is required for numpy mmap mode.
        self.filename = filename
        # Flipped to True by load_build when a pre-0.10 wrapper is seen.
        self.compat_mode = False
        Unpickler.__init__(self, self.file_handle)
        # delayed import of numpy, to avoid tight coupling
        try:
            import numpy as np
        except ImportError:
            np = None
        self.np = np
def load_build(self):
    """Called to set the state of a newly created object.

    We capture it to replace our place-holder objects, NDArrayWrapper or
    NumpyArrayWrapper, by the array we are interested in. We
    replace them directly in the stack of pickler.
    NDArrayWrapper is used for backward compatibility with joblib <= 0.9.
    """
    Unpickler.load_build(self)

    # For backward compatibility, we support NDArrayWrapper objects.
    if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)):
        if self.np is None:
            raise ImportError("Trying to unpickle an ndarray, "
                              "but numpy didn't import correctly")
        array_wrapper = self.stack.pop()
        # If any NDArrayWrapper is found, we switch to compatibility mode,
        # this will be used to raise a DeprecationWarning to the user at
        # the end of the unpickling.
        if isinstance(array_wrapper, NDArrayWrapper):
            self.compat_mode = True
        # Replace the wrapper by the actual array it describes.
        self.stack.append(array_wrapper.read(self))

# Be careful to register our new method.
dispatch[pickle.BUILD[0]] = load_build
###############################################################################
# Utility functions
def dump(value, filename, compress=0, protocol=None, cache_size=None):
    """Persist an arbitrary Python object into one file.

    Read more in the :ref:`User Guide <persistence>`.

    Parameters
    ----------
    value: any Python object
        The object to store to disk.
    filename: str, pathlib.Path, or file object.
        The file object or path of the file in which it is to be stored.
        The compression method corresponding to one of the supported filename
        extensions ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used
        automatically.
    compress: int from 0 to 9 or bool or 2-tuple, optional
        Optional compression level for the data. 0 or False is no compression.
        Higher value means more compression, but also slower read and
        write times. Using a value of 3 is often a good compromise.
        See the notes for more details.
        If compress is True, the compression level used is 3.
        If compress is a 2-tuple, the first element must correspond to a string
        between supported compressors (e.g 'zlib', 'gzip', 'bz2', 'lzma'
        'xz'), the second element must be an integer from 0 to 9, corresponding
        to the compression level.
    protocol: int, optional
        Pickle protocol, see pickle.dump documentation for more details.
    cache_size: positive int, optional
        This option is deprecated in 0.10 and has no effect.

    Returns
    -------
    filenames: list of strings
        The list of file names in which the data is stored. If
        compress is false, each array is stored in a different file.

    See Also
    --------
    joblib.load : corresponding loader

    Notes
    -----
    Memmapping on load cannot be used for compressed files. Thus
    using compression can significantly slow down loading. In
    addition, compressed files take extra memory during
    dump and load.
    """
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)

    is_filename = isinstance(filename, str)
    is_fileobj = hasattr(filename, "write")

    compress_method = 'zlib'  # zlib is the default compression method.
    if compress is True:
        # By default, if compress is enabled, we want the default compress
        # level of the compressor.
        compress_level = None
    elif isinstance(compress, tuple):
        # a 2-tuple was set in compress
        if len(compress) != 2:
            raise ValueError(
                'Compress argument tuple should contain exactly 2 elements: '
                '(compress method, compress level), you passed {}'
                .format(compress))
        compress_method, compress_level = compress
    elif isinstance(compress, str):
        compress_method = compress
        compress_level = None  # Use default compress level
        compress = (compress_method, compress_level)
    else:
        # compress is an int: interpret it as the compression level.
        compress_level = compress

    if compress_method == 'lz4' and lz4 is None:
        raise ValueError(LZ4_NOT_INSTALLED_ERROR)

    if (compress_level is not None and
            compress_level is not False and
            compress_level not in range(10)):
        # Raising an error if a non valid compress level is given.
        raise ValueError(
            'Non valid compress level given: "{}". Possible values are '
            '{}.'.format(compress_level, list(range(10))))

    if compress_method not in _COMPRESSORS:
        # Raising an error if an unsupported compression method is given.
        raise ValueError(
            'Non valid compression method given: "{}". Possible values are '
            '{}.'.format(compress_method, _COMPRESSORS))

    if not is_filename and not is_fileobj:
        # People keep inverting arguments, and the resulting error is
        # incomprehensible
        raise ValueError(
            'Second argument should be a filename or a file-like object, '
            '%s (type %s) was given.'
            % (filename, type(filename))
        )

    if is_filename and not isinstance(compress, tuple):
        # In case no explicit compression was requested using both compression
        # method and level in a tuple and the filename has an explicit
        # extension, we select the corresponding compressor.

        # unset the variable to be sure no compression level is set afterwards.
        compress_method = None
        for name, compressor in _COMPRESSORS.items():
            if filename.endswith(compressor.extension):
                compress_method = name

        if compress_method in _COMPRESSORS and compress_level == 0:
            # we choose the default compress_level in case it was not given
            # as an argument (using compress).
            compress_level = None

    if cache_size is not None:
        # Cache size is deprecated starting from version 0.10
        warnings.warn("Please do not set 'cache_size' in joblib.dump, "
                      "this parameter has no effect and will be removed. "
                      "You used 'cache_size={}'".format(cache_size),
                      DeprecationWarning, stacklevel=2)

    if compress_level != 0:
        # Compressed path: wrap the target in a compressing file object.
        with _write_fileobject(filename, compress=(compress_method,
                                                   compress_level)) as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    elif is_filename:
        with open(filename, 'wb') as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    else:
        NumpyPickler(filename, protocol=protocol).dump(value)

    # If the target container is a file object, nothing is returned.
    if is_fileobj:
        return

    # For compatibility, the list of created filenames (e.g with one element
    # after 0.10.0) is returned by default.
    return [filename]
def _unpickle(fobj, filename="", mmap_mode=None):
    """Internal unpickling function."""
    # We are careful to open the file handle early and keep it open to
    # avoid race-conditions on renames.
    # That said, if data is stored in companion files, which can be
    # the case with the old persistence format, moving the directory
    # will create a race when joblib tries to access the companion
    # files.
    unpickler = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode)
    obj = None
    try:
        obj = unpickler.load()
        if unpickler.compat_mode:
            # A pre-0.10 NDArrayWrapper was encountered during load.
            warnings.warn("The file '%s' has been generated with a "
                          "joblib version less than 0.10. "
                          "Please regenerate this pickle file."
                          % filename,
                          DeprecationWarning, stacklevel=3)
    except UnicodeDecodeError as exc:
        # More user-friendly error message
        new_exc = ValueError(
            'You may be trying to read with '
            'python 3 a joblib pickle generated with python 2. '
            'This feature is not supported by joblib.')
        new_exc.__cause__ = exc
        raise new_exc
    return obj
def load_temporary_memmap(filename, mmap_mode, unlink_on_gc_collect):
    """Load a memmapped object and register its backing file for cleanup.

    Parameters
    ----------
    filename: str
        Path of the joblib pickle to load.
    mmap_mode: str
        Memory-map mode forwarded to :func:`load`.
    unlink_on_gc_collect: bool
        When True, attach a finalizer that may unlink the backing file
        once the returned object is garbage collected.
    """
    # Local import to avoid a circular dependency with
    # _memmapping_reducer.
    from ._memmapping_reducer import JOBLIB_MMAPS, add_maybe_unlink_finalizer
    obj = load(filename, mmap_mode)
    JOBLIB_MMAPS.add(obj.filename)
    if unlink_on_gc_collect:
        add_maybe_unlink_finalizer(obj)
    return obj
def load(filename, mmap_mode=None):
    """Reconstruct a Python object from a file persisted with joblib.dump.

    Read more in the :ref:`User Guide <persistence>`.

    WARNING: joblib.load relies on the pickle module and can therefore
    execute arbitrary Python code. It should therefore never be used
    to load files from untrusted sources.

    Parameters
    ----------
    filename: str, pathlib.Path, or file object.
        The file object or path of the file from which to load the object
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, the arrays are memory-mapped from the disk. This
        mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
        the originally pickled object.

    Returns
    -------
    result: any Python object
        The object stored in the file.

    See Also
    --------
    joblib.dump : function to save an object

    Notes
    -----
    This function can load numpy array files saved separately during the
    dump. If the mmap_mode argument is given, it is passed to np.load and
    arrays are loaded as memmaps. As a consequence, the reconstructed
    object might not match the original pickled object. Note that if the
    file was saved with compression, the arrays cannot be memmapped.
    """
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)

    if hasattr(filename, "read"):
        # A file-like object was given directly.
        fobj = filename
        filename = getattr(fobj, 'name', '')
        with _read_fileobject(fobj, filename, mmap_mode) as fobj:
            obj = _unpickle(fobj)
    else:
        with open(filename, 'rb') as f:
            with _read_fileobject(f, filename, mmap_mode) as fobj:
                if isinstance(fobj, str):
                    # if the returned file object is a string, this means we
                    # try to load a pickle file generated with an old version
                    # of joblib, so we load it with the joblib compatibility
                    # function.
                    return load_compatibility(fobj)

                obj = _unpickle(fobj, filename, mmap_mode)
    return obj
| bsd-3-clause | ba3c3a519f1bf9a5ca48d7d7132481a8 | 39.813354 | 79 | 0.608864 | 4.515782 | false | false | false | false |
airspeed-velocity/asv | asv/results.py | 1 | 32402 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import base64
import os
import re
import zlib
import itertools
import hashlib
import datetime
from . import environment, statistics, util
from .console import log
from .machine import Machine
def iter_results_paths(results):
    """
    Iterate over all of the result file paths.

    Walks the *results* tree and yields ``(root, filename, machine_name)``
    for every result ``.json`` file whose directory contains a valid
    ``machine.json``.  Directories with a broken or missing
    ``machine.json`` are skipped with a single warning, emitted only if
    they actually contain result files.
    """
    # Files in a results directory that are metadata, not results.
    skip_files = set([
        'machine.json', 'benchmarks.json'
    ])
    for root, dirs, files in os.walk(results):
        # Iterate over files only if machine.json is valid json
        machine_json = os.path.join(root, "machine.json")
        try:
            data = util.load_json(machine_json, api_version=Machine.api_version)
            machine_name = data.get('machine')
            if not isinstance(machine_name, str):
                raise util.UserError(f"malformed {machine_json}")
        except util.UserError as err:
            # Defer the warning: only report if result files are present.
            machine_json_err = f"Skipping results: {err}"
        except IOError:
            machine_json_err = f"Skipping results: could not load {machine_json}"
        else:
            machine_json_err = None
        # Iterate over files
        for filename in files:
            if filename not in skip_files and filename.endswith('.json'):
                if machine_json_err is not None:
                    # Show the warning only if there are some files to load
                    log.warning(machine_json_err)
                    break
                yield (root, filename, machine_name)
def iter_results(results):
    """
    Yield a ``Results`` object for every loadable result file found
    under *results*.

    Files that fail to load are reported as warnings and skipped.
    """
    for (dirpath, fname, machine_name) in iter_results_paths(results):
        path = os.path.join(dirpath, fname)
        try:
            yield Results.load(path, machine_name=machine_name)
        except util.UserError as exc:
            log.warning(str(exc))
def iter_results_for_machine(results, machine_name):
    """
    Iterate over the result files stored under a given machine's
    subdirectory of the results tree.
    """
    machine_dir = os.path.join(results, machine_name)
    return iter_results(machine_dir)
def iter_results_for_machine_and_hash(results, machine_name, commit):
    """
    Iterate over all of the result files with a given hash for a
    particular machine.

    *commit* may be a prefix; it is first expanded to the full stored
    8-char identifier via `get_result_hash_from_prefix`.
    """
    full_commit = get_result_hash_from_prefix(results, machine_name, commit)
    for (root, filename, machine_name) in iter_results_paths(
            os.path.join(results, machine_name)):
        # Result files are named "<8-char commit>-<env>.json".
        results_commit = filename.split('-')[0]
        if results_commit == full_commit:
            try:
                yield Results.load(os.path.join(root, filename), machine_name=machine_name)
            except util.UserError as exc:
                log.warning(str(exc))
def iter_existing_hashes(results):
    """
    Iterate over the commit hash of every result under *results*.

    Duplicates are possible (one per machine/environment); use
    `get_existing_hashes` when uniqueness matters.
    """
    return (result.commit_hash for result in iter_results(results))
def get_existing_hashes(results):
    """
    Return a list of the distinct commit hashes that have already been
    tested (order unspecified).
    """
    log.info("Getting existing hashes")
    return list(set(iter_existing_hashes(results)))
def get_result_hash_from_prefix(results, machine_name, commit_prefix):
    """
    Get the 8-char result commit identifier from a potentially shorter
    prefix.  Only considers the set of commits that have had
    results computed.

    Parameters
    ----------
    results : str
        Path to the root of the results tree.
    machine_name : str
        Machine whose results are searched.
    commit_prefix : str
        Commit hash prefix to resolve.

    Returns
    -------
    commit : str or None
        The unique matching identifier, or None if there are no matches.

    Raises
    ------
    util.UserError
        If the prefix is non-unique (matches multiple commits).
    """
    commits = set()
    path = os.path.join(results, machine_name)
    for (root, filename, r_machine_name) in iter_results_paths(path):
        if r_machine_name != machine_name:
            log.warning(f"Skipping results '{os.path.join(root, filename)}':"
                        f" machine name is not '{machine_name}'")
            continue
        # Result files are named "<8-char commit>-<env>.json".
        results_commit = filename.split('-')[0]
        cmp_len = min(len(commit_prefix), len(results_commit))
        if results_commit[:cmp_len] == commit_prefix[:cmp_len]:
            commits.add(results_commit)
    if len(commits) > 1:
        commit_list_str = ', '.join(sorted(commits))
        raise util.UserError('Git hash prefix could represent one of '
                             f'multiple commits: {commit_list_str}')
    elif len(commits) == 1:
        # next(iter(...)) avoids materializing a throwaway list.
        return next(iter(commits))
    else:
        return None
def get_filename(machine, commit_hash, env_name):
    """
    Build the result file path for a machine / commit / environment
    triple.

    Over-long environment names (>= 128 characters) are replaced by an
    ``env-<md5>`` digest so that the file name stays a manageable
    length.
    """
    name = env_name
    if name and len(name) >= 128:
        digest = hashlib.md5(name.encode('utf-8')).hexdigest()
        name = "env-" + digest
    basename = "{}-{}.json".format(commit_hash[:8], name)
    return os.path.join(machine, basename)
def _compatible_results(result, result_params, params):
"""
For parameterized benchmarks, obtain values from *result* that
are compatible with parameters of *benchmark*
"""
if result is None:
# All results missing, eg. build failure
return [None for param in itertools.product(*params)]
# Pick results for those parameters that also appear in the
# current benchmark
old_results = {}
for param, value in zip(itertools.product(*result_params), result):
old_results[param] = value
new_results = []
for param in itertools.product(*params):
new_results.append(old_results.get(param))
return new_results
class Results:
    """
    Manage a set of benchmark results for a single machine and commit
    hash.
    """
    # On-disk file format version; bump when the JSON layout changes.
    api_version = 2
    def __init__(self,
                 params,
                 requirements,
                 commit_hash,
                 date,
                 python,
                 env_name,
                 env_vars):
        """
        Parameters
        ----------
        params : dict
            Parameters describing the environment in which the
            benchmarks were run.
        requirements : list
            Requirements of the benchmarks being run.
        commit_hash : str
            The commit hash for the benchmark run.
        date : int
            JavaScript timestamp for when the commit was merged into
            the repository.
        python : str
            A Python version specifier.
        env_name : str
            Environment name
        env_vars: dict
            Environment variables
        """
        self._params = params
        self._requirements = requirements
        self._commit_hash = commit_hash
        self._date = date
        # Per-benchmark data, all keyed by benchmark name.
        self._results = {}
        self._samples = {}
        self._stats = {}
        self._benchmark_params = {}
        self._profiles = {}
        self._python = python
        self._env_name = env_name
        self._started_at = {}
        self._duration = {}
        self._benchmark_version = {}
        self._env_vars = env_vars
        # Note: stderr and errcode are not saved to files
        self._stderr = {}
        self._errcode = {}
        if commit_hash is not None:
            # Persisted under "<machine>/<8-char commit>-<env>.json".
            self._filename = get_filename(
                params['machine'], self._commit_hash, env_name)
        else:
            # Unnamed results cannot be saved/loaded/removed.
            self._filename = None
    @classmethod
    def unnamed(cls):
        # Placeholder instance with no commit/machine identity; it has
        # no filename, so it cannot be saved or removed.
        return cls({}, {}, None, None, None, None, {})
    @property
    def commit_hash(self):
        # Commit hash these results were measured at.
        return self._commit_hash
    @property
    def date(self):
        # JavaScript timestamp of when the commit was merged.
        return self._date
    @property
    def params(self):
        # Machine/environment description dict.
        return self._params
    @property
    def env_vars(self):
        # Environment variables the benchmarks ran with.
        return self._env_vars
    @property
    def started_at(self):
        # Mapping benchmark name -> start time (JS timestamp).
        return self._started_at
    @property
    def duration(self):
        # Mapping benchmark name (or "<build>"/"<setup_cache ...>"
        # pseudo-keys) -> duration in seconds.
        return self._duration
    def set_build_duration(self, value):
        # Record the project build time under the "<build>" pseudo-key.
        self._duration["<build>"] = float(value)
    def set_setup_cache_duration(self, setup_cache_key, value):
        # Record a setup_cache run time under its own pseudo-key.
        self._duration[f"<setup_cache {setup_cache_key}>"] = float(value)
    @property
    def benchmark_version(self):
        # Mapping benchmark name -> benchmark version identifier.
        return self._benchmark_version
    @property
    def stderr(self):
        # Mapping benchmark name -> captured stderr (not persisted).
        return self._stderr
    @property
    def errcode(self):
        # Mapping benchmark name -> process exit code (not persisted).
        return self._errcode
    def get_all_result_keys(self):
        """
        Return all available result keys.
        """
        # Dict view over benchmark names, regardless of version match.
        return self._results.keys()
def get_result_keys(self, benchmarks):
"""
Return result keys corresponding to benchmarks.
Parameters
----------
benchmarks : Benchmarks
Benchmarks to return results for.
Used for checking benchmark versions.
Returns
-------
keys : set
Set of benchmark result keys
"""
keys = set()
for key in self._results.keys():
if key not in benchmarks:
continue
version = self._benchmark_version.get(key)
bench_version = benchmarks[key].get('version')
if version is not None and version != bench_version:
continue
keys.add(key)
return keys
    def get_result_value(self, key, params):
        """
        Return the value of benchmark result.
        Parameters
        ----------
        key : str
            Benchmark name to return results for
        params : {list of list, None}
            Set of benchmark parameters to return values for
        Returns
        -------
        value : {float, list of float}
            Benchmark result value. If the benchmark is parameterized, return
            a list of values.
        """
        # Re-match stored values (recorded under possibly different
        # parameters) to the requested parameter combinations.
        return _compatible_results(self._results[key],
                                   self._benchmark_params[key],
                                   params)
    def get_result_stats(self, key, params):
        """
        Return the statistical information of a benchmark result.
        Parameters
        ----------
        key : str
            Benchmark name to return results for
        params : {list of list, None}
            Set of benchmark parameters to return values for
        Returns
        -------
        stats : {None, dict, list of dict}
            Result statistics. If the benchmark is parameterized,
            return a list of values.
        """
        return _compatible_results(self._stats[key],
                                   self._benchmark_params[key],
                                   params)
    def get_result_samples(self, key, params):
        """
        Return the raw data points of a benchmark result.
        Parameters
        ----------
        key : str
            Benchmark name to return results for
        params : {list of list, None}
            Set of benchmark parameters to return values for
        Returns
        -------
        samples : {None, list}
            Raw result samples. If the benchmark is parameterized,
            return a list of values.
        """
        return _compatible_results(self._samples[key],
                                   self._benchmark_params[key],
                                   params)
    def get_result_params(self, key):
        """
        Return the benchmark parameters of the given result
        """
        return self._benchmark_params[key]
    def remove_result(self, key):
        """
        Remove results corresponding to a given benchmark.

        Raises KeyError if no result is stored under *key*.
        """
        del self._results[key]
        del self._benchmark_params[key]
        del self._samples[key]
        del self._stats[key]
        # Remove profiles (may be missing)
        self._profiles.pop(key, None)
        # Remove run times (may be missing in old files)
        self._started_at.pop(key, None)
        self._duration.pop(key, None)
        # Remove version (may be missing)
        self._benchmark_version.pop(key, None)
def remove_samples(self, key, selected_idx=None):
"""
Remove measurement samples from the selected benchmark.
"""
if key not in self._results:
raise ValueError(key)
if selected_idx is None:
self._samples[key] = None
elif self._samples[key] is not None:
for j in selected_idx:
self._samples[key][j] = None
    def add_result(self, benchmark, result,
                   started_at=None, duration=None,
                   record_samples=False,
                   append_samples=False,
                   selected_idx=None):
        """
        Add benchmark result.
        Parameters
        ----------
        benchmark : dict
            Benchmark object
        result : runner.BenchmarkResult
            Result of the benchmark.
        started_at : datetime.datetime, optional
            Benchmark start time.
        duration : float, optional
            Benchmark total duration in seconds.
        record_samples : bool, optional
            Whether to save samples.
        append_samples : bool, optional
            Whether to combine new samples with old.
        selected_idx : set, optional
            Which indices in a parametrized benchmark to update
        """
        new_result = list(result.result)
        new_samples = list(result.samples)
        new_number = result.number
        benchmark_name = benchmark['name']
        benchmark_version = benchmark['version']
        if started_at is None:
            started_at = datetime.datetime.utcnow()
        new_stats = [None] * len(new_result)
        # Old data is merged in only when the benchmark version matches;
        # a version change invalidates previous results entirely.
        if (benchmark_name in self._results and
                benchmark_version == self._benchmark_version.get(benchmark_name)):
            # Append to old samples, if requested
            if append_samples:
                old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
                for j in range(len(new_samples)):
                    if old_samples[j] is not None and new_samples[j] is not None:
                        new_samples[j] = old_samples[j] + new_samples[j]
            # Retain old result where requested
            merge_idx = [j for j in range(len(new_result))
                         if selected_idx is not None and j not in selected_idx]
            if merge_idx:
                old_result = self.get_result_value(benchmark_name, benchmark['params'])
                old_samples = self.get_result_samples(benchmark_name, benchmark['params'])
                old_stats = self.get_result_stats(benchmark_name, benchmark['params'])
                for j in merge_idx:
                    new_result[j] = old_result[j]
                    new_samples[j] = old_samples[j]
                    new_stats[j] = old_stats[j]
        # Recompute stats for updated entries (and drop unnecessary data)
        for j, (r, s, n) in enumerate(zip(new_result, new_samples, new_number)):
            if util.is_na(r):
                new_samples[j] = None
                new_stats[j] = None
                continue
            if n is not None:
                new_result[j], new_stats[j] = statistics.compute_stats(s, n)
        # Compress None lists to just None
        if all(x is None for x in new_result):
            new_result = None
        if all(x is None for x in new_samples):
            new_samples = None
        if all(x is None for x in new_stats):
            new_stats = None
        # Drop samples if requested
        if not record_samples:
            new_samples = None
        # Store result
        self._results[benchmark_name] = new_result
        self._stats[benchmark_name] = new_stats
        self._samples[benchmark_name] = new_samples
        self._benchmark_params[benchmark_name] = benchmark['params'] if benchmark['params'] else []
        self._started_at[benchmark_name] = util.datetime_to_js_timestamp(started_at)
        if duration is None:
            self._duration.pop(benchmark_name, None)
        else:
            self._duration[benchmark_name] = float(duration)
        self._benchmark_version[benchmark_name] = benchmark_version
        self._stderr[benchmark_name] = result.stderr
        self._errcode[benchmark_name] = result.errcode
        if result.profile:
            # Profiles are stored zlib-compressed and base64-encoded so
            # they can live inside the JSON file.
            profile_data = base64.b64encode(zlib.compress(result.profile))
            profile_data = profile_data.decode('ascii')
            self._profiles[benchmark_name] = profile_data
def get_profile(self, benchmark_name):
"""
Get the profile data for the given benchmark name.
Parameters
----------
benchmark_name : str
Name of benchmark
Returns
-------
profile_data : bytes
Raw profile data
"""
profile_data = self._profiles[benchmark_name]
profile_data = profile_data.encode('ascii')
return zlib.decompress(base64.b64decode(profile_data))
    def has_profile(self, benchmark_name):
        """
        Does the given benchmark data have profiling information?
        """
        return benchmark_name in self._profiles
    def save(self, result_dir):
        """
        Save the results to disk, replacing existing results.

        The on-disk format stores each benchmark as a row: a list of
        values ordered by ``result_columns``, with trailing Nones
        truncated.  `load` reverses this layout.

        Parameters
        ----------
        result_dir : str
            Path to root of results tree.
        """
        if self._filename is None:
            raise ValueError("Cannot save unnamed Results")
        path = os.path.join(result_dir, self._filename)
        results = {}
        # Columns whose per-benchmark value is read straight from an
        # attribute dict; stats_* columns are extracted from self._stats.
        simple_dict = {
            'result': self._results,
            'params': self._benchmark_params,
            'version': self._benchmark_version,
            'started_at': self._started_at,
            'duration': self._duration,
            'samples': self._samples,
            'profile': self._profiles,
        }
        all_keys = ['result', 'params', 'version', 'started_at', 'duration',
                    'stats_ci_99_a', 'stats_ci_99_b', 'stats_q_25', 'stats_q_75',
                    'stats_number', 'stats_repeat', 'samples', 'profile']
        for name in self._results.keys():
            row = []
            for key in all_keys:
                if key in simple_dict:
                    value = simple_dict[key].get(name)
                else:
                    assert key[:6] == 'stats_'
                    z = self._stats[name]
                    if z is None:
                        value = None
                    else:
                        # One stats entry per parameter combination.
                        value = [x.get(key[6:]) if x is not None else None
                                 for x in z]
                if key != 'params':
                    if isinstance(value, list) and all(x is None for x in value):
                        value = None
                    if key.startswith('stats_') or key == 'duration':
                        value = util.truncate_float_list(value)
                row.append(value)
            # Truncate trailing missing values to keep files compact.
            while row and row[-1] is None:
                row.pop()
            results[name] = row
        # Pseudo-keys ("<build>", "<setup_cache ...>") are stored in a
        # separate top-level 'durations' dict.
        other_durations = {}
        for key, value in self._duration.items():
            if key.startswith('<'):
                other_durations[key] = value
        data = {
            'commit_hash': self._commit_hash,
            'env_name': self._env_name,
            'date': self._date,
            'params': self._params,
            'python': self._python,
            'requirements': self._requirements,
            'env_vars': self._env_vars,
            'result_columns': all_keys,
            'results': results,
            'durations': other_durations,
        }
        util.write_json(path, data, self.api_version, compact=True)
    def load_data(self, result_dir):
        """
        Load previous results for the current parameters (if any).

        Only the per-benchmark dictionaries are copied over; the
        identity attributes (commit, machine, environment) keep their
        current values.
        """
        if self._filename is None:
            raise ValueError("Cannot load unnamed Results")
        path = os.path.join(result_dir, self._filename)
        if os.path.isfile(path):
            old = self.load(path)
            for dict_name in ('_results', '_samples', '_stats', '_env_vars',
                              '_benchmark_params', '_profiles', '_started_at',
                              '_duration', '_benchmark_version'):
                setattr(self, dict_name, getattr(old, dict_name))
    @classmethod
    def load(cls, path, machine_name=None):
        """
        Load results from disk.

        Reverses the column-oriented row layout written by `save`:
        each benchmark row is a list of values ordered by
        ``result_columns`` with trailing Nones truncated.

        Parameters
        ----------
        path : str
            Path to results file.
        machine_name : str, optional
            If given, check that the results file is for the given machine.
        """
        d = util.load_json(path, cls.api_version)
        d.setdefault('env_vars', {})
        try:
            obj = cls(
                d['params'],
                d['requirements'],
                d['commit_hash'],
                d['date'],
                d['python'],
                d['env_name'],
                d['env_vars'],
            )
            obj._results = {}
            obj._samples = {}
            obj._stats = {}
            obj._benchmark_params = {}
            obj._profiles = {}
            obj._started_at = {}
            obj._duration = d.get('durations', {})
            obj._benchmark_version = {}
            # Columns that map 1:1 onto an attribute dict; stats_*
            # columns are re-assembled into per-benchmark stat dicts.
            simple_keys = {
                'result': obj._results,
                'params': obj._benchmark_params,
                'version': obj._benchmark_version,
                'started_at': obj._started_at,
                'duration': obj._duration,
                'samples': obj._samples,
                'profile': obj._profiles,
            }
            for name, key_values in d['results'].items():
                for key, value in zip(d['result_columns'], key_values):
                    key_dict = simple_keys.get(key)
                    if key_dict is not None:
                        key_dict[name] = value
                        continue
                    elif key.startswith('stats_'):
                        if value is not None:
                            if name not in obj._stats:
                                obj._stats[name] = [{} for _ in value]
                            stats_key = key[6:]
                            for j, v in enumerate(value):
                                if v is not None:
                                    obj._stats[name][j][stats_key] = v
                    else:
                        raise KeyError(f"unknown data key {key}")
                # Rows may be truncated; fill the missing tail with None.
                for key_dict in simple_keys.values():
                    key_dict.setdefault(name, None)
                obj._stats.setdefault(name, None)
            # Reconstruct the relative "<machine>/<file>.json" name.
            obj._filename = os.path.join(*path.split(os.path.sep)[-2:])
        except KeyError as exc:
            raise util.UserError(
                f"Error loading results file '{path}': missing key {exc}")
        if machine_name is not None and obj.params.get('machine') != machine_name:
            raise util.UserError(
                f"Error loading results file '{path}': machine name is not '{machine_name}'")
        return obj
def rm(self, result_dir):
if self._filename is None:
raise ValueError("Cannot remove unnamed Results")
path = os.path.join(result_dir, self._filename)
os.remove(path)
    @classmethod
    def update(cls, path):
        # Rewrite an old-format results file at *path* in the current
        # api_version format (migration logic lives in update_to_*).
        util.update_json(cls, path, cls.api_version, compact=True)
    @property
    def env_name(self):
        # Name of the environment the benchmarks ran in.
        return self._env_name
#
# Old data format support
#
    @classmethod
    def update_to_2(cls, d):
        """
        Reformat data in api_version 1 format to version 2.

        Version 1 stored each benchmark as a dict (or bare value);
        version 2 stores column-oriented rows keyed by
        ``result_columns``, matching what `save` writes.
        """
        try:
            d2 = {}
            d2['commit_hash'] = d['commit_hash']
            d2['date'] = d['date']
            d2['env_name'] = d.get('env_name',
                                   environment.get_env_name('',
                                                            d['python'],
                                                            d['requirements'],
                                                            {}))
            d2['params'] = d['params']
            d2['python'] = d['python']
            d2['requirements'] = d['requirements']
            d2['env_vars'] = d.get('env_vars', {})
            # Backward-compatible load
            results = {}
            samples = {}
            stats = {}
            benchmark_params = {}
            for key, value in d['results'].items():
                # Backward compatibility
                if not isinstance(value, dict):
                    # Oldest format: bare scalar result.
                    value = {'result': [value], 'samples': None,
                             'stats': None, 'params': []}
                if not isinstance(value['result'], list):
                    value['result'] = [value['result']]
                if 'stats' in value and not isinstance(value['stats'], list):
                    value['stats'] = [value['stats']]
                value.setdefault('samples', None)
                value.setdefault('stats', None)
                value.setdefault('params', [])
                # Assign results
                results[key] = value['result']
                samples[key] = value['samples']
                stats[key] = value['stats']
                benchmark_params[key] = value['params']
            if 'profiles' in d:
                profiles = d['profiles']
            else:
                profiles = {}
            started_at = d.get('started_at', {})
            duration = d.get('duration', {})
            benchmark_version = d.get('benchmark_version', {})
            # Convert to new format
            # Each getter: (column name, source dict, optional value
            # extractor applied element-wise to stats entries).
            getters = [
                ('result', results, None),
                ('params', benchmark_params, None),
                ('version', benchmark_version, None),
                ('started_at', started_at, None),
                ('duration', duration, None),
                ('stats_ci_99_a', stats, lambda z: z['ci_99'][0]),
                ('stats_ci_99_b', stats, lambda z: z['ci_99'][1]),
                ('stats_q_25', stats, lambda z: z.get('q_25')),
                ('stats_q_75', stats, lambda z: z.get('q_75')),
                ('stats_number', stats, lambda z: z.get('number')),
                ('stats_repeat', stats, lambda z: z.get('repeat')),
                ('samples', samples, None),
                ('profile', profiles, None),
            ]
            names = set()
            for key_dict in (results, benchmark_params):
                names.update(key_dict.keys())
            d2['result_columns'] = [x[0] for x in getters]
            d2['results'] = {}
            for name in sorted(names):
                r = []
                for key_name, key_dict, key_getter in getters:
                    value = key_dict.get(name)
                    if key_getter is not None and value is not None:
                        if isinstance(value, list):
                            value = [key_getter(z) if z is not None else None
                                     for z in value]
                        else:
                            value = key_getter(value)
                    if key_name.startswith('stats_') or key_name == 'duration':
                        value = util.truncate_float_list(value)
                    if key_name == 'params' and value is None:
                        value = []
                    if key_name != 'params' and isinstance(value, list):
                        if all(x is None for x in value):
                            value = None
                    r.append(value)
                # Truncate trailing Nones, mirroring save().
                while r and r[-1] is None:
                    r.pop()
                d2['results'][name] = r
            # Pseudo-keys move to the top-level 'durations' dict.
            d2['durations'] = {}
            for key, value in duration.items():
                if key.startswith('<'):
                    d2['durations'][key] = value
            return d2
        except KeyError as exc:
            raise util.UserError(
                f"Error loading results data: missing key {exc}")
def format_benchmark_result(results, benchmark):
    """
    Pretty-print a benchmark result to human-readable form.
    Parameters
    ----------
    results : Results
        Result set object
    benchmark : dict
        Benchmark dictionary
    Returns
    -------
    info : {str, None}
        One-line description of results
    details : {str, None}
        Additional details
    """
    name = benchmark['name']
    result = results.get_result_value(name, benchmark['params'])
    stats = results.get_result_stats(name, benchmark['params'])
    total_count = len(result)
    failure_count = sum(r is None for r in result)
    info = None
    details = None
    # Display status
    if failure_count > 0:
        if failure_count == total_count:
            info = "failed"
        else:
            info = f"{failure_count}/{total_count} failed"
    # Display results
    if benchmark['params']:
        # Long format display: table of values in the details string.
        if failure_count == 0:
            info = "ok"
        display_result = [(v, statistics.get_err(v, s) if s is not None else None)
                          for v, s in zip(result, stats)]
        display = _format_benchmark_result(display_result, benchmark)
        display = "\n".join(display).strip()
        details = display
    else:
        # Non-parameterized: single value on the info line; a failure
        # was already reported above, so only format successes.
        if failure_count == 0:
            if not result:
                display = "[]"
            else:
                if stats[0]:
                    err = statistics.get_err(result[0], stats[0])
                else:
                    err = None
                display = util.human_value(result[0], benchmark['unit'], err=err)
                if len(result) > 1:
                    display += ";..."
            info = display
    return info, details
def _format_benchmark_result(result, benchmark, max_width=None):
    """
    Format the result from a parameterized benchmark as an ASCII table

    *result* is a list of ``(value, error)`` pairs, one per parameter
    combination.  As many trailing parameter axes as fit within
    *max_width* are folded into table columns; the rest become rows.
    Returns the table as a list of lines.
    """
    if not result:
        return ['[]']
    def do_formatting(num_column_params):
        # Fold result to a table
        if num_column_params > 0:
            column_params = benchmark['params'][-num_column_params:]
        else:
            column_params = []
        rows = []
        if column_params:
            row_params = benchmark['params'][:-len(column_params)]
            header = benchmark['param_names'][:len(row_params)]
            column_param_permutations = list(itertools.product(*column_params))
            header += [" / ".join(_format_param_value(value) for value in values)
                       for values in column_param_permutations]
            rows.append(header)
            column_items = len(column_param_permutations)
            name_header = " / ".join(benchmark['param_names'][len(row_params):])
        else:
            column_items = 1
            row_params = benchmark['params']
            name_header = ""
            header = benchmark['param_names']
            rows.append(header)
        for j, values in enumerate(itertools.product(*row_params)):
            # Each row holds `column_items` consecutive result entries.
            row_results = [util.human_value(x[0], benchmark['unit'], err=x[1])
                           for x in result[j * column_items:(j + 1) * column_items]]
            row = [_format_param_value(value) for value in values] + row_results
            rows.append(row)
        if name_header:
            display = util.format_text_table(rows, 1,
                                             top_header_text=name_header,
                                             top_header_span_start=len(row_params))
        else:
            display = util.format_text_table(rows, 1)
        return display.splitlines()
    # Determine how many parameters can be fit to columns
    if max_width is None:
        max_width = util.terminal_width * 3 // 4
    text = do_formatting(0)
    for j in range(1, len(benchmark['params'])):
        # Fold one more axis into columns until the table gets too wide.
        new_text = do_formatting(j)
        width = max(len(line) for line in new_text)
        if width < max_width:
            text = new_text
        else:
            break
    return text
def _format_param_value(value_repr):
"""
Format a parameter value for displaying it as test output. The
values are string obtained via Python repr.
"""
regexs = ["^'(.+)'$",
"^u'(.+)'$",
"^<class '(.+)'>$"]
for regex in regexs:
m = re.match(regex, value_repr)
if m and m.group(1).strip():
return m.group(1)
return value_repr
| bsd-3-clause | 6c96258b3ddfaa956dbffb311c20215b | 30.829077 | 99 | 0.522097 | 4.505284 | false | false | false | false |
ejeschke/ginga | ginga/util/wcsmod/wcs_astropy.py | 3 | 8561 | #
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
import astropy.wcs as pywcs
from astropy.io import fits as pyfits
from astropy import coordinates, units
from ginga.util.wcsmod import common
try:
import sunpy.coordinates # noqa
except ImportError:
pass
# Names of all coordinate frames known to astropy (plus sunpy's, when
# sunpy is importable); advertised when this WCS module is registered.
coord_types = [f.name for f in
               coordinates.frame_transform_graph.frame_set]
class AstropyWCS(common.BaseWCS):
    """
    A WCS interface for astropy.wcs
    You need to install python module 'astropy'
    (http://pypi.python.org/pypi/astropy)
    if you want to use this version.
    """
    def __init__(self, logger):
        super(AstropyWCS, self).__init__(logger)
        # Identifies the backing WCS implementation to ginga.
        self.kind = 'astropy/WCSLIB'
    def load_header(self, header, fobj=None):
        """Build an astropy WCS object from a FITS-like header.

        On failure, self.wcs is set to None and the error is logged.
        """
        try:
            # reconstruct a pyfits header, because otherwise we take an
            # incredible performance hit in astropy.wcs
            self.logger.debug("Reconstructing astropy.io.fits header")
            self.header = pyfits.Header(header.items())
            self.logger.debug("Trying to make astropy-- wcs object")
            self.wcs = pywcs.WCS(self.header, fobj=fobj, relax=True)
            self.logger.debug("made astropy wcs object")
            self.coordsys = common.get_coord_system_name(self.header)
            self.logger.debug("Coordinate system is: %s" % (self.coordsys))
        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None
    def load_nddata(self, ndd):
        """Take WCS/header from an astropy NDData-like object.

        Reuses ndd.wcs when present; otherwise builds one from ndd.meta.
        """
        try:
            # reconstruct a pyfits header, because otherwise we take an
            # incredible performance hit in astropy.wcs
            self.logger.debug("Reconstructing astropy.io.fits header")
            self.header = pyfits.Header(ndd.meta)
            if ndd.wcs is None:
                self.logger.debug("Trying to make astropy FITS WCS object")
                self.wcs = pywcs.WCS(self.header, relax=True)
                self.logger.debug("made astropy wcs object")
            else:
                self.logger.debug("reused nddata wcs object")
                self.wcs = ndd.wcs
            self.coordsys = common.get_coord_system_name(self.header)
            self.logger.debug("Coordinate system is: %s" % (self.coordsys))
        except Exception as e:
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None
    def spectral_coord(self, idxs, coords='data'):
        """Return the spectral (3rd axis) world coordinate at *idxs*.

        coords='data' uses 0-based pixel indexing, otherwise 1-based
        (FITS convention).
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = np.array([idxs], np.float_)
        try:
            sky = self.wcs.all_pix2world(pixcrd, origin)
            return float(sky[0, 2])
        except Exception as e:
            self.logger.error(
                "Error calculating spectral coordinate: %s" % (str(e)))
            raise common.WCSError(e)
    def pixtoradec(self, idxs, coords='data'):
        """Convert a pixel position to (ra_deg, dec_deg)."""
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = np.array([idxs], np.float_)
        try:
            # sky = self.wcs.wcs_pix2sky(pixcrd, origin)
            # sky = self.wcs.all_pix2sky(pixcrd, origin)
            # astropy only?
            sky = self.wcs.all_pix2world(pixcrd, origin)
        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise common.WCSError(e)
        ra_deg = float(sky[0, 0])
        dec_deg = float(sky[0, 1])
        return ra_deg, dec_deg
    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Convert (ra_deg, dec_deg) to an (x, y) pixel position.

        Extra axes given by *naxispath* are padded with zeros.  If the
        iterative inversion fails to converge, the best solution found
        is used.
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        args = [ra_deg, dec_deg]
        if naxispath:
            args += [0] * len(naxispath)
        skycrd = np.array([args], np.float_)
        try:
            pix = self.wcs.all_world2pix(skycrd, origin, maxiter=20,
                                         detect_divergence=True, quiet=False)
        except pywcs.NoConvergence as e:
            # Fall back to the best (non-converged) solution.
            pix = e.best_solution
        except Exception as e:
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise common.WCSError(e)
        x = float(pix[0, 0])
        y = float(pix[0, 1])
        return (x, y)
    def pixtocoords(self, idxs, system=None, coords='data'):
        """Convert a pixel position to an astropy coordinate object in
        the requested *system* (default 'icrs').
        """
        if self.coordsys == 'raw':
            raise common.WCSError("No usable WCS")
        if system is None:
            system = 'icrs'
        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))
        frame_class = coordinates.frame_transform_graph.lookup_name(
            self.coordsys)
        coord = frame_class(ra_deg * units.degree, dec_deg * units.degree)
        to_class = coordinates.frame_transform_graph.lookup_name(system)
        # Skip if input and output are the same (no realize_frame
        # call in astropy)
        if to_class != frame_class:
            coord = coord.transform_to(common.get_astropy_frame(to_class))
        return coord
    def pixtosystem(self, idxs, system=None, coords='data'):
        """Convert a pixel position to (lon_deg, lat_deg) in *system*."""
        if self.coordsys == 'pixel':
            # No sky WCS: return plain pixel-based coordinates.
            return self.pixtoradec(idxs, coords=coords)
        c = self.pixtocoords(idxs, system=system, coords=coords)
        r = c.data.represent_as(coordinates.UnitSphericalRepresentation)
        return r.lon.deg, r.lat.deg
    def datapt_to_wcspt(self, datapt, coords='data', naxispath=None):
        """Vectorized pixel->world transform for an (N, 2+) point array.

        Extra axes given by *naxispath* are padded with zeros.
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        if naxispath is not None:
            n = len(naxispath)
            if n > 0:
                datapt = np.hstack((datapt, np.zeros((len(datapt), n))))
        try:
            wcspt = self.wcs.all_pix2world(datapt, origin)
        except Exception as e:
            self.logger.error(
                "Error calculating datapt_to_wcspt: %s" % (str(e)))
            raise common.WCSError(e)
        return wcspt
    def wcspt_to_datapt(self, wcspt, coords='data', naxispath=None):
        """Vectorized world->pixel transform; returns only (x, y).

        Non-convergence of the iterative inversion falls back to the
        best solution found.
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        if naxispath is not None:
            n = len(naxispath)
            if n > 0:
                wcspt = np.hstack((wcspt, np.zeros((len(wcspt), n))))
        try:
            datapt = self.wcs.all_world2pix(wcspt, origin, maxiter=20,
                                            detect_divergence=True, quiet=False)
        except pywcs.NoConvergence as e:
            datapt = e.best_solution
        except Exception as e:
            self.logger.error(
                "Error calculating wcspt_to_datapt: %s" % (str(e)))
            raise common.WCSError(e)
        return datapt[:, :2]
    def datapt_to_system(self, datapt, system=None, coords='data',
                         naxispath=None):
        """
        Map points to given coordinate system.
        Parameters
        ----------
        datapt : array-like
            Pixel coordinates in the format of
            ``[[x0, y0, ...], [x1, y1, ...], ..., [xn, yn, ...]]``.
        system : str or None, optional, default to 'icrs'
            Coordinate system name.
        coords : 'data' or None, optional, default to 'data'
            Expresses whether the data coordinate is indexed from zero
        naxispath : list-like or None, optional, defaults to None
            A sequence defining the pixel indexes > 2D, if any
        Returns
        -------
        coord : SkyCoord
        """
        if self.coordsys == 'raw':
            raise common.WCSError("No usable WCS")
        if system is None:
            system = 'icrs'
        # Vectorized pixel -> world, then wrap in an astropy frame.
        wcspt = self.datapt_to_wcspt(datapt, coords=coords,
                                     naxispath=naxispath)
        frame_class = coordinates.frame_transform_graph.lookup_name(
            self.coordsys)
        ra_deg = wcspt[:, 0]
        dec_deg = wcspt[:, 1]
        coord = frame_class(ra_deg * units.degree, dec_deg * units.degree)
        to_class = coordinates.frame_transform_graph.lookup_name(system)
        # Skip if input and output is the same (no realize_frame
        # call in astropy)
        if to_class != frame_class:
            coord = coord.transform_to(common.get_astropy_frame(to_class))
        return coord
# register our WCS with ginga
common.register_wcs('astropy', AstropyWCS, coord_types)
| bsd-3-clause | a6bf980f44848e74b99294bf0cc81bdd | 31.800766 | 80 | 0.562668 | 3.725413 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/ColorMapPicker.py | 3 | 8312 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
The ``ColorMapPicker`` plugin is used to graphically browse and select a
colormap for a channel image viewer.
**Plugin Type: Global**
``ColorMapPicker`` is a global plugin. Only one instance can be opened.
**Usage**
Operation of the plugin is very simple: the colormaps are displayed in
the form of colorbars and labels in the main view pane of the plugin.
Click on any one of the bars to set the colormap of the currently
active channel in the viewer.
Change the channel to set the colormap on a different channel.
You can scroll vertically or use the scroll bars to move through the
colorbar samples.
.. note:: When the plugin starts for the first time, it will generate
a bitmap RGB image of colorbars and labels corresponding to
all the available colormaps. This can take a few seconds
depending on the number of colormaps installed.
Colormaps are shown with the "ramp" intensity map applied.
"""
from ginga.pilw.ImageViewPil import CanvasView
from ginga.gw import Widgets, Viewers
from ginga import GingaPlugin
from ginga import cmap, RGBMap, RGBImage
__all__ = ['ColorMapPicker']
class ColorMapPicker(GingaPlugin.GlobalPlugin):
    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(ColorMapPicker, self).__init__(fv)
        # read preferences for this plugin
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_ColorMapPicker')
        self.settings.add_defaults(cbar_ht=20, cbar_wd=300, cbar_sep=10,
                                   cbar_pan_accel=1.0)
        self.settings.load(onError='silent')
        # Geometry of each colorbar stripe (pixels).
        self._cmht = self.settings.get('cbar_ht', 20)
        self._cmwd = self.settings.get('cbar_wd', 300)
        self._cmsep = self.settings.get('cbar_sep', 10)
        # Left offset of the colorbars inside the rendered image.
        self._cmxoff = 20
        # Default size of the viewer widget.
        self._wd = 300
        self._ht = 400
        self._max_y = 0
        # create a PIL viewer that we use to construct an RGB image
        # containing all the possible color bars and their labels
        self.p_view = CanvasView(logger=self.logger)
        p_v = self.p_view
        p_v.configure_surface(self._wd, self._ht)
        p_v.enable_autozoom('off')
        p_v.enable_autocuts('off')
        p_v.set_scale_limits(1.0, 1.0)
        p_v.set_pan(0, 0)
        p_v.scale_to(1, 1)
        p_v.cut_levels(0, 255)
        p_v.set_bg(0.4, 0.4, 0.4)
        # this will hold the resulting RGB image
        self.r_image = RGBImage.RGBImage(logger=self.logger)
        # Interactive viewer; created in build_gui().
        self.c_view = None
        # Colormap names, in the order they are rendered top-to-bottom.
        self.cm_names = []
def build_gui(self, container):
vbox = Widgets.VBox()
vbox.set_border_width(4)
vbox.set_spacing(2)
# construct an interactive viewer to view and scroll
# the RGB image, and to let the user pick the cmap
self.c_view = Viewers.CanvasView(logger=self.logger)
c_v = self.c_view
c_v.set_desired_size(self._wd, self._ht)
c_v.enable_autozoom('off')
c_v.enable_autocuts('off')
c_v.set_pan(0, 0)
c_v.scale_to(1, 1)
c_v.transform(False, True, False)
c_v.cut_levels(0, 255)
c_v.set_bg(0.4, 0.4, 0.4)
# for debugging
c_v.set_name('cmimage')
canvas = c_v.get_canvas()
canvas.register_for_cursor_drawing(c_v)
c_v.add_callback('cursor-down', self.select_cb)
c_v.add_callback('scroll', self.scroll_cb)
bd = c_v.get_bindings()
bd.enable_pan(True)
# disable zooming so scrolling can be used to pan up/down
bd.enable_zoom(False)
bd.enable_cmap(False)
iw = Viewers.GingaScrolledViewerWidget(c_v)
iw.resize(self._wd, self._ht)
vbox.add_widget(iw, stretch=1)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(btns, stretch=0)
container.add_widget(vbox, stretch=1)
def select_cb(self, viewer, event, data_x, data_y):
"""Called when the user clicks on the color bar viewer.
Calculate the index of the color bar they clicked on and
set that color map in the current channel viewer.
"""
if not (self._cmxoff <= data_x < self._cmwd):
# need to click within the width of the bar
return
i = int(data_y / (self._cmht + self._cmsep))
if 0 <= i < len(self.cm_names):
name = self.cm_names[i]
msg = "cmap => '%s'" % (name)
self.logger.info(msg)
channel = self.fv.get_channel_info()
if channel is not None:
viewer = channel.fitsimage
#viewer.onscreen_message(msg, delay=0.5)
viewer.set_color_map(name)
def scroll_cb(self, viewer, direction, amt, data_x, data_y):
"""Called when the user scrolls in the color bar viewer.
Pan up or down to show additional bars.
"""
bd = viewer.get_bindings()
direction = bd.get_direction(direction)
pan_x, pan_y = viewer.get_pan()[:2]
qty = self._cmsep * amt * self.settings.get('cbar_pan_accel', 1.0)
if direction == 'up':
pan_y -= qty
else:
pan_y += qty
pan_y = min(max(pan_y, 0), self._max_y)
viewer.set_pan(pan_x, pan_y)
def rebuild_cmaps(self):
"""Builds a color RGB image containing color bars of all the
possible color maps and their labels.
"""
self.logger.info("building color maps image")
ht, wd, sep = self._cmht, self._cmwd, self._cmsep
viewer = self.p_view
# put the canvas into pick mode
canvas = viewer.get_canvas()
canvas.delete_all_objects()
# get the list of color maps
cm_names = self.cm_names
num_cmaps = len(cm_names)
viewer.configure_surface(500, (ht + sep) * num_cmaps)
# create a bunch of color bars and make one large compound object
# with callbacks for clicking on individual color bars
l2 = []
ColorBar = canvas.get_draw_class('drawablecolorbar')
Text = canvas.get_draw_class('text')
#ch_rgbmap = chviewer.get_rgbmap()
#dist = ch_rgbmap.get_dist()
dist = None
#imap = ch_rgbmap.get_imap()
logger = viewer.get_logger()
for i, name in enumerate(cm_names):
rgbmap = RGBMap.RGBMapper(logger, dist=dist)
rgbmap.set_cmap(cmap.get_cmap(name))
#rgbmap.set_imap(imap)
x1, y1 = self._cmxoff, i * (ht + sep)
x2, y2 = x1 + wd, y1 + ht
cbar = ColorBar(x1, y1, x2, y2, cm_name=name, showrange=False,
rgbmap=rgbmap, coord='window')
l2.append(cbar)
l2.append(Text(x2 + sep, y2, name, color='white', fontsize=16,
coord='window'))
Compound = canvas.get_draw_class('compoundobject')
obj = Compound(*l2)
canvas.add(obj)
self._max_y = y2
rgb_img = self.p_view.get_image_as_array()
self.r_image.set_data(rgb_img)
# CALLBACKS
def start(self):
if len(self.cm_names) == 0:
self.cm_names = list(cmap.get_names())
self.c_view.onscreen_message("building color maps...")
self.fv.update_pending()
self.rebuild_cmaps()
self.c_view.onscreen_message(None)
self.c_view.set_image(self.r_image)
def close(self):
self.fv.stop_global_plugin(str(self))
return True
def __str__(self):
return 'colormappicker'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example  # noqa
# __doc__ is None when Python strips docstrings (the -OO flag), so
# guard before appending
if __doc__ is not None:
    __doc__ += generate_cfg_example('plugin_ColorMapPicker', package='ginga')
# END
| bsd-3-clause | c4801a571957b5506c70a28f43a7aa6f | 33.489627 | 77 | 0.595765 | 3.463333 | false | false | false | false |
ejeschke/ginga | ginga/gw/PlotView.py | 3 | 11292 | #
# PlotView.py -- base class for plot viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import logging
import numpy as np
from ginga.misc import Callback, Settings
from ginga import AstroImage
from ginga.gw import Widgets
try:
from ginga.gw import Plot
from ginga.util import plots
have_mpl = True
except ImportError:
have_mpl = False
class PlotViewGw(Callback.Callbacks):
    """A Ginga viewer for displaying 2D plots using matplotlib.
    """

    vname = 'Ginga Plot'
    vtypes = [AstroImage.AstroImage]

    @classmethod
    def viewable(cls, dataobj):
        """Test whether `dataobj` is viewable by this viewer."""
        if not isinstance(dataobj, AstroImage.AstroImage):
            return False
        shp = list(dataobj.shape)
        # only non-empty, 1D data can be line-plotted
        if 0 in shp or len(shp) != 1:
            return False
        return True

    def __init__(self, logger=None, settings=None):
        Callback.Callbacks.__init__(self)

        if logger is not None:
            self.logger = logger
        else:
            self.logger = logging.Logger('PlotView')

        # Create settings and set defaults
        if settings is None:
            settings = Settings.SettingGroup(logger=self.logger)
        self.settings = settings
        self.settings.add_defaults(plot_bg='white', show_marker=False,
                                   linewidth=1, linestyle='-',
                                   linecolor='blue', markersize=6,
                                   markerwidth=0.5, markercolor='red',
                                   markerstyle='o', file_suffix='.png')

        # for debugging
        self.name = str(self)

        # currently displayed data object; None until set_dataobj() is
        # called (previously left uninitialized, which raised
        # AttributeError in do_plot()/get_dataobj() before first use)
        self._dataobj = None

        if not have_mpl:
            raise ImportError('Install matplotlib to use this plugin')

        top = Widgets.VBox()
        top.set_border_width(4)

        self.line_plot = plots.Plot(logger=self.logger,
                                    width=400, height=400)
        bg = self.settings.get('plot_bg', 'white')
        self.line_plot.add_axis(facecolor=bg)
        self.plot_w = Plot.PlotWidget(self.line_plot)
        self.plot_w.resize(400, 400)

        # enable interactivity in the plot
        self.line_plot.connect_ui()
        self.line_plot.enable(zoom=True, pan=True)
        self.line_plot.add_callback('limits-set', self.limits_cb)

        ax = self.line_plot.ax
        ax.grid(True)

        top.add_widget(self.plot_w, stretch=1)

        captions = (('Log X', 'checkbutton', 'Log Y', 'checkbutton',
                     'Show Marker', 'checkbutton'),
                    ('X Low:', 'label', 'x_lo', 'entry',
                     'X High:', 'label', 'x_hi', 'entry',
                     'Reset X', 'button'),
                    ('Y Low:', 'label', 'y_lo', 'entry',
                     'Y High:', 'label', 'y_hi', 'entry',
                     'Reset Y', 'button'),
                    ('Save', 'button'))

        # for now...
        orientation = 'vertical'
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w = b
        top.add_widget(w, stretch=0)

        b.log_x.set_state(self.line_plot.logx)
        b.log_x.add_callback('activated', self.log_x_cb)
        b.log_x.set_tooltip('Plot X-axis in log scale')
        b.log_y.set_state(self.line_plot.logy)
        b.log_y.add_callback('activated', self.log_y_cb)
        b.log_y.set_tooltip('Plot Y-axis in log scale')

        b.x_lo.add_callback('activated', lambda w: self.set_xlim_cb())
        b.x_lo.set_tooltip('Set X lower limit')
        b.x_hi.add_callback('activated', lambda w: self.set_xlim_cb())
        b.x_hi.set_tooltip('Set X upper limit')
        b.y_lo.add_callback('activated', lambda w: self.set_ylim_cb())
        b.y_lo.set_tooltip('Set Y lower limit')
        b.y_hi.add_callback('activated', lambda w: self.set_ylim_cb())
        b.y_hi.set_tooltip('Set Y upper limit')

        b.reset_x.add_callback('activated', lambda w: self.reset_xlim_cb())
        b.reset_x.set_tooltip('Autoscale X limits')
        b.reset_y.add_callback('activated', lambda w: self.reset_ylim_cb())
        b.reset_y.set_tooltip('Autoscale Y limits')

        b.show_marker.set_state(self.settings.get('show_marker', False))
        b.show_marker.add_callback('activated', self.set_marker_cb)
        b.show_marker.set_tooltip('Mark data points')

        # Button to save plot
        self.save_plot = b.save
        self.save_plot.set_tooltip('Save table plot')
        self.save_plot.add_callback('activated', lambda w: self.save_cb())
        self.save_plot.set_enabled(False)

        self.widget = top

        # For callbacks
        for name in ['image-set']:
            self.enable_callback(name)

    def get_widget(self):
        """Return the top-level container widget of this viewer."""
        return self.widget

    def get_settings(self):
        """Return the viewer's SettingGroup."""
        return self.settings

    def get_logger(self):
        """Return the viewer's logger."""
        return self.logger

    def clear(self):
        # NOTE(review): delegates to the container widget's clear();
        # confirm Widgets.VBox provides this method in all toolkits
        self.widget.clear()

    def initialize_channel(self, fv, channel):
        # no housekeeping to do (for now) on our part, just override to
        # suppress the logger warning
        pass

    def set_dataobj(self, dataobj):
        """Set the data object to display.

        Raises ValueError if `dataobj` is not viewable (see viewable()).
        """
        if not self.viewable(dataobj):
            raise ValueError("Can't display this data object")
        self._dataobj = dataobj
        self.do_plot(reset_xlimits=True, reset_ylimits=True)
        self.make_callback('image-set', dataobj)

    def get_dataobj(self):
        """Return the currently displayed data object (None if unset)."""
        return self._dataobj

    def clear_data(self):
        """Clear comboboxes and columns."""
        self.w.x_lo.set_text('')
        self.w.x_hi.set_text('')
        self.w.y_lo.set_text('')
        self.w.y_hi.set_text('')

    def clear_plot(self):
        """Clear plot display."""
        self.line_plot.clear()
        self.line_plot.draw()
        self.save_plot.set_enabled(False)

    def do_plot(self, reset_xlimits=True, reset_ylimits=True):
        """Simple line plot."""
        self.clear_plot()

        if self._dataobj is None:  # No data to plot
            return

        # line/marker styling from preferences
        plt_kw = {
            'lw': self.settings.get('linewidth', 1),
            'ls': self.settings.get('linestyle', '-'),
            'color': self.settings.get('linecolor', 'blue'),
            'ms': self.settings.get('markersize', 6),
            'mew': self.settings.get('markerwidth', 0.5),
            'mfc': self.settings.get('markercolor', 'red')}
        # marker edge color follows the face color
        plt_kw['mec'] = plt_kw['mfc']

        try:
            x_data, y_data = self.get_plot_data()
            marker = self.get_marker()

            self.line_plot.plot(
                x_data, y_data,
                xtitle=self.get_label('x'), ytitle=self.get_label('y'),
                marker=marker, **plt_kw)

            if not reset_xlimits:
                # re-apply user-entered limits after replotting
                self.set_xlim_cb()
                self.set_xlimits_widgets()

            if not reset_ylimits:
                self.set_ylim_cb()
                self.set_ylimits_widgets()

        except Exception as e:
            self.logger.error(str(e))
        else:
            self.save_plot.set_enabled(True)

    def set_xlimits_widgets(self, set_min=True, set_max=True):
        """Populate axis limits GUI with current plot values."""
        xmin, xmax = self.line_plot.ax.get_xlim()
        if set_min:
            self.w.x_lo.set_text('{0}'.format(xmin))
        if set_max:
            self.w.x_hi.set_text('{0}'.format(xmax))

    def set_ylimits_widgets(self, set_min=True, set_max=True):
        """Populate axis limits GUI with current plot values."""
        ymin, ymax = self.line_plot.ax.get_ylim()
        if set_min:
            self.w.y_lo.set_text('{0}'.format(ymin))
        if set_max:
            self.w.y_hi.set_text('{0}'.format(ymax))

    def limits_cb(self, plot, dct):
        """Callback that is called when the limits are set by the
        plot object.
        """
        self.set_xlimits_widgets()
        self.set_ylimits_widgets()

    def get_plot_data(self):
        """Return (x, y) arrays for plotting; x is the sample index."""
        y_data = self._dataobj.get_data()
        x_data = np.arange(len(y_data))
        return x_data, y_data

    def get_marker(self):
        """Return the marker style, or None if markers are hidden."""
        _marker_type = self.settings.get('markerstyle', 'o')

        if not self.w.show_marker.get_state():
            _marker_type = None

        return _marker_type

    def get_label(self, axis):
        """Return plot label for the given axis ('x' or 'y')."""
        if axis == 'x':
            return 'Index'
        elif axis == 'y':
            return 'Value'
        # previously an unknown axis raised UnboundLocalError; fail clearly
        raise ValueError("axis must be 'x' or 'y'")

    def log_x_cb(self, w, val):
        """Toggle linear/log scale for X-axis."""
        self.line_plot.logx = val
        self.do_plot()

    def log_y_cb(self, w, val):
        """Toggle linear/log scale for Y-axis."""
        self.line_plot.logy = val
        self.do_plot()

    def set_xlim_cb(self, redraw=True):
        """Set X plot limits based on user-entered values.

        An entry that cannot be parsed as a float falls back to the
        current axis limit and its widget is refreshed to show that
        value.  (Previously a single bad entry caused a NameError when
        the unparsed local was used below.)
        """
        cur_min, cur_max = self.line_plot.ax.get_xlim()
        try:
            xmin = float(self.w.x_lo.get_text())
            set_min = False
        except Exception:
            xmin = cur_min
            set_min = True
        try:
            xmax = float(self.w.x_hi.get_text())
            set_max = False
        except Exception:
            xmax = cur_max
            set_max = True

        if set_min or set_max:
            self.line_plot.draw()
            self.set_xlimits_widgets(set_min=set_min, set_max=set_max)

        if not (set_min and set_max):
            self.line_plot.ax.set_xlim(xmin, xmax)
            if redraw:
                self.line_plot.draw()

    def set_ylim_cb(self, redraw=True):
        """Set Y plot limits based on user-entered values.

        Same fallback behavior as set_xlim_cb().
        """
        cur_min, cur_max = self.line_plot.ax.get_ylim()
        try:
            ymin = float(self.w.y_lo.get_text())
            set_min = False
        except Exception:
            ymin = cur_min
            set_min = True
        try:
            ymax = float(self.w.y_hi.get_text())
            set_max = False
        except Exception:
            ymax = cur_max
            set_max = True

        if set_min or set_max:
            self.line_plot.draw()
            self.set_ylimits_widgets(set_min=set_min, set_max=set_max)

        if not (set_min and set_max):
            self.line_plot.ax.set_ylim(ymin, ymax)
            if redraw:
                self.line_plot.draw()

    def reset_xlim_cb(self):
        """Autoscale the X axis."""
        self.line_plot.autoscale('x')

    def reset_ylim_cb(self):
        """Autoscale the Y axis."""
        self.line_plot.autoscale('y')

    def set_marker_cb(self, w, val):
        """Toggle show/hide data point markers."""
        self.do_plot()

    def save_cb(self):
        """Save plot to file."""
        # This just defines the basename.
        # Extension has to be explicitly defined or things can get messy.
        w = Widgets.SaveDialog(title='Save plot')
        target = w.get_path()
        if target is None:
            # Save canceled
            return
        plot_ext = self.settings.get('file_suffix', '.png')

        if not target.endswith(plot_ext):
            target += plot_ext

        # TODO: This can be a user preference?
        fig_dpi = 100

        try:
            fig = self.line_plot.get_figure()
            fig.savefig(target, dpi=fig_dpi)
        except Exception as e:
            self.logger.error(str(e))
        else:
            self.logger.info('Table plot saved as {0}'.format(target))

    def __str__(self):
        return "PlotViewer"
| bsd-3-clause | a496553c1e7ba81e9d2b2cb8f8daa3ca | 29.601626 | 75 | 0.547467 | 3.633205 | false | false | false | false |
ejeschke/ginga | ginga/mplw/transform.py | 2 | 10541 | #
# transform.py -- a custom projection for supporting matplotlib plotting
# on ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# NOTE: this code is based on "custom_projection_example.py", an example
# script developed by matplotlib developers
# See http://matplotlib.org/examples/api/custom_projection_example.html
#
import matplotlib
from matplotlib.axes import Axes
from matplotlib.path import Path
from matplotlib.transforms import BboxTransformTo, Transform
from matplotlib.projections import register_projection
from ginga.Bindings import PointEvent
class GingaAxes(Axes):
    """
    This is a custom matplotlib projection to support matplotlib plotting
    on a ginga-rendered image in a matplotlib Figure.
    This code is based on 'custom_projection_example.py', an example
    script developed by matplotlib developers.
    """
    # The projection must specify a name.  This will be used by the
    # user to select the projection, i.e. ``subplot(111,
    # projection='ginga')``.
    name = 'ginga'

    def __init__(self, *args, **kwargs):
        """Create the axes; pass the Ginga viewer via the 'viewer' kwarg."""
        # this is the Ginga object (may be None; see set_viewer())
        self.viewer = kwargs.pop('viewer', None)
        Axes.__init__(self, *args, **kwargs)
        ## self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def set_viewer(self, viewer):
        """Attach the Ginga viewer whose mapping drives this projection."""
        self.viewer = viewer
        # keep the custom data transform pointed at the same viewer
        self.transData.viewer = viewer

    def _set_lim_and_transforms(self):
        """
        This is called once when the plot is created to set up all the
        transforms for the data, text and grids.
        """
        # There are three important coordinate spaces going on here:
        #
        # 1. Data space: The space of the data itself
        #
        # 2. Axes space: The unit rectangle (0, 0) to (1, 1)
        # covering the entire plot area.
        #
        # 3. Display space: The coordinates of the resulting image,
        # often in pixels or dpi/inch.
        # This function makes heavy use of the Transform classes in
        # ``lib/matplotlib/transforms.py.`` For more information, see
        # the inline documentation there.
        # The goal of the first two transformations is to get from the
        # data space to axes space.  It is separated into a non-affine
        # and affine part so that the non-affine part does not have to be
        # recomputed when a simple affine change to the figure has been
        # made (such as resizing the window or changing the dpi).
        # 3) This is the transformation from axes space to display
        # space.
        self.transAxes = BboxTransformTo(self.bbox)
        # Now put these 3 transforms together -- from data all the way
        # to display coordinates.  Using the '+' operator, these
        # transforms will be applied "in order".  The transforms are
        # automatically simplified, if possible, by the underlying
        # transformation framework.
        #self.transData = \
        #    self.transProjection + self.transAffine + self.transAxes
        # the whole data->display mapping is delegated to the viewer
        # through the custom GingaTransform below
        self.transData = self.GingaTransform()
        self.transData.viewer = self.viewer
        # self._xaxis_transform = blended_transform_factory(
        #         self.transData, self.transAxes)
        # self._yaxis_transform = blended_transform_factory(
        #         self.transAxes, self.transData)
        self._xaxis_transform = self.transData
        self._yaxis_transform = self.transData

    # Prevent the user from applying scales to one or both of the
    # axes.  In this particular case, scaling the axes wouldn't make
    # sense, so we don't allow it.
    def set_xscale(self, *args, **kwargs):
        """Only 'linear' X scale is supported."""
        if args[0] != 'linear':
            raise NotImplementedError
        Axes.set_xscale(self, *args, **kwargs)

    def set_yscale(self, *args, **kwargs):
        """Only 'linear' Y scale is supported."""
        if args[0] != 'linear':
            raise NotImplementedError
        Axes.set_yscale(self, *args, **kwargs)

    # Prevent the user from changing the axes limits.  This also
    # applies to interactive panning and zooming in the GUI interfaces.
    ## def set_xlim(self, *args, **kwargs):
    ##     print "Setting xlim!", args

    ## def set_ylim(self, *args, **kwargs):
    ##     print "Setting ylim!", args

    def format_coord(self, x, y):
        """
        Override this method to change how the values are displayed in
        the status bar.
        """
        return 'x=%f, y=%f' % (x, y)

    def get_data_ratio(self):
        """
        Return the aspect ratio of the data itself.
        This method should be overridden by any Axes that have a
        fixed data ratio.
        """
        return 1.0

    def can_zoom(self):
        """
        Return True if this axes support the zoom box
        """
        # TODO: get zoom box working
        return False

    def can_pan(self):
        """
        Return True if this axes support the zoom box
        """
        return True

    def start_pan(self, x, y, button):
        """
        Called when a pan operation has started.
        *x*, *y* are the mouse coordinates in display coords.
        button is the mouse button number:
        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT
        .. note::
            Intended to be overridden by new projection types.
        """
        # forward the gesture to ginga's 'pan' mode bindings as a
        # synthesized PointEvent
        bd = self.viewer.get_bindings()
        mode = bd.get_mode_obj('pan')
        data_x, data_y = self.viewer.get_data_xy(x, y)
        event = PointEvent(button=button, state='down',
                           data_x=data_x, data_y=data_y,
                           viewer=self.viewer)
        if button == 1:
            mode.ms_pan(self.viewer, event, data_x, data_y)
        elif button == 3:
            mode.ms_zoom(self.viewer, event, data_x, data_y)

    def end_pan(self):
        """
        Called when a pan operation completes (when the mouse button
        is up.)
        .. note::
            Intended to be overridden by new projection types.
        """
        pass

    def drag_pan(self, button, key, x, y):
        """
        Called when the mouse moves during a pan operation.
        *button* is the mouse button number:
        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT
        *key* is a "shift" key
        *x*, *y* are the mouse coordinates in display coords.
        .. note::
            Intended to be overridden by new projection types.
        """
        bd = self.viewer.get_bindings()
        mode = bd.get_mode_obj('pan')
        data_x, data_y = self.viewer.get_data_xy(x, y)
        event = PointEvent(button=button, state='move',
                           data_x=data_x, data_y=data_y,
                           viewer=self.viewer)
        if button == 1:
            mode.ms_pan(self.viewer, event, data_x, data_y)
        elif button == 3:
            mode.ms_zoom(self.viewer, event, data_x, data_y)

    # Now, the transforms themselves.
    class GingaTransform(Transform):
        """
        The base Ginga transform: maps data coordinates to display
        coordinates via the attached Ginga viewer.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False
        has_inverse = True
        viewer = None
        #pass_through = True

        def invalidate(self):
            #print("I don't feel validated! (%s)" % (self.pass_through))
            return Transform.invalidate(self)

        def transform_non_affine(self, xy):
            """
            Override the transform_non_affine method to implement the custom
            transform.
            The input and output are Nx2 numpy arrays.
            """
            if self.viewer is None:
                # no viewer attached yet: act as the identity transform
                return xy
            tr = self.viewer.tform['data_to_native']
            res = tr.to_(xy)
            return res

        # This is where things get interesting.  With this projection,
        # straight lines in data space become curves in display space.
        # This is done by interpolating new values between the input
        # values of the data.  Since ``transform`` must not return a
        # differently-sized array, any transform that requires
        # changing the length of the data array must happen within
        # ``transform_path``.
        def transform_path_non_affine(self, path):
            ipath = path.interpolated(path._interpolation_steps)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = \
            Transform.transform_path_non_affine.__doc__

        if matplotlib.__version__ < '1.2':
            # NOTE(review): lexicographic version comparison; works for
            # current mpl releases but is fragile in general.
            # Note: For compatibility with matplotlib v1.1 and older, you'll
            # need to explicitly implement a ``transform`` method as well.
            # Otherwise a ``NotImplementedError`` will be raised. This isn't
            # necessary for v1.2 and newer, however.
            transform = transform_non_affine

            # Similarly, we need to explicitly override ``transform_path`` if
            # compatibility with older matplotlib versions is needed. With v1.2
            # and newer, only overriding the ``transform_path_non_affine``
            # method is sufficient.
            transform_path = transform_path_non_affine
            transform_path.__doc__ = Transform.transform_path.__doc__

        def inverted(self):
            # hand back an inverse bound to the same viewer
            tform = GingaAxes.InvertedGingaTransform()
            tform.viewer = self.viewer
            return tform
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedGingaTransform(Transform):
        # inverse mapping: display (native) coordinates back to data
        input_dims = 2
        output_dims = 2
        is_separable = False
        has_inverse = True
        viewer = None

        def transform_non_affine(self, xy):
            # identity when no viewer is attached
            if self.viewer is None:
                return xy
            tr = self.viewer.tform['data_to_native']
            res = tr.from_(xy)
            return res
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        # As before, we need to implement the "transform" method for
        # compatibility with matplotlib v1.1 and older.
        if matplotlib.__version__ < '1.2':
            transform = transform_non_affine

        def inverted(self):
            # The inverse of the inverse is the original transform... ;)
            tform = GingaAxes.GingaTransform()
            tform.viewer = self.viewer
            return tform
        inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it, e.g. ``figure.add_subplot(111, projection='ginga')``.
register_projection(GingaAxes)

# END
| bsd-3-clause | cc04ef4afe78911a7ae70606a9c94363 | 32.893891 | 79 | 0.592923 | 4.194588 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/PixTable.py | 2 | 25507 | #
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
``PixTable`` provides a way to check or monitor the pixel values in
a region.
**Plugin Type: Local**
``PixTable`` is a local plugin, which means it is associated with a channel.
An instance can be opened for each channel.
**Basic Use**
In the most basic use, simply move the cursor around the channel
viewer; an array of pixel values will appear in the "Pixel Values"
display in the plugin UI. The center value is highlighted, and this
corresponds to the value under the cursor.
You can choose a 3x3, 5x5, 7x7, or 9x9 grid from the left-most
combobox control. It may help to adjust the "Font Size" control
to prevent having the array values cut off on the sides. You can
also enlarge the plugin workspace to see more of the table.
.. note:: The order of the value table shown will not necessarily match
          that of the channel viewer if the image is flipped, transposed,
          or rotated.
**Using Marks**
When you set and select a mark, the pixel values will be shown
surrounding the mark instead of the cursor. There can be any number
of marks, and they are each noted with a numbered "X". Simply change the
mark drop down control to select a different mark and see the values
around it. The currently selected mark is shown with a different color
than the others.
The marks will stay in position even if a new image is loaded and
they will show the values for the new image. In this way you can
monitor the area around a spot if the image is updating frequently.
If the "Pan to mark" checkbox is selected, then when you select a
different mark from the mark control, the channel viewer will pan to
that mark. This can be useful to inspect the same spots in several
different images, especially when zoomed in tight to the image.
.. note:: If you change the mark control back to "None", then the pixel
table will again update as you move the cursor around the viewer.
The "Caption" box can be used to set a text annotation that will be
appended to the mark label when the next mark is created. This can be
used to label a feature in the image, for example.
**Deleting Marks**
To delete a mark, select it in the mark control and then press the
button marked "Delete". To delete all the marks, press the button
marked "Delete All".
**Moving Marks**
When the "Move" radio button is checked, and a mark is selected, then
clicking or dragging anywhere in the image will move the mark to that
location and update the pixel table. If no mark is currently selected
then a new one will be created and moved.
**Drawing Marks**
When the "Draw" radio button is checked, then clicking and dragging creates
a new mark. The longer the draw, the bigger radius of the "X".
**Editing Marks**
When the "Edit" radio button is checked after a mark has been selected then
you can drag the control points of the mark to increase the radius of the
arms of the X or you can drag the bounding box to move the mark. If the
editing control points are not shown, simply click on the center of a mark
to enable them.
**Special Keys**
In "Move" mode the following keys are active:
- "n" will place a new mark at the site of the cursor
- "m" will move the current mark (if any) to the site of the cursor
- "d" will delete the current mark (if any)
- "j" will select the previous mark (if any)
- "k" will select the next mark (if any)
**User Configuration**
"""
import numpy as np
from ginga.gw import Widgets, Viewers
from ginga import GingaPlugin, colors
__all__ = ['PixTable']
class PixTable(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Set up plugin state, preferences, and the drawing canvas.

        `fv` is the ginga shell; `fitsimage` is the channel viewer this
        local plugin instance is bound to.
        """
        # superclass defines some variables for us, like logger
        super(PixTable, self).__init__(fv, fitsimage)

        self.layertag = 'pixtable-canvas'
        self.pan2mark = False

        # read preferences for this plugin
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_PixTable')
        self.settings.add_defaults(fontsize=10,
                                   font='fixed',
                                   mark_radius=10,
                                   mark_style='cross',
                                   mark_color='lightgreen',
                                   select_color='cyan',
                                   drag_update=True)
        self.settings.load(onError='silent')

        # canvas for mark drawing/editing; callbacks (cursor_cb, draw_cb,
        # edit_cb, btndown_cb, etc.) are defined later in this class
        self.dc = self.fv.get_draw_classes()
        canvas = self.dc.DrawingCanvas()
        self.fitsimage.set_callback('cursor-changed', self.cursor_cb)
        canvas.enable_draw(True)
        canvas.set_drawtype('point', color='cyan', linestyle='dash')
        canvas.set_callback('draw-event', self.draw_cb)
        canvas.enable_edit(True)
        canvas.set_callback('edit-event', self.edit_cb)
        canvas.add_draw_mode('move', down=self.btndown_cb,
                             move=self.motion_cb, up=self.btnup_cb,
                             key=self.keydown_cb)
        canvas.set_draw_mode('move')
        canvas.register_for_cursor_drawing(self.fitsimage)
        canvas.set_surface(self.fitsimage)
        self.canvas = canvas

        # For pixel table
        self.pixtbl_radius = 2        # radius N -> (2N+1)x(2N+1) table
        self.txt_arr = None           # text objects for each cell
        self.sum_arr = None
        self.sizes = [1, 2, 3, 4]     # selectable radii (3x3 .. 9x9)
        self.maxdigits = 9
        self.fmt_cell = f'^{self.maxdigits}.4g'   # centered cell format
        self.lastx = 0
        self.lasty = 0
        self.font = self.settings.get('font', 'fixed')
        # note: the fallback 12 here is effectively dead -- add_defaults()
        # above guarantees 'fontsize' exists with default 10
        self.fontsize = self.settings.get('fontsize', 12)
        self.fontsizes = [6, 8, 9, 10, 11, 12, 14, 16, 18, 24, 28, 32]
        self.pixview = None
        self._wd = 400
        self._ht = 300
        # hack to set a reasonable starting position for the splitter
        _sz = max(self._wd, self._ht)
        self._split_sizes = [_sz, _sz]
        self.gui_up = False

        # For "marks" feature
        self.mark_radius = self.settings.get('mark_radius', 10)
        self.mark_style = self.settings.get('mark_style', 'cross')
        self.mark_color = self.settings.get('mark_color', 'lightgreen')
        self.select_color = self.settings.get('select_color', 'cyan')
        self.marks = ['None']         # entry 0 is the "no mark" placeholder
        self.mark_index = 0           # counter used to number new marks
        self.mark_selected = None     # tag of selected mark, or None
        self.drag_update = self.settings.get('drag_update', True)
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
box, sw, orientation = Widgets.get_oriented_box(container,
orientation=self.settings.get('orientation', None))
box.set_border_width(4)
box.set_spacing(2)
fr = Widgets.Frame("Pixel Values")
# We just use a ginga widget to implement the pixtable
pixview = Viewers.CanvasView(logger=self.logger)
pixview.set_desired_size(self._wd, self._ht)
bg = colors.lookup_color('#202030')
pixview.set_bg(*bg)
bd = pixview.get_bindings()
bd.enable_zoom(True)
bd.enable_pan(True)
self.pixview = pixview
self.pix_w = Viewers.GingaViewerWidget(pixview)
fr.set_widget(self.pix_w)
self.pix_w.resize(self._wd, self._ht)
paned = Widgets.Splitter(orientation=orientation)
self.w.splitter = paned
paned.add_widget(fr)
self._rebuild_table()
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
cbox1 = Widgets.ComboBox()
index = 0
for i in self.sizes:
j = 1 + i * 2
name = "%dx%d" % (j, j)
cbox1.append_text(name)
index += 1
index = self.sizes.index(self.pixtbl_radius)
cbox1.set_index(index)
cbox1.add_callback('activated', self.set_cutout_size_cb)
cbox1.set_tooltip("Select size of pixel table")
btns.add_widget(cbox1, stretch=0)
# control for selecting a mark
cbox2 = Widgets.ComboBox()
for tag in self.marks:
cbox2.append_text(tag)
if self.mark_selected is None:
cbox2.set_index(0)
else:
cbox2.show_text(self.mark_selected)
cbox2.add_callback('activated', self.mark_select_cb)
self.w.marks = cbox2
cbox2.set_tooltip("Select a mark")
btns.add_widget(cbox2, stretch=0)
btn1 = Widgets.Button("Delete")
btn1.add_callback('activated', lambda w: self.clear_mark_cb())
btn1.set_tooltip("Delete selected mark")
btn1.set_enabled(len(self.marks) > 1)
self.w.btn_delete = btn1
btns.add_widget(btn1, stretch=0)
btn2 = Widgets.Button("Delete All")
btn2.add_callback('activated', lambda w: self.clear_all())
btn2.set_tooltip("Clear all marks")
btns.add_widget(btn2, stretch=0)
btn2.set_enabled(len(self.marks) > 1)
self.w.btn_delete_all = btn2
btns.add_widget(Widgets.Label(''), stretch=1)
vbox2 = Widgets.VBox()
vbox2.add_widget(btns, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
btn3 = Widgets.CheckBox("Pan to mark")
btn3.set_state(self.pan2mark)
btn3.add_callback('activated', self.pan2mark_cb)
btn3.set_tooltip("Pan follows selected mark")
btns.add_widget(btn3)
btns.add_widget(Widgets.Label(''), stretch=1)
vbox2.add_widget(btns, stretch=0)
captions = [
('Font size:', 'label', 'Font size', 'combobox',
'Caption:', 'label', 'Caption', 'entry'),
]
w, b = Widgets.build_info(captions)
self.w.update(b)
vbox2.add_widget(w, stretch=0)
b.font_size.set_tooltip("Set font size for pixel display")
for size in self.fontsizes:
b.font_size.append_text(str(size))
b.font_size.show_text(str(self.fontsize))
b.font_size.add_callback('activated', self.set_font_size_cb)
b.caption.set_tooltip("Text to append to the marker")
vbox2.add_widget(Widgets.Label(''), stretch=1)
box.add_widget(vbox2, stretch=1)
paned.add_widget(sw)
paned.set_sizes(self._split_sizes)
top.add_widget(paned, stretch=1)
mode = self.canvas.get_draw_mode()
hbox = Widgets.HBox()
btn1 = Widgets.RadioButton("Move")
btn1.set_state(mode == 'move')
btn1.add_callback('activated',
lambda w, val: self.set_mode_cb('move', val))
btn1.set_tooltip("Choose this to add or move a mark")
self.w.btn_move = btn1
hbox.add_widget(btn1)
btn2 = Widgets.RadioButton("Draw", group=btn1)
btn2.set_state(mode == 'draw')
btn2.add_callback('activated',
lambda w, val: self.set_mode_cb('draw', val))
btn2.set_tooltip("Choose this to draw a new or replacement mark")
self.w.btn_draw = btn2
hbox.add_widget(btn2)
btn3 = Widgets.RadioButton("Edit", group=btn1)
btn3.set_state(mode == 'edit')
btn3.add_callback('activated',
lambda w, val: self.set_mode_cb('edit', val))
btn3.set_tooltip("Choose this to edit a mark")
self.w.btn_edit = btn3
hbox.add_widget(btn3)
hbox.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(hbox, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def select_mark(self, tag, pan=True):
    """Make the mark with canvas tag `tag` the selected mark.

    If `tag` is None, any current selection is cleared.  If `pan` is
    True and pan-to-mark is enabled, the main viewer is panned to the
    mark's position.  Finishes by refreshing the pixel table.
    """
    # deselect the current selected mark, if there is one
    if self.mark_selected is not None:
        try:
            obj = self.canvas.get_object_by_tag(self.mark_selected)
            obj.set_attr_all(color=self.mark_color)
        except Exception:
            # old object may have been deleted
            pass
    self.mark_selected = tag
    if tag is None:
        # nothing selected: reset the combobox text and redraw
        self.w.marks.show_text('None')
        self.canvas.redraw(whence=3)
        return
    self.w.marks.show_text(tag)
    # highlight the newly selected mark
    obj = self.canvas.get_object_by_tag(tag)
    obj.set_attr_all(color=self.select_color)
    # remember the mark position; objects[0] is the Point of the compound
    self.lastx = obj.objects[0].x
    self.lasty = obj.objects[0].y
    if self.pan2mark and pan:
        self.fitsimage.panset_xy(self.lastx, self.lasty)
    self.canvas.redraw(whence=3)
    self.redo()
def mark_select_cb(self, w, index):
    """Combobox callback: select the mark at `index` (0 is the 'None' entry)."""
    tag = None if index == 0 else self.marks[index]
    self.select_mark(tag)
def pan2mark_cb(self, w, val):
    """Checkbox callback: enable/disable panning to a mark on selection."""
    self.pan2mark = val
def clear_mark_cb(self):
    """Delete the currently selected mark from the canvas and the mark list."""
    tag = self.mark_selected
    if tag is None:
        # nothing selected; nothing to do
        return
    self.canvas.delete_object_by_tag(tag)
    self.w.marks.delete_alpha(tag)
    self.marks.remove(tag)
    # reset selection to the 'None' entry
    self.w.marks.set_index(0)
    self.mark_selected = None
    # marks list always contains the placeholder 'None' entry, hence > 1
    self.w.btn_delete.set_enabled(len(self.marks) > 1)
    self.w.btn_delete_all.set_enabled(len(self.marks) > 1)
def clear_all(self):
    """Delete all marks from the canvas and reset the mark bookkeeping."""
    self.canvas.delete_all_objects()
    for name in self.marks:
        self.w.marks.delete_alpha(name)
    # restore the placeholder 'None' entry as the only item
    self.marks = ['None']
    self.w.marks.append_text('None')
    self.w.marks.set_index(0)
    self.mark_selected = None
    self.mark_index = 0
    self.w.btn_delete.set_enabled(False)
    self.w.btn_delete_all.set_enabled(False)
def set_font_size_cb(self, w, index):
    """Combobox callback: change the table font size, then rebuild and refresh."""
    self.fontsize = self.fontsizes[index]
    self._rebuild_table()
    self.redo()
def plot(self, data, x1, y1, x2, y2, data_x, data_y, radius,
         maxv=9):
    """Fill the pixel table cells from `data` and update the statistics rows.

    `data` is the cutout around the current position; x1..y2 and
    data_x/data_y describe where the cutout came from.  All reported
    statistics now ignore NaN pixels consistently (previously mean, RMS
    and median would become NaN if any NaN pixel was present, while
    min/max/sum already ignored them).
    """
    # Because most FITS data is stored with lower Y indexes to
    # bottom
    data = np.flipud(data)
    width, height = self.fitsimage.get_dims(data)
    if self.txt_arr is None:
        return
    if data.shape != self.txt_arr.shape:
        return
    # NaN-aware statistics throughout
    maxval = np.nanmax(data)
    minval = np.nanmin(data)
    avgval = np.nanmean(data)
    rmsval = np.sqrt(np.nanmean(np.square(data)))
    medianval = np.nanmedian(data)
    sumval = np.nansum(data)
    fmt_cell = self.fmt_cell

    def _vecfunc(val, out):
        # cell objects get their text updated in place
        if not np.isscalar(val):
            val = np.average(val)
        out.text = f'{val:{fmt_cell}}'
    func = np.vectorize(_vecfunc)
    func(data, self.txt_arr)
    # NOTE(review): indexing looks transposed (rows should be indexed by
    # height), but the table is always square so this is harmless — confirm
    ctr_txt = self.txt_arr[width // 2][height // 2]
    # Report statistics
    self.sum_arr[0].text = f"Min: {minval:{fmt_cell}} Mean: {avgval:{fmt_cell}} Median: {medianval:{fmt_cell}}"
    self.sum_arr[1].text = f"Max: {maxval:{fmt_cell}} RMS: {rmsval:{fmt_cell}} Sum: {sumval:{fmt_cell}}"
    # update the pixtable
    self.pixview.panset_xy(ctr_txt.x, ctr_txt.y)
def close(self):
    """Ask the framework to stop this local plugin for our channel."""
    self.fv.stop_local_plugin(self.chname, str(self))
    return True
def start(self):
    """Plugin start: attach our canvas layer to the viewer and resume."""
    # insert layer if it is not already
    p_canvas = self.fitsimage.get_canvas()
    try:
        p_canvas.get_object_by_tag(self.layertag)
    except KeyError:
        # Add canvas layer
        p_canvas.add(self.canvas, tag=self.layertag)
    self.resume()
def stop(self):
    """Plugin stop: save UI state and detach our canvas layer."""
    self.gui_up = False
    # remember splitter geometry so it is restored next time
    self._split_sizes = self.w.splitter.get_sizes()
    # remove the canvas from the image
    self.canvas.ui_set_active(False)
    p_canvas = self.fitsimage.get_canvas()
    try:
        p_canvas.delete_object_by_tag(self.layertag)
    except Exception:
        # layer may already be gone; best-effort cleanup
        pass
    self.pixview = None
def pause(self):
    """Suspend our canvas UI event handling."""
    self.canvas.ui_set_active(False)
def resume(self):
    """Re-activate our canvas UI handling and refresh the table."""
    # turn off any mode user may be in
    self.modes_off()
    self.canvas.ui_set_active(True, viewer=self.fitsimage)
    self.redo()
def redo(self):
    """Recompute the pixel-table contents from the current position."""
    if self.pixview is None:
        return
    # cut out and set the pixel table data
    image = self.fitsimage.get_vip()
    if image is None:
        return
    # We report the value across the pixel, even though the coords
    # change halfway across the pixel
    px_off = self.fitsimage.data_off
    data_x, data_y = (int(np.floor(self.lastx + px_off)),
                      int(np.floor(self.lasty + px_off)))
    # cutout image data
    data, x1, y1, x2, y2 = image.cutout_radius(data_x, data_y,
                                               self.pixtbl_radius)
    # run plot() through the error wrapper so exceptions are reported in the GUI
    self.fv.error_wrap(self.plot, data, x1, y1, x2, y2,
                       self.lastx, self.lasty,
                       self.pixtbl_radius, maxv=9)
def _rebuild_table(self):
    """Rebuild the grid of Text canvas objects that displays pixel values.

    Creates one Text object per cell (a square grid sized by
    `pixtbl_radius`), plus two summary-row Text objects, adds them all
    to the pixel-table viewer canvas as a single compound object, and
    sets scroll limits to fit.
    """
    canvas = self.pixview.get_canvas()
    canvas.delete_all_objects(redraw=False)
    Text = canvas.get_draw_class('text')
    # measure a sample glyph to derive cell dimensions
    ex_txt = Text(0, 0, text='5', fontsize=self.fontsize, font=self.font)
    font_wd, font_ht = self.fitsimage.renderer.get_dimensions(ex_txt)
    max_wd = self.maxdigits + 2
    crdmap = self.pixview.get_coordmap('window')
    rows = []
    objs = []
    max_cx = 0
    x_offset = 6
    y_offset = 4
    # create the table of labels
    for row in range(self.pixtbl_radius * 2 + 1):
        cols = []
        for col in range(self.pixtbl_radius * 2 + 1):
            col_wd = font_wd * max_wd
            cx = col_wd * col + x_offset
            max_cx = max(max_cx, cx + col_wd)
            cy = font_ht * (row + 1) + y_offset
            color = 'lightgreen'
            # center cell (the pixel under the cursor/mark) is highlighted
            if (row == col) and (row == self.pixtbl_radius):
                color = 'pink'
            text_obj = Text(cx, cy, text='', font=self.font,
                            color=color, fontsize=self.fontsize,
                            coord='window')
            objs.append(text_obj)
            cols.append(text_obj)
        rows.append(cols)
    self.txt_arr = np.array(rows)
    # add summary row(s)
    cx = (font_wd + 2) + x_offset
    cy += font_ht + 20
    s1 = Text(cx, cy, text='', font=self.font,
              color='cyan', fontsize=self.fontsize,
              coord='window')
    objs.append(s1)
    cy += font_ht + y_offset
    s2 = Text(cx, cy, text='', font=self.font,
              color='cyan', fontsize=self.fontsize,
              coord='window')
    objs.append(s2)
    self.sum_arr = np.array([s1, s2])
    # add all of the text objects to the canvas as one large
    # compound object
    CompoundObject = canvas.get_draw_class('compoundobject')
    canvas.add(CompoundObject(*objs), redraw=False)
    # set limits for scrolling
    self.pixview.set_limits(((0, 0), (max_cx, cy)), coord='window')
def set_cutout_size_cb(self, w, val):
    """Combobox callback: change the cutout radius, rebuild, refresh."""
    self.pixtbl_radius = self.sizes[w.get_index()]
    self._rebuild_table()
    self.redo()
def cursor_cb(self, canvas, junk, data_x, data_y):
    """Cursor-motion handler: track the cursor and refresh the table.

    Does nothing while a mark is selected (the table then follows the
    mark, not the cursor).  Always returns False so the event is passed
    on — previously some early-exit paths returned None and others
    False; this makes them consistent.
    """
    if not self.gui_up:
        return False
    if self.mark_selected is not None:
        return False
    if self.pixview is None:
        return False
    self.lastx, self.lasty = data_x, data_y
    self.redo()
    return False
def add_mark(self, data_x, data_y, radius=None, color=None, style=None,
             text=None):
    """Create a new mark (a Point plus caption Text) at data coordinates.

    Any of radius/color/style left as None defaults to the plugin's
    configured mark settings.  The new mark is added to the canvas and
    the marks list, and becomes the selected mark (without panning).
    """
    # use explicit None tests so legitimate falsy values (e.g. a radius
    # of 0) are honored; this also removes the redundant second
    # radius-default check that the old code performed further down
    if radius is None:
        radius = self.mark_radius
    if color is None:
        color = self.mark_color
    if style is None:
        style = self.mark_style
    # lazy %-args: formatting only happens if debug logging is enabled
    self.logger.debug("Setting mark at %d,%d", data_x, data_y)
    self.mark_index += 1
    tag = 'mark%d' % (self.mark_index)
    caption = "%d" % (self.mark_index)
    if text is not None:
        caption = caption + ': ' + text
    pt_obj = self.dc.Point(data_x, data_y, radius,
                           style=style, color=color,
                           linestyle='solid')
    txt_obj = self.dc.Text(10, 0, caption,
                           font=self.font, fontsize=self.fontsize,
                           color=color, ref_obj=pt_obj, coord='offset')
    txt_obj.editable = False
    # canvas.add() returns the definitive tag for the compound object
    tag = self.canvas.add(self.dc.CompoundObject(pt_obj, txt_obj),
                          tag=tag)
    self.marks.append(tag)
    self.w.marks.append_text(tag)
    self.w.btn_delete.set_enabled(True)
    self.w.btn_delete_all.set_enabled(True)
    self.select_mark(tag, pan=False)
def _mark_update(self, data_x, data_y):
    """Move the selected mark to (data_x, data_y).

    Returns True if a mark was moved, False if no mark is selected.
    """
    if self.mark_selected is None:
        return False
    m_obj = self.canvas.get_object_by_tag(self.mark_selected)
    # objects[0] is the Point; the caption Text follows via ref_obj
    p_obj = m_obj.objects[0]
    p_obj.move_to_pt((data_x, data_y))
    self.lastx, self.lasty = data_x, data_y
    self.canvas.update_canvas()
    return True
def btndown_cb(self, canvas, event, data_x, data_y, viewer):
    """Button-press handler: move the selected mark, or create a new one."""
    if self._mark_update(data_x, data_y):
        if self.drag_update:
            self.redo()
        return True
    # no selected mark, make a new one
    caption = self.w.caption.get_text().strip()
    if len(caption) == 0:
        caption = None
    self.add_mark(data_x, data_y, text=caption)
    return True
def motion_cb(self, canvas, event, data_x, data_y, viewer):
    """Drag handler: follow the cursor with the selected mark."""
    moved = self._mark_update(data_x, data_y)
    if not moved:
        return False
    if self.drag_update:
        self.redo()
    return True
def btnup_cb(self, canvas, event, data_x, data_y, viewer):
    """Button-release handler: finish a mark drag and refresh the table."""
    if self._mark_update(data_x, data_y):
        self.redo()
        return True
    return False
def prev_mark(self):
    """Select the mark just before the currently selected one, if any."""
    if len(self.marks) <= 1 or self.mark_selected is None:
        # no previous
        return
    idx = self.marks.index(self.mark_selected) - 1
    if idx < 0:
        return
    tag = self.marks[idx]
    # index 0 holds the placeholder 'None' entry
    self.select_mark(None if tag == 'None' else tag)
def next_mark(self):
    """Select the mark just after the currently selected one, if any."""
    if len(self.marks) <= 1:
        # no next
        return
    if self.mark_selected is None:
        idx = 1
    else:
        idx = self.marks.index(self.mark_selected) + 1
    if idx >= len(self.marks):
        return
    tag = self.marks[idx]
    # index 0 holds the placeholder 'None' entry
    self.select_mark(None if tag == 'None' else tag)
def keydown_cb(self, canvas, event, data_x, data_y, viewer):
    """Keyboard shortcuts: n=new mark, m=move mark, d=delete, j/k=prev/next."""
    key = event.key
    if key == 'n':
        caption = self.w.caption.get_text().strip()
        self.add_mark(data_x, data_y,
                      text=(caption if len(caption) > 0 else None))
        return True
    if key == 'm':
        if self._mark_update(data_x, data_y):
            self.redo()
        return True
    if key == 'd':
        self.clear_mark_cb()
        return True
    if key == 'j':
        self.prev_mark()
        return True
    if key == 'k':
        self.next_mark()
        return True
    return False
def draw_cb(self, canvas, tag):
    """Draw-mode handler: replace the drawn shape with a proper mark."""
    obj = canvas.get_object_by_tag(tag)
    # the drawn object is only a template; delete it and add a real mark
    canvas.delete_object_by_tag(tag)
    caption = self.w.caption.get_text().strip()
    if len(caption) == 0:
        caption = None
    self.add_mark(obj.x, obj.y, text=caption, radius=obj.radius)
def edit_cb(self, canvas, obj):
    """Edit-mode handler: refresh the table if the selected mark was edited."""
    if self.mark_selected is not None:
        m_obj = self.canvas.get_object_by_tag(self.mark_selected)
        if m_obj is not None and m_obj.objects[0] is obj:
            # edited mark was the selected mark
            self.lastx, self.lasty = obj.x, obj.y
            self.redo()
    return True
def edit_select_mark(self):
    """Put the currently selected mark into canvas edit mode (or clear it)."""
    if self.mark_selected is not None:
        obj = self.canvas.get_object_by_tag(self.mark_selected)
        # drill down to reference shape
        if hasattr(obj, 'objects'):
            obj = obj.objects[0]
        self.canvas.edit_select(obj)
    else:
        self.canvas.clear_selected()
    self.canvas.update_canvas()
def set_mode_cb(self, mode, tf):
    """Called when one of the Move/Draw/Edit radio buttons is selected."""
    if not tf:
        # only react when the button is turned on
        return True
    self.canvas.set_draw_mode(mode)
    if mode == 'edit':
        self.edit_select_mark()
    return True
def set_mode(self, mode):
    """Programmatically set the draw mode and sync the radio buttons."""
    self.canvas.set_draw_mode(mode)
    for name in ('move', 'draw', 'edit'):
        getattr(self.w, 'btn_' + name).set_state(mode == name)
def __str__(self):
    """Return the canonical plugin name used by the framework."""
    return 'pixtable'
# Append module docstring with config doc for auto insert by Sphinx.
# (generate_cfg_example pulls the plugin's default settings into the docs)
from ginga.util.toolbox import generate_cfg_example  # noqa
if __doc__ is not None:
    __doc__ += generate_cfg_example('plugin_PixTable', package='ginga')
# END
| bsd-3-clause | f30709b6cc86a063d4ae3671ec38c866 | 33.237584 | 115 | 0.573999 | 3.566415 | false | false | false | false |
ejeschke/ginga | ginga/web/pgw/Widgets.py | 3 | 121929 | #
# Widgets.py -- wrapped HTML widgets and convenience functions
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os.path
import threading
import time
import json
import asyncio
from functools import reduce
from ginga.misc import Callback, Bunch, Settings, LineHistory
from ginga.web.pgw import PgHelp
# For future support of WebView widget
has_webkit = False
__all__ = ['WidgetError', 'WidgetBase', 'TextEntry', 'TextEntrySet',
'TextArea', 'Dial', 'Label', 'Button', 'ComboBox',
'SpinBox', 'Slider', 'ScrollBar', 'CheckBox', 'ToggleButton',
'RadioButton', 'Image', 'ProgressBar', 'StatusBar', 'TreeView',
'Canvas', 'ContainerBase', 'Box', 'HBox', 'VBox', 'Frame',
'Expander', 'TabWidget', 'StackWidget', 'MDIWidget', 'ScrollArea',
'Splitter', 'GridBox', 'ToolbarAction', 'Toolbar', 'MenuAction',
'Menu', 'Menubar', 'Page', 'TopLevel', 'Application', 'Dialog',
'name_mangle', 'make_widget', 'hadjust', 'build_info', 'wrap',
'has_webkit']
class WidgetError(Exception):
    """Exception raised for errors originating in this widget module."""
    pass
# widget id counter: monotonically increasing, assigns each widget a unique id
widget_id = 0
# widget dict: maps widget id -> widget instance, for event dispatch lookups
widget_dict = {}
# counter used when generating unique tab identifiers
tab_idx = 0
# reference to the created application (set when the Application is built)
_app = None
# default font used by widgets that do not specify one
default_font = PgHelp.font_info("Arial 8")
# BASE
class WidgetBase(Callback.Callbacks):
    """Base class for all web (pantograph) widgets.

    Each instance gets a unique integer id and registers itself in the
    module-level `widget_dict` so browser events can be routed back to
    it.  Subclasses render themselves to HTML via `render()` and push
    live updates to the browser through `get_app().do_operation(...)`.
    """

    def __init__(self):
        global widget_id, widget_dict

        super(WidgetBase, self).__init__()

        self.widget = None
        self.changed = False
        # external data can be attached here
        self.extdata = Bunch.Bunch()
        # generic attributes of widgets
        self.enabled = True
        self.width = 400
        self.height = 800
        self.bgcolor = 'gray'
        self.fgcolor = 'black'
        self.tooltip = ''
        # register this widget under a fresh unique id
        widget_id += 1
        self.id = widget_id
        widget_dict[widget_id] = self
        self.margins = (0, 0, 0, 0)   # T, R, B, L
        # set True once render() has produced HTML for the browser
        self._rendered = False

    def get_url(self):
        """Return the app URL that addresses this specific widget."""
        app = self.get_app()
        return "%s?id=%d" % (app.base_url, self.id)

    def get_app(self):
        # returns the single module-level Application reference
        return _app

    def get_widget(self):
        return self.widget

    def set_tooltip(self, text):
        self.tooltip = text

    def get_enabled(self):
        return self.enabled

    def set_enabled(self, tf):
        """Enable/disable the widget, pushing the change to the browser."""
        self.enabled = tf
        if self._rendered:
            app = self.get_app()
            app.do_operation('disable', id=self.id, value=not tf)

    def get_size(self):
        return self.width, self.height

    def get_pos(self):
        # TODO
        return 0, 0

    def delete(self):
        # for now...
        pass

    def resize(self, width, height):
        self.width, self.height = width, height

    def focus(self):
        pass

    def show(self):
        pass

    def hide(self):
        pass

    def is_visible(self):
        # approximated by whether the widget has been rendered
        return getattr(self, '_rendered', True)

    def get_font(self, font, size):
        """Return a PgHelp font-info record for `font` (name or spec string)."""
        if PgHelp.font_regex.match(font) is None:
            font = PgHelp.font_info('%s %d' % (font, size))
        else:
            font = PgHelp.font_info(font)
        return font

    def cfg_expand(self, horizontal='fixed', vertical='fixed'):
        # this is for compatibility with Qt widgets
        pass

    def set_padding(self, top, right, bottom, left):
        padding = "%dpx %dpx %dpx %dpx" % (top, right, bottom, left)
        self.add_css_styles([('padding', padding)])

    def set_margins(self, top, right, bottom, left):
        self.margins = (top, right, bottom, left)
        margin = "%dpx %dpx %dpx %dpx" % self.margins
        self.add_css_styles([('margin', margin)])

    def set_border_width(self, pix):
        self.add_css_styles([('border-width', '%dpx' % pix)])

    def get_css_classes(self, fmt=None):
        """Return the widget's CSS classes (list, or space-joined if fmt='str')."""
        classes = self.extdata.setdefault('css_classes', [])
        if fmt == 'str':
            classes = " ".join(classes)
        return classes

    def add_css_classes(self, new_classes):
        # add any new classes
        classes = self.get_css_classes()
        classes = classes + \
            list(filter(lambda t: t not in classes, new_classes))
        self.extdata.css_classes = classes

    def get_css_styles(self, fmt=None):
        """Return inline styles ((name, value) list, or CSS text if fmt='str')."""
        styles = self.extdata.setdefault('inline_styles', [])
        if fmt == 'str':
            styles = ["%s: %s" % (x, y) for x, y in styles]
            styles = "; ".join(styles)
        return styles

    def add_css_styles(self, new_styles):
        # replace any styles that are overridden and add new styles
        styles = self.get_css_styles()
        od = dict(styles)
        nd = dict(new_styles)
        styles = [(a, b) if a not in nd else (a, nd[a])
                  for a, b in styles] + \
            list(filter(lambda t: t[0] not in od, new_styles))
        self.extdata.inline_styles = styles

    def call_custom_method(self, future, method_name, **kwargs):
        """Invoke a JS-side custom method; `future` receives the reply."""
        if self._rendered:
            app = self.get_app()
            c_id = app.get_caller_id()
            app.callers[c_id] = future
            app.do_operation(method_name, id=self.id, caller_id=c_id, **kwargs)

    def render(self):
        # placeholder rendering; subclasses override with real HTML
        text = "'%s' NOT YET IMPLEMENTED" % (str(self.__class__))
        d = dict(id=self.id, text=text)
        self._rendered = True
        return '''<div id=%(id)s>%(text)s</div>''' % d
# BASIC WIDGETS
class TextEntry(WidgetBase):
    """A single-line text entry widget.

    The 'activated' callback fires on every keyup in the browser with
    the current field value; entered lines are kept in a LineHistory.
    """

    html_template = '''
    <input id=%(id)s type="text" maxlength=%(size)d size=%(size)d name="%(id)s"
        class="%(classes)s" style="%(styles)s" %(disabled)s %(readonly)s
        onkeyup="ginga_app.widget_handler('activate', '%(id)s', document.getElementById('%(id)s').value)"
        value="%(text)s">
    '''

    def __init__(self, text='', editable=True):
        super(TextEntry, self).__init__()

        self.widget = None
        self.text = text
        self.editable = editable
        self.font = default_font
        self.length = 20    # seems to be default HTML5 size

        self.history = LineHistory.LineHistory()

        self.enable_callback('activated')

    def _cb_redirect(self, event):
        # browser sent a new value; record it and notify listeners
        self.text = event.value
        self.history.append(self.get_text())
        self.make_callback('activated')

    def get_text(self):
        return self.text

    def set_text(self, text):
        """Set the entry text, pushing the change to the browser if rendered."""
        self.text = text

        if self._rendered:
            app = self.get_app()
            app.do_operation('update_value', id=self.id, value=text)

    def set_editable(self, tf):
        self.editable = tf

    def set_font(self, font, size=10):
        """Set the font; re-renders the widget HTML in the browser."""
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.font = font
        self.add_css_styles([('font-family', font.family),
                             ('font-size', font.point_size),
                             ('font-style', font.style),
                             ('font-weight', font.weight)])
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_ohtml', id=self.id, value=self.render())

    def set_length(self, numchars):
        # this is only supposed to set the visible length
        self.length = numchars
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_ohtml', id=self.id, value=self.render())

    def render(self):
        # TODO: render font
        d = dict(id=self.id, text=self.text, disabled='', size=self.length,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'),
                 readonly='')
        if not self.enabled:
            d['disabled'] = 'disabled'
        if not self.editable:
            d['readonly'] = 'readonly'
        self._rendered = True
        return self.html_template % d  # noqa
class TextEntrySet(WidgetBase):
    """A text entry field paired with a "Set" button.

    The 'activated' callback fires when the user clicks "Set"; pressing
    Enter in the field triggers the button via the onkeydown handler.
    """

    html_template = '''
    <span class="%(classes)s" style="%(styles)s">
    <input id=%(id)s type="text" size=%(size)d name="%(id)s" value="%(text)s"
        class="%(classes)s" style="%(styles)s" %(readonly)s maxlength=%(size)d %(disabled)s
        onkeydown="if(event.key == 'Enter') document.getElementById('%(id)s-button').click()"/>
    <input type="button" %(disabled)s id="%(id)s-button"
        class="%(classes)s" style="%(styles)s"
        onclick="ginga_app.widget_handler('activate', '%(id)s',
               document.getElementById('%(id)s').value)" value="Set"/>
    </span>
    '''

    def __init__(self, text='', editable=True):
        super(TextEntrySet, self).__init__()

        self.widget = None
        self.text = text
        self.font = default_font
        self.editable = editable
        # self.entry = None
        # self.btn = None
        self.length = 20    # seems to be default HTML5 size

        self.enable_callback('activated')

    def _cb_redirect(self, event):
        # browser sent a new value; store it and notify listeners
        self.text = event.value
        self.make_callback('activated')

    def get_text(self):
        return self.text

    def set_text(self, text):
        """Set the entry text, pushing the change to the browser if rendered."""
        self.text = text

        if self._rendered:
            app = self.get_app()
            app.do_operation('update_value', id=self.id, value=text)

    def set_font(self, font, size=10):
        """Set the font; re-renders the widget HTML in the browser."""
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.font = font
        self.add_css_styles([('font-family', font.family),
                             ('font-size', font.point_size),
                             ('font-style', font.style),
                             ('font-weight', font.weight)])
        if self._rendered:
            app = self.get_app()
            # bug fix: the payload must be sent as 'value=' (as TextEntry,
            # TextArea and Label do); it was previously sent as 'name=', so
            # the client-side 'update_ohtml' handler never saw the new HTML
            app.do_operation('update_ohtml', id=self.id, value=self.render())

    def set_editable(self, tf):
        self.editable = tf

    def set_length(self, numchars):
        # this is only supposed to set the visible length
        self.length = numchars

    def render(self):
        # TODO: render font, editable
        d = dict(id=self.id, text=self.text, disabled='', size=self.length,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'),
                 readonly='')
        if not self.editable:
            d['readonly'] = 'readonly'
        self._rendered = True
        return self.html_template % d  # noqa
class TextArea(WidgetBase):
    """A multi-line text area widget, optionally editable and word-wrapped."""

    html_template = '''
    <textarea id=%(id)s name="%(id)s" %(readonly)s wrap="%(wrap)s"
       class="%(classes)s" style="%(styles)s" %(disabled)s
       %(editable)s onkeyup="ginga_app.widget_handler('activate', '%(id)s',
       document.getElementById('%(id)s').value)">%(text)s</textarea>
    <script type="text/javascript">
        $(document).ready(function(){
            // see python method set_wrap in this widget
            ginga_app.add_widget_custom_method('%(id)s', 'update_wrap',
                function (elt, msg) {
                    (msg.value) ? document.getElementById('%(id)s').setAttribute('wrap', 'hard') :
                                  document.getElementById('%(id)s').setAttribute('wrap', 'off');
            });
        });
    </script>
    '''

    def __init__(self, wrap=False, editable=False):
        super(TextArea, self).__init__()

        self.widget = None
        self.editable = editable
        self.wrap = wrap
        self.text = ''
        self.font = default_font
        # is this properly a css style?
        self.add_css_styles([('width', '100%')])

    def _cb_redirect(self, event):
        # browser sent the new textarea contents
        self.text = event.value
        # self.make_callback('activated')

    def append_text(self, text, autoscroll=True):
        """Append `text`, optionally scrolling the browser view to the bottom."""
        # if text.endswith('\n'):
        #     text = text[:-1]
        self.text = self.text + text

        if self._rendered:
            app = self.get_app()
            app.do_operation('update_value', id=self.id, value=self.text)

            if not autoscroll:
                return
            if self._rendered:
                app.do_operation('scroll_bottom', id=self.id)

    def get_text(self):
        return self.text

    def clear(self):
        """Erase all text, updating the browser widget if rendered."""
        self.text = ""
        if self._rendered:
            app = self.get_app()
            self.set_text("")
            app.do_operation('update_html', id=self.id, value=self.text)

    def set_text(self, text):
        self.text = text

        if self._rendered:
            app = self.get_app()
            app.do_operation('update_value', id=self.id, value=self.text)

    def set_limit(self, numlines):
        # for compatibility with the other supported widget sets
        pass

    def set_editable(self, tf):
        self.editable = tf

    def set_font(self, font, size=10):
        """Set the font; re-renders the widget HTML in the browser."""
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.font = font
        self.add_css_styles([('font-family', font.family),
                             ('font-size', font.point_size),
                             ('font-style', font.style),
                             ('font-weight', font.weight)])
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_ohtml', id=self.id, value=self.render())

    def set_wrap(self, tf):
        """Toggle word wrap; handled client-side via the 'update_wrap' method."""
        self.wrap = tf
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_wrap', id=self.id, value=self.wrap)

    def render(self):
        # TODO: handle wrapping, render font
        d = dict(id=self.id, text=self.text, disabled='', editable='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'),
                 readonly='', wrap='off')
        if not self.enabled:
            d['disabled'] = 'disabled'
        if not self.editable:
            d['readonly'] = 'readonly'
        if self.wrap:
            d['wrap'] = 'hard'
        self._rendered = True
        return self.html_template % d  # noqa
class Label(WidgetBase):
    """A static text label with settable font, colors and alignment."""

    html_template = '''
    <div id=%(id)s class="%(classes)s" style="%(styles)s">%(text)s</div>
    '''

    def __init__(self, text='', halign='left', style='normal', menu=None):
        super(Label, self).__init__()

        self.text = text
        self.font = default_font
        self.halign = halign
        self.style = style
        self.fgcolor = None
        self.bgcolor = None
        self.menu = menu
        self.widget = None

        self.enable_callback('activated')

    def get_text(self):
        return self.text

    def set_text(self, text):
        """Set the label text, updating the browser widget if rendered."""
        self.text = text
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_label', id=self.id, value=text)

    def set_font(self, font, size=10):
        """Set the font; re-renders the widget HTML in the browser."""
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.font = font
        self.add_css_styles([('font-family', font.family),
                             ('font-size', font.point_size),
                             ('font-style', font.style),
                             ('font-weight', font.weight)])
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_ohtml', id=self.id, value=self.render())

    def set_color(self, fg=None, bg=None):
        """Set foreground/background colors; pushes updated inline styles."""
        if fg is not None:
            self.fgcolor = fg
            self.add_css_styles([('color', fg)])
        if bg is not None:
            self.bgcolor = bg
            self.add_css_styles([('background-color', bg)])
        if self._rendered:
            style = self.get_css_styles(fmt='str')
            app = self.get_app()
            app.do_operation('update_style', id=self.id, value=style)

    # ...FOR HALIGN...
    def set_halign(self, align=None):
        """Set horizontal text alignment; re-renders the widget HTML."""
        if align is not None:
            self.halign = align
            self.add_css_styles([('text-align', align)])
        if self._rendered:
            app = self.get_app()
            # Styles re-render after selecting a choice
            app.do_operation('update_ohtml', id=self.id, value=self.render())

    def render(self):
        # TODO: render alignment, style, menu, clickable
        d = dict(id=self.id, text=self.text,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class Button(WidgetBase):
    """A push button; the 'activated' callback fires when it is clicked."""

    html_template = '''
    <input id=%(id)s type="button"
        class="%(classes)s" style="%(styles)s" %(disabled)s
        onclick="ginga_app.widget_handler('activate', '%(id)s', 'clicked')"
        value="%(text)s">
    '''

    def __init__(self, text=''):
        super(Button, self).__init__()

        self.text = text
        self.widget = None

        self.enable_callback('activated')

    def set_text(self, text):
        """Set the button label, updating the browser widget if rendered."""
        self.text = text
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_value', id=self.id, value=self.text)

    def get_text(self):
        return self.text

    def _cb_redirect(self, event):
        # browser reported a click
        self.make_callback('activated')

    def render(self):
        d = dict(id=self.id, text=self.text, disabled='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if not self.enabled:
            d['disabled'] = 'disabled'
        self._rendered = True
        return self.html_template % d  # noqa
class ComboBox(WidgetBase):
    """A drop-down selection widget; mouse wheel also cycles the choices.

    The 'activated' callback fires with the selected index.
    """

    html_template = '''
    <select id=%(id)s %(disabled)s name="%(id)s" %(multiple)s
        class="%(classes)s" style="%(styles)s"
        onchange="ginga_app.widget_handler('activate', '%(id)s',
                  document.getElementById('%(id)s').value)">
    %(options)s
    </select>
    <script type="text/javascript">
        $(document).ready(function(){
            document.getElementById('%(id)s').addEventListener('wheel', function(e) {
                if (e.deltaY < 0) {
                    this.selectedIndex = Math.max(this.selectedIndex - 1, 0);
                }
                if (e.deltaY > 0) {
                    this.selectedIndex = Math.min(this.selectedIndex + 1, this.length - 1);
                }
                ginga_app.widget_handler('activate', '%(id)s',
                                         document.getElementById('%(id)s').value);
            });
        });
    </script>
    '''

    def __init__(self, editable=False, multi_choice=False):
        super(ComboBox, self).__init__()

        self.widget = None
        self.index = 0
        self.multi_choice = multi_choice
        self.choices = []

        self.enable_callback('activated')

    def _cb_redirect(self, event):
        # browser sent the newly selected index
        self.index = int(event.value)
        self.make_callback('activated', self.index)

    def insert_alpha(self, text):
        """Insert `text` keeping the choices in alphabetical order."""
        index = 0
        num_choices = len(self.choices)
        if len(text) <= 0:
            return
        while index <= num_choices:
            if index >= num_choices:
                # reached the end: append
                self.choices.append(text)
                if self._rendered:
                    app = self.get_app()
                    app.do_operation('update_html', id=self.id, value=self.render())
                return
            item_text = self.choices[index]
            if item_text > text:
                self.choices.insert(index, text)
                if self._rendered:
                    app = self.get_app()
                    app.do_operation('update_html', id=self.id, value=self.render())
                return
            index += 1

    def delete_alpha(self, text):
        """Remove the first occurrence of `text` from the choices, if present."""
        if self.choices.count(text) != 0:
            self.choices.remove(text)
            if self._rendered:
                app = self.get_app()
                app.do_operation('update_html', id=self.id, value=self.render())

    def get_alpha(self, idx):
        return self.choices[idx]

    def clear(self):
        """Remove all choices, refreshing the browser widget if rendered."""
        self.choices = []
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_html', id=self.id, value=self.render())

    def set_text(self, text):
        """Select the choice equal to `text` (raises ValueError if absent)."""
        index = self.choices.index(text)
        self.set_index(index)

    # to be deprecated someday
    show_text = set_text

    def get_text(self):
        idx = self.get_index()
        return self.choices[idx]

    def append_text(self, text):
        self.choices.append(text)

    def set_index(self, index):
        """Select the choice at `index`, updating the browser if rendered."""
        self.index = index
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_index', id=self.id, value=self.index)

    def get_index(self):
        return self.index

    def render(self):
        d = dict(id=self.id, disabled='', multiple='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if self.multi_choice:
            d['multiple'] = 'multiple'
        if not self.enabled:
            d['disabled'] = 'disabled'
        res = []  # noqa
        # emit one <option> per choice, marking the current one selected
        for idx, choice in enumerate(self.choices):
            if idx == self.index:
                selected = 'selected'
            else:
                selected = ''
            res.append('''  <option value="%d" %s>%s</option>''' % (
                idx, selected, choice))
        d['options'] = '\n'.join(res)

        self._rendered = True
        return self.html_template % d
class SpinBox(WidgetBase):
    """A numeric spinner (jQuery UI spinner) for int or float values.

    The 'value-changed' callback fires with the new value whenever the
    user spins or edits the field.
    """

    html_template = '''
    <input id=%(id)s value="%(value)s">
    <script type="text/javascript">
        $(document).ready(function(){
            $('#%(id)s').spinner({ step: %(step)s, disabled: %(disabled)s,
                                   max: %(max)s, min: %(min)s,
                                   numberFormat: "%(format)s", culture: "fr"
                                 });
            // Set value of spinner box
            ginga_app.add_widget_custom_method('%(id)s', 'set_spinval',
                function (elt, msg) {
                    $(elt).spinner( "value", msg.value );
            });
            // Set limits of spinner box
            ginga_app.add_widget_custom_method('%(id)s', 'set_limits',
                function (elt, msg) {
                    var current_val = $(elt).spinner( "value" );
                    // set max limit
                    if (current_val > msg.value[1]) {
                        $(elt).spinner( "value", msg.value[1] );
                    }
                    $(elt).spinner({ max: msg.value[1] });
                    // set min limit
                    if (current_val < msg.value[0]) {
                        $(elt).spinner( "value", msg.value[0] );
                    }
                    $(elt).spinner({ min: msg.value[0] });
                    // set increment value
                    $(elt).spinner( "option", "step", msg.value[2] );
            });
            // Sends value when spinner value changes to client side
            $('#%(id)s').on( "spin", function( event, ui ) {
                ginga_app.widget_handler('activate', '%(id)s', ui.value);
            });
            $('#%(id)s').on( "spinchange", function( event, ui ) {
                ginga_app.widget_handler('activate', '%(id)s', document.getElementById('%(id)s').value);
            });
        });
    </script>
    '''

    def __init__(self, dtype=int):
        super(SpinBox, self).__init__()

        # dtype controls how browser strings are converted (int or float)
        self.dtype = dtype
        self.widget = None
        self.value = dtype(0)
        self.decimals = 0
        self.minval = dtype(0)
        self.maxval = dtype(0)
        self.incr = dtype(0)

        self.enable_callback('value-changed')

    def _cb_redirect(self, event):
        # browser sent a new value; coerce to our dtype and notify
        self.value = self.dtype(event.value)
        self.make_callback('value-changed', self.value)

    def get_value(self):
        return self.dtype(self.value)

    def set_value(self, val):
        """Set the spinner value, updating the browser widget if rendered."""
        self.changed = True
        self.value = self.dtype(val)
        if self._rendered:
            app = self.get_app()
            app.do_operation('set_spinval', id=self.id, value=self.value)

    def set_decimals(self, num):
        self.decimals = num

    def set_limits(self, minval, maxval, incr_value=1):
        """Set min/max/step; pushed to the browser via 'set_limits'."""
        self.minval = self.dtype(minval)
        self.maxval = self.dtype(maxval)
        self.incr = self.dtype(incr_value)
        limits = [self.minval, self.maxval, self.incr]
        if self._rendered:
            app = self.get_app()
            app.do_operation('set_limits', id=self.id, value=limits)

    def render(self):
        d = dict(id=self.id, value=str(self.dtype(self.value)),
                 step=str(self.dtype(self.incr)),
                 max=str(self.dtype(self.maxval)),
                 format='',
                 min=str(self.dtype(self.minval)), disabled='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if not self.enabled:
            d['disabled'] = "true"
        else:
            d['disabled'] = "false"
        # choose a jQuery UI number format matching the dtype
        if self.dtype == int:
            d['format'] = ''
        elif self.dtype == float:
            d['format'] = 'n3'
        self._rendered = True
        return self.html_template % d  # noqa
class Slider(WidgetBase):
html_template = '''
<div id="%(id)s" tracking="%(tracking)s" class="%(classes)s" style="%(styles)s"></div>
<script type="text/javascript">
$(document).ready(function(){
$('#%(id)s').slider({ max: %(max)s, min: %(min)s, step: %(incr)s,
orientation: "%(orient)s", disabled: %(disabled)s,
value: %(value)s,
change: function (event, ui) {
ginga_app.widget_handler('activate', '%(id)s', ui.value);
}
});
// see python method set_value in this widget
ginga_app.add_widget_custom_method('%(id)s', 'set_slideval',
function (elt, msg) {
$(elt).slider( "option", "value", msg.value );
});
// see python method set_limits in this widget
ginga_app.add_widget_custom_method('%(id)s', 'set_limits',
function (elt, msg) {
$(elt).slider( "option", "min", msg.value[0] );
$(elt).slider( "option", "max", msg.value[1] );
$(elt).slider( "option", "step", msg.value[2] );
});
ginga_app.add_widget_custom_method('%(id)s', 'set_slidemax',
function (elt, msg) {
$(elt).slider( "option", "max", msg.value );
});
// Set tracking (NOT WORKING YET)
ginga_app.add_widget_custom_method('%(id)s', 'toggle_track',
function (elt, msg) {
document.getElementById('%(id)s').setAttribute('tracking', msg.value);
});
// Deal with tracking
if (document.getElementById('%(id)s').getAttribute('tracking') == 'true') {
$('#%(id)s').on( "slide", function( event, ui ) {
ginga_app.widget_handler('activate', '%(id)s', ui.value);
});
}
else {
$('#%(id)s').on( "slide", function( event, ui ) {
console.log("Do nothing");
});
}
});
</script>
'''
def __init__(self, orientation='horizontal', dtype=int, track=False):
super(Slider, self).__init__()
self.orientation = orientation
self.track = track
self.widget = None
self.dtype = dtype
self.value = dtype(0)
self.minval = dtype(0)
self.maxval = dtype(0)
self.incr = dtype(0)
if orientation == 'vertical':
self.add_css_styles([('-webkit-appearance', 'slider-vertical')])
self.enable_callback('value-changed')
def _cb_redirect(self, event):
self.value = self.dtype(event.value)
self.make_callback('value-changed', self.value)
def get_value(self):
return self.value
def set_value(self, val):
self.changed = True
self.value = val
if self._rendered:
app = self.get_app()
app.do_operation('set_slideval', id=self.id, value=self.value)
def set_tracking(self, tf):
self.track = tf
# TODO: Toggle tracking on/off dynamically
if self._rendered:
app = self.get_app()
app.do_operation('toggle_track', id=self.id, value=self.track)
def set_limits(self, minval, maxval, incr_value=1):
    """Set the slider's minimum, maximum and step increment.

    min/max are coerced to the slider's dtype; the increment is stored
    as given.  If already rendered, the new limits are pushed to the
    client as a [min, max, step] triple.
    """
    self.minval = self.dtype(minval)
    self.maxval = self.dtype(maxval)
    self.incr = incr_value
    if not self._rendered:
        return
    app = self.get_app()
    app.do_operation('set_limits', id=self.id,
                     value=[self.minval, self.maxval, self.incr])
def render(self):
    """Return the HTML/JS for this slider and mark it rendered."""
    conv = self.dtype
    d = dict(id=self.id,
             value=str(conv(self.value)),
             incr=str(conv(self.incr)),
             max=str(conv(self.maxval)),
             min=str(conv(self.minval)),
             # the template expects the strings 'true'/'false'
             disabled='false' if self.enabled else 'true',
             # non-empty only for vertical sliders (firefox hint)
             orient='vertical' if self.orientation == 'vertical' else '',
             tracking='true' if self.track else 'false',
             classes=self.get_css_classes(fmt='str'),
             styles=self.get_css_styles(fmt='str'))
    self._rendered = True
    return self.html_template % d  # noqa
class Dial(WidgetBase):
    """A rotary dial (knob) widget, implemented with jqxKnob.

    Parameters
    ----------
    dtype : type
        Type used for the dial's value and limits.
    wrap : bool
        Whether the dial may wrap around its limits.
        NOTE(review): stored but not yet honored by the rendered widget.
    track : bool
        Whether callbacks are made continuously as the user moves the
        dial (vs. only afterwards).
        NOTE(review): stored but not yet honored by the rendered widget.
    """
    html_template = '''
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').jqxKnob({ value: %(value)d,
min: %(min_val)d, max: %(max_val)d,
step: %(inc_val)d,
width: %(width)d, height: %(height)d,
snapToStep: true,
rotation: 'clockwise',
style: { stroke: '#dfe3e9', strokeWidth: 3, fill: { color: '#fefefe', gradientType: "linear", gradientStops: [[0, 1], [50, 0.9], [100, 1]] } }
});
$('#%(id)s').on('valueChanged', function (event) {
ginga_app.widget_handler('activate', '%(id)s',
parseInt(event.currentValue));
});
});
</script>
'''

    def __init__(self, dtype=float, wrap=False, track=False):
        super(Dial, self).__init__()
        self.widget = None
        self.value = 0
        # this controls whether the callbacks are made *as the user
        # moves the dial* or afterwards
        self.tracking = track
        # this controls whether we can wrap around or not
        self.wrap = wrap
        self.dtype = dtype
        self.min_val = dtype(0)
        self.max_val = dtype(100)
        self.inc_val = dtype(1)
        self.enable_callback('value-changed')

    def _cb_redirect(self, val):
        # receives the new value from the JS side
        self.value = val
        self.make_callback('value-changed', self.value)

    def get_value(self):
        """Return the dial's current value."""
        return self.value

    def set_value(self, val):
        """Set the dial's value.

        Raises
        ------
        ValueError
            If `val` is outside the limits set by `set_limits`.
        """
        if val < self.min_val or val > self.max_val:
            raise ValueError("Value '{}' is out of range".format(val))
        self.value = val

    def set_tracking(self, tf):
        """Turn tracking on/off.

        BUG FIX: previously assigned `self.track`, but the attribute
        created by the constructor is `self.tracking`, so the setting
        never took effect.
        """
        self.tracking = tf

    def set_limits(self, minval, maxval, incr_value=1):
        """Set minimum, maximum and step increment for the dial."""
        self.min_val = minval
        self.max_val = maxval
        self.inc_val = incr_value

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        d = dict(id=self.id, disabled='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'),
                 min_val=self.min_val, max_val=self.max_val,
                 inc_val=self.inc_val, value=self.value,
                 width=100, height=100)
        if not self.enabled:
            d['disabled'] = 'disabled'
        self._rendered = True
        return self.html_template % d  # noqa
class ScrollBar(WidgetBase):
    """A scroll bar widget (jqxScrollBar).

    The public value is a float in [0.0, 1.0]; internally it is stored
    as an int in [0, 100] because that is the range the client-side
    jqxScrollBar is configured with.
    """
    html_template = '''
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').jqxScrollBar({ value: %(value)d,
min: 0, max: 100, step: 1,
width: %(width)s, height: %(height)s,
vertical: %(vert)s });
$('#%(id)s').on('valueChanged', function (event) {
ginga_app.widget_handler('activate', '%(id)s',
parseInt(event.currentValue));
});
// see python method set_value() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'set_scrollval',
function (elt, msg) {
$(elt).jqxScrollBar({ value: msg.value });
});
});
</script>
'''

    def __init__(self, orientation='horizontal'):
        super(ScrollBar, self).__init__()
        self.orientation = orientation
        self.widget = None
        # internal value in [0, 100]
        self.value = 0
        # bar thickness in pixels
        self.thickness = 15
        self.enable_callback('activated')

    def set_value(self, value):
        """Set the scroll position; `value` is a fraction in [0.0, 1.0]."""
        self.value = int(round(value * 100.0))
        if self._rendered:
            app = self.get_app()
            app.do_operation('set_scrollval', id=self.id, value=self.value)

    def get_value(self):
        """Return the scroll position as a fraction in [0.0, 1.0].

        BUG FIX: previously returned `self.widget.value() / 100.0`, but
        `self.widget` is always None in this backend, so this raised
        AttributeError.  Use the tracked internal value instead.
        """
        return self.value / 100.0

    def _cb_redirect(self, event):
        # event.value arrives in the client-side 0-100 scale
        self.value = event.value
        self.make_callback('activated', self.value / 100.0)

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        d = dict(id=self.id, value=self.value, disabled='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if self.orientation == 'vertical':
            d['vert'] = 'true'
            d['width'], d['height'] = self.thickness, "'100%'"
        else:
            d['vert'] = 'false'
            d['width'], d['height'] = "'100%'", self.thickness
        self._rendered = True
        return self.html_template % d
class CheckBox(WidgetBase):
    """A two-state check box with a text label."""
    html_template = '''
<span class="%(classes)s" style="%(styles)s">
<input id=%(id)s type="checkbox" %(disabled)s %(checked)s
class="%(classes)s"
onchange="ginga_app.widget_handler('activate', '%(id)s',
document.getElementById('%(id)s').checked)"
value="%(text)s"><label for="%(id)s">%(text)s</label>
</span>
<script type="text/javascript">
$(document).ready(function () {
// see python method set_state in this widget
ginga_app.add_widget_custom_method('%(id)s', 'update_state',
function (elt, msg) {
msg.value ? document.getElementById('%(id)s').checked = true :
document.getElementById('%(id)s').checked = false;
});
});
</script>
'''

    def __init__(self, text=''):
        super(CheckBox, self).__init__()
        self.widget = None
        self.value = False
        self.text = text
        self.enable_callback('activated')

    def _cb_redirect(self, event):
        # event.value carries the new checked state from the browser
        self.value = event.value
        self.make_callback('activated', self.value)

    def set_state(self, tf):
        """Programmatically check (True) or uncheck (False) the box."""
        self.value = tf
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_state', id=self.id, value=self.value)

    def get_state(self):
        """Return the current checked state."""
        return self.value

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        d = dict(id=self.id, text=self.text,
                 disabled='' if self.enabled else 'disabled',
                 checked='checked' if self.value else '',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d  # noqa
class ToggleButton(WidgetBase):
    """A two-state on/off switch, rendered as a sliding toggle.

    Uses the 'switch' and 'slider round' CSS classes added in the
    constructor to style the underlying checkbox.
    """
    html_template = '''
<div>
<label class="%(class1)s" style="%(styles)s">
<input id=%(id)s type="checkbox" %(disabled)s value="%(text)s" %(checked)s
onchange="ginga_app.widget_handler('activate', '%(id)s',
document.getElementById('%(id)s').checked)">
<span class="%(class2)s"></span>
</label>
<label for="%(id)s" style="%(styles)s">%(text)s</label>
</div>
<script type="text/javascript">
$(document).ready(function () {
// see python method set_state in this widget
ginga_app.add_widget_custom_method('%(id)s', 'update_state',
function (elt, msg) {
msg.value ? document.getElementById('%(id)s').checked = true :
document.getElementById('%(id)s').checked = false;
});
});
</script>
'''

    def __init__(self, text=''):
        super(ToggleButton, self).__init__()
        self.widget = None
        self.value = False
        self.text = text
        self.enable_callback('activated')
        self.add_css_classes(['switch', 'slider round'])
        self.add_css_styles([('float', 'left')])

    def _cb_redirect(self, event):
        # event.value carries the new toggle state from the browser
        self.value = event.value
        self.make_callback('activated', self.value)

    def set_state(self, tf):
        """Programmatically switch the toggle on (True) or off (False)."""
        self.value = tf
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_state', id=self.id, value=self.value)

    def get_state(self):
        """Return the current toggle state."""
        return self.value

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        # first class styles the outer label, the rest the slider knob
        css_classes = self.get_css_classes(fmt='str').split(' ', 1)
        d = dict(id=self.id, text=self.text,
                 disabled='' if self.enabled else 'disabled',
                 checked='checked' if self.value else '',
                 class1=[k for k in css_classes if 'switch' in k][0],
                 class2=[k for k in css_classes if 'slider round' in k][0],
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d  # noqa
class RadioButton(WidgetBase):
    """A radio button.

    Buttons constructed with `group=<other RadioButton>` share that
    button's HTML `name` attribute, so the browser enforces mutual
    exclusivity within the group.
    """
    html_template = '''
<span class="%(classes)s" style="%(styles)s">
<input id=%(id)s name="%(group)s" type="radio"
class="%(classes)s"
%(disabled)s onchange="ginga_app.widget_handler('activate', '%(id)s',
document.getElementById('%(id)s').value)" %(checked)s
value="true">%(text)s
</span>
'''
    # class-level counter used to generate unique group names
    group_cnt = 0

    def __init__(self, text='', group=None):
        super(RadioButton, self).__init__()
        self.widget = None
        self.text = text
        self.value = False
        self.group_name = None
        if group is not None:
            # join the existing button's group
            self.group = group.group
            self.group_name = group.group_name
        else:
            # start a new group with a unique name
            self.group_name = "radio%d" % (RadioButton.group_cnt)
            RadioButton.group_cnt += 1
            self.group = [self]
        self.enable_callback('activated')

    def _cb_redirect(self, event):
        self.value = event.value
        self.make_callback('activated', event.value)

    def set_state(self, tf):
        """Programmatically set the selected state."""
        if self.value != tf:
            # toggled only fires when the value is toggled
            self.changed = True
            self.value = tf

    def get_state(self):
        """Return the current selected state."""
        return self.value

    def render(self):
        """Return the HTML for this widget and mark it rendered."""
        d = dict(id=self.id,
                 disabled='' if self.enabled else 'disabled',
                 checked='checked' if self.value else '',
                 group=self.group_name, text=self.text,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d  # noqa
class Image(WidgetBase):
    """Displays an image, sourced from a native image buffer or file.

    The image is converted by PgHelp into a data-URI string suitable
    for the <img> element's `src` attribute.
    """
    html_template = '''
<img id=%(id)s src="%(src)s" alt="%(tooltip)s"
class="%(classes)s" style="%(styles)s">
'''

    def __init__(self, native_image=None, style='normal', menu=None):
        super(Image, self).__init__()
        self.image = None
        self.img_src = ''
        self.menu = menu
        self.widget = None
        self.enable_callback('activated')
        if native_image is not None:
            self._set_image(native_image)

    def _cb_redirect(self, event):
        self.value = event.value
        self.make_callback('activated', event.value)

    def _set_image(self, native_image):
        # convert the native image into a data URI for the <img> src
        self.image = native_image
        self.img_src = PgHelp.get_image_src_from_buffer(self.image)
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_imgsrc', id=self.id,
                             value=self.img_src)

    def load_file(self, img_path, format=None):
        """Load and display an image from `img_path`.

        `format` defaults to 'png' when not specified.
        """
        fmt = 'png' if format is None else format
        img = PgHelp.get_native_image(img_path, format=fmt)
        self._set_image(img)

    def render(self):
        """Return the HTML for this widget and mark it rendered."""
        # TODO: callback for click
        d = dict(id=self.id, src=self.img_src, tooltip=self.tooltip,
                 height=self.height, width=self.width,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class ProgressBar(WidgetBase):
    """A progress bar (jqxProgressBar).

    The public value is a fraction in [0.0, 1.0]; the client-side
    widget uses integers in [0, 100].
    """
    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').jqxProgressBar({ value: %(value)d, disabled: %(disabled)s,
showText: true,
width: %(width)s, height: %(height)s,
orientation: '%(orient)s' });
// see python method set_index() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'set_progress',
function (elt, msg) {
$(elt).jqxProgressBar('value', msg.value);
});
});
</script>
"""

    def __init__(self, orientation='horizontal'):
        super(ProgressBar, self).__init__()
        self.value = 0.0
        self.orientation = orientation
        self.widget = None
        # bar thickness in pixels
        self.thickness = 15

    def set_value(self, pct):
        """Set the progress to `pct`, a fraction in [0.0, 1.0]."""
        self.value = pct
        if self._rendered:
            # jqxProgressBar needs integer values in the range 0-100
            app = self.get_app()
            app.do_operation('set_progress', id=self.id,
                             value=int(self.value * 100.0))

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        d = dict(id=self.id, value=int(self.value * 100.0),
                 disabled='false', orient=self.orientation,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if self.orientation == 'vertical':
            d['width'], d['height'] = self.thickness, "'100%'"
        else:
            d['width'], d['height'] = "'100%'", self.thickness
        self._rendered = True
        return self.html_template % d
class StatusBar(Label):
    """A status bar: a Label with message set/clear convenience methods."""

    def __init__(self):
        super(StatusBar, self).__init__()

    def set_message(self, msg_str, duration=10.0):
        """Display `msg_str` in the status bar.

        `duration` is currently unused.
        """
        # TODO: remove message in about `duration` seconds
        self.set_text(msg_str)

    def clear_message(self):
        """Clear the status bar text."""
        self.set_text('')
class TreeView(WidgetBase):
    """A columnar tree widget implemented with jqxTreeGrid.

    Displays a nested dict-of-dicts (see `set_tree`) as an expandable
    tree/table.  Rows are assigned monotonically increasing integer
    `rowid` values; a "shadow" tree mirrors the caller's dict so that
    selections reported from the browser (as rowids) can be mapped back
    to the original items.
    """
    html_template = """
<div id="%(id)s">
</div>
<script type="text/javascript">
var source = %(source)s;
var columns = %(columns)s;
var dataAdapter = new $.jqx.dataAdapter(source);
$(document).ready(function () {
$("#%(id)s").jqxTreeGrid({
altRows: %(use_alt_row_color)s,
sortable: %(sortable)s,
source: dataAdapter,
width: %(width)s,
columns: columns,
columnsResize: true,
selectionMode: "%(selectionMode)s"
});
// The rowSelect event tells us that a row has been selected.
$("#%(id)s").on("rowSelect", function (event) {
// Call the getSelection method to get the list of selected
// rows so we can send the list back to the Python code.
var rowsSelected = $("#%(id)s").jqxTreeGrid("getSelection");
var payload = [];
// Send only the list of rowid values back to the Python code
for (var i = 0; i < rowsSelected.length; i++) {
payload[i] = rowsSelected[i]["rowid"];
}
ginga_app.widget_handler("row-select", "%(id)s", payload);
});
// The rowDoubleClick event tells us that a cell has been double-clicked on.
$("#%(id)s").on("rowDoubleClick", function (event) {
var payload = {rowid: event.args.row["rowid"], dataField: event.args.dataField}
ginga_app.widget_handler("double-click", "%(id)s", payload);
});
// see python method clear() in this widget
ginga_app.add_widget_custom_method("%(id)s","clear",
function (elt, msg) {
$(elt).jqxTreeGrid("clear");
});
// see python method clear_selection() in this widget
ginga_app.add_widget_custom_method("%(id)s","clear_selection",
function (elt, msg) {
$(elt).jqxTreeGrid("clearSelection");
});
// see python method scroll_to_path() in this widget
ginga_app.add_widget_custom_method("%(id)s","scroll_to_path",
function (elt, msg) {
$(elt).jqxTreeGrid("ensureRowVisible", msg.index);
});
// see python method select_path() in this widget
ginga_app.add_widget_custom_method("%(id)s","select_row",
function (elt, msg) {
var method;
if (msg.state) {
method = "selectRow";
} else {
method = "unselectRow";
}
$(elt).jqxTreeGrid(method, msg.index);
});
// see python method sort_on_column() in this widget
ginga_app.add_widget_custom_method("%(id)s","sort_on_column",
function (elt, msg) {
$(elt).jqxTreeGrid("sortBy", msg.dataField, msg.sortOrder);
});
// see python method set_column_width() in this widget
ginga_app.add_widget_custom_method("%(id)s","set_column_property",
function (elt, msg) {
$(elt).jqxTreeGrid("setColumnProperty", msg.dataField, msg.property, msg.width);
});
});
</script>
"""

    def __init__(self, auto_expand=False, sortable=False, selection='single',
                 use_alt_row_color=False, dragable=False):
        """
        Parameters
        ----------
        auto_expand : bool
            Whether interior nodes start expanded.
        sortable : bool
            Whether keys are shown in sorted order and columns sortable.
        selection : str
            'single' or 'multiple' row selection.
        use_alt_row_color : bool
            Alternate row background shading.
        dragable : bool
            Accepted but not implemented in this backend.
        """
        super(TreeView, self).__init__()
        self.auto_expand = auto_expand
        self.sortable = sortable
        # map our selection keyword to the jqWidgets equivalent
        self.jQWidgetsSelectionModes = dict(single='singleRow', multiple='multipleRows')
        self.selection = self.jQWidgetsSelectionModes[selection]
        self.use_alt_row_color = use_alt_row_color
        # TODO: "dragable" actions not yet implemented
        self.dragable = dragable
        # depth of the hierarchy; leaves live at this level
        self.levels = 1
        # data key whose value labels non-leaf rows
        self.leaf_key = None
        self.leaf_idx = 0
        # list of (title, dataField) tuples
        self.columns = []
        self.columnWidths = []
        self.datakeys = []
        # shadow index
        self.shadow = {}
        self.widget = None
        # self.localData will be populated in the manner required by
        # jqxTreeGrid.
        self.localData = []
        # last assigned rowid; -1 means no rows yet
        self.rowid = -1
        # flat list of row dicts, indexed by rowid
        self.rows = []
        # We need to keep track of the row(s) that the user has
        # selected.
        self.selectedRows = []
        for cbname in ('selected', 'activated', 'drag-start'):
            self.enable_callback(cbname)

    def setup_table(self, columns, levels, leaf_key):
        """Configure columns, hierarchy depth and leaf label key.

        `columns` is a list of (title, dataField) tuples; `levels` is
        the depth at which leaf nodes live; `leaf_key` labels interior
        rows.  Clears any existing contents.
        """
        self.clear()
        self.columns = columns
        self.levels = levels
        self.leaf_key = leaf_key
        for i in range(len(columns)):
            self.columnWidths.append(None)

    def set_tree(self, tree_dict):
        """Replace the entire contents with `tree_dict` (nested dicts)."""
        self.rowid = -1
        self.clear()
        self.localData = []
        self.add_tree(tree_dict)

    def add_tree(self, tree_dict):
        """Merge `tree_dict` into the existing tree contents."""
        if self.sortable:
            keys = sorted(tree_dict)
        else:
            keys = tree_dict.keys()
        for key in keys:
            self._add_subtree(1, self.shadow, None, key, tree_dict[key])

    def _add_subtree(self, level, shadow, parent_item, key, node):
        """Recursively add `node` under `key` at depth `level`.

        Keeps the shadow tree, the flat `rows` list and jqxTreeGrid's
        `localData` structure in sync.
        """
        def _addTopLevelItem(item):
            self.localData.append(item)

        def _addChild(parent_item, item):
            parent_item['children'].append(item)

        if level >= self.levels:
            # leaf node
            try:
                bnch = shadow[key]
                item = bnch.item
                # TODO: update leaf item
            except KeyError:
                # new item
                item = node
                self.rowid += 1
                item['rowid'] = self.rowid
                if level == 1:
                    item['parentRowNum'] = None
                    _addTopLevelItem(item)
                else:
                    item['parentRowNum'] = parent_item['rowid']
                    _addChild(parent_item, item)
                shadow[key] = Bunch.Bunch(node=node, item=item, terminal=True)
                self.rows.append(item)
        else:
            try:
                # node already exists
                bnch = shadow[key]
                item = bnch.item
                d = bnch.node
            except KeyError:
                # new node
                self.rowid += 1
                item = {self.leaf_key: str(key), 'expanded': self.auto_expand,
                        'rowid': self.rowid, 'children': []}
                if level == 1:
                    item['parentRowNum'] = None
                    _addTopLevelItem(item)
                else:
                    item['parentRowNum'] = parent_item['rowid']
                    _addChild(parent_item, item)
                d = {}
                shadow[key] = Bunch.Bunch(node=d, item=item, terminal=False)
                self.rows.append(item)
            # recurse for non-leaf interior node
            if self.sortable:
                keys = sorted(node)
            else:
                keys = node.keys()
            for key in keys:
                self._add_subtree(level + 1, d, item, key, node[key])

    def _selection_cb(self):
        # notify subscribers of the current selection
        res_dict = self.get_selected()
        self.make_callback('selected', res_dict)

    def _cb_redirect(self, event):
        """Dispatch an event arriving from the browser side."""
        res_dict = {}
        # We handle the following two event types:
        # 1. row-select
        # 2. double-click
        if event.type == 'row-select':
            self.selectedRows = event.value
            res_dict = self.get_selected()
            self.make_callback('selected', res_dict)
        elif event.type == 'double-click':
            self._get_item(res_dict, event.value['rowid'])
            self.make_callback('activated', res_dict)

    def _get_path(self, rowNum):
        """Return the list of keys from the root down to row `rowNum`."""
        if rowNum is None:
            return []
        row = self.rows[rowNum]
        try:
            childCount = len(row['children'])
        except KeyError:
            childCount = 0
        if childCount == 0:
            # leaf row: labeled by leaf_key
            path_rest = self._get_path(row['parentRowNum'])
            myname = row[self.leaf_key]
            path_rest.append(myname)
            return path_rest
        # interior row: labeled by the first column's data field
        colTitle0, fieldName0 = self.columns[0]
        myname = row[fieldName0]
        parentRowNum = row['parentRowNum']
        path_rest = self._get_path(parentRowNum)
        path_rest.append(myname)
        return path_rest

    def _get_item(self, res_dict, rowNum):
        """Copy the item at `rowNum` into `res_dict`, nested by path."""
        path = self._get_path(rowNum)
        d, s = res_dict, self.shadow
        for name in path[:-1]:
            d = d.setdefault(name, {})
            s = s[name].node
        dst_key = path[-1]
        try:
            d[dst_key] = s[dst_key].node
        except KeyError:
            d[dst_key] = None

    def get_selected(self):
        """Return the current selection as a nested dict of leaf items."""
        res_dict = {}
        for rowNum in self.selectedRows:
            try:
                # skip interior (non-leaf) rows
                children = self.rows[rowNum]['children']
                if len(children) > 0:
                    continue
            except KeyError:
                pass
            self._get_item(res_dict, rowNum)
        return res_dict

    def clear(self):
        """Remove all rows, both server- and client-side."""
        self.rowid = -1
        self.rows = []
        self.localData = []
        self.shadow = {}
        self.selectedRows = []
        if self._rendered:
            app = self.get_app()
            app.do_operation('clear', id=self.id)

    def clear_selection(self):
        """Deselect all rows, both server- and client-side."""
        self.selectedRows = []
        if self._rendered:
            app = self.get_app()
            app.do_operation('clear_selection', id=self.id)

    def _path_to_item(self, path):
        """Return the row dict at `path`; raises KeyError if absent."""
        s = self.shadow
        for name in path[:-1]:
            s = s[name].node
        item = s[path[-1]].item
        return item

    def select_path(self, path, state=True):
        """Select (or deselect, with state=False) the row at `path`."""
        item = self._path_to_item(path)
        if self.selectedRows.count(item) < 1:
            self.selectedRows.append(item)
        if self._rendered:
            app = self.get_app()
            app.do_operation('select_row', id=self.id, index=item['rowid'], state=state)

    def highlight_path(self, path, onoff, font_color='green'):
        """Not implemented in this backend; accepted for API parity."""
        item = self._path_to_item(path)  # noqa
        # TODO - Is there be a way to do this with CSS?

    def scroll_to_path(self, path):
        """Scroll the view so the row at `path` is visible."""
        item = self._path_to_item(path)
        if self._rendered:
            app = self.get_app()
            app.do_operation('scroll_to_path', id=self.id, index=item['rowid'])

    def scroll_to_end(self):
        """Not implemented in this backend."""
        # TODO
        pass

    def sort_on_column(self, i):
        """Sort (ascending) on column index `i`."""
        colTitle, fieldName = self.columns[i]
        if self._rendered:
            app = self.get_app()
            app.do_operation('sort_on_column', id=self.id, dataField=fieldName, sortOrder='asc')

    def set_column_width(self, i, width):
        """Set column `i` to `width` pixels."""
        self.columnWidths[i] = width
        colTitle, fieldName = self.columns[i]
        if self._rendered:
            app = self.get_app()
            app.do_operation('set_column_property', id=self.id, dataField=fieldName, property='width', width=width)

    def set_column_widths(self, lwidths):
        """Set multiple column widths; None entries are skipped."""
        for i, width in enumerate(lwidths):
            if width is not None:
                self.set_column_width(i, width)

    def set_optimal_column_widths(self):
        """No-op: jqxTreeGrid offers no auto-fit API."""
        # TODO - looks like jqxTreeGrid API doesn't have a way to
        # automatically re-size the column width to fit the contents
        for i in range(len(self.columns)):
            pass

    def get_column_widths(self):
        """Return a copy of the per-column width list."""
        return list(self.columnWidths)

    def columns_to_js(self):
        """Return the jqxTreeGrid `columns` option as a JSON string."""
        col_arr = []
        for i, colTuple in enumerate(self.columns):
            colTitle, fieldName = colTuple
            col_arr.append(dict(text=colTitle, dataField=fieldName))
            if self.columnWidths[i] is not None:
                col_arr[i]['width'] = self.columnWidths[i]
        columns_js = json.dumps(col_arr)
        return columns_js

    def source_obj_js(self):
        """Return the jqx dataAdapter `source` object as a JSON string."""
        s = dict(dataType='json',
                 dataFields=[{'name': 'rowid', 'type': 'number'},
                             {'name': 'children', 'type': 'array'},
                             {'name': 'expanded', 'type': 'bool'}],
                 localData=self.localData,
                 hierarchy={'root': 'children'},
                 id='rowid',
                 sortColumn='rowid')
        for colTitle, fieldName in self.columns:
            s['dataFields'].append(dict(name=fieldName, type='string'))
        source_js = json.dumps(s)
        return source_js

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        # NOTE(review): this first call's result is unused; looks redundant
        self.columns_to_js()
        d = dict(id=self.id,
                 columns=self.columns_to_js(),
                 source=self.source_obj_js(),
                 use_alt_row_color=json.dumps(self.use_alt_row_color),
                 sortable=json.dumps(self.sortable),
                 width=self.width,
                 selectionMode=self.selection)
        self._rendered = True
        return self.html_template % d
class Canvas(WidgetBase):
    """An HTML5 canvas widget; drawing commands are pushed to the
    browser via 'draw_canvas' operations.
    """
    canvas_template = '''
<canvas id="%(id)s" tabindex="%(tab_idx)d"
class="%(classes)s" style="%(styles)s"
width="%(width)s" height="%(height)s"
minWidth=1 minHeight=1>
Your browser does not appear to support HTML5 canvas.</canvas>
<script type="text/javascript">
ginga_initialize_canvas(document.getElementById("%(id)s"), "%(id)s",
ginga_app);
</script>
'''  # noqa

    def __init__(self, width=600, height=600):
        super(Canvas, self).__init__()
        self.widget = None
        self.width = width
        self.height = height
        self.name = ''

    def _cb_redirect(self, event):
        # canvas events are dispatched elsewhere; nothing to do here
        pass

    def _draw(self, shape_type, **kwargs):
        """Send a draw command of `shape_type` to the browser."""
        shape = dict(kwargs, type=shape_type)
        # TODO: save shapes to be sent if canvas is not rendered?
        if self._rendered:
            app = self.get_app()
            app.do_operation("draw_canvas", id=self.id, shape=shape)

    def clear_rect(self, x, y, width, height):
        """Clear the rectangular region (x, y, width, height)."""
        self._draw("clear", x=x, y=y, width=width, height=height)

    def draw_image(self, img_buf, x, y, width=None, height=None):
        """Draw an image buffer at (x, y), optionally scaled."""
        img_src = PgHelp.get_image_src_from_buffer(img_buf)
        self._draw("image", x=x, y=y, src=img_src,
                   width=width, height=height)

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        global tab_idx
        # canvas needs a tabindex to be able to focus it and register
        # for keyboard events
        tab_idx += 1
        d = dict(id=self.id, width=self.width, height=self.height,
                 tab_idx=tab_idx,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return Canvas.canvas_template % d
# CONTAINERS
class ContainerBase(WidgetBase):
    """Base class for widgets that hold child widgets."""

    def __init__(self):
        super(ContainerBase, self).__init__()
        # TODO: probably need to maintain children as list of widget ids
        self.children = []
        for name in ('widget-added', 'widget-removed'):
            self.enable_callback(name)

    def add_ref(self, ref):
        """Record `ref` as a child of this container."""
        # TODO: should this be a weakref?
        self.children.append(ref)

    def remove(self, child, delete=False):
        """Remove `child` from this container.

        Raises
        ------
        KeyError
            If `child` is not a child of this container.
        """
        if child not in self.children:
            raise KeyError("Widget is not a child of this container")
        self.children.remove(child)
        if self._rendered:
            app = self.get_app()
            app.do_operation('remove_child', id=child.id)
        self.make_callback('widget-removed', child)

    def remove_all(self):
        """Remove every child (iterates over a copy of the list)."""
        for child in list(self.children):
            self.remove(child)

    def get_children(self):
        """Return the list of child widgets."""
        return self.children

    def num_children(self):
        """Return the number of children."""
        return len(self.children)

    def render(self):
        self._rendered = True
        return self.render_children()

    def render_children(self, ifx=' ', spacing=0, spacing_side='right'):
        """Render all children and join the results with `ifx`.

        Applies a trailing margin of `spacing` pixels on `spacing_side`
        to every child but the last.
        """
        # TODO: find a way to avoid overriding any padding specifically
        # set in the child
        if spacing_side == 'right':
            margins = (0, spacing, 0, 0)
        else:
            margins = (0, 0, spacing, 0)
        children = self.get_children()
        rendered = []
        for child in children:
            # no trailing margin on the last child
            if child != children[-1]:
                child.set_margins(*margins)
            rendered.append(child.render())
        return ifx.join(rendered)
class Box(ContainerBase):
    """A container that packs children horizontally or vertically
    using CSS flexbox (via the 'hbox'/'vbox' CSS classes).
    """
    # BUG FIX: the opening tag was '<script> type="text/javascript">'
    # (stray '>'), which made the attribute part of the script body and
    # broke the insert_child handler registration.
    html_template = '''
<div id=%(id)s class="%(classes)s" style="%(styles)s">
%(content)s
</div>
<script type="text/javascript">
$(document).ready(function () {
// see python method insert_widget() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'insert_child',
function (elt, msg) {
let child = elt.children[msg.index];
let numChildren = elt.children.length;
// if widget needs to be inserted at the end
if (msg.index == numChildren) {
child = elt.children[msg.index-1];
child.insertAdjacentHTML('afterend', msg.value);
}
else {
child.insertAdjacentHTML('beforebegin', msg.value);
}
});
});
</script>
'''

    def __init__(self, orientation='horizontal'):
        super(Box, self).__init__()
        self.orientation = orientation
        self.widget = None
        self.spacing = 0
        if self.orientation == 'horizontal':
            self.add_css_classes(['hbox'])
        else:
            self.add_css_classes(['vbox'])

    def insert_widget(self, idx, child, stretch=0.0):
        """Insert `child` at position `idx` in the box.

        `stretch` is rounded to an int and used as the CSS flex-grow
        factor.
        """
        # BUG FIX: keep the server-side child order in sync with the
        # DOM; previously the child was always appended, so a later
        # re-render could reorder children relative to the browser.
        self.children.insert(idx, child)
        flex = int(round(stretch))
        child.add_css_styles([('flex-grow', flex), ('flex-shrink', 1)])
        if self._rendered:
            app = self.get_app()
            app.do_operation('insert_child', id=self.id, value=child.render(), index=idx)
        self.make_callback('widget-added', child)

    def add_widget(self, child, stretch=0.0):
        """Append `child` to the box.

        `stretch` is rounded to an int and used as the CSS flex-grow
        factor.
        """
        self.add_ref(child)
        flex = int(round(stretch))
        # Consider whether we need to add the following:
        # -webkit-flex-grow, -ms-flex-grow, -moz-flex-grow
        # and their "shrink" conterparts
        child.add_css_styles([('flex-grow', flex), ('flex-shrink', 1)])
        if self._rendered:
            app = self.get_app()
            app.do_operation('append_child', id=self.id, value=child.render())
        self.make_callback('widget-added', child)

    def set_spacing(self, val):
        """Set inter-child spacing in pixels (applied at render time)."""
        self.spacing = val

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        d = dict(id=self.id,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if self.orientation == 'horizontal':
            d['content'] = self.render_children(spacing=self.spacing,
                                                spacing_side='right')
        else:
            d['content'] = self.render_children(spacing=self.spacing,
                                                spacing_side='bottom')
        self._rendered = True
        return self.html_template % d
class HBox(Box):
    """A horizontally-oriented Box container."""
    def __init__(self):
        super(HBox, self).__init__(orientation='horizontal')
class VBox(Box):
    """A vertically-oriented Box container."""
    def __init__(self):
        super(VBox, self).__init__(orientation='vertical')
class Frame(ContainerBase):
    """A titled frame holding a single child widget.

    NOTE(review): the client-side 'update_widget' handler selects by
    CSS *class* name — presumably class names are unique per widget
    instance; verify against get_css_classes().
    """
    html_template = '''
<div id='%(id)s' class="%(parent)s" style="%(styles)s">
<h6 class="%(child1)s">%(title)s</h6>
<div class="%(child2)s">%(content)s</div>
</div>
<script type="text/javascript">
$(document).ready(function () {
// see python method set_widget() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'update_widget',
function (elt, msg) {
$(".%(child2)s").empty();
$(".%(child2)s").append(msg.value);
});
// see python method set_text() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'update_frametext',
function (elt, msg) {
elt.querySelector(".%(child1)s").innerHTML = msg.value;
});
});
</script>
'''

    def __init__(self, title=None):
        super(Frame, self).__init__()
        self.widget = None
        self.label = title
        self.add_css_classes(['frame', 'frame-widget', 'frame-text'])

    def set_widget(self, child, stretch=1):
        """Make `child` the sole content of the frame.

        `stretch` is accepted for API compatibility but unused here.
        """
        self.remove_all()
        self.add_ref(child)
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_widget', id=self.id, value=child.render())

    def set_text(self, text):
        """Change the frame's title text."""
        self.label = text
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_frametext', id=self.id, value=self.label)

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        children = self.get_children()
        content = children[0].render() if len(children) > 0 else ''
        css_classes = self.get_css_classes(fmt='str').split()
        d = dict(id=self.id, content=content, title=self.label,
                 parent=[k for k in css_classes if 'frame' in k][0],
                 child1=[k for k in css_classes if 'frame-text' in k][0],
                 child2=[k for k in css_classes if 'frame-widget' in k][0],
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class Expander(ContainerBase):
    """A collapsible container with a title bar (jqxExpander),
    holding a single child widget.  Starts collapsed.
    """
    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
<div> %(title)s </div>
<div>
%(content)s
</div>
</div>
<script type="text/javascript">
$(document).ready(function () {
$("#%(id)s").jqxExpander({ width: '%(width)s',
expanded: false });
// see python method set_widget() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'update_expander',
function (elt, msg) {
$(elt).jqxExpander('setContent', msg.value);
});
});
</script>
"""

    def __init__(self, title='', notoggle=False):
        super(Expander, self).__init__()
        if notoggle:
            # jqxExpander has no non-collapsible mode
            raise NotImplementedError("notoggle=True not implemented "
                                      "for this backend")
        self.widget = None
        self.label = title

    def set_widget(self, child, stretch=1):
        """Make `child` the sole content of the expander.

        `stretch` is accepted for API compatibility but unused here.
        """
        self.remove_all()
        self.add_ref(child)
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_expander', id=self.id, value=child.render())

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        children = self.get_children()
        content = children[0].render() if len(children) > 0 else ''
        d = dict(id=self.id, content=content, title=self.label,
                 width=500,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class TabWidget(ContainerBase):
    """A tabbed container (jQuery UI tabs)."""
    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
%(tabs)s
%(content)s
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').tabs({ active: '%(pos)s', heightStyle: 'fill' });
$('#%(id)s').on('tabsactivate', function (event, ui) {
ginga_app.widget_handler('activate', '%(id)s',
event['owner']['selectedItem']);
});
// see python method set_index() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'select_tab',
function (elt, msg) {
$(elt).tabs('option', 'active', msg.index);
});
});
</script>
"""

    def __init__(self, tabpos='top', reorderable=False, detachable=True,
                 group=0):
        """
        Parameters
        ----------
        tabpos : str
            'top' or 'bottom' (see `set_tab_position`).
        reorderable, detachable, group
            Stored but not otherwise used by this backend
            (NOTE(review): verify against callers).
        """
        super(TabWidget, self).__init__()
        self.reorderable = reorderable
        self.detachable = detachable
        self.group = group
        self.widget = None
        # index of the currently active tab
        self.index = 0
        self.set_tab_position(tabpos)
        self.titles = []
        self.add_css_classes(['ui-tabs'])
        self._tabs_visible = True
        for name in ('page-switch', 'page-close', 'page-move', 'page-detach'):
            self.enable_callback(name)

    def _update(self):
        # push a full re-render of this widget to the browser
        if self._rendered:
            app = self.get_app()
            app.do_operation('update_html', id=self.id, value=self.render())

    def set_tab_position(self, tabpos):
        """Set tab placement; only 'top' and 'bottom' are supported.

        Raises
        ------
        ValueError
            If `tabpos` is not 'top' or 'bottom'.
        """
        tabpos = tabpos.lower()
        if tabpos not in ('top', 'bottom'):
            raise ValueError("pg widgets doesn't support tabs position '%s'" % (
                tabpos))
        self.tabpos = tabpos

    def _cb_redirect(self, event):
        # browser reports the newly activated tab index
        self.index = event.value
        child = self.index_to_widget(self.index)
        self.make_callback('page-switch', child)

    def add_widget(self, child, title=''):
        """Add `child` as a new tab labeled `title`."""
        self.add_ref(child)
        self.titles.append(title)
        # attach title to child
        child.extdata.tab_title = title
        if self._rendered:
            app = self.get_app()  # noqa
            app.do_operation('append_child', id=self.id, value=child.render())
            # this is a hack--we really don't want to reload the page, but just
            # re-rendering the HTML does not seem to process the CSS right
            #app.do_operation('reload_page', id=self.id)
        self.make_callback('widget-added', child)

    def get_index(self):
        """Return the index of the active tab."""
        return self.index

    def set_index(self, idx):
        """Make the tab at `idx` the active one."""
        self.index = idx
        if self._rendered:
            app = self.get_app()
            app.do_operation('select_tab', id=self.id, index=self.index)

    def index_of(self, child):
        """Return index of `child`, or -1 if not present."""
        try:
            return self.children.index(child)
        except ValueError:
            return -1

    def index_to_widget(self, idx):
        """Returns child corresponding to `idx`"""
        return self.children[idx]

    def render(self):
        """Return the HTML/JS for this widget and mark it rendered."""
        d = dict(id=self.id, pos=self.tabpos, tabs='',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        if self._tabs_visible:
            # draw tabs
            res = ['''<ul class="ui-tabs-nav">\n''']
            for child in self.get_children():
                res.append('''<li> <a href="#%s-%s"> %s </a></li>\n''' % (
                    self.id, child.id, child.extdata.tab_title))
            res.append("</ul>\n")
            d['tabs'] = '\n'.join(res)
        res = ['''<div id="%s-%s"> %s </div>\n''' % (self.id, child.id,
                                                     child.render())
               for child in self.get_children()]
        d['content'] = '\n'.join(res)
        self._rendered = True
        return self.html_template % d
class StackWidget(ContainerBase):
    """A container that shows exactly one of its children at a time.

    All children are stacked in the same CSS grid cell; the
    'visibility' style controls which one is shown.
    """
    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
%(content)s
</div>
"""

    def __init__(self):
        super(StackWidget, self).__init__()
        self.add_css_classes(['stackbox'])
        # index of the currently visible child
        self.index = 0

    def add_widget(self, child, title=""):
        """Add `child` to the stack.

        `title` is accepted for API compatibility but unused.  The
        first child added becomes the visible one; subsequent children
        start hidden.
        """
        # all children share the same grid cell
        child.add_css_styles([('grid-column-start', '1'), ('grid-row-start', '1')])
        if len(self.get_children()) == 0:
            self.index = 0
            self.add_ref(child)
            child.add_css_styles([('visibility', 'visible')])
        else:
            self.add_ref(child)
            child.add_css_styles([('visibility', 'hidden')])
        if self._rendered:
            app = self.get_app()
            app.do_operation("append_child", id=self.id, value=child.render())
        self.make_callback('widget-added', child)

    def get_index(self):
        """Return the index of the currently visible child."""
        return self.index

    def set_index(self, idx):
        """Make the child at `idx` the visible one.

        BUG FIX: previously, when the widget was not yet rendered, the
        *new* child was hidden and the old one left visible, so the
        wrong page showed once the stack was rendered.  Now the old
        child is hidden and the new one shown in all cases.
        """
        children = self.get_children()
        old_visible = children[self.index]
        new_visible = children[idx]
        old_visible.add_css_styles([('visibility', 'hidden')])
        new_visible.add_css_styles([('visibility', 'visible')])
        if self._rendered:
            app = self.get_app()
            app.do_operation("update_style", id=new_visible.id,
                             value=new_visible.get_css_styles(fmt='str'))
            app.do_operation("update_style", id=old_visible.id,
                             value=old_visible.get_css_styles(fmt='str'))
        self.index = idx

    def index_of(self, child):
        """Return index of `child`, or -1 if not present."""
        try:
            return self.children.index(child)
        except ValueError:
            return -1

    def index_to_widget(self, idx):
        """Return the child at index `idx`."""
        return self.children[idx]

    def render(self):
        """Return the HTML for this widget and mark it rendered."""
        d = dict(id=self.id,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        d['content'] = self.render_children(spacing_side='bottom')
        self._rendered = True
        return self.html_template % d
class MDIWidget(TabWidget):
    """MDI-style container; this backend only implements 'tabs' mode."""

    def __init__(self, tabpos='top', mode='tabs'):
        super(MDIWidget, self).__init__(tabpos=tabpos)
        # NOTE(review): the `mode` parameter is ignored--mode is hard-coded
        # to 'tabs' (true MDI is not implemented; see true_mdi) -- confirm
        self.mode = 'tabs'
        self.true_mdi = False

    def get_mode(self):
        """Return the current mode (always 'tabs' in this backend)."""
        return self.mode

    def set_mode(self, mode):
        # mode switching is not supported in this backend
        pass

    def tile_panes(self):
        # not supported in tabs mode
        pass

    def cascade_panes(self):
        # not supported in tabs mode
        pass

    def use_tabs(self, tf):
        # this backend always uses tabs
        pass
class ScrollArea(ContainerBase):
    """Scrollable container wrapping a single child in a jqxPanel."""

    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
%(content)s
</div>
<script type="text/javascript">
$(document).ready(function () {
$("#%(id)s").jqxPanel({ width: '%(width)s', height: '%(height)s' });
// see python methods scroll_to_end and scroll_to_pct in this widget
ginga_app.add_widget_custom_method('%(id)s', 'scroll_vert',
function (elt, msg) {
var end_height = $("#%(id)s").jqxPanel('getScrollHeight');
var current_hscroll = $("#%(id)s").jqxPanel('getHScrollPosition');
if (msg.value >= 0 && msg.value <= 100) {
$(elt).jqxPanel('scrollTo', current_hscroll, pct_to_position(end_height, msg.value));
}
else {
$(elt).jqxPanel('scrollTo', current_hscroll, end_height);
}
});
ginga_app.add_widget_custom_method('%(id)s', 'scroll_hori',
function (elt, msg) {
var end_width = $("#%(id)s").jqxPanel('getScrollWidth');
var current_vscroll = $("#%(id)s").jqxPanel('getVScrollPosition');
if (msg.value >= 0 && msg.value <= 100) {
$(elt).jqxPanel('scrollTo', pct_to_position(end_width, msg.value), current_vscroll);
}
else {
$(elt).jqxPanel('scrollTo', end_width, current_vscroll);
}
});
// convert percentage to a position value
function pct_to_position(position_total, pct) {
return pct / 100 * position_total;
}
});
</script>
"""

    def __init__(self):
        super(ScrollArea, self).__init__()
        self.widget = None
        self.enable_callback('configure')

    def set_widget(self, child):
        """Set `child` as the scrolled widget, replacing any previous one."""
        self.remove_all()
        self.add_ref(child)

    def scroll_to_end(self, vertical=True, horizontal=False):
        """Scroll to the far end in the requested direction(s)."""
        if self._rendered:
            app = self.get_app()
            # omitting `value` makes the JS side fall into its else branch,
            # which scrolls to the end of the range
            if vertical:
                app.do_operation('scroll_vert', id=self.id)
            if horizontal:
                app.do_operation('scroll_hori', id=self.id)

    def scroll_to_pct(self, percent, vertical=True, horizontal=False):
        """Scroll to `percent` (0-100) of the scroll range."""
        if self._rendered:
            app = self.get_app()
            if vertical:
                app.do_operation('scroll_vert', id=self.id, value=percent)
            if horizontal:
                app.do_operation('scroll_hori', id=self.id, value=percent)

    def render(self):
        """Return the HTML/JS for the panel and its (single) child."""
        children = self.get_children()
        if len(children) == 0:
            content = ''
        else:
            content = children[0].render()
        d = dict(id=self.id, content=content,
                 width='100%', height='100%',
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class Splitter(ContainerBase):
    """Resizable pane container (jqxSplitter) with draggable dividers."""

    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
%(panels)s
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').jqxSplitter({ width: '100%%', height: '100%%',
orientation: '%(orient)s',
disabled: %(disabled)s,
panels: %(sizes)s
});
$('#%(id)s').on('resize', function (event) {
var sizes = [];
for (i = 0; i < event.args.panels.length; i++) {
var panel = event.args.panels[i];
sizes.push(panel.size);
}
ginga_app.widget_handler('activate', '%(id)s', sizes);
});
ginga_app.add_widget_custom_method('%(id)s', 'add_splitter',
function (elt) {
$('#%(id)s').jqxSplitter({ width: '100%%', height: '100%%',
orientation: '%(orient)s',
disabled: %(disabled)s,
panels: %(sizes)s
});
});
});
</script>
"""

    def __init__(self, orientation='horizontal', thumb_px=8):
        super(Splitter, self).__init__()
        self.orientation = orientation
        self.widget = None
        # requested grab-bar thickness; NOTE: not referenced by the template
        self.thumb_px = thumb_px
        # pane sizes, as reported by / sent to the browser side
        self.sizes = []
        self.enable_callback('activated')

    def add_widget(self, child):
        """Add `child` as a new pane."""
        self.add_ref(child)
        self.make_callback('widget-added', child)
        # NOTE(review): triggers a browser-side rebuild only when more than
        # two sizes are recorded, and without checking self._rendered first
        # -- verify intent
        if len(self.sizes) > 2:
            app = self.get_app()
            app.do_operation('add_splitter', id=self.id)

    def get_sizes(self):
        """Return the list of pane sizes."""
        return self.sizes

    def set_sizes(self, sizes):
        """Record new pane sizes (browser side not updated yet--see TODO)."""
        self.sizes = sizes
        # TODO:
        #self.call_custom_method('set_sizes', sizes=self.sizes)

    def _cb_redirect(self, event):
        # browser reported a divider drag: record new sizes and notify
        self.set_sizes(event.value)
        self.make_callback('activated', self.sizes)

    def render(self):
        """Return the HTML/JS for the splitter and its panes."""
        panels = ['''<div> %s </div>''' % (child.render())
                  for child in self.get_children()]
        sizes = ['''{ size: %d }''' % size
                 for size in self.sizes]
        disabled = str(not self.enabled).lower()
        # orientation is swapped before being passed to jqxSplitter
        # (presumably its convention is the opposite of ours -- confirm)
        if self.orientation == 'vertical':
            orient = 'horizontal'
        else:
            orient = 'vertical'
        d = dict(id=self.id, panels='\n'.join(panels), disabled=disabled,
                 sizes='[ %s ]' % ','.join(sizes), orient=orient,
                 width=500, height=500,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class GridBox(ContainerBase):
    """Grid container rendered as an HTML table.

    Cell contents are tracked in `self.tbl`, a dict keyed by (row, col).
    """

    html_template = '''
<table id=%(id)s class="%(classes)s" style="%(styles)s">
%(content)s
</table>
<script type="text/javascript">
// see python method insert_cell in this widget
ginga_app.add_widget_custom_method('%(id)s', 'insert_cell',
function (elt, msg) {
let table = document.getElementById("%(id)s");
let row = msg.value[0];
table.rows.item(row).insertCell(msg.value[1]);
});
// see python method insert_row in this widget
ginga_app.add_widget_custom_method('%(id)s', 'insert_row',
function (elt, msg) {
let index = msg.value[0];
let numColumns = msg.value[1];
let newRow = document.getElementById("%(id)s").insertRow(index);
for (let i = 0; i < numColumns; i++) {
newRow.insertCell(i);
}
});
// see python method append_row in this widget
ginga_app.add_widget_custom_method('%(id)s', 'append_row',
function (elt, msg) {
let numRows = msg.value[0];
let numColumns = msg.value[1];
let newRow = document.getElementById("%(id)s").insertRow(numRows);
for (let i = 0; i < numColumns; i++) {
newRow.insertCell(i);
}
});
// see python method delete_row in this widget
ginga_app.add_widget_custom_method('%(id)s', 'delete_row',
function (elt, msg) {
document.getElementById("%(id)s").deleteRow(msg.value);
});
</script>
'''

    def __init__(self, rows=1, columns=1):
        super(GridBox, self).__init__()
        self.widget = None
        self.num_rows = rows
        self.num_cols = columns
        self.row_spacing = 0
        self.col_spacing = 0
        # (row, col) -> child widget
        self.tbl = {}

    def resize_grid(self, rows, columns):
        """Set the logical grid dimensions (browser side not updated)."""
        self.num_rows = rows
        self.num_cols = columns

    def set_row_spacing(self, val):
        """Set vertical spacing between cells to `val` pixels."""
        self.row_spacing = val
        self.add_css_styles([('border-collapse', 'separate')])
        self.add_css_styles([('border-spacing', ' %dpx %dpx' % (self.col_spacing, self.row_spacing))])
        style = self.get_css_styles(fmt='str')
        if self._rendered:
            app = self.get_app()
            app.do_operation("update_style", id=self.id, value=style)

    def set_spacing(self, val):
        """Set both row and column spacing to `val` pixels."""
        self.set_row_spacing(val)
        self.set_column_spacing(val)

    def set_column_spacing(self, val):
        """Set horizontal spacing between cells to `val` pixels."""
        self.col_spacing = val
        self.add_css_styles([('border-collapse', 'separate')])
        self.add_css_styles([('border-spacing', ' %dpx %dpx' % (self.col_spacing, self.row_spacing))])
        style = self.get_css_styles(fmt='str')
        if self._rendered:
            app = self.get_app()
            app.do_operation("update_style", id=self.id, value=style)

    def add_widget(self, child, row, col, stretch=0):
        """Place `child` at (row, col), growing the grid if necessary."""
        self.add_ref(child)
        self.num_rows = max(self.num_rows, row + 1)
        self.num_cols = max(self.num_cols, col + 1)
        self.tbl[(row, col)] = child
        if self._rendered:
            # re-send the whole table body to the browser
            app = self.get_app()
            app.do_operation('update_html', id=self.id,
                             value=self.render_body())
        self.make_callback('widget-added', child)

    def insert_cell(self, row, col):
        """Insert an empty cell at (row, col) in the browser DOM.

        NOTE(review): unlike insert_row/append_row this does not update
        self.tbl, so a later render() will not reflect it -- verify intent.
        """
        indices = [row, col]
        if self._rendered:
            app = self.get_app()
            app.do_operation("insert_cell", id=self.id, value=indices)

    def insert_row(self, index):
        """Insert an empty row at `index`, shifting later rows down.

        NOTE(review): the shift loop assumes every cell in rows >= index is
        present in self.tbl; a sparse grid would raise KeyError -- verify.
        """
        indices = [index, self.num_cols]
        self.num_rows += 1
        # handle case where user inserts row at the end of the gridbox
        if index == self.num_rows - 1:
            for j in range(self.num_cols):
                self.tbl[(index, j)] = Box()
        else:
            # shift key/value pairs down to make the row empty at index
            for i in range(self.num_rows - 2, index - 1, -1):
                for j in range(self.num_cols):
                    self.tbl[(i + 1, j)] = self.tbl[(i, j)]
            # populate inserted row with empty Boxes for render_body()
            for j in range(self.num_cols):
                self.tbl[(index, j)] = Box()
        if self._rendered:
            app = self.get_app()
            app.do_operation("insert_row", id=self.id, value=indices)

    def append_row(self):
        """Append an empty row at the bottom of the grid."""
        indices = [self.num_rows, self.num_cols]
        if self._rendered:
            app = self.get_app()
            app.do_operation("append_row", id=self.id, value=indices)
        self.num_rows += 1
        # populate appended row with empty Boxes for render_body()
        for j in range(self.num_cols):
            self.tbl[(self.num_rows - 1, j)] = Box()

    def delete_row(self, index):
        """Delete the row at `index`, shifting later rows up.

        NOTE(review): an out-of-range index is ignored apart from a
        print()--consider raising or logging instead.
        """
        if index < 0 or index >= self.num_rows:
            print("Index out of bounds")
            return
        if index == self.num_rows - 1:
            for j in range(self.num_cols):
                self.tbl.pop((self.num_rows - 1, j))
        else:
            # shift dict key, value pairs up
            # NOTE(review): like insert_row, assumes the affected rows are
            # fully populated in self.tbl -- verify
            for i in range(index + 1, self.num_rows):
                for j in range(self.num_cols):
                    self.tbl[(i - 1, j)] = self.tbl[(i, j)]
            # delete items in last row to maintain self.tbl
            for j in range(self.num_cols):
                self.tbl.pop((self.num_rows - 1, j))
        self.num_rows -= 1
        if self._rendered:
            app = self.get_app()
            app.do_operation("delete_row", id=self.id, value=index)

    def render_body(self):
        """Return the <tr>/<td> rows for all cells (empty string for
        unpopulated cells)."""
        res = []
        for i in range(self.num_rows):
            res.append("  <tr>")
            for j in range(self.num_cols):
                res.append("  <td>")
                key = (i, j)
                if key in self.tbl:
                    res.append(self.tbl[key].render())
                else:
                    res.append("")
                res.append("  </td>")
            res.append("  </tr>")
        return '\n'.join(res)

    def render(self):
        """Return the full HTML table plus its helper JS."""
        d = dict(id=self.id,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'),
                 content=self.render_body())
        self._rendered = True
        return self.html_template % d
class ToolbarAction(WidgetBase):
    """A momentary or togglable action hosted in a Toolbar.

    The visual widget (Button or Image) is assigned to `self.widget` by
    the Toolbar; this class tracks toggle state and fires 'activated'.
    """

    def __init__(self):
        super(ToolbarAction, self).__init__()
        self.widget = None
        self.value = False
        self.checkable = False
        self.enable_callback('activated')

    def _cb_redirect(self, *args):
        # togglable actions report their current state; momentary ones don't
        if not self.checkable:
            self.make_callback('activated')
        else:
            self.make_callback('activated', self.get_state())

    def set_state(self, tf):
        """Set the toggle state."""
        self.value = tf

    def get_state(self):
        """Return the toggle state."""
        return self.value

    def render(self):
        """Render by delegating to the wrapped visual widget."""
        self._rendered = True
        return self.widget.render()
class Toolbar(ContainerBase):
    """Horizontal or vertical bar of action buttons and menus."""

    def __init__(self, orientation='horizontal'):
        super(Toolbar, self).__init__()
        self.orientation = orientation
        # inner Box actually lays out and renders the items
        self.widget = Box(orientation=orientation)

    def add_action(self, text, toggle=False, iconpath=None, iconsize=None):
        """Create a ToolbarAction (icon button if `iconpath` given, else a
        text button), add it to the bar and return it.
        """
        child = ToolbarAction()
        # NOTE(review): this sets `text` on the toolbar itself, not on the
        # new action--looks like it may have been meant as child.text;
        # verify
        self.text = text
        if iconpath:
            wd, ht = 24, 24
            if iconsize is not None:
                wd, ht = iconsize
            native_image = PgHelp.get_icon(iconpath, size=(wd, ht),
                                           format='png')
            widget = Image(native_image=native_image)
            widget.resize(wd, ht)
        else:
            widget = Button(text)
        child.checkable = toggle
        child.widget = widget
        self.widget.add_widget(child, stretch=0)
        return child

    def add_widget(self, child):
        # NOTE(review): adds to our own child list, not to self.widget, but
        # render() only renders self.widget--widgets added here never appear
        # in the output; verify intent
        self.add_ref(child)
        self.make_callback('widget-added', child)

    def add_menu(self, text, menu=None, mtype='tool'):
        """Add a button that pops up `menu` (created if None); returns it."""
        if menu is None:
            menu = Menu()
        child = self.add_action(text)
        child.widget.add_callback('activated', lambda w: menu.popup())
        return menu

    def add_separator(self):
        # separators not implemented in this backend
        # self.widget.addSeparator()
        pass

    def render(self):
        """Render by delegating to the inner Box."""
        self._rendered = True
        return self.widget.render()
class MenuAction(WidgetBase):
    """A single entry in a Menu; may host a submenu via `self.widget`."""

    html_template = """%(item)s %(content)s"""

    def __init__(self, text=None, checkable=False):
        super(MenuAction, self).__init__()
        self.widget = None
        self.text = text
        self.checkable = checkable
        self.value = False
        self.enable_callback('activated')

    def _cb_redirect(self, *args):
        # checkable entries report their current value; plain ones don't
        if not self.checkable:
            self.make_callback('activated')
        else:
            self.make_callback('activated', self.value)

    def render(self):
        """Return the label followed by any submenu HTML."""
        content = '' if self.widget is None else self.widget.render()
        d = dict(id=self.id, item=self.text,
                 disabled=str(not self.enabled).lower(),
                 content=content,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class Menu(ContainerBase):
    """Popup or nested menu of MenuAction items.

    If `self.widget` is set (by a parent Menubar/Toolbar/MenuAction),
    render() emits the plain nested-list template; otherwise the top-level
    popup template with its jqxMenu setup script.
    """
    # case 1: not a top-level menu
    html_template1 = """
<ul id='%(id)s' class="%(classes)s" style="%(styles)s">
%(content)s
</ul>
"""
    # case 2: a top-level menu
    html_template2 = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
<ul>
%(content)s
</ul>
</div>
<script type="text/javascript">
$(document).ready(function () {
$("#%(id)s").jqxMenu({
mode: 'popup', disabled: %(disabled)s });
$('#%(id)s').on('itemclick', function (event) {
// get the clicked LI element.
var elt = event.args;
var w_id = elt.getAttribute('data-menuitem-id');
ginga_app.widget_handler('activate', w_id, 'clicked');
});
// see python method popup() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'popup_menu',
function (elt, msg) {
var top = $(window).scrollTop();
var left = $(window).scrollLeft();
$(elt).jqxMenu('open', left + msg.x, top + msg.y);
});
});
</script>
"""

    def __init__(self):
        super(Menu, self).__init__()
        # this ends up being a reference to the Pg menubar or toolbar
        self.widget = None
        # name -> submenu mapping (case-insensitive keys)
        self.menus = Bunch.Bunch(caseless=True)

    def add_widget(self, child):
        """Append `child` (usually a MenuAction) to this menu."""
        self.add_ref(child)
        self.make_callback('widget-added', child)

    def add_name(self, name, checkable=False):
        """Append a MenuAction labeled `name`; returns it."""
        child = MenuAction(text=name, checkable=checkable)
        self.add_widget(child)
        return child

    def add_menu(self, name):
        """Append a submenu labeled `name`; returns the new Menu."""
        child = Menu()
        self.menus[name] = child
        act_w = self.add_name(name)
        act_w.widget = child
        return child

    def get_menu(self, name):
        """Return the submenu registered under `name`."""
        return self.menus[name]

    def add_separator(self):
        # TODO
        pass

    def _cb_redirect(self, event):
        # NOTE: this is called when they click only on the menu header
        pass

    def popup(self, widget=None):
        """Pop up this (top-level) menu in the browser window."""
        # TODO: handle offset from widget
        x, y = 0, 0
        if self._rendered:
            app = self.get_app()
            app.do_operation('popup_menu', id=self.id, x=x, y=y)

    def render(self):
        """Return the menu HTML; template depends on whether we have a
        parent widget (see class docstring).
        """
        content = ['''<li data-menuitem-id="%s"> %s </li>''' % (
            child.id, child.render())
            for child in self.get_children()]
        disabled = str(not self.enabled).lower()
        d = dict(id=self.id, content='\n'.join(content), disabled=disabled,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        if self.widget is not None:
            return self.html_template1 % d
        return self.html_template2 % d
class Menubar(ContainerBase):
    """Horizontal menu bar (jqxMenu); children must be Menu widgets."""

    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
<ul>
%(content)s
</ul>
</div>
<script type="text/javascript">
$(document).ready(function () {
$("#%(id)s").jqxMenu({ width: '%(width)s', height: '%(height)s',
disabled: %(disabled)s });
$('#%(id)s').on('itemclick', function (event) {
// get the clicked LI element.
var elt = event.args;
var w_id = elt.getAttribute('data-menuitem-id');
ginga_app.widget_handler('activate', w_id, 'clicked');
});
});
</script>
"""

    def __init__(self):
        super(Menubar, self).__init__()
        # name -> Menu child mapping (case-insensitive keys)
        self.menus = Bunch.Bunch(caseless=True)
        # height of the bar in pixels
        self.thickness = 28

    def add_widget(self, child, name):
        """Add Menu `child` under label `name`; returns the child.

        Raises ValueError if `child` is not a Menu.
        """
        if not isinstance(child, Menu):
            raise ValueError("child widget needs to be a Menu object")
        child.extdata.text = name
        # mark the child as having a parent, so Menu.render() uses its
        # non-top-level template
        child.widget = self
        self.menus[name] = child
        self.add_ref(child)
        self.make_callback('widget-added', child)
        return child

    def add_name(self, name):
        """Create a new Menu labeled `name`, add it and return it."""
        child = Menu()
        return self.add_widget(child, name)

    def get_menu(self, name):
        """Return the Menu registered under `name`."""
        return self.menus[name]

    def render(self):
        """Return the HTML/JS for the menubar and its menus."""
        # each child should be a Menu
        content = ['''<li data-menuitem-id="%s"> %s %s </li>''' % (
            child.id, child.extdata.get('text', ''), child.render())
            for child in self.get_children()]
        disabled = str(not self.enabled).lower()
        d = dict(id=self.id, content='\n'.join(content), disabled=disabled,
                 width='100%', height=self.thickness,
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class Page(ContainerBase):
    """The root HTML document for one browser window/tab.

    Most window-management methods are no-ops, since a web page cannot be
    moved/resized/iconified from the server side.
    """

    html_template = '''
<!doctype html>
<html>
<head>
<title>%(title)s</title>
<style>
body {
width: 100%%;
height: 100%%;
padding: 0px;
margin: 0px;
border: 0;
overflow-x: hidden; /* disable horizontal scrollbar */
display: block; /* no floating content on sides */
}
</style>
<meta name="viewport"
content="width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no, target-densitydpi=device-dpi" />
</head>
<body>
%(script_imports)s
<!-- For Ginga -->
<link rel="stylesheet" href="/js/ginga_pg.css" type="text/css" />
<script type="text/javascript" src="/js/ginga_pg.js"></script>
<script type="text/javascript">
var wid = "%(wid)s";
var url = "%(url)s";
var ws_url = "ws://" + window.location.host + "/app/socket?wid=%(wid)s";
var ginga_app = ginga_make_application(ws_url, %(debug)s);
</script>
<div id=%(id)s>%(content)s</div>
</body>
</html>
'''

    def __init__(self, title=""):
        super(Page, self).__init__()
        self.title = title
        self.widget = None
        # these are assigned by the Application()
        self.wid = None
        self.url = None
        self.app = None
        self.debug = False
        self.script_imports = None
        # widget.closeEvent = lambda event: self._quit(event)
        self.enable_callback('close')

    def set_widget(self, child):
        """Set `child` as the page's top-level content widget."""
        self.add_ref(child)

    def add_dialog(self, child):
        """Attach a dialog widget to this page."""
        self.add_ref(child)
        if self._rendered:
            app = self.get_app()
            app.do_operation('append_child', id=self.id,
                             value=child.render())
        self.make_callback('widget-added', child)

    def show(self):
        # no-op in this backend
        pass

    def hide(self):
        # no-op in this backend
        pass

    def close(self):
        """Fire the 'close' callback."""
        self.make_callback('close')

    def raise_(self):
        # no-op in this backend
        pass

    def lower(self):
        # no-op in this backend
        pass

    def resize(self, width, height):
        # self.widget.resize(width, height)
        pass

    def focus(self):
        # no-op in this backend
        pass

    def move(self, x, y):
        # no-op in this backend
        pass

    def maximize(self):
        # no-op in this backend
        pass

    def unmaximize(self):
        # no-op in this backend
        pass

    def fullscreen(self):
        # no-op in this backend
        pass

    def unfullscreen(self):
        # no-op in this backend
        pass

    def iconify(self):
        # no-op in this backend
        pass

    def uniconify(self):
        # no-op in this backend
        pass

    def set_title(self, title):
        # NOTE: not propagated to an already-rendered page
        self.title = title

    def _cb_redirect(self, event):
        pass

    def render(self):
        """Return the complete HTML document for this page."""
        base_url = self.app.base_url
        url = base_url + "?wid=%s" % (self.wid)
        # NOTE(review): ws_url is passed to the template dict but the
        # template builds its own ws:// URL from window.location -- confirm
        ws_url = base_url + "/socket?wid=%s" % (self.wid)
        if self.debug:
            debug = 'true'
        else:
            debug = 'false'
        # prepare javascript imports
        if self.script_imports is None:
            self.script_imports = self.app.script_imports
        script_imports = [self.app.script_decls[key]
                          for key in self.script_imports]
        d = dict(title=self.title, content=self.render_children(),
                 wid=self.wid, id=self.id, url=url, ws_url=ws_url,
                 debug=debug, script_imports='\n'.join(script_imports),
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d  # noqa
class TopLevel(ContainerBase):
    """A floating top-level window, implemented as a jQuery-UI dialog that
    opens automatically.  Most window-management methods are no-ops."""

    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
%(content)s
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').dialog({
autoOpen: true, modal: false,
autoResize: true,
title: "%(title)s",
closeOnEscape: false,
position: { x: 50, y: 50},
draggable: true, resizeable: true,
minWidth: 'auto', minHeight: 'auto',
width: 'auto', height: 'auto',
maxWidth: '100%%', maxHeight: '100%%',
});
// otherwise we get scrollbars in the dialog
$('#%(id)s').css('overflow', 'visible');
$('#%(id)s').on('beforeClose', function (event) {
ginga_app.widget_handler('dialog-close', '%(id)s', true);
});
var resize_timer;
$('#%(id)s').on("dialogresize", function (event, ui) {
event.preventDefault()
clearTimeout(resize_timer);
resize_timer = setTimeout(function () {
var payload = { width: ui.size.width,
height: ui.size.height,
x: ui.position.left,
y: ui.position.top }
ginga_app.resize_window();
ginga_app.widget_handler('dialog-resize', '%(id)s', payload);
}, 250);
});
// $('#%(id)s').on("dialogfocus", function (event, ui) {
//     ginga_app.widget_handler('dialog-focus', '%(id)s', true);
// });
$('#%(id)s').on("dialogopen", function (event, ui) {
ginga_app.resize_window();
ginga_app.widget_handler('dialog-open', '%(id)s', true);
});
// see python method show() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'show_dialog',
function (elt, msg) {
$(elt).dialog('open');
});
// see python method hide() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'hide_dialog',
function (elt, msg) {
$(elt).dialog('close');
});
// see python method raise_() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'raise_dialog',
function (elt, msg) {
$(elt).dialog('moveToTop');
});
});
</script>
"""

    def __init__(self, title="", parent=None):
        super(TopLevel, self).__init__()
        ## if parent is None:
        ##     raise ValueError("Top level 'parent' parameter required")
        self.title = title
        self.parent = parent
        for name in ('open', 'close', 'resize'):
            self.enable_callback(name)
        self.set_margins(0, 0, 0, 0)
        #self.add_css_classes([])
        # NOTE: either use this or explicitly call add_dialog() on
        # TopLevel widget!
        ## if parent is not None:
        ##     parent.add_dialog(self)

    def _cb_redirect(self, event):
        # dispatch browser dialog events to our callbacks
        if event.type == 'dialog-resize':
            wd, ht = int(event.value['width']), int(event.value['height'])
            self.make_callback('resize', (wd, ht))
        elif event.type == 'dialog-open':
            # TODO: don't allow dialog to be closed
            self.make_callback('open')
        elif event.type == 'dialog-close':
            # TODO: don't allow dialog to be closed
            self.make_callback('close')

    def set_widget(self, child):
        """Set `child` as the single content widget (replacing any other)."""
        self.remove_all()
        self.add_ref(child)

    def show(self):
        """Open the dialog in the browser."""
        if self._rendered:
            app = self.get_app()
            app.do_operation('show_dialog', id=self.id)

    def hide(self):
        """Close (hide) the dialog in the browser."""
        if self._rendered:
            app = self.get_app()
            app.do_operation('hide_dialog', id=self.id)

    def raise_(self):
        """Move the dialog to the top of the stacking order."""
        if self._rendered:
            app = self.get_app()
            app.do_operation('raise_dialog', id=self.id)

    def lower(self):
        # no-op in this backend
        pass

    def focus(self):
        # no-op in this backend
        pass

    def move(self, x, y):
        # no-op in this backend
        pass

    def maximize(self):
        # no-op in this backend
        pass

    def unmaximize(self):
        # no-op in this backend
        pass

    def is_maximized(self):
        return False

    def fullscreen(self):
        # no-op in this backend
        pass

    def unfullscreen(self):
        # no-op in this backend
        pass

    def is_fullscreen(self):
        return False

    def iconify(self):
        # no-op in this backend
        pass

    def uniconify(self):
        # no-op in this backend
        pass

    def set_title(self, title):
        # NOTE: not propagated to an already-rendered dialog
        self.title = title

    def close(self):
        """Fire the 'close' callback."""
        self.make_callback('close')

    def render_body(self):
        """Return the HTML of the single content child (or '')."""
        if len(self.children) == 0:
            return ""
        return self.children[0].render()

    def render(self):
        """Return the HTML/JS for the dialog and its content."""
        wd, ht = self.get_size()
        d = dict(id=self.id, title=self.title,
                 width=wd, height=ht,
                 content=self.render_body(),
                 classes=self.get_css_classes(fmt='str'),
                 styles=self.get_css_styles(fmt='str'))
        self._rendered = True
        return self.html_template % d
class Application(Callback.Callbacks):
    """Web application server.

    Owns the tornado/asyncio event loop, the top-level Page windows,
    timers, and the web socket connections to browser clients.
    """

    # HTML snippets for optional javascript imports; keys are referenced
    # from self.script_imports (see Page.render)
    script_decls = {
        'hammer': '''
<script type="text/javascript" src="/js/hammer.js"></script>
''',
        'jquery': '''
<!-- jQuery foundation -->
<link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/smoothness/jquery-ui.css">
<script src="//code.jquery.com/jquery-1.12.4.js"></script>
<script src="//code.jquery.com/ui/1.12.1/jquery-ui.js"></script>
''',
        'jqx': '''
<!-- For jQWidgets -->
<link rel="stylesheet" href="/js/jqwidgets/styles/jqx.base.css" type="text/css" />
<script type="text/javascript" src="/js/jqwidgets/jqxcore.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxdata.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxbuttons.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxscrollbar.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxsplitter.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxtabs.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxpanel.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxexpander.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxknob.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxprogressbar.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxmenu.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxtoolbar.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxdatatable.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxtreegrid.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxdraw.js"></script>
<script type="text/javascript" src="/js/jqwidgets/jqxnumberinput.js"></script>
''',
    }

    def __init__(self, logger=None, base_url=None,
                 host='localhost', port=9909, settings=None):
        # NOTE: base_url parameter not used, but here for backward compatibility
        global _app, widget_dict
        super(Application, self).__init__()
        self.logger = logger
        if settings is None:
            settings = Settings.SettingGroup(logger=self.logger)
        self.settings = settings
        self.settings.add_defaults(host=host, port=port)
        # NOTE: overwritten below once host/port are known
        self.base_url = self.settings.get('base_url', None)
        self.window_dict = {}
        self.wincnt = 0
        # list of web socket handlers connected to this application
        self.ws_handlers = []
        # default sections from script imports to insert in web pages
        # see Page widget, above
        self.script_imports = ['hammer', 'jquery']
        _app = self
        widget_dict[0] = self
        self._timer_lock = threading.RLock()
        self._timers = []
        self.t_ioloop = None
        # fix: stop() calls self.ev_quit.set() but this event was never
        # created anywhere, which made stop() raise AttributeError
        self.ev_quit = threading.Event()
        self.host = self.settings.get('host', 'localhost')
        self.port = self.settings.get('port', 9909)
        self.base_url = "http://%s:%d/app" % (self.host, self.port)
        # Get screen size
        # TODO: need to pass this from Web browser
        self.screen_wd = 1600
        self.screen_ht = 1200
        # for tracking remote ecmascript calls
        self.caller_id = 0
        self.callers = {}
        for name in ('shutdown', ):
            self.enable_callback(name)

    def get_screen_size(self):
        """Return (width, height) of the screen in pixels."""
        return (self.screen_wd, self.screen_ht)

    def process_events(self):
        """Run all currently pending event-loop tasks to completion."""
        if self.t_ioloop is None:
            raise Exception("No event loop was started for this thread")
        tasks = asyncio.all_tasks(self.t_ioloop)
        self.t_ioloop.run_until_complete(asyncio.gather(*tasks))

    def process_end(self):
        pass

    def add_window(self, window, wid=None):
        """Register a Page `window`, assigning it an id if none given."""
        if wid is None:
            wid = 'win%d' % (self.wincnt)
            self.wincnt += 1
        window.wid = wid
        window.url = self.base_url + '?id=%s' % (wid)
        window.app = self
        self.window_dict[wid] = window

    def get_window(self, wid):
        """Return the window registered under `wid` (KeyError if absent)."""
        return self.window_dict[wid]

    def has_window(self, wid):
        """Return True if a window is registered under `wid`."""
        return wid in self.window_dict

    def get_wids(self):
        """Return the list of registered window ids."""
        return list(self.window_dict.keys())

    def make_window(self, title=None, wid=None):
        """Create, register and return a new Page."""
        w = Page(title=title)
        self.add_window(w, wid=wid)
        return w

    def get_caller_id(self):
        """Return the next unique id for a remote ecmascript call."""
        c_id, self.caller_id = self.caller_id, self.caller_id + 1
        return c_id

    def _cb_redirect(self, event):
        #self.logger.debug("application got an event (%s)" % (str(event)))
        pass

    def add_ws_handler(self, handler):
        """Register a newly connected web socket handler."""
        with self._timer_lock:
            self.ws_handlers.append(handler)

    def do_operation(self, operation, **kwdargs):
        """Broadcast `operation` to every connected web socket client.

        Handlers that raise are logged and dropped from the list.
        """
        self.logger.debug('---- (%d) operation: %s' % (
            kwdargs.get('id', 0), operation))
        with self._timer_lock:
            handlers = list(self.ws_handlers)
        bad_handlers = []
        for handler in handlers:
            try:
                handler.do_operation(operation, **kwdargs)
            except Exception as e:
                self.logger.error("Error doing operation '%s': %s" % (
                    operation, str(e)))
                bad_handlers.append(handler)
        # remove problematic clients
        if len(bad_handlers) > 0:
            with self._timer_lock:
                for handler in bad_handlers:
                    if handler in self.ws_handlers:
                        self.ws_handlers.remove(handler)

    def on_timer_event(self, event):
        """internal event handler for timer events"""
        # self.logger.debug("timer update")
        with self._timer_lock:
            expired = [timer for timer in self._timers
                       if (timer.deadline is not None and
                           time.time() > timer.deadline)]
        for timer in expired:
            timer.expire()
            # self.logger.debug("update should have been called.")

    def add_timer(self, timer):
        """internal method for timer management; see Timer class in PgHelp"""
        with self._timer_lock:
            if timer not in self._timers:
                self._timers.append(timer)

    def remove_timer(self, timer):
        """internal method for timer management; see Timer class in PgHelp"""
        with self._timer_lock:
            if timer in self._timers:
                self._timers.remove(timer)

    def make_timer(self):
        """Return a new Timer attached to this application."""
        return PgHelp.Timer(app=self)

    def widget_event(self, event):
        """internal method for event management: dispatch a browser event
        to the widget it belongs to."""
        if event.type == 'timer':
            self.on_timer_event(event)
            return
        # get the widget associated with this id
        w_id = event.id
        self.logger.debug('----(%s) event: %s' % (w_id, event))
        if event.type == 'ecma_call_result':
            # resolve a pending remote javascript call
            caller_id = event.value['caller_id']
            f = self.callers.get(caller_id, None)
            if f is not None:
                del self.callers[caller_id]
                f.resolve(event.value['caller_result'])
            return
        try:
            w_id = int(event.id)
            widget = widget_dict[w_id]
            # make the callback for this widget (activation or value-changed)
            widget._cb_redirect(event)
        except KeyError:
            self.logger.error("Event '%s' from unknown widget (id=%s)" % (
                str(event), w_id))

    def start(self, no_ioloop=False):
        """Create the tornado server and start listening.

        If `no_ioloop` is True, raise instead of creating a new event loop
        when none is running in this thread.
        """
        import tornado.web
        from ginga.web.pgw import PgHelp, js
        js_path = os.path.dirname(js.__file__)
        # create and run the app
        self.server = tornado.web.Application([
            # (r"/js/(.*\.js)", tornado.web.StaticFileHandler,
            (r"/js/(.*)", tornado.web.StaticFileHandler,
             {"path": js_path}),
            (r"/js/jquery/(.*)", tornado.web.StaticFileHandler,
             {"path": os.path.join(js_path, 'jquery')}),
            (r"/app", PgHelp.WindowHandler,
             dict(name='Application', url='/app', app=self)),
            (r"/app/socket", PgHelp.ApplicationHandler,
             dict(name='ApplicationSocketInterface', app=self)),
        ], app=self, logger=self.logger)
        self.t_ioloop = None
        try:
            # NOTE: tornado now uses the asyncio event loop
            self.t_ioloop = asyncio.get_running_loop()
        except RuntimeError as ex:
            if no_ioloop:
                raise ex
            # TODO: really just want to check for this exception:
            # "There is no current event loop in thread ..."
            self.t_ioloop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.t_ioloop)
        self.server.listen(self.port, self.host)
        self.logger.info("ginga web now running at " + self.base_url)

    def stop(self):
        """Stop the event loop and signal shutdown via ev_quit."""
        # how to stop tornado server?
        if self.t_ioloop is not None:
            self.t_ioloop.stop()
        self.ev_quit.set()

    def mainloop(self, no_ioloop=False):
        """Start the server and run the event loop until closed."""
        self.start(no_ioloop=no_ioloop)
        if self.t_ioloop is None:
            raise Exception("No event loop was started for this thread")
        while not self.t_ioloop.is_closed():
            self.t_ioloop.run_forever()

    def quit(self):
        """Alias for stop()."""
        self.stop()
class Dialog(ContainerBase):
    """Modal or modeless dialog (jQuery-UI), optionally with a row of
    choice buttons wired to the 'activated' callback."""
    html_template = """
<div id='%(id)s' class="%(classes)s" style="%(styles)s">
%(content)s
</div>
<script type="text/javascript">
$(document).ready(function () {
$('#%(id)s').dialog({
autoOpen: false, modal: %(modal)s,
autoResize: true,
title: "%(title)s",
closeOnEscape: false,
position: { x: 50, y: 50},
draggable: true, resizeable: true,
minWidth: 'auto', minHeight: 'auto',
width: 'auto', height: 'auto',
maxWidth: '100%%', maxHeight: '100%%',
});
// otherwise we get scrollbars in the dialog
$('#%(id)s').css('overflow', 'visible');
$('#%(id)s').on('beforeClose', function (event) {
ginga_app.widget_handler('dialog-close', '%(id)s', true);
});
var resize_timer;
$('#%(id)s').on("dialogresize", function (event, ui) {
event.preventDefault()
clearTimeout(resize_timer);
resize_timer = setTimeout(function () {
var payload = { width: ui.size.width,
height: ui.size.height,
x: ui.position.left,
y: ui.position.top }
ginga_app.resize_window();
ginga_app.widget_handler('dialog-resize', '%(id)s', payload);
}, 250);
});
// $('#%(id)s').on("dialogfocus", function (event, ui) {
//     ginga_app.widget_handler('dialog-focus', '%(id)s', true);
// });
$('#%(id)s').on("dialogopen", function (event, ui) {
ginga_app.resize_window();
ginga_app.widget_handler('dialog-open', '%(id)s', true);
});
// see python method show() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'show_dialog',
function (elt, msg) {
$(elt).dialog('open');
});
// see python method hide() in this widget
ginga_app.add_widget_custom_method('%(id)s', 'hide_dialog',
function (elt, msg) {
$(elt).dialog('close');
});
});
</script>
"""
def __init__(self, title='', flags=None, buttons=[],
parent=None, callback=None, modal=False):
super(Dialog, self).__init__()
## if parent is None:
## raise ValueError("Top level 'parent' parameter required")
self.title = title
self.parent = parent
self.buttons = buttons
self.value = None
self.modal = modal
self.body = VBox()
for name in ('activated', 'open', 'close', 'resize'):
self.enable_callback(name)
if callback:
self.add_callback('activated', callback)
if len(buttons) == 0:
self.content = self.body
else:
self.content = VBox()
self.body.add_widget(self.content, stretch=1)
hbox = HBox()
hbox.set_spacing(4)
for name, val in buttons:
btn = Button(name)
btn.add_callback('activated', self._btn_choice, name, val)
hbox.add_widget(btn)
self.body.add_widget(hbox, stretch=0)
self.set_margins(0, 0, 0, 0)
#self.add_css_classes([])
# NOTE: either use this or explicitly call add_dialog() on
# Page widget!
## if parent is not None:
## parent.add_dialog(self)
def _cb_redirect(self, event):
if event.type == 'dialog-resize':
wd, ht = int(event.value['width']), int(event.value['height'])
self.make_callback('resize', (wd, ht))
elif event.type == 'dialog-open':
# TODO: don't allow dialog to be closed
self.make_callback('open')
elif event.type == 'dialog-close':
# TODO: don't allow dialog to be closed
self.make_callback('close')
def _btn_choice(self, btn_w, name, val):
# user clicked one of the supplied buttons
self.make_callback('activated', val)
def get_content_area(self):
return self.content
def show(self):
if self._rendered:
app = self.get_app()
app.do_operation('show_dialog', id=self.id)
def hide(self):
if self._rendered:
app = self.get_app()
app.do_operation('hide_dialog', id=self.id)
def raise_(self):
self.show()
def close(self):
self.make_callback('close')
def render(self):
wd, ht = self.get_size()
d = dict(id=self.id, body_id=self.body.id, title=self.title,
width=wd, height=ht,
modal=str(self.modal).lower(),
content=self.body.render(),
classes=self.get_css_classes(fmt='str'),
styles=self.get_css_styles(fmt='str'))
self._rendered = True
return self.html_template % d
# class SaveDialog(QtGui.QFileDialog):
# def __init__(self, title=None, selectedfilter=None):
# super(SaveDialog, self).__init__()
#
# self.selectedfilter = selectedfilter
# self.widget = self.getSaveFileName(self, title, '', selectedfilter)
#
# def get_path(self):
# if self.widget and not self.widget.endswith(self.selectedfilter[1:]):
# self.widget += self.selectedfilter[1:]
# return self.widget
# MODULE FUNCTIONS
def name_mangle(name, pfx=''):
    """Convert `name` to a lower-case, identifier-safe string.

    Characters that are not letters, digits or underscore are replaced
    with '_'; the optional prefix `pfx` is prepended to the result.
    """
    def _keep(c):
        return c.isalpha() or c.isdigit() or c == '_'

    return pfx + ''.join(c if _keep(c) else '_' for c in name.lower())
def make_widget(title, wtype):
    """Create and return a ginga widget of kind `wtype`.

    `title` is used as the label/text for widgets that take one.
    Raises ValueError for an unrecognized `wtype`.
    """
    # dispatch table: widget-type name -> zero-arg factory
    factories = {
        'label': lambda: Label(title),
        # w.widget.setAlignment(QtCore.Qt.AlignRight)
        'llabel': lambda: Label(title),
        # w.widget.setAlignment(QtCore.Qt.AlignLeft)
        'entry': lambda: TextEntry(),
        # w.widget.setMaxLength(12)
        'entryset': lambda: TextEntrySet(),
        # w.widget.setMaxLength(12)
        'combobox': lambda: ComboBox(),
        'spinbutton': lambda: SpinBox(dtype=int),
        'spinfloat': lambda: SpinBox(dtype=float),
        'vbox': lambda: VBox(),
        'hbox': lambda: HBox(),
        'hslider': lambda: Slider(orientation='horizontal'),
        'hscale': lambda: Slider(orientation='horizontal'),
        'vslider': lambda: Slider(orientation='vertical'),
        'vscale': lambda: Slider(orientation='vertical'),
        'checkbox': lambda: CheckBox(title),
        'checkbutton': lambda: CheckBox(title),
        'radiobutton': lambda: RadioButton(title),
        'togglebutton': lambda: ToggleButton(title),
        'button': lambda: Button(title),
        'spacer': lambda: Label(''),
        'textarea': lambda: TextArea(editable=True),
        'toolbar': lambda: Toolbar(),
        'progress': lambda: ProgressBar(),
        'menubar': lambda: Menubar(),
        'dial': lambda: Dial(),
    }
    if wtype not in factories:
        raise ValueError("Bad wtype=%s" % wtype)
    return factories[wtype]()
def hadjust(w, orientation):
    """Ostensibly, a function to reduce the vertical footprint of a widget
    that is normally used in a vertical stack (usually a Splitter), when it
    is instead used in a horizontal orientation.

    Currently a no-op: the widget is returned unchanged for either
    orientation.
    """
    # Wrapping `w` in a vertical Splitter for the horizontal case was
    # found unnecessary for most plugins and reduced the visual
    # aesthetic, so the widget is simply returned as-is:
    ## spl = Splitter(orientation='vertical')
    ## spl.add_widget(w)
    ## spl.add_widget(Label(''))
    ## return spl
    return w
def build_info(captions, orientation='vertical'):
    """Build a grid of widgets from a caption specification.

    `captions` is a sequence of tuples; each tuple holds alternating
    (title, widget_type) pairs and describes one row of the grid.

    Returns
    -------
    (widget, bunch)
        The (possibly orientation-adjusted) grid widget and a Bunch
        mapping mangled widget names to the created widgets.

    Raises
    ------
    ValueError
        If any row tuple has an odd number of elements.
    """
    numrows = len(captions)
    # widest row determines the column count (two entries per column)
    widest = max((len(tup) for tup in captions), default=0)
    if (widest % 2) != 0:
        raise ValueError("Column spec is not an even number")
    numcols = int(widest // 2)

    table = GridBox(rows=numrows, columns=numcols)
    wb = Bunch.Bunch()

    for row, tup in enumerate(captions):
        for col in range(numcols):
            idx = col * 2
            if idx < len(tup):
                title, wtype = tup[idx:idx + 2]
                # labels (titles ending in ':') get an 'lbl_' prefix
                if title.endswith(':'):
                    name = name_mangle('lbl_' + title[:-1])
                else:
                    name = name_mangle(title)
                w = make_widget(title, wtype)
                table.add_widget(w, row, col)
                wb[name] = w

    return hadjust(table, orientation=orientation), wb
def wrap(native_widget):
    """Wrap a native widget object in a generic WidgetBase wrapper."""
    w = WidgetBase()
    w.widget = native_widget
    return w
# END
| bsd-3-clause | 3c91c1225e82d86a00d0cf744cf87fbd | 32.007309 | 158 | 0.522197 | 3.852295 | false | false | false | false |
ejeschke/ginga | ginga/examples/qt/example2_qt.py | 2 | 10582 | #! /usr/bin/env python
#
# example2_qt.py -- Simple, configurable FITS viewer.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
from ginga.qtw.QtHelp import QtGui, QtCore, set_default_opengl_context
from ginga import colors
from ginga.qtw.ImageViewQt import CanvasView
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.misc import log
from ginga.util.loader import load_data
# Default log-record format used when configuring this example's logger
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class FitsViewer(QtGui.QMainWindow):
    """A simple, configurable FITS viewer window.

    Wraps a ginga CanvasView in a Qt main window with a readout label,
    draw-parameter controls and draw/edit/pick mode radio buttons.
    """

    def __init__(self, logger, render='widget'):
        """Build the viewer, drawing canvas and all Qt controls.

        Parameters
        ----------
        logger : logging.Logger
            Logger used by the viewer and callbacks.
        render : str
            Render type passed to CanvasView ('widget', 'scene', 'opengl').
        """
        super(FitsViewer, self).__init__()
        self.logger = logger
        self.drawcolors = colors.get_colors()
        self.dc = get_canvas_types()

        # configure the ginga viewer: automatic cuts/zoom, pan mark, etc.
        fi = CanvasView(logger, render=render)
        fi.enable_autocuts('on')
        fi.set_autocut_params('zscale')
        fi.enable_autozoom('on')
        fi.set_zoom_algorithm('rate')
        fi.set_zoomrate(1.4)
        fi.show_pan_mark(True)
        fi.add_callback('drag-drop', self.drop_file_cb)
        fi.add_callback('cursor-changed', self.cursor_cb)
        fi.set_bg(0.2, 0.2, 0.2)
        fi.ui_set_active(True)
        self.fitsimage = fi

        # enable all default key/mouse bindings
        bd = fi.get_bindings()
        bd.enable_all(True)

        # canvas that we will draw on
        canvas = self.dc.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_drawtype('rectangle', color='lightblue')
        canvas.register_for_cursor_drawing(fi)
        canvas.add_callback('draw-event', self.draw_cb)
        canvas.set_draw_mode('draw')
        canvas.set_surface(fi)
        canvas.ui_set_active(True)
        self.canvas = canvas
        # add our new canvas to viewers default canvas
        fi.get_canvas().add(canvas)

        self.drawtypes = canvas.get_drawtypes()
        self.drawtypes.sort()

        # add a color bar
        #fi.show_color_bar(True)
        #fi.show_focus_indicator(True)

        # add little mode indicator that shows keyboard modal states
        fi.show_mode_indicator(True, corner='ur')

        w = fi.get_widget()
        w.resize(512, 512)

        # main vertical layout: viewer on top, readout label, control rows
        vbox = QtGui.QVBoxLayout()
        vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
        vbox.setSpacing(1)
        vbox.addWidget(w, stretch=1)

        # readout label showing RA/DEC/X/Y/value under the cursor
        self.readout = QtGui.QLabel("")
        vbox.addWidget(self.readout, stretch=0,
                       alignment=QtCore.Qt.AlignCenter)

        # first control row: open/draw-type/color/fill/alpha/clear/quit
        hbox = QtGui.QHBoxLayout()
        hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))

        wdrawtype = QtGui.QComboBox()
        for name in self.drawtypes:
            wdrawtype.addItem(name)
        index = self.drawtypes.index('rectangle')
        wdrawtype.setCurrentIndex(index)
        wdrawtype.activated.connect(self.set_drawparams)
        self.wdrawtype = wdrawtype

        wdrawcolor = QtGui.QComboBox()
        for name in self.drawcolors:
            wdrawcolor.addItem(name)
        index = self.drawcolors.index('lightblue')
        wdrawcolor.setCurrentIndex(index)
        wdrawcolor.activated.connect(self.set_drawparams)
        self.wdrawcolor = wdrawcolor

        wfill = QtGui.QCheckBox("Fill")
        wfill.stateChanged.connect(self.set_drawparams)
        self.wfill = wfill

        walpha = QtGui.QDoubleSpinBox()
        walpha.setRange(0.0, 1.0)
        walpha.setSingleStep(0.1)
        walpha.setValue(1.0)
        walpha.valueChanged.connect(self.set_drawparams)
        self.walpha = walpha

        wclear = QtGui.QPushButton("Clear Canvas")
        wclear.clicked.connect(self.clear_canvas)
        wopen = QtGui.QPushButton("Open File")
        wopen.clicked.connect(self.open_file)
        wquit = QtGui.QPushButton("Quit")
        wquit.clicked.connect(self.quit)

        hbox.addStretch(1)
        for w in (wopen, wdrawtype, wdrawcolor, wfill,
                  QtGui.QLabel('Alpha:'), walpha, wclear, wquit):
            hbox.addWidget(w, stretch=0)

        hw = QtGui.QWidget()
        hw.setLayout(hbox)
        vbox.addWidget(hw, stretch=0)

        # second control row: draw/edit/pick mode radio buttons
        mode = self.canvas.get_draw_mode()
        hbox = QtGui.QHBoxLayout()
        hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))

        btn1 = QtGui.QRadioButton("Draw")
        btn1.setChecked(mode == 'draw')
        btn1.toggled.connect(lambda val: self.set_mode_cb('draw', val))
        btn1.setToolTip("Choose this to draw on the canvas")
        hbox.addWidget(btn1)

        btn2 = QtGui.QRadioButton("Edit")
        btn2.setChecked(mode == 'edit')
        btn2.toggled.connect(lambda val: self.set_mode_cb('edit', val))
        btn2.setToolTip("Choose this to edit things on the canvas")
        hbox.addWidget(btn2)

        btn3 = QtGui.QRadioButton("Pick")
        btn3.setChecked(mode == 'pick')
        btn3.toggled.connect(lambda val: self.set_mode_cb('pick', val))
        btn3.setToolTip("Choose this to pick things on the canvas")
        hbox.addWidget(btn3)

        hbox.addWidget(QtGui.QLabel(''), stretch=1)

        hw = QtGui.QWidget()
        hw.setLayout(hbox)
        vbox.addWidget(hw, stretch=0)

        vw = QtGui.QWidget()
        self.setCentralWidget(vw)
        vw.setLayout(vbox)

    def set_drawparams(self, kind):
        """Qt slot: push the current UI draw settings to the canvas.

        NOTE(review): the `kind` parameter (the combobox index from the
        `activated` signal) is immediately overwritten below; the draw
        type is re-read from the combobox instead.
        """
        index = self.wdrawtype.currentIndex()
        kind = self.drawtypes[index]
        index = self.wdrawcolor.currentIndex()
        fill = (self.wfill.checkState() != 0)
        alpha = self.walpha.value()

        params = {'color': self.drawcolors[index],
                  'alpha': alpha,
                  }
        # only closed shapes take fill parameters
        if kind in ('circle', 'rectangle', 'polygon', 'triangle',
                    'righttriangle', 'ellipse', 'square', 'box'):
            params['fill'] = fill
            params['fillalpha'] = alpha

        self.canvas.set_drawtype(kind, **params)

    def clear_canvas(self):
        """Remove all drawn objects from the drawing canvas."""
        self.canvas.delete_all_objects()

    def load_file(self, filepath):
        """Load `filepath` into the viewer and show it in the title bar."""
        image = load_data(filepath, logger=self.logger)
        self.fitsimage.set_image(image)
        self.setWindowTitle(filepath)

    def open_file(self):
        """Pop up a file dialog and load the chosen FITS file."""
        res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
                                                ".", "FITS files (*.fits)")
        # Qt5 returns (filename, selected_filter); Qt4 returns a string
        if isinstance(res, tuple):
            fileName = res[0]
        else:
            fileName = str(res)
        if len(fileName) != 0:
            self.load_file(fileName)

    def drop_file_cb(self, fitsimage, paths):
        """Callback: load the first path of a drag-and-drop event."""
        fileName = paths[0]
        self.load_file(fileName)

    def cursor_cb(self, viewer, button, data_x, data_y):
        """This gets called when the data position relative to the cursor
        changes.

        Updates the readout label with RA/DEC, FITS coords and pixel value.
        """
        # Get the value under the data coordinates
        try:
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            value = viewer.get_data(int(data_x + viewer.data_off),
                                    int(data_y + viewer.data_off))

        except Exception:
            value = None

        # FITS coordinates are 1-based
        fits_x, fits_y = data_x + 1, data_y + 1

        # Calculate WCS RA
        try:
            # NOTE: image function operates on DATA space coords
            image = viewer.get_image()
            if image is None:
                # No image loaded
                return
            ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
                                               format='str', coords='fits')
        except Exception as e:
            self.logger.warning("Bad coordinate conversion: %s" % (
                str(e)))
            ra_txt = 'BAD WCS'
            dec_txt = 'BAD WCS'

        text = "RA: %s  DEC: %s  X: %.2f  Y: %.2f  Value: %s" % (
            ra_txt, dec_txt, fits_x, fits_y, value)
        self.readout.setText(text)

    def set_mode_cb(self, mode, tf):
        """Callback from the mode radio buttons: switch the canvas mode."""
        self.logger.info("canvas mode changed (%s) %s" % (mode, tf))
        if not (tf is False):
            self.canvas.set_draw_mode(mode)
        return True

    def draw_cb(self, canvas, tag):
        """Callback after an object is drawn: make it pickable/editable."""
        obj = canvas.get_object_by_tag(tag)
        obj.add_callback('pick-down', self.pick_cb, 'down')
        obj.add_callback('pick-up', self.pick_cb, 'up')
        obj.add_callback('pick-move', self.pick_cb, 'move')
        obj.add_callback('pick-hover', self.pick_cb, 'hover')
        obj.add_callback('pick-enter', self.pick_cb, 'enter')
        obj.add_callback('pick-leave', self.pick_cb, 'leave')
        obj.add_callback('pick-key', self.pick_cb, 'key')
        obj.pickable = True
        obj.add_callback('edited', self.edit_cb)

    def pick_cb(self, obj, canvas, event, pt, ptype):
        """Log a pick event (`ptype`) on `obj` at point `pt`."""
        self.logger.info("pick event '%s' with obj %s at (%.2f, %.2f)" % (
            ptype, obj.kind, pt[0], pt[1]))
        return True

    def edit_cb(self, obj):
        """Log that a canvas object was edited."""
        self.logger.info("object %s has been edited" % (obj.kind))
        return True

    def quit(self, *args):
        """Shut the application window down."""
        self.logger.info("Attempting to shut down the application...")
        self.deleteLater()
def main(options, args):
    """Create the Qt application and run the FITS viewer.

    Parameters
    ----------
    options : argparse.Namespace
        Parsed command-line options (uses `render`).
    args : list of str
        Remaining arguments; args[0], if given, is a file to load.
    """
    if options.render == 'opengl':
        set_default_opengl_context()
    #QtGui.QApplication.setGraphicsSystem('raster')
    app = QtGui.QApplication(args)

    logger = log.get_logger("example2", options=options)
    w = FitsViewer(logger, render=options.render)
    w.resize(524, 540)
    w.show()
    app.setActiveWindow(w)
    w.raise_()
    w.activateWindow()

    if len(args) > 0:
        w.load_file(args[0])

    # blocks until the Qt event loop exits
    app.exec_()
if __name__ == "__main__":
    # Parse command line options
    from argparse import ArgumentParser

    argprs = ArgumentParser()

    argprs.add_argument("--debug", dest="debug", default=False,
                        action="store_true",
                        help="Enter the pdb debugger on main()")
    argprs.add_argument("-r", "--render", dest="render", default='widget',
                        help="Set render type {widget|scene|opengl}")
    argprs.add_argument("--profile", dest="profile", action="store_true",
                        default=False,
                        help="Run the profiler on main()")
    # add the standard ginga logging options (--loglevel, --log, etc.)
    log.addlogopts(argprs)

    (options, args) = argprs.parse_known_args(sys.argv[1:])

    # Are we debugging this?
    if options.debug:
        import pdb

        pdb.run('main(options, args)')

    # Are we profiling this?
    elif options.profile:
        import profile

        print(("%s profile:" % sys.argv[0]))
        profile.run('main(options, args)')

    else:
        main(options, args)
# END
| bsd-3-clause | 3389f78323bc48aa18ddd2cb35370b61 | 31.660494 | 100 | 0.584389 | 3.642685 | false | false | false | false |
ejeschke/ginga | ginga/web/pgw/ImageViewPg.py | 2 | 21912 | #
# ImageViewPg.py -- a backend for Ginga using javascript and
# HTML5 canvas and websockets
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import ImageView, Mixins, Bindings
from ginga.canvas import render
default_html_fmt = 'jpeg'
class ImageViewPgError(ImageView.ImageViewError):
    """Exception raised for errors specific to this web (pg) viewer backend."""
    pass
class ImageViewPg(ImageView.ImageViewBase):
    """Ginga viewer backend that renders into an HTML5 canvas widget
    over websockets.

    The widget is attached after construction via set_widget().
    """

    def __init__(self, logger=None, rgbmap=None, settings=None, render=None):
        ImageView.ImageViewBase.__init__(self, logger=logger,
                                         rgbmap=rgbmap, settings=settings)

        # HTML5 canvas widget; assigned in set_widget()
        self.pgcanvas = None

        # format for rendering image on HTML5 canvas
        # NOTE: 'jpeg' has much better performance than 'png', but can show
        # some artifacts, especially noticeable with small text
        self.t_.set_defaults(html5_canvas_format=default_html_fmt,
                             renderer='cairo')

        self.rgb_order = 'RGBA'

        # this should already be so, but just in case...
        self.defer_redraw = True

        # these will be assigned in set_widget()
        self.timer_redraw = None
        self.timer_msg = None

        self.renderer = None
        # Pick a renderer that can work with us: preferred one first,
        # then the remaining candidates in order
        renderers = ['cairo', 'pil', 'opencv', 'agg']
        preferred = self.t_['renderer']
        if preferred in renderers:
            renderers.remove(preferred)
        self.possible_renderers = [preferred] + renderers
        self.choose_best_renderer()

    def set_widget(self, canvas_w):
        """Call this method with the widget that will be used
        for the display.
        """
        self.logger.debug("set widget canvas_w=%s" % canvas_w)
        self.pgcanvas = canvas_w

        app = canvas_w.get_app()
        # timers for deferred redraw and onscreen-message expiry
        self.timer_redraw = app.make_timer()
        self.timer_redraw.add_callback('expired',
                                       lambda t: self.delayed_redraw())
        self.timer_msg = app.make_timer()
        self.timer_msg.add_callback('expired',
                                    lambda t: self.clear_onscreen_message())

        wd, ht = canvas_w.get_size()
        self.configure_window(wd, ht)

    def get_widget(self):
        """Return the HTML5 canvas widget (or None if not yet set)."""
        return self.pgcanvas

    def choose_renderer(self, name):
        """Instantiate the renderer registered under `name` and make it
        current, reconfiguring to the widget size if a widget is attached.
        """
        klass = render.get_render_class(name)
        self.renderer = klass(self)

        if self.pgcanvas is not None:
            # FIX: was `self.pgcanvas_w.get_size()`; `pgcanvas_w` is never
            # assigned anywhere in this class (the attribute is `pgcanvas`,
            # set in set_widget()), so this raised AttributeError
            wd, ht = self.pgcanvas.get_size()
            self.configure_window(wd, ht)

    def choose_best_renderer(self):
        """Try each candidate renderer in preference order; keep the first
        one that instantiates successfully.

        Raises
        ------
        ImageViewPgError
            If none of the candidate renderers can be created.
        """
        for name in self.possible_renderers:
            try:
                self.choose_renderer(name)
                self.logger.info("best renderer available is '{}'".format(name))
                return
            except Exception as e:
                # uncomment to troubleshoot
                ## self.logger.error("Error choosing renderer '{}': {}".format(name, e),
                ##                   exc_info=True)
                continue

        raise ImageViewPgError("No valid renderers available: {}".format(str(self.possible_renderers)))

    def update_widget(self):
        """Push the current rendered surface to the browser-side canvas."""
        self.logger.debug("update_widget pgcanvas=%s" % self.pgcanvas)
        if self.pgcanvas is None:
            return

        try:
            self.logger.debug("getting image as buffer...")
            format = self.t_.get('html5_canvas_format', default_html_fmt)
            buf = self.renderer.get_surface_as_rgb_format_bytes(
                format=format, quality=90)
            self.logger.debug("got '%s' RGB image buffer, len=%d" % (
                format, len(buf)))

            self.pgcanvas.do_update(buf)

        except Exception as e:
            self.logger.error("Couldn't update canvas: %s" % (str(e)))

    def reschedule_redraw(self, time_sec):
        """(Re)start the deferred-redraw timer for `time_sec` seconds."""
        if self.pgcanvas is not None:
            self.timer_redraw.stop()
            self.timer_redraw.start(time_sec)
        else:
            # no widget yet--redraw immediately
            self.delayed_redraw()

    def get_plain_image_as_widget(self):
        """Does not include overlaid graphics."""
        image_buf = self.renderer.get_surface_as_rgb_format_buffer()
        return image_buf.getvalue()

    def save_plain_image_as_file(self, filepath, format='png', quality=90):
        """Does not include overlaid graphics."""
        # NOTE(review): not implemented for this backend
        pass

    def set_cursor(self, cursor):
        """Set the viewer cursor (currently a no-op for this backend)."""
        if self.pgcanvas is None:
            return
        #self.pgcanvas.config(cursor=cursor)

    def onscreen_message(self, text, delay=None, redraw=True):
        """Show `text` over the viewer; auto-clear after `delay` seconds
        if given. Passing text=None clears the message.
        """
        if self.pgcanvas is None:
            return
        self.timer_msg.stop()
        self.set_onscreen_message(text, redraw=redraw)
        if delay is not None:
            self.timer_msg.start(delay)

    def clear_onscreen_message(self):
        """Remove any onscreen message."""
        self.logger.debug("clearing message...")
        self.onscreen_message(None)

    def configure_window(self, width, height):
        """Configure the viewer to the given window dimensions."""
        self.configure(width, height)

    def map_event(self, event):
        """Handle the browser canvas becoming visible."""
        self.logger.info("window mapped to %dx%d" % (
            event.width, event.height))
        self.configure_window(event.width, event.height)
        self.redraw(whence=0)

    def resize_event(self, event):
        """Handle a resize notification from the browser canvas."""
        wd, ht = event.width, event.height
        # Not quite ready for prime-time--browser seems to mess with the
        # aspect ratio
        self.logger.info("canvas resized to %dx%d" % (wd, ht))
        self.configure_window(wd, ht)
        self.redraw(whence=0)

    def resize(self, width, height):
        """Resize our window to width x height.
        May not work---depending on how the HTML5 canvas is embedded.
        """
        # this shouldn't be needed
        self.configure_window(width, height)

        self.pgcanvas.resize(width, height)

        # hack to force a browser reload
        app = self.pgcanvas.get_app()
        app.do_operation('reload_page', id=self.pgcanvas.id)
class ImageViewEvent(ImageViewPg):
    """ImageViewPg subclass that translates browser-side UI events
    (keyboard, mouse, gestures) into ginga callbacks.
    """

    def __init__(self, logger=None, rgbmap=None, settings=None, render=None):
        ImageViewPg.__init__(self, logger=logger, rgbmap=rgbmap,
                             settings=settings, render=render)

        # bitmask of currently-pressed mouse buttons
        self._button = 0

        # @$%&^(_)*&^ javascript!!
        # table mapping javascript key codes to ginga key names
        # see key_down_event() and key_up_event()
        #
        # https://www.cambiaresearch.com/articles/15/javascript-char-codes-key-codes
        self._keytbl = {
            8: 'backspace',
            9: 'tab',
            13: 'return',
            16: 'shift_l',
            #'shift_r': 'shift_r',
            17: 'control_l',
            #'control_r': 'control_r',
            18: 'alt_l',
            #'alt_r': 'alt_r',
            19: 'break',
            20: 'caps_lock',
            27: 'escape',
            32: 'space',
            33: 'page_up',
            34: 'page_down',
            35: 'end',
            36: 'home',
            37: 'left',
            38: 'up',
            39: 'right',
            40: 'down',
            45: 'insert',
            46: 'delete',
            65: 'a',
            66: 'b',
            67: 'c',
            68: 'd',
            69: 'e',
            70: 'f',
            71: 'g',
            72: 'h',
            73: 'i',
            74: 'j',
            75: 'k',
            76: 'l',
            77: 'm',
            78: 'n',
            79: 'o',
            80: 'p',
            81: 'q',
            82: 'r',
            83: 's',
            84: 't',
            85: 'u',
            86: 'v',
            87: 'w',
            88: 'x',
            89: 'y',
            90: 'z',
            91: 'super_l',
            92: 'super_r',
            93: 'menu_r',
            96: 'numpad_0',
            97: 'numpad_1',
            98: 'numpad_2',
            99: 'numpad_3',
            100: 'numpad_4',
            101: 'numpad_5',
            102: 'numpad_6',
            103: 'numpad_7',
            104: 'numpad_8',
            105: 'numpad_9',
            106: 'numpad_*',
            107: 'numpad_+',
            109: 'numpad_-',
            110: 'numpad_.',
            111: 'numpad_/',
            112: 'f1',
            113: 'f2',
            114: 'f3',
            115: 'f4',
            116: 'f5',
            117: 'f6',
            118: 'f7',
            119: 'f8',
            120: 'f9',
            121: 'f10',
            122: 'f11',
            123: 'f12',
            144: 'num_lock',
            145: 'scroll_lock',
            189: '-',
            186: ';',
            187: '=',
            188: ',',
            190: '.',
            191: '/',
            192: 'backquote',
            219: '[',
            220: 'backslash',
            221: ']',
            222: 'singlequote',
        }

        # this is an auxiliary table used to map shifted keys to names
        # see key_down_event() and key_up_event()
        self._keytbl2 = {
            ('shift_l', 'backquote'): '~',
            ('shift_l', '1'): '!',
            ('shift_l', '2'): '@',
            ('shift_l', '3'): '#',
            ('shift_l', '4'): '$',
            ('shift_l', '5'): '%',
            ('shift_l', '6'): '^',
            ('shift_l', '7'): '&',
            ('shift_l', '8'): '*',
            ('shift_l', '9'): '(',
            ('shift_l', '0'): ')',
            ('shift_l', 'a'): 'A',
            ('shift_l', 'b'): 'B',
            ('shift_l', 'c'): 'C',
            ('shift_l', 'd'): 'D',
            ('shift_l', 'e'): 'E',
            ('shift_l', 'f'): 'F',
            ('shift_l', 'g'): 'G',
            ('shift_l', 'h'): 'H',
            ('shift_l', 'i'): 'I',
            ('shift_l', 'j'): 'J',
            ('shift_l', 'k'): 'K',
            ('shift_l', 'l'): 'L',
            ('shift_l', 'm'): 'M',
            ('shift_l', 'n'): 'N',
            ('shift_l', 'o'): 'O',
            ('shift_l', 'p'): 'P',
            ('shift_l', 'q'): 'Q',
            ('shift_l', 'r'): 'R',
            ('shift_l', 's'): 'S',
            ('shift_l', 't'): 'T',
            ('shift_l', 'u'): 'U',
            ('shift_l', 'v'): 'V',
            ('shift_l', 'w'): 'W',
            ('shift_l', 'x'): 'X',
            ('shift_l', 'y'): 'Y',
            ('shift_l', 'z'): 'Z',
            ('shift_l', '-'): '_',
            ('shift_l', '='): '+',
            ('shift_l', '['): '{',
            ('shift_l', ']'): '}',
            ('shift_l', 'backslash'): '|',
            ('shift_l', ';'): ':',
            ('shift_l', 'singlequote'): 'doublequote',
            ('shift_l', ','): '<',
            ('shift_l', '.'): '>',
            ('shift_l', '/'): '?',
        }

        # this table is used to map special characters to character names
        # see key_press_event()
        self._keytbl3 = {
            '\\': 'backslash',
            '"': 'doublequote',
            "'": 'singlequote',
            "`": 'backquote',
            " ": 'space',
        }

        # list of keys for which javascript will give us a keydown event,
        # but not a keypress event.  We use this list to synthesize one.
        self._browser_problem_keys = set(['shift_l', 'control_l', 'alt_l',
                                          'super_l', 'super_r', 'menu_r',
                                          'escape', 'tab',
                                          'left', 'up', 'right', 'down',
                                          'insert', 'delete', 'home', 'end',
                                          'page_up', 'page_down',
                                          ])
        # Define cursors for pick and pan
        #hand = openHandCursor()
        hand = 'fleur'
        self.define_cursor('pan', hand)
        cross = 'cross'
        self.define_cursor('pick', cross)

        # tracks whether the (left) shift key is currently held down
        self._shifted = False

        for name in ('motion', 'button-press', 'button-release',
                     'key-press', 'key-release', 'drag-drop',
                     'scroll', 'map', 'focus', 'enter', 'leave',
                     'pinch', 'rotate', 'pan', 'swipe', 'tap'):
            self.enable_callback(name)

    def set_widget(self, canvas):
        """Attach the HTML5 canvas widget (event binding happens elsewhere)."""
        super(ImageViewEvent, self).set_widget(canvas)
        # see event binding setup in Viewers.py
        #return self.make_callback('map')

    def transkey(self, keycode):
        """Translate a javascript key code to a ginga key name,
        accounting for a held shift key.
        """
        self.logger.debug("key code in js '%d'" % (keycode))
        if keycode in self._keytbl:
            key = self._keytbl[keycode]
        else:
            # fallback: treat the code as a character ordinal
            key = chr(keycode)

        if self._shifted:
            try:
                key = self._keytbl2[('shift_l', key)]
            except KeyError:
                pass

        self.logger.debug("key name in ginga '%s'" % (key))
        return key

    def get_key_table(self):
        """Return the javascript keycode -> ginga key name table."""
        return self._keytbl

    def focus_event(self, event, has_focus):
        """Handle focus gained/lost; fires the 'focus' callback."""
        self.logger.debug("focus event: focus=%s" % (has_focus))
        return self.make_callback('focus', has_focus)

    def enter_notify_event(self, event):
        """Handle the pointer entering the widget; fires 'enter'."""
        self.logger.debug("entering widget...")
        ## enter_focus = self.t_.get('enter_focus', False)
        ## if enter_focus:
        ##     self.pgcanvas.focus_set()
        return self.make_callback('enter')

    def leave_notify_event(self, event):
        """Handle the pointer leaving the widget; fires 'leave'."""
        self.logger.debug("leaving widget...")
        return self.make_callback('leave')

    def key_press_event(self, event):
        """Handle a javascript keypress event (printable keys)."""
        # For key_press_events, javascript reports the actual printable
        # key name.  We use a special keymap to just handle the few
        # characters for which we have special names
        keyname = event.key_name
        self.logger.debug("key press event, keyname=%s" % (keyname))
        if keyname in self._keytbl3:
            keyname = self._keytbl3[keyname]
        self.logger.debug("making key-press cb, key=%s" % (keyname))
        return self.make_ui_callback_viewer(self, 'key-press', keyname)

    def key_down_event(self, event):
        """Handle a javascript keydown event (key codes only);
        synthesizes a key-press for keys that never get a keypress event.
        """
        # For key down events, javascript only validly reports a key code.
        # We look up the code to determine the
        keycode = event.key_code
        self.logger.debug("key down event, keycode=%s" % (keycode))
        keyname = self.transkey(keycode)
        # special hack for modifiers
        if keyname == 'shift_l':
            self._shifted = True

        if keyname in self._browser_problem_keys:
            # JS doesn't report key press callbacks for certain keys
            # so we synthesize one here for those
            self.logger.debug("making key-press cb, key=%s" % (keyname))
            return self.make_ui_callback_viewer(self, 'key-press', keyname)

        return False

    def key_up_event(self, event):
        """Handle a javascript keyup event; fires 'key-release'."""
        keycode = event.key_code
        self.logger.debug("key release event, keycode=%s" % (keycode))
        keyname = self.transkey(keycode)
        # special hack for modifiers
        if keyname == 'shift_l':
            self._shifted = False

        self.logger.debug("making key-release cb, key=%s" % (keyname))
        return self.make_ui_callback_viewer(self, 'key-release', keyname)

    def button_press_event(self, event):
        """Handle a mouse button press; fires 'button-press'."""
        x = event.x
        y = event.y
        self.last_win_x, self.last_win_y = x, y
        button = 0
        # encode the pressed button as a bit in the button mask
        button |= 0x1 << event.button
        self._button = button
        self.logger.debug("button event at %dx%d, button=%x" % (x, y, button))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'button-press', button,
                                            data_x, data_y)

    def button_release_event(self, event):
        """Handle a mouse button release; fires 'button-release'."""
        # event.button, event.x, event.y
        x = event.x
        y = event.y
        self.last_win_x, self.last_win_y = x, y
        button = 0
        button |= 0x1 << event.button
        self._button = 0
        self.logger.debug("button release at %dx%d button=%x" % (x, y, button))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'button-release', button,
                                            data_x, data_y)

    def motion_notify_event(self, event):
        """Handle pointer motion; fires 'motion' with the button mask."""
        #button = 0
        button = self._button
        x, y = event.x, event.y
        self.last_win_x, self.last_win_y = x, y
        self.logger.debug("motion event at %dx%d, button=%x" % (x, y, button))

        data_x, data_y = self.check_cursor_location()
        return self.make_ui_callback_viewer(self, 'motion', button,
                                            data_x, data_y)

    def scroll_event(self, event):
        """Handle a wheel/trackpad scroll; fires 'scroll', or synthesizes
        a 'pan' gesture when the browser supplies x/y deltas.
        """
        x, y = event.x, event.y
        delta = event.delta
        dx, dy = event.dx, event.dy
        self.last_win_x, self.last_win_y = x, y

        if (dx != 0 or dy != 0):
            # <= This browser gives us deltas for x and y
            # Synthesize this as a pan gesture event
            self.make_ui_callback_viewer(self, 'pan', 'start', 0, 0)
            self.make_ui_callback_viewer(self, 'pan', 'move', dx, dy)
            return self.make_ui_callback_viewer(self, 'pan', 'stop', 0, 0)

        # 15 deg is standard 1-click turn for a wheel mouse
        # delta usually returns +/- 1.0
        num_degrees = abs(delta) * 15.0

        direction = 0.0
        if delta > 0:
            direction = 0.0
        elif delta < 0:
            direction = 180.0
        self.logger.debug("scroll deg=%f direction=%f" % (
            num_degrees, direction))

        data_x, data_y = self.check_cursor_location()

        return self.make_ui_callback_viewer(self, 'scroll', direction,
                                            num_degrees, data_x, data_y)

    def drop_event(self, event):
        """Handle a drag-and-drop of text (newline-separated paths);
        fires 'drag-drop'.
        """
        data = event.delta
        self.logger.debug("data=%s" % (str(data)))
        paths = data.split('\n')
        self.logger.debug("dropped text(s): %s" % (str(paths)))
        return self.make_ui_callback_viewer(self, 'drag-drop', paths)

    def pinch_event(self, event):
        """Handle a pinch gesture; fires 'pinch' with state/rotation/scale."""
        self.logger.debug("pinch: event=%s" % (str(event)))
        state = 'move'
        if event.type == 'pinchstart' or event.isfirst:
            state = 'start'
        elif event.type == 'pinchend' or event.isfinal:
            state = 'end'
        rot = event.theta
        scale = event.scale
        self.logger.debug("pinch gesture rot=%f scale=%f state=%s" % (
            rot, scale, state))

        return self.make_ui_callback_viewer(self, 'pinch', state, rot, scale)

    def rotate_event(self, event):
        """Handle a rotate gesture; fires 'rotate' with state/rotation."""
        state = 'move'
        if event.type == 'rotatestart' or event.isfirst:
            state = 'start'
        elif event.type == 'rotateend' or event.isfinal:
            state = 'end'
        rot = event.theta
        self.logger.debug("rotate gesture rot=%f state=%s" % (
            rot, state))

        return self.make_ui_callback_viewer(self, 'rotate', state, rot)

    def pan_event(self, event):
        """Handle a pan gesture; fires 'pan' with state and deltas."""
        state = 'move'
        if event.type == 'panstart' or event.isfirst:
            state = 'start'
        elif event.type == 'panend' or event.isfinal:
            state = 'end'
        # TODO: need to know which ones to flip
        dx, dy = -event.dx, event.dy
        self.logger.debug("pan gesture dx=%f dy=%f state=%s" % (
            dx, dy, state))

        return self.make_ui_callback_viewer(self, 'pan', state, dx, dy)

    def swipe_event(self, event):
        """Handle a swipe gesture (currently only logged, no callback)."""
        if event.isfinal:
            state = 'end'  # noqa
            self.logger.debug("swipe gesture event=%s" % (str(event)))
            ## self.logger.debug("swipe gesture hdir=%s vdir=%s" % (
            ##     hdir, vdir))
            ## return self.make_ui_callback_viewer(self, 'swipe', state,
            ##                                     hdir, vdir)

    def tap_event(self, event):
        """Handle a tap gesture (currently only logged, no callback)."""
        if event.isfinal:
            state = 'end'  # noqa
            self.logger.debug("tap gesture event=%s" % (str(event)))
class ImageViewZoom(Mixins.UIMixin, ImageViewEvent):
    """ImageViewEvent with standard ginga key/mouse bindings attached."""

    # class variables for binding map and bindings can be set
    bindmapClass = Bindings.BindingMapper
    bindingsClass = Bindings.ImageViewBindings

    @classmethod
    def set_bindingsClass(cls, klass):
        """Override the class used to create default bindings."""
        cls.bindingsClass = klass

    @classmethod
    def set_bindmapClass(cls, klass):
        """Override the class used to create the default binding map."""
        cls.bindmapClass = klass

    def __init__(self, logger=None, rgbmap=None, settings=None,
                 render='widget',
                 bindmap=None, bindings=None):
        ImageViewEvent.__init__(self, logger=logger, rgbmap=rgbmap,
                                settings=settings, render=render)
        Mixins.UIMixin.__init__(self)

        self.ui_set_active(True, viewer=self)

        # use provided bindmap/bindings or construct the class defaults
        if bindmap is None:
            bindmap = ImageViewZoom.bindmapClass(self.logger)
        self.bindmap = bindmap
        bindmap.register_for_events(self)

        if bindings is None:
            bindings = ImageViewZoom.bindingsClass(self.logger)
        self.set_bindings(bindings)

    def get_bindmap(self):
        """Return the current binding map."""
        return self.bindmap

    def get_bindings(self):
        """Return the current bindings object."""
        return self.bindings

    def set_bindings(self, bindings):
        """Install `bindings` on this viewer."""
        self.bindings = bindings
        bindings.set_bindings(self)

    def center_cursor(self):
        """Warp the cursor to the center (not supported in a browser)."""
        # NOP
        pass

    def position_cursor(self, data_x, data_y):
        """Warp the cursor to a data position (not supported in a browser)."""
        # NOP
        pass
class CanvasView(ImageViewZoom):
    """Full-featured web viewer: ImageViewZoom plus canvas-object
    event propagation.
    """

    def __init__(self, logger=None, settings=None, rgbmap=None,
                 render='widget',
                 bindmap=None, bindings=None):
        ImageViewZoom.__init__(self, logger=logger, settings=settings,
                               rgbmap=rgbmap, render=render,
                               bindmap=bindmap, bindings=bindings)

        # Needed for UIMixin to propagate events correctly
        self.objects = [self.private_canvas]

    def set_canvas(self, canvas, private_canvas=None):
        """Install a new canvas and keep the UIMixin event target in sync."""
        super(CanvasView, self).set_canvas(canvas,
                                           private_canvas=private_canvas)

        self.objects[0] = self.private_canvas
| bsd-3-clause | 9ac450b303ef0574113557adc8edede4 | 32.762712 | 103 | 0.504336 | 3.741803 | false | false | false | false |
ejeschke/ginga | ginga/rv/main.py | 2 | 30899 | #
# main.py -- reference viewer for the Ginga toolkit.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""This module handles the main reference viewer."""
# stdlib imports
import glob
import sys
import os
import logging
import logging.handlers
import threading
import traceback
if sys.version_info < (3, 8):
# Python 3.7
from importlib_metadata import entry_points
else:
from importlib.metadata import entry_points
# Local application imports
from ginga.misc.Bunch import Bunch
from ginga.misc import Task, ModuleManager, Settings, log
import ginga.version as version
import ginga.toolkit as ginga_toolkit
from ginga.util import paths, rgb_cms, json
# Catch warnings
logging.captureWarnings(True)
__all__ = ['ReferenceViewer']
# Default widget hierarchy for the reference viewer, in the nested-list
# format understood by the Desktop layout engine: each node is
# [container_type, params, children...].  Named 'ws' (workspace) nodes
# are the targets that plugin specs refer to by name; 'group' controls
# which workspaces tabs may be dragged between.
default_layout = ['seq', {},
                  ['vbox', dict(name='top', width=1400, height=700),  # noqa
                   dict(row=['hbox', dict(name='menu')],
                        stretch=0),
                   dict(row=['hpanel', dict(name='hpnl'),
                             ['ws', dict(name='left', wstype='tabs',  # noqa
                                         width=300, height=-1, group=2),
                              # (tabname, layout), ...
                              [("Info", ['vpanel', {},
                                         ['ws', dict(name='uleft', wstype='stack',
                                                     height=250, group=3)],
                                         ['ws', dict(name='lleft', wstype='tabs',
                                                     height=330, group=3)],
                                         ]
                                )]],
                             ['vbox', dict(name='main', width=600),
                              dict(row=['ws', dict(name='channels', wstype='tabs',
                                                   group=1, use_toolbar=True,
                                                   default=True)],
                                   stretch=1),
                              dict(row=['ws', dict(name='cbar', wstype='stack',
                                                   group=99)], stretch=0),
                              dict(row=['ws', dict(name='readout', wstype='stack',
                                                   group=99)], stretch=0),
                              dict(row=['ws', dict(name='operations', wstype='stack',
                                                   group=99)], stretch=0),
                              ],
                             ['ws', dict(name='right', wstype='tabs',
                                         width=400, height=-1, group=2),
                              # (tabname, layout), ...
                              [("Dialogs", ['ws', dict(name='dialogs', wstype='tabs',
                                                       group=2)
                                            ]
                                )]
                              ],
                             ], stretch=1),  # noqa
                   dict(row=['ws', dict(name='toolbar', wstype='stack',
                                        height=40, group=2)],
                        stretch=0),
                   dict(row=['hbox', dict(name='status')], stretch=0),
                   ]]
# Default plugin lineup.  Each Bunch is a plugin spec:
#   module    -- name of the plugin module to load
#   workspace -- name of the layout workspace it opens in
#   ptype     -- 'global' (one shell-wide instance) or 'local' (per channel)
#   start     -- start automatically at program startup
#   hidden    -- do not show in the Operations menu
#   menu/tab/category -- menu label, tab title and menu grouping
plugins = [
    # hidden plugins, started at program initialization
    Bunch(module='Operations', workspace='operations', start=True,
          hidden=True, category='System', menu="Operations [G]",
          ptype='global'),
    Bunch(module='Toolbar', workspace='toolbar', start=True,
          hidden=True, category='System', menu="Toolbar [G]", ptype='global'),
    Bunch(module='Pan', workspace='uleft', start=True,
          hidden=True, category='System', menu="Pan [G]", ptype='global'),
    Bunch(module='Info', tab='Synopsis', workspace='lleft', start=True,
          hidden=True, category='System', menu="Info [G]", ptype='global'),
    Bunch(module='Thumbs', tab='Thumbs', workspace='right', start=True,
          hidden=True, category='System', menu="Thumbs [G]", ptype='global'),
    Bunch(module='Contents', tab='Contents', workspace='right', start=True,
          hidden=True, category='System', menu="Contents [G]", ptype='global'),
    Bunch(module='Colorbar', workspace='cbar', start=True,
          hidden=True, category='System', menu="Colorbar [G]", ptype='global'),
    Bunch(module='Cursor', workspace='readout', start=True,
          hidden=True, category='System', menu="Cursor [G]", ptype='global'),
    Bunch(module='Errors', tab='Errors', workspace='right', start=True,
          hidden=True, category='System', menu="Errors [G]", ptype='global'),
    Bunch(module='Downloads', tab='Downloads', workspace='right', start=False,
          menu="Downloads [G]", category='Utils', ptype='global'),

    # optional, user-started plugins
    Bunch(module='Blink', tab='Blink Channels', workspace='right', start=False,
          menu="Blink Channels [G]", category='Analysis', ptype='global'),
    Bunch(module='Blink', workspace='dialogs', menu='Blink Images',
          category='Analysis', ptype='local'),
    Bunch(module='Crosshair', workspace='left', category='Analysis',
          ptype='local'),
    Bunch(module='Cuts', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='LineProfile', workspace='dialogs',
          category='Analysis.Datacube', ptype='local'),
    Bunch(module='Histogram', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='Overlays', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='Pick', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='PixTable', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='TVMark', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='TVMask', workspace='dialogs', category='Analysis',
          ptype='local'),
    Bunch(module='WCSMatch', tab='WCSMatch', workspace='right', start=False,
          menu="WCS Match [G]", category='Analysis', ptype='global'),
    Bunch(module='Command', tab='Command', workspace='lleft', start=False,
          menu="Command Line [G]", category='Debug', ptype='global'),
    Bunch(module='Log', tab='Log', workspace='right', start=False,
          menu="Logger Info [G]", category='Debug', ptype='global'),
    Bunch(module='MultiDim', workspace='lleft', category='Navigation',
          ptype='local'),
    Bunch(module='RC', tab='RC', workspace='right', start=False,
          menu="Remote Control [G]", category='Remote', ptype='global'),
    Bunch(module='SAMP', tab='SAMP', workspace='right', start=False,
          menu="SAMP Client [G]", category='Remote', ptype='global'),
    Bunch(module='Compose', workspace='dialogs', category='RGB', ptype='local'),
    Bunch(module='ScreenShot', workspace='dialogs', category='RGB',
          ptype='local'),
    Bunch(module='ColorMapPicker', tab='ColorMapPicker',
          menu="Set Color Map [G]", workspace='right', start=False,
          category='RGB', ptype='global'),
    Bunch(module='PlotTable', workspace='dialogs', category='Table',
          ptype='local'),
    Bunch(module='Catalogs', workspace='dialogs', category='Utils',
          ptype='local'),
    Bunch(module='Drawing', workspace='dialogs', category='Utils',
          ptype='local'),
    Bunch(module='AutoLoad', workspace='dialogs', category='Utils',
          ptype='local'),
    #Bunch(module='Pipeline', workspace='dialogs', category='Utils',
    #      ptype='local'),
    Bunch(module='FBrowser', workspace='dialogs', category='Utils',
          ptype='local'),
    Bunch(module='ChangeHistory', tab='History', workspace='right',
          menu="History [G]", start=False, category='Utils', ptype='global'),
    Bunch(module='Mosaic', workspace='dialogs', category='Utils', ptype='local'),
    Bunch(module='Collage', workspace='dialogs', category='Utils', ptype='local'),
    Bunch(module='FBrowser', tab='Open File', workspace='right',
          menu="Open File [G]", start=False, category='Utils', ptype='global'),
    Bunch(module='Preferences', workspace='dialogs', category='Utils',
          ptype='local'),
    Bunch(module='Ruler', workspace='dialogs', category='Utils', ptype='local'),
    # TODO: Add SaveImage to File menu.
    Bunch(module='SaveImage', tab='SaveImage', workspace='right',
          menu="Save File [G]", start=False, category='Utils', ptype='global'),
    Bunch(module='WCSAxes', workspace='dialogs', category='Utils',
          ptype='local'),
    Bunch(module='WBrowser', tab='Help', workspace='channels', start=False,
          menu="Help [G]", category='Help', ptype='global'),
    Bunch(module='Header', tab='Header', workspace='left', start=False,
          menu="Header [G]", hidden=False, category='Utils', ptype='global'),
    Bunch(module='Zoom', tab='Zoom', workspace='left', start=False,
          menu="Zoom [G]", category='Utils', ptype='global'),
]
class ReferenceViewer(object):
"""
This class exists solely to be able to customize the reference
viewer startup.
"""
    def __init__(self, layout=default_layout, plugins=plugins):
        """Set up a viewer with widget `layout` and a list of default
        plugin specs `plugins` (both default to the module-level
        definitions above).
        """
        self.layout = layout
        # names of channels to create at startup
        self.channels = ['Image']
        # the stock plugin lineup; copied into self.plugins on demand
        self.default_plugins = plugins
        # plugin specs actually submitted to the shell in main()
        self.plugins = []
    def add_plugin_spec(self, spec):
        """Append a single plugin spec to the viewer's plugin lineup."""
        self.plugins.append(spec)
    def clear_default_plugins(self):
        """Empty the plugin lineup (e.g. to build a custom one)."""
        self.plugins = []
def add_default_plugins(self, except_global=[], except_local=[]):
"""
Add the ginga-distributed default set of plugins to the
reference viewer.
"""
# add default plugins
for spec in self.default_plugins:
ptype = spec.get('ptype', 'local')
if ptype == 'global' and spec.module not in except_global:
self.add_plugin_spec(spec)
if ptype == 'local' and spec.module not in except_local:
self.add_plugin_spec(spec)
def add_separately_distributed_plugins(self):
groups = ['ginga.rv.plugins']
available_methods = []
for group in groups:
discovered_plugins = entry_points().get(group, [])
for entry_point in discovered_plugins:
try:
method = entry_point.load()
available_methods.append(method)
except Exception as e:
print("Error trying to load entry point %s: %s" % (
str(entry_point), str(e)))
for method in available_methods:
try:
spec = method()
self.add_plugin_spec(spec)
except Exception as e:
print("Error trying to instantiate external plugin using %s: %s" % (
str(method), str(e)))
    def add_default_options(self, argprs):
        """
        Adds the default reference viewer startup options to an
        ArgumentParser instance `argprs` (an optparse OptionParser is
        also accepted for backward compatibility).
        """
        # pick the registration method appropriate to the parser type
        if hasattr(argprs, 'add_option'):
            # older OptParse
            add_argument = argprs.add_option
        else:
            # newer ArgParse
            add_argument = argprs.add_argument
        add_argument("--basedir", dest="basedir", metavar="NAME",
                     help="Specify Ginga configuration area")
        add_argument("--bufsize", dest="bufsize", metavar="NUM",
                     type=int, default=10,
                     help="Buffer length to NUM")
        add_argument('-c', "--channels", dest="channels",
                     help="Specify list of channels to create")
        add_argument("--debug", dest="debug", default=False,
                     action="store_true",
                     help="Enter the pdb debugger on main()")
        add_argument("--disable-plugins", dest="disable_plugins",
                     metavar="NAMES",
                     help="Specify plugins that should be disabled")
        add_argument("--display", dest="display", metavar="HOST:N",
                     help="Use X display on HOST:N")
        add_argument("--fitspkg", dest="fitspkg", metavar="NAME",
                     default=None,
                     help="Prefer FITS I/O module NAME")
        add_argument("-g", "--geometry", dest="geometry",
                     default=None, metavar="GEOM",
                     help="X geometry for initial size and placement")
        add_argument("--modules", dest="modules", metavar="NAMES",
                     help="Specify additional modules to load")
        add_argument("--norestore", dest="norestore", default=False,
                     action="store_true",
                     help="Don't restore the GUI from a saved layout")
        add_argument("--nosplash", dest="nosplash", default=False,
                     action="store_true",
                     help="Don't display the splash screen")
        add_argument("--numthreads", dest="numthreads", type=int,
                     default=30, metavar="NUM",
                     help="Start NUM threads in thread pool")
        add_argument("--opengl", dest="opengl", default=False,
                     action="store_true",
                     help="Use OpenGL acceleration")
        add_argument("--plugins", dest="plugins", metavar="NAMES",
                     help="Specify additional plugins to load")
        add_argument("--profile", dest="profile", action="store_true",
                     default=False,
                     help="Run the profiler on main()")
        add_argument("--sep", dest="separate_channels", default=False,
                     action="store_true",
                     help="Load files in separate channels")
        add_argument("--suppress-fits-warnings",
                     dest="suppress_fits_warnings", default=False,
                     action="store_true",
                     help="Suppress FITS verify warnings")
        add_argument("-t", "--toolkit", dest="toolkit", metavar="NAME",
                     default=None,
                     help="Prefer GUI toolkit (gtk|qt)")
        add_argument("--wcspkg", dest="wcspkg", metavar="NAME",
                     default=None,
                     help="Prefer WCS module NAME")
        # add the standard logging options (log file, log level, etc.)
        log.addlogopts(argprs)
    def main(self, options, args):
        """
        Main routine for running the reference viewer.

        `options` is an options object that has been populated with
        values from parsing the command line.  It should at least include
        the options from add_default_options().

        `args` is a list of arguments to the viewer after parsing out
        options.  It should contain a list of files or URLs to load.

        This method blocks in the GUI main loop and exits the process
        when the loop terminates.
        """
        # Create a logger
        logger = log.get_logger(name='ginga', options=options)
        if options.basedir is not None:
            # honor a user-specified configuration area (may use '~')
            paths.ginga_home = os.path.expanduser(options.basedir)
        # Get settings (preferences)
        basedir = paths.ginga_home
        if not os.path.exists(basedir):
            try:
                os.mkdir(basedir)
            except OSError as e:
                logger.warning(
                    "Couldn't create ginga settings area (%s): %s" % (
                        basedir, str(e)))
                logger.warning("Preferences will not be able to be saved")
        # Set up preferences
        prefs = Settings.Preferences(basefolder=basedir, logger=logger)
        settings = prefs.create_category('general')
        settings.set_defaults(useMatplotlibColormaps=False,
                              widgetSet='choose',
                              WCSpkg='choose', FITSpkg='choose',
                              suppress_fits_warnings=False,
                              recursion_limit=2000,
                              icc_working_profile=None,
                              font_scaling_factor=None,
                              save_layout=True,
                              use_opengl=False,
                              layout_file='layout',
                              plugin_file='plugins.json',
                              channel_prefix="Image")
        settings.load(onError='silent')
        # default of 1000 is a little too small
        sys.setrecursionlimit(settings.get('recursion_limit'))
        # So we can find our plugins
        sys.path.insert(0, basedir)
        package_home = os.path.split(sys.modules['ginga.version'].__file__)[0]
        child_dir = os.path.join(package_home, 'rv', 'plugins')
        sys.path.insert(0, child_dir)
        plugin_dir = os.path.join(basedir, 'plugins')
        sys.path.insert(0, plugin_dir)
        gc = os.path.join(basedir, "ginga_config.py")
        have_ginga_config = os.path.exists(gc)
        # User configuration, earliest possible intervention
        if have_ginga_config:
            try:
                import ginga_config
                if hasattr(ginga_config, 'init_config'):
                    ginga_config.init_config(self)
            except Exception as e:
                # report the user's config error but continue startup
                try:
                    (type, value, tb) = sys.exc_info()
                    tb_str = "\n".join(traceback.format_tb(tb))
                except Exception:
                    tb_str = "Traceback information unavailable."
                logger.error("Error processing Ginga config file: %s" % (
                    str(e)))
                logger.error("Traceback:\n%s" % (tb_str))
        # Choose a toolkit
        if options.toolkit:
            toolkit = options.toolkit
        else:
            toolkit = settings.get('widgetSet', 'choose')
        if toolkit == 'choose':
            try:
                ginga_toolkit.choose()
            except ImportError as e:
                print("UI toolkit choose error: %s" % str(e))
                sys.exit(1)
        else:
            ginga_toolkit.use(toolkit)
        tkname = ginga_toolkit.get_family()
        logger.info("Chosen toolkit (%s) family is '%s'" % (
            ginga_toolkit.toolkit, tkname))
        # these imports have to be here, otherwise they force the choice
        # of toolkit too early
        from ginga.rv.Control import GingaShell, GuiLogHandler
        if settings.get('useMatplotlibColormaps', False):
            # Add matplotlib color maps if matplotlib is installed
            try:
                from ginga import cmap
                cmap.add_matplotlib_cmaps(fail_on_import_error=False)
            except Exception as e:
                logger.warning(
                    "failed to load matplotlib colormaps: %s" % (str(e)))
        # Set a working RGB ICC profile if user has one
        working_profile = settings.get('icc_working_profile', None)
        rgb_cms.working_profile = working_profile
        # User wants to customize the WCS package?
        if options.wcspkg:
            wcspkg = options.wcspkg
        else:
            wcspkg = settings.get('WCSpkg', 'choose')
        try:
            from ginga.util import wcsmod
            if wcspkg != 'choose':
                assert wcsmod.use(wcspkg) is True
        except Exception as e:
            logger.warning(
                "failed to set WCS package preference '{}': {}".format(wcspkg, e))
        # User wants to customize the FITS package?
        if options.fitspkg:
            fitspkg = options.fitspkg
        else:
            fitspkg = settings.get('FITSpkg', 'choose')
        if options.suppress_fits_warnings:
            supp_warn = options.suppress_fits_warnings
        else:
            supp_warn = settings.get('suppress_fits_warnings', False)
        if supp_warn:
            import warnings
            from astropy.io import fits
            warnings.simplefilter('ignore', fits.verify.VerifyWarning)
        try:
            from ginga.util import io_fits, loader
            if fitspkg != 'choose':
                assert io_fits.use(fitspkg) is True
                # opener name is not necessarily the same
                opener = loader.get_opener(io_fits.fitsLoaderClass.name)
                # set this opener as the priority one
                opener.priority = -99
        except Exception as e:
            logger.warning(
                "failed to set FITS package preference '{}': {}".format(fitspkg, e))
        # Create the dynamic module manager
        mm = ModuleManager.ModuleManager(logger)
        # Create and start thread pool
        ev_quit = threading.Event()
        thread_pool = Task.ThreadPool(options.numthreads, logger,
                                      ev_quit=ev_quit)
        thread_pool.startall()
        # Create the Ginga main object
        ginga_shell = GingaShell(logger, thread_pool, mm, prefs,
                                 ev_quit=ev_quit)
        if options.opengl:
            settings.set(use_opengl=True)
        layout_file = os.path.join(basedir, settings.get('layout_file',
                                                         'layout'))
        ginga_shell.set_layout(self.layout, layout_file=layout_file,
                               save_layout=settings.get('save_layout', True))
        # User configuration (custom star catalogs, etc.)
        if have_ginga_config:
            try:
                if hasattr(ginga_config, 'pre_gui_config'):
                    ginga_config.pre_gui_config(ginga_shell)
            except Exception as e:
                try:
                    (type, value, tb) = sys.exc_info()
                    tb_str = "\n".join(traceback.format_tb(tb))
                except Exception:
                    tb_str = "Traceback information unavailable."
                logger.error("Error importing Ginga config file: %s" % (
                    str(e)))
                logger.error("Traceback:\n%s" % (tb_str))
        # Build desired layout
        ginga_shell.build_toplevel(ignore_saved_layout=options.norestore)
        # Does user have a customized plugin setup?  If so, override the
        # default plugins to be that
        plugin_file = settings.get('plugin_file', None)
        if plugin_file is not None:
            plugin_file = os.path.join(basedir, plugin_file)
            if os.path.exists(plugin_file):
                logger.info("Reading plugin file '%s'..." % (plugin_file))
                try:
                    with open(plugin_file, 'r') as in_f:
                        buf = in_f.read()
                    self.plugins = json.loads(buf)
                except Exception as e:
                    logger.error("Error reading plugin file: %s" % (str(e)))
        # Did user specify a particular geometry?
        if options.geometry:
            ginga_shell.set_geometry(options.geometry)
        # make the list of disabled plugins
        if options.disable_plugins is not None:
            disabled_plugins = options.disable_plugins.lower().split(',')
        else:
            disabled_plugins = settings.get('disable_plugins', [])
            if not isinstance(disabled_plugins, list):
                disabled_plugins = disabled_plugins.lower().split(',')
        # Add GUI log handler (for "Log" global plugin)
        guiHdlr = GuiLogHandler(ginga_shell)
        guiHdlr.setLevel(options.loglevel)
        fmt = logging.Formatter(log.LOG_FORMAT)
        guiHdlr.setFormatter(fmt)
        logger.addHandler(guiHdlr)
        # Load any custom modules
        if options.modules is not None:
            modules = options.modules.split(',')
        else:
            modules = settings.get('global_plugins', [])
            if not isinstance(modules, list):
                modules = modules.split(',')
        for long_plugin_name in modules:
            # allow a 'package.Module' form; the prefix is passed along
            # so the plugin can be imported from its package
            if '.' in long_plugin_name:
                tmpstr = long_plugin_name.split('.')
                plugin_name = tmpstr[-1]
                pfx = '.'.join(tmpstr[:-1])
            else:
                plugin_name = long_plugin_name
                pfx = None
            menu_name = "%s [G]" % (plugin_name)
            spec = Bunch(name=plugin_name, module=plugin_name,
                         ptype='global', tab=plugin_name,
                         menu=menu_name, category="Custom",
                         workspace='right', pfx=pfx)
            self.add_plugin_spec(spec)
        # Load any custom local plugins
        if options.plugins is not None:
            plugins = options.plugins.split(',')
        else:
            plugins = settings.get('local_plugins', [])
            if not isinstance(plugins, list):
                plugins = plugins.split(',')
        for long_plugin_name in plugins:
            if '.' in long_plugin_name:
                tmpstr = long_plugin_name.split('.')
                plugin_name = tmpstr[-1]
                pfx = '.'.join(tmpstr[:-1])
            else:
                plugin_name = long_plugin_name
                pfx = None
            spec = Bunch(module=plugin_name, workspace='dialogs',
                         ptype='local', category="Custom",
                         hidden=False, pfx=pfx)
            self.add_plugin_spec(spec)
        # Mark disabled plugins
        for spec in self.plugins:
            if spec.get('enabled', None) is None:
                spec['enabled'] = (False if spec.module.lower() in disabled_plugins
                                   else True)
        # submit plugin specs to shell
        ginga_shell.set_plugins(self.plugins)
        # start any plugins that have start=True
        ginga_shell.boot_plugins()
        ginga_shell.update_pending()
        # TEMP?
        tab_names = [name.lower()
                     for name in ginga_shell.ds.get_tabnames(group=None)]
        if 'info' in tab_names:
            ginga_shell.ds.raise_tab('Info')
        if 'synopsis' in tab_names:
            ginga_shell.ds.raise_tab('Synopsis')
        if 'thumbs' in tab_names:
            ginga_shell.ds.raise_tab('Thumbs')
        # Add custom channels
        if options.channels is not None:
            channels = options.channels.split(',')
        else:
            channels = settings.get('channels', self.channels)
            if not isinstance(channels, list):
                channels = channels.split(',')
        if len(channels) == 0:
            # should provide at least one default channel?
            channels = [settings.get('channel_prefix', "Image")]
        # populate the initial channel lineup
        for item in channels:
            # items may be a plain channel name or a (name, workspace) pair
            if isinstance(item, str):
                chname, wsname = item, None
            else:
                chname, wsname = item
            ginga_shell.add_channel(chname, workspace=wsname)
        ginga_shell.change_channel(chname)
        # User configuration (custom star catalogs, etc.)
        if have_ginga_config:
            try:
                if hasattr(ginga_config, 'post_gui_config'):
                    ginga_config.post_gui_config(ginga_shell)
            except Exception as e:
                try:
                    (type, value, tb) = sys.exc_info()
                    tb_str = "\n".join(traceback.format_tb(tb))
                except Exception:
                    tb_str = "Traceback information unavailable."
                logger.error("Error processing Ginga config file: %s" % (
                    str(e)))
                logger.error("Traceback:\n%s" % (tb_str))
        # Redirect warnings to logger
        for hdlr in logger.handlers:
            logging.getLogger('py.warnings').addHandler(hdlr)
        # Display banner the first time run, unless suppressed
        show_banner = True
        try:
            show_banner = settings.get('showBanner')
        except KeyError:
            # disable for subsequent runs
            settings.set(showBanner=False)
            if not os.path.exists(settings.preffile):
                settings.save()
        if (not options.nosplash) and (len(args) == 0) and show_banner:
            ginga_shell.banner(raiseTab=True)
        # Handle inputs like "*.fits[ext]" that sys cmd cannot auto expand.
        expanded_args = []
        for imgfile in args:
            if '*' in imgfile:
                # separate any FITS extension suffix before globbing,
                # then re-append it to each match
                if '[' in imgfile and imgfile.endswith(']'):
                    s = imgfile.split('[')
                    ext = '[' + s[1]
                    imgfile = s[0]
                else:
                    ext = ''
                for fname in glob.iglob(imgfile):
                    expanded_args.append(fname + ext)
            else:
                expanded_args.append(imgfile)
        # Assume remaining arguments are fits files and load them.
        if not options.separate_channels:
            chname = channels[0]
            ginga_shell.gui_do(ginga_shell.open_uris, expanded_args,
                               chname=chname)
        else:
            # one file per channel; create extra channels as needed
            i = 0
            num_channels = len(channels)
            for imgfile in expanded_args:
                if i < num_channels:
                    chname = channels[i]
                    i = i + 1
                else:
                    channel = ginga_shell.add_channel_auto()
                    chname = channel.name
                ginga_shell.gui_do(ginga_shell.open_uris, [imgfile],
                                   chname=chname)
        try:
            try:
                # if there is a network component, start it
                if hasattr(ginga_shell, 'start'):
                    logger.info("starting network interface...")
                    ginga_shell.start()
                # Main loop to handle GUI events
                logger.info("entering mainloop...")
                ginga_shell.mainloop(timeout=0.001)
            except KeyboardInterrupt:
                logger.error("Received keyboard interrupt!")
        finally:
            logger.info("Shutting down...")
            ev_quit.set()
        sys.exit(0)
def reference_viewer(sys_argv):
    """Create and run the reference viewer from a command line
    argument vector (`sys_argv` includes the program name at index 0).
    """
    viewer = ReferenceViewer(layout=default_layout)
    viewer.add_default_plugins()
    viewer.add_separately_distributed_plugins()
    # Parse command line options with argparse module
    from argparse import ArgumentParser
    argprs = ArgumentParser(description="Run the Ginga reference viewer.")
    viewer.add_default_options(argprs)
    argprs.add_argument('-V', '--version', action='version',
                        version='%(prog)s {}'.format(version.version))
    # unknown options are tolerated; leftovers are treated as files/URLs
    (options, args) = argprs.parse_known_args(sys_argv[1:])
    if options.display:
        os.environ['DISPLAY'] = options.display
    # Are we debugging this?
    if options.debug:
        import pdb
        pdb.run('viewer.main(options, args)')
    # Are we profiling this?
    elif options.profile:
        import profile
        print(("%s profile:" % sys_argv[0]))
        profile.runctx('viewer.main(options, args)',
                       dict(options=options, args=args, viewer=viewer), {})
    else:
        viewer.main(options, args)
def _main():
    """Entry point: run the reference viewer from the command line."""
    reference_viewer(sys.argv)
# END
| bsd-3-clause | 1ca76773ee524c54ba49daa1da4d01c5 | 40.198667 | 84 | 0.539791 | 4.377869 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/Colorbar.py | 3 | 5458 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
The ``Colorbar`` plugin shows a colorbar indicating the colormap applied
to the image and showing the example values along the range.
**Plugin Type: Global**
``Colorbar`` is a global plugin. Only one instance can be opened.
**Usage**
Clicking and dragging in the ``Colorbar`` window will shift the colormap
left or right. Scrolling will stretch or shrink the colormap at the
cursor position. Right-clicking will restore the colormap from any
shift or stretch.
If the focus shifts to another channel, the colorbar will be updated
to reflect that channel's colormap and value information.
"""
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.gw import ColorBar
__all__ = ['Colorbar']
class Colorbar(GingaPlugin.GlobalPlugin):

    def __init__(self, fv):
        """Initialize the plugin; `fv` is the reference viewer shell."""
        # superclass defines some variables for us, like logger
        super(Colorbar, self).__init__(fv)

        # placeholders for tracked state (mostly unused currently)
        self._image = None
        self.active = None
        self.info = None

        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_Colorbar')
        self.settings.add_defaults(cbar_height=36, fontsize=10)
        self.settings.load(onError='silent')

        # track channel lifecycle and focus so the bar stays in sync
        fv.add_callback('add-channel', self.add_channel_cb)
        fv.add_callback('delete-channel', self.delete_channel_cb)
        fv.add_callback('channel-change', self.change_cbar)

        self.colorbar = None
        self.cursor_obj = None
        self.gui_up = False

    def build_gui(self, container):
        """Construct the colorbar widget and add it to `container`."""
        cbar = ColorBar.ColorBar(self.logger, settings=self.settings)
        cbar.set_cmap(self.fv.cm)
        cbar.set_imap(self.fv.im)
        cbar_w = cbar.get_widget()
        cbar_ht = self.settings.get('cbar_height', 36)
        cbar_w.resize(-1, cbar_ht)
        self.colorbar = cbar
        cbar.add_callback('motion', self.cbar_value_cb)
        # see cbar_value_cb()
        if self.fv.gpmon.has_plugin('Cursor'):
            self.cursor_obj = self.fv.gpmon.get_plugin('Cursor')
        container.add_widget(cbar_w, stretch=0)
        self.gui_up = True

    def add_channel_cb(self, viewer, channel):
        """Callback when a channel is added: watch its cut levels and
        colormap so the colorbar can track them.
        """
        settings = channel.settings
        settings.get_setting('cuts').add_callback(
            'set', self.change_range_cb, channel.fitsimage)
        chname = channel.name
        info = Bunch.Bunch(chname=chname, channel=channel)
        channel.extdata._colorbar_info = info
        fi = channel.fitsimage
        rgbmap = fi.get_rgbmap()
        rgbmap.add_callback('changed', self.rgbmap_cb, channel)

    def delete_channel_cb(self, viewer, channel):
        """Callback when a channel is deleted: drop our references."""
        chname = channel.name
        self.logger.debug("deleting channel %s" % (chname))
        self.active = None
        self.info = None

    def _match_cmap(self, fitsimage, colorbar):
        """
        Helper method to change the ColorBar to match the cut levels and
        colormap used in a ginga ImageView.
        """
        rgbmap = fitsimage.get_rgbmap()
        loval, hival = fitsimage.get_cut_levels()
        colorbar.set_range(loval, hival)
        # Since we are sharing one ColorBar widget for all channels,
        # swap in this viewer's rgbmap so the bar reflects its colormap
        colorbar.set_rgbmap(rgbmap)

    def change_cbar(self, viewer, channel):
        """Callback when the channel focus changes: re-sync the bar."""
        if self.gui_up and channel is not None:
            self._match_cmap(channel.fitsimage, self.colorbar)

    def change_range_cb(self, setting, value, fitsimage):
        """
        This method is called when the cut level values (lo/hi) have
        changed in a channel.  We adjust them in the ColorBar to match.
        """
        if not self.gui_up:
            return
        if fitsimage != self.fv.getfocus_viewer():
            # values have changed in a channel that doesn't have the focus
            return False
        loval, hival = value
        self.colorbar.set_range(loval, hival)

    def cbar_value_cb(self, cbar, value, event):
        """
        This method is called when the user moves the mouse over the
        ColorBar.  It displays the value of the mouse position in the
        ColorBar in the Readout (if any).
        """
        if self.cursor_obj is not None:
            readout = self.cursor_obj.readout
            if readout is not None:
                maxv = readout.maxv
                text = "Value: %-*.*s" % (maxv, maxv, value)
                readout.set_text(text)

    def rgbmap_cb(self, rgbmap, channel):
        """
        This method is called when the RGBMap is changed.  We update
        the ColorBar to match.
        """
        if not self.gui_up:
            return
        fitsimage = channel.fitsimage
        if fitsimage != self.fv.getfocus_viewer():
            return False
        self.change_cbar(self.fv, channel)

    def start(self):
        """Called when the plugin starts: sync to the current channel."""
        channel = self.fv.get_channel_info()
        self.change_cbar(self.fv, channel)

    def stop(self):
        """Called when the plugin stops: release widget references."""
        self.gui_up = False
        self.cursor_obj = None
        self.colorbar = None
        return True

    def close(self):
        """Close (stop) this global plugin."""
        self.fv.stop_global_plugin(str(self))
        return True

    def __str__(self):
        # name used to register/look up this plugin
        return 'colorbar'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example  # noqa
# __doc__ is None when running under `python -OO` (docstrings stripped)
if __doc__ is not None:
    __doc__ += generate_cfg_example('plugin_Colorbar', package='ginga')

# END
| bsd-3-clause | 80890a5a22292ea092c9829b7aaf91f8 | 32.078788 | 74 | 0.62697 | 3.685348 | false | false | false | false |
ejeschke/ginga | experimental/remote_image/plugin/RemoteData.py | 5 | 3219 | #
# RemoteData.py -- Remote Data plugin for Ginga image viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
"""
import sys
from ginga import GingaPlugin
from ginga.util.grc import RemoteClient
from ginga.gw import Widgets
from RemoteImage import RemoteImage
help_msg = sys.modules[__name__].__doc__
class RemoteData(GingaPlugin.LocalPlugin):
    """Local plugin for loading images served by a remote Ginga data
    server, accessed through a `RemoteClient` proxy and wrapped in
    `RemoteImage` objects.
    """

    def __init__(self, fv, fitsimage):
        """Initialize; `fv` is the shell, `fitsimage` the channel viewer."""
        # superclass defines some variables for us, like logger
        super(RemoteData, self).__init__(fv, fitsimage)

        # What port to connect to for requests
        self.port = 9909
        # What host to connect to
        self.host = 'localhost'

        self.ev_quit = fv.ev_quit

        # Create a proxy for the default address right away so that
        # load_cb() works even if the user never activates "Set Addr"
        # (previously self.proxy was only assigned in set_addr_cb(),
        # producing an AttributeError on first load)
        self.proxy = RemoteClient(self.host, self.port)

    def build_gui(self, container):
        """Build the plugin UI inside `container`."""
        vbox = Widgets.VBox()

        fr = Widgets.Frame("Remote Control")
        captions = [
            ("Addr:", 'label', "Addr", 'llabel'),
            ("Set Addr:", 'label', "Set Addr", 'entryset'),
            ("Remote Path:", 'label', "Remote Path", 'entry'),
        ]
        w, b = Widgets.build_info(captions)
        self.w.update(b)
        addr = self.host + ':' + str(self.port)
        b.addr.set_text(addr)
        b.set_addr.set_length(100)
        b.set_addr.set_text(addr)
        b.set_addr.set_tooltip("Set address to connect to remote server")
        b.set_addr.add_callback('activated', self.set_addr_cb)
        b.remote_path.add_callback('activated', self.load_cb)
        fr.set_widget(w)
        vbox.add_widget(fr, stretch=0)

        # stretch
        vbox.add_widget(Widgets.Label(''), stretch=1)

        btns = Widgets.HBox()
        btns.set_spacing(4)
        btns.set_border_width(4)

        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn)
        btns.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(btns)

        container.add_widget(vbox, stretch=1)

    def start(self):
        """Called when the plugin is started; nothing to do."""
        pass

    def stop(self):
        """Called when the plugin is stopped; nothing to do."""
        pass

    def restart_cb(self, w):
        """Restart the server component, if one exists."""
        # NOTE(review): self.server is never assigned anywhere in this
        # plugin; guard the lookup so an accidental activation doesn't
        # raise AttributeError -- TODO confirm where the server object
        # is supposed to come from
        server = getattr(self, 'server', None)
        if server is not None:
            server.stop()
        self.start()

    def set_addr_cb(self, w):
        """Callback for the "Set Addr" entry: parse 'host:port' and
        rebuild the client proxy.
        """
        # get and parse address
        addr = w.get_text()
        host, port = addr.split(':')
        self.host = host
        self.port = int(port)
        self.w.addr.set_text(addr)
        self.proxy = RemoteClient(self.host, self.port)

    def load_cb(self, w):
        """Callback for the "Remote Path" entry: fetch the image at the
        given remote path and add it to our channel.  Errors are shown
        in the viewer's error dialog.
        """
        path = w.get_text().strip()
        try:
            image = RemoteImage(self.proxy, logger=self.logger)
            image.load_file(path)

            chname = self.fv.get_channelName(self.fitsimage)
            imname = image.get('name', None)
            if imname is None:
                imname = self.fv.name_image_from_path(path)
                image.set(name=imname)

            self.logger.debug("Adding image '%s'" % (imname))
            self.fv.add_image(imname, image, chname=chname)

        except Exception as e:
            self.fv.show_error("Error loading remote image: %s" % (str(e)))

    def close(self):
        """Close (stop) this local plugin."""
        self.fv.stop_local_plugin(str(self))
        return True

    def __str__(self):
        # name used to register/look up this plugin
        return 'remotedata'
#END
| bsd-3-clause | 724d29cf91ccf3ed9ab92bf123f52b86 | 25.385246 | 75 | 0.575645 | 3.491323 | false | false | false | false |
ejeschke/ginga | experimental/plugins/Imexam.py | 3 | 11959 | #
# Imexam.py -- Imexam plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import inspect
import traceback
import logging
from io import StringIO
from ginga import GingaPlugin
from ginga.gw import Widgets
from ginga.gw import Plot
from ginga.util import plots
from ginga.misc import Bunch
have_imexam = False
try:
from imexam.imexamine import Imexamine
have_imexam = True
except ImportError:
pass
class Imexam(GingaPlugin.LocalPlugin):
    """
    This is an experimental Ginga plugin for the "imexam" package.

    To use it you will need to install the "imexam" package:

        https://github.com/spacetelescope/imexam

    To install this plugin:

        $ mkdir $HOME/.ginga/plugins
        $ cp Imexam.py $HOME/.ginga/plugins/.

    To use:

        $ ginga ... --plugins=Imexam

    Then from the "Operations" menu, choose "Imexam".

    KNOWN ISSUES:

    - You need ginga v2.6.0.dev
    - When a plot is created for the first time, it will force the
      focus away from the channel viewer.  This means that keystrokes
      will not be recognized in the viewer again until you give it the
      focus back (by say, clicking in the window)
    - It makes the most sense to use the plugin with the channels
      workspace in "MDI" mode, although it works fine in other
      configurations.
    - Closing the plot windows is only possible currently by making
      sure the window has the focus and then using the workspace toolbar
      "-" button to delete it.
    - If you close an active plot window, you will need to press the
      "Detach Plot" button before plotting will work again--it doesn't
      recognize that the window has been closed.
    """

    def __init__(self, fv, fitsimage):
        """Constructor.

        Parameters
        ----------
        fv : reference viewer shell object
        fitsimage : the channel viewer this plugin instance is bound to
        """
        # superclass defines some variables for us, like logger
        super(Imexam, self).__init__(fv, fitsimage)

        # get Imexam preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Imexam')
        self.settings.addDefaults(font='Courier', fontsize=12,
                                  plots_in_workspace=False)
        self.settings.load(onError='silent')

        self.layertag = 'imexam-canvas'
        self.imexam_active = False

        # this is our imexamine object
        self.imex = Imexamine()

        # capture the stdout logger from imexam so its reports can be
        # redirected into our text widget
        self.log_capture_string = StringIO()
        self.stream_handler = logging.StreamHandler(self.log_capture_string)
        self.stream_handler.setLevel(logging.INFO)
        self.imex.log.addHandler(self.stream_handler)

        # private canvas that intercepts keystrokes over the viewer
        self.dc = fv.get_draw_classes()
        canvas = self.dc.DrawingCanvas()
        canvas.set_callback('key-press', self.key_press_cb)
        canvas.set_surface(self.fitsimage)
        canvas.register_for_cursor_drawing(self.fitsimage)
        canvas.enable_draw(False)
        canvas.name = 'Imexam-canvas'
        self.canvas = canvas

        # current plot bookkeeping
        self._plot = None
        self._plot_w = None
        self._plot_idx = 0
        self._plots_in_ws = self.settings.get('plots_in_workspace', False)

        self.w = Bunch.Bunch()

    def build_gui(self, container):
        """Construct the plugin UI inside `container`."""
        if not have_imexam:
            raise Exception("Please install 'imexam' to use this plugin")

        top = Widgets.VBox()
        top.set_border_width(4)

        fontsize = self.settings.get('fontsize', 12)
        msg_font = self.fv.get_font('sans', fontsize)
        tw = Widgets.TextArea(wrap=False, editable=False)
        tw.set_font(msg_font)
        self.tw = tw

        fr = Widgets.Expander("Instructions")
        fr.set_widget(tw)
        top.add_widget(fr, stretch=0)

        fr = Widgets.Frame("Imexam output:")

        if not self._plots_in_ws:
            # plots live in a tab widget above the output text
            splitter = Widgets.Splitter(orientation='vertical')

            self.nb = Widgets.TabWidget()
            splitter.add_widget(self.nb)

        # this holds the messages returned from imexamine
        tw = Widgets.TextArea(wrap=False, editable=False)
        font = self.settings.get('font', 'Courier')
        fixed_font = self.fv.get_font(font, fontsize)
        tw.set_font(fixed_font)
        self.msg_res = tw

        if not self._plots_in_ws:
            splitter.add_widget(tw)
            fr.set_widget(splitter)
        else:
            fr.set_widget(tw)

        top.add_widget(fr, stretch=1)

        hbox = Widgets.HBox()
        btn = Widgets.Button('Detach Plot')
        btn.add_callback('activated', self.detach_plot_cb)
        btn.set_tooltip("Detach current plot and start a new one")
        hbox.add_widget(btn, stretch=0)
        btn = Widgets.Button('Clear Text')
        btn.add_callback('activated', self.clear_text_cb)
        btn.set_tooltip("Clear the imexam output")
        hbox.add_widget(btn, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(hbox, stretch=0)

        hbox = Widgets.HBox()
        lbl = Widgets.Label("Keys active:")
        hbox.add_widget(lbl)
        btn1 = Widgets.RadioButton("On")
        btn1.set_state(self.imexam_active)
        btn1.add_callback('activated',
                          lambda w, val: self.set_active_cb(True, val))
        btn1.set_tooltip("Enable imexam keys")
        self.w.btn_on = btn1
        hbox.add_widget(btn1)
        btn2 = Widgets.RadioButton("Off", group=btn1)
        btn2.set_state(not self.imexam_active)
        btn2.add_callback('activated',
                          lambda w, val: self.set_active_cb(False, val))
        btn2.set_tooltip("Disable imexam keys")
        self.w.btn_off = btn2
        hbox.add_widget(btn2)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(hbox, stretch=0)

        btns = Widgets.HBox()
        btns.set_spacing(3)

        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)

        self._plot = None
        self._plot_w = None
        self._plot_idx = 0
        self.make_new_figure()

        container.add_widget(top, stretch=1)

    def close(self):
        """Ask the shell to stop this plugin instance."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True

    def set_active(self, onoff, update_ui=False):
        """Turn imexam key handling on or off.

        If `update_ui` is True, also synchronize the radio buttons.
        """
        self.imexam_active = onoff
        self.canvas.ui_setActive(onoff)
        if update_ui and 'btn_on' in self.w:
            if onoff:
                self.w.btn_on.set_state(True)
            else:
                self.w.btn_off.set_state(True)
        if onoff:
            msg = "Imexam keys are active"
        else:
            msg = "Imexam keys deactivated"
        self.fitsimage.onscreen_message(msg, delay=1.0)
        self.fv.show_status(msg)

    def set_active_cb(self, tf, onoff):
        # radio button callback; `tf` is which button, `onoff` its state
        if tf:
            self.set_active(onoff, update_ui=False)

    def detach_plot_cb(self, w):
        # abandon the current figure and start a fresh one
        self._plot = None
        self._plot_w = None
        self.make_new_figure()

    def clear_text_cb(self, w):
        self.msg_res.clear()

    def instructions(self):
        """Populate the instructions pane with imexam's key bindings."""
        lines = ["Key bindings:"]
        for key, tup in self.imex.imexam_option_funcs.items():
            func, descr = tup
            lines.append("  %s : %s" % (key, descr))
        text = '\n'.join(lines)
        self.tw.set_text(text)

    def start(self):
        self.instructions()
        # insert our canvas, if not already
        p_canvas = self.fitsimage.get_canvas()
        if not p_canvas.has_object(self.canvas):
            p_canvas.add(self.canvas, tag=self.layertag)

        self.clear()
        self.resume()

    def pause(self):
        self.set_active(False, update_ui=True)

    def resume(self):
        self.set_active(True, update_ui=True)

    def stop(self):
        self.pause()
        # remove the canvas from the image
        p_canvas = self.fitsimage.get_canvas()
        try:
            p_canvas.delete_object_by_tag(self.layertag)
        except Exception:
            pass
        self.fv.show_status("")

    def redo(self):
        pass

    def make_new_figure(self):
        """Create a new matplotlib figure/widget for imexam plots."""
        chname = self.fv.get_channel_name(self.fitsimage)

        wd, ht = 400, 300
        self._plot_idx += 1
        self._plot = plots.Plot(logger=self.logger,
                                width=wd, height=ht)
        name = "%s: Fig %d" % (chname, self._plot_idx)
        group = 10

        pw = Plot.PlotWidget(self._plot)

        vbox = Widgets.VBox()
        vbox.add_widget(pw, stretch=1)
        hbox = Widgets.HBox()
        hbox.add_widget(Widgets.Label(''), stretch=1)
        btn = Widgets.Button('Close Plot')
        btn.add_callback('activated', lambda w: self.close_plot(name, vbox))
        hbox.add_widget(btn, stretch=0)
        vbox.add_widget(hbox, stretch=0)
        # vbox.resize(wd, ht)

        self._plot_w = vbox

        if self._plots_in_ws:
            ws = self.fv.get_current_workspace()
            tab = self.fv.ds.add_tab(ws.name, vbox, group, name, name,
                                     data=dict(plot=self._plot))
        else:
            self.nb.add_widget(vbox, name)

        # imexam should get a clean figure
        fig = self._plot.get_figure()
        fig.clf()

    def close_plot(self, name, child):
        # if the current plot is being closed, start a new one first
        if child == self._plot_w:
            self.make_new_figure()

        if not self._plots_in_ws:
            self.nb.remove(child)

        return True

    def imexam_cmd(self, canvas, keyname, data_x, data_y, func):
        """Invoke the imexamine function `func` at data coords
        (`data_x`, `data_y`), passing whatever keyword arguments it
        declares ('data' and/or 'fig').  Output and errors are routed
        to the results text widget.
        """
        if not self.imexam_active:
            return False

        self.logger.debug("imexam_cb")
        self.logger.debug("key pressed: %s" % (keyname))

        image = self.fitsimage.get_image()
        if image is None:
            return False

        # inspect func to see what kind of things we can pass in
        # NOTE: inspect.getargspec() was deprecated and removed in
        # Python 3.11; getfullargspec() exposes .args compatibly
        args = inspect.getfullargspec(func).args

        kwargs = dict()
        if 'data' in args:
            # pass the data array
            data_np = image.get_data()
            kwargs['data'] = data_np

        if 'fig' in args:
            # Make a new figure if we don't have one
            if self._plot is None:
                self.make_new_figure()
            kwargs['fig'] = self._plot.get_figure()

        # reset the captured-log buffer so we only show this command's
        # output
        self.log_capture_string.seek(0)
        self.log_capture_string.truncate()

        self.msg_res.append_text("----\ncmd: '%s'\n" % keyname)
        try:
            func(data_x, data_y, **kwargs)

            self.msg_res.append_text(self.log_capture_string.getvalue(),
                                     autoscroll=True)

        except Exception as e:
            # get any partial output
            self.msg_res.append_text(self.log_capture_string.getvalue(),
                                     autoscroll=True)
            errmsg = ("Error calling imexam function: %s" % (str(e)))
            self.msg_res.append_text(errmsg)
            # show traceback
            try:
                (type, value, tb) = sys.exc_info()
                tb_str = "\n".join(traceback.format_tb(tb))

            except Exception as e:
                tb_str = "Traceback information unavailable."

            self.msg_res.append_text(tb_str, autoscroll=True)

        return True

    def key_press_cb(self, canvas, keyname):
        """Dispatch a viewer keystroke to the matching imexam function."""
        # some keys that we explicitly can't handle from imexamine
        if keyname == '2':
            self.detach_plot_cb(None)
            return True

        try:
            # lookup imexamine function
            func, descr = self.imex.imexam_option_funcs[keyname]

        except KeyError:
            # no key binding for this in imexam
            return False

        data_x, data_y = self.fitsimage.get_last_data_xy()
        return self.imexam_cmd(self.canvas, keyname, data_x, data_y, func)

    def clear(self):
        self.canvas.delete_all_objects()
        return False

    def __str__(self):
        # registry key used by the shell to look up this plugin
        return 'imexam'
# END
| bsd-3-clause | 74847dafe29d9a7ecb4a693b747014f0 | 29.742931 | 85 | 0.583995 | 3.570917 | false | false | false | false |
ejeschke/ginga | ginga/mplw/FigureCanvasQt.py | 3 | 2310 | #
# GingaCanvasQt.py -- classes for the display of FITS files in
# Matplotlib FigureCanvas
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.toolkit import toolkit
if toolkit in ('qt6', 'pyside6'):
from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg as QtFigureCanvas
else:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as QtFigureCanvas
def setup_Qt(widget, viewer):
    """Configure a Qt widget for use as a Ginga display surface.

    Installs a replacement ``resizeEvent`` handler that forwards size
    changes to `viewer` (which may be None and set later), and enables
    the focus, mouse-tracking and drag-and-drop behavior Ginga expects.
    """
    # keep a reference to the widget's original handler so we can chain
    # to it after notifying the viewer
    _resizeEvent = widget.resizeEvent

    def resizeEvent(*args):
        rect = widget.geometry()
        x1, y1, x2, y2 = rect.getCoords()
        width = x2 - x1
        height = y2 - y1

        if viewer is not None:
            viewer.configure_window(width, height)

        # chain to the original Qt handler
        _resizeEvent(*args)

    widget.setFocusPolicy(QtCore.Qt.FocusPolicy(
        QtCore.Qt.TabFocus |
        QtCore.Qt.ClickFocus |
        QtCore.Qt.StrongFocus |
        QtCore.Qt.WheelFocus))
    widget.setMouseTracking(True)
    widget.setAcceptDrops(True)

    # Matplotlib has a bug where resize events are not reported
    widget.resizeEvent = resizeEvent
class FigureCanvas(QtFigureCanvas):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).

    A matplotlib canvas that reports resize events to an attached Ginga
    viewer (set via ``set_viewer()``).
    """

    def __init__(self, fig, parent=None, width=5, height=4, dpi=100):
        QtFigureCanvas.__init__(self, fig)

        # viewer is attached later via set_viewer()
        self.viewer = None
        setup_Qt(self, None)

        self.setParent(parent)

        FigureCanvas.setSizePolicy(self,
                                   QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)

    def resizeEvent(self, event):
        """Forward the new widget size to the viewer, then let Qt/mpl
        handle the event normally."""
        rect = self.geometry()
        x1, y1, x2, y2 = rect.getCoords()
        width = x2 - x1
        height = y2 - y1

        if self.viewer is not None:
            self.viewer.configure_window(width, height)

        return super(FigureCanvas, self).resizeEvent(event)

    def sizeHint(self):
        # default hint; the viewer's desired size wins if one is attached
        width, height = 300, 300
        if self.viewer is not None:
            width, height = self.viewer.get_desired_size()
        return QtCore.QSize(width, height)

    def set_viewer(self, viewer):
        # attach the Ginga viewer that should track this canvas' size
        self.viewer = viewer
#END
| bsd-3-clause | d699d466b8f1d6607849d5dac1478a9d | 27.875 | 86 | 0.631602 | 3.830846 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/Errors.py | 3 | 4260 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
The ``Errors`` plugin reports error messages on the viewer.
**Plugin Type: Global**
``Errors`` is a global plugin. Only one instance can be opened.
**Usage**
When an error occurs in Ginga, its message may be reported here.
This plugin is not usually configured to be closeable, but the user can
make it so by setting the "closeable" setting to True in the configuration
file--then Close and Help buttons will be added to the bottom of the UI.
"""
import time
from collections import deque
from ginga import GingaPlugin
from ginga.gw import Widgets
__all__ = ['Errors']
class Errors(GingaPlugin.GlobalPlugin):
    """Global plugin that collects and displays error reports.

    Errors reported before the GUI is built are held in a bounded
    deque (``max_errors`` setting) and replayed when the plugin starts.
    """

    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Errors, self).__init__(fv)

        spec = self.fv.get_plugin_spec(str(self))

        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_Errors')
        self.settings.add_defaults(closeable=not spec.get('hidden', False),
                                   max_errors=100)
        self.settings.load(onError='silent')

        max_errors = self.settings.get('max_errors', 100)
        # bounded buffer of (errmsg, timestamp) tuples received while
        # the GUI is not up; oldest entries are discarded when full
        self.pending_errors = deque([], max_errors)
        self.gui_up = False

    def build_gui(self, container):
        """Construct the plugin UI inside `container`."""
        self.msg_font = self.fv.get_font('fixed', 10)

        vbox = Widgets.VBox()

        mlst = Widgets.VBox()
        mlst.set_spacing(2)
        self.msg_list = mlst

        sw = Widgets.ScrollArea()
        sw.set_widget(self.msg_list)

        vbox.add_widget(sw, stretch=1)

        btns = Widgets.HBox()
        btns.set_border_width(4)
        btns.set_spacing(4)

        if self.settings.get('closeable', False):
            btn = Widgets.Button("Close")
            btn.add_callback('activated', lambda w: self.close())
            btns.add_widget(btn)
            btn = Widgets.Button("Help")
            btn.add_callback('activated', lambda w: self.help())
            btns.add_widget(btn, stretch=0)

        btn = Widgets.Button("Remove All")
        btn.add_callback('activated', lambda w: self.remove_all())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(btns, stretch=0)

        container.add_widget(vbox, stretch=1)
        self.gui_up = True

    def add_error(self, errmsg, ts=None):
        """Report an error.

        Parameters
        ----------
        errmsg : str
            The error message text.
        ts : str or None
            Timestamp of when the error occurred; if None, the current
            time is recorded.
        """
        if ts is None:
            # Add the time the error occurred
            ts = time.strftime("%m/%d %H:%M:%S", time.localtime())

        if not self.gui_up:
            # queue for display when the GUI comes up (see start())
            self.pending_errors.append((errmsg, ts))
            return

        vbox = Widgets.VBox()

        hbox = Widgets.HBox()
        # Show the time the error occurred.  NOTE: use the recorded
        # (or passed-in) timestamp rather than recomputing it here;
        # recomputing mis-dated errors that were queued while the GUI
        # was down and only replayed later.
        lbl = Widgets.Label(ts, halign='left')
        hbox.add_widget(lbl, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(hbox, stretch=0)

        tw = Widgets.TextArea(editable=False, wrap=False)
        tw.set_font(self.msg_font)
        tw.set_text(errmsg)
        vbox.add_widget(tw, stretch=1)

        hbox = Widgets.HBox()
        btn = Widgets.Button("Remove")
        btn.add_callback('activated', lambda w: self.remove_error(vbox))
        hbox.add_widget(btn)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(hbox, stretch=0)
        # special hack for Qt
        vbox.cfg_expand(horizontal='minimum')

        self.msg_list.add_widget(vbox, stretch=0)
        # TODO: force scroll to bottom

    def remove_error(self, child):
        self.msg_list.remove(child)

    def remove_all(self):
        self.pending_errors.clear()
        for child in list(self.msg_list.get_children()):
            self.remove_error(child)

    def start(self):
        # replay any errors reported before the GUI was built.
        # NOTE: clear the deque in place instead of rebinding it to a
        # plain list, which would silently drop the max_errors bound.
        pending = list(self.pending_errors)
        self.pending_errors.clear()
        for errmsg, ts in pending:
            self.add_error(errmsg, ts=ts)

    def stop(self):
        # keep the bounded deque object; just empty it
        self.pending_errors.clear()
        self.gui_up = False

    def close(self):
        self.fv.stop_global_plugin(str(self))
        return True

    def __str__(self):
        # registry key used by the shell to look up this plugin
        return 'errors'
# END
| bsd-3-clause | 9d05c804fb948535bb4674b9a3344af4 | 28.37931 | 75 | 0.602582 | 3.610169 | false | false | false | false |
ejeschke/ginga | ginga/fonts/font_asst.py | 3 | 4215 | #
# font_asst.py -- Font assistant routines
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os
from ginga.misc import Bunch
# lookup table of cached fonts, for backends that build them
font_cache = Bunch.Bunch(caseless=True)
# lookup table of font
font_dir = Bunch.Bunch(caseless=True)
# Set up font alias mapping
aliases = Bunch.Bunch(caseless=True)
# font scaling factors
default_scaling_factor = 1.0
scaling_factor = Bunch.Bunch(caseless=True)
def add_alias(alias_name, font_name):
    """Add an alias for a font family name.

    Registers `alias_name` in the module-level alias table so that it
    resolves to `font_name`.

    e.g. add_alias('fixed', 'Monotype')
    """
    global aliases
    aliases[alias_name] = font_name
def resolve_alias(alias_name, alt_font_name):
    """Look up `alias_name` in the alias table.

    Returns the registered font family name, or `alt_font_name` if no
    such alias has been registered.

    e.g. resolve_alias('fixed', 'Courier')
    """
    try:
        return aliases[alias_name]
    except KeyError:
        return alt_font_name
def get_cache(font_key):
    """Fetch a previously cached backend font object.

    `font_key` is a key generated by the backend.  Raises KeyError if
    no font has been cached under that key.
    """
    return font_cache[font_key]
def add_cache(font_key, font):
    """Add a `font` object to the cache, placed under key `font_key`.

    Later lookups via get_cache(font_key) will return `font`.
    """
    global font_cache
    font_cache[font_key] = font
def add_font(font_file, font_name=None):
    """Register an externally loadable font.

    `font_file` is the path to the font file; `font_name`, if given, is
    the name to register it under, otherwise the file's base name
    (without extension) is used.  Returns the name actually registered.
    """
    global font_dir
    if font_name is None:
        # derive the family name from the font file's base name
        base = os.path.basename(font_file)
        font_name, _ext = os.path.splitext(base)

    font_dir[font_name] = Bunch.Bunch(name=font_name, font_path=font_file)
    return font_name
def have_font(font_name):
    """Return True if `font_name` names a registered loadable font.

    If the name itself is not registered, it is also tried as an alias
    before giving up.
    """
    if font_name not in font_dir:
        # not a direct match; see whether it is a known alias
        font_name = resolve_alias(font_name, font_name)
    return font_name in font_dir
def get_loadable_fonts():
    """Return the sequence of externally loadable fonts.

    The result is a list of the registered font family names.
    """
    return list(font_dir.keys())
def get_font_info(font_name, subst_ok=True):
    """Return the registration record for a loadable font.

    `font_name` is assumed to have already been resolved from an alias.
    Raises KeyError if the font is unknown, unless `subst_ok` is True,
    in which case the record for a fallback font is returned instead.
    """
    global font_dir
    try:
        return font_dir[font_name]
    except KeyError:
        if not subst_ok:
            raise
        # substitute the house font as an alternate
        subst_name = resolve_alias('house', 'fixed')
        return font_dir[subst_name]
def remove_font(font_name):
    """Remove `font_name` from the directory of loadable fonts.

    Unknown names are ignored silently.
    """
    global font_dir
    if font_name in font_dir:
        del font_dir[font_name]
def scale_fontsize(key, fontsize):
    """Return `fontsize` scaled by the factor registered for renderer
    `key` (or the default factor), rounded to the nearest integer.
    """
    factor = scaling_factor.get(key, default_scaling_factor)
    scaled = fontsize * factor
    return int(round(scaled))
def set_scale_factor(key, factor):
    """Set the scale factor for a renderer `key` to `factor`.

    Used later by scale_fontsize() when computing sizes for that
    renderer.
    """
    global scaling_factor
    scaling_factor[key] = factor
# --- Set up bundled fonts ---
# the bundled .ttf files live in subdirectories next to this module
fontdir, xx = os.path.split(__file__)

add_font(os.path.join(fontdir, 'Roboto', 'Roboto-Regular.ttf'),
         font_name='roboto')
add_font(os.path.join(fontdir, 'Roboto_Condensed',
                      'RobotoCondensed-Regular.ttf'),
         font_name='roboto condensed')
add_font(os.path.join(fontdir, 'Ubuntu_Mono', 'UbuntuMono-Regular.ttf'),
         font_name='ubuntu mono')

# house font needs to be available; map the generic family aliases
# onto the bundled fonts registered above
add_alias('house', 'ubuntu mono')
add_alias('fixed', 'ubuntu mono')
add_alias('sans', 'roboto')
add_alias('sans serif', 'roboto')
add_alias('sans condensed', 'roboto condensed')
| bsd-3-clause | c020ea19435fc9c40c162ba119934dd3 | 27.288591 | 82 | 0.67331 | 3.435208 | false | false | false | false |
ejeschke/ginga | ginga/util/mosaic.py | 2 | 46524 | #
# mosaic.py -- Classes for quick and dirty mosaicing of FITS images
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import time
import warnings
import numpy as np
from ginga import AstroImage, trcalc
from ginga.util import wcs, loader, dp, iqcalc
from ginga.util import io_fits
from ginga.misc import Callback, Settings
def get_warp_indexes(shape_in, wcs_in, wcs_out):
    """Get numpy index arrays to warp an image into another projection.

    For every pixel coordinate of an array with shape ``shape_in``,
    convert to sky coordinates via ``wcs_in`` and then back to pixel
    coordinates via ``wcs_out``.

    Parameters
    ----------
    shape_in : numpy ndarray shape
        The shape of the array to be projected

    wcs_in : subclass of `~ginga.util.wcsmod.common.BaseWCS`
        Ginga WCS wrapper that is associated with the input array

    wcs_out : subclass of `~ginga.util.wcsmod.common.BaseWCS`
        Ginga WCS wrapper that is associated with the output

    Returns
    -------
    A 3-tuple (old_pts, coords, new_pts), where all three arrays
    are Nx2.
    """
    ht, wd = shape_in[:2]
    # enumerate every (x, y) pixel position, row-major
    yy, xx = np.indices((ht, wd))
    old_pts = np.column_stack((xx.ravel(), yy.ravel()))

    # convert data coords of pixels to sky coords
    coords = wcs_in.datapt_to_wcspt(old_pts)

    # calc these sky points in data x, y according to the *wcs_out*
    new_pts = wcs_out.wcspt_to_datapt(coords)

    return old_pts, coords, new_pts
def warp_image(data_in, wcs_in, wcs_out, fill=None, pixel_radius=1):
    """Warp image in 2D numpy array ``data`` to a new array.

    Warps ``data_in`` using ``wcs_in`` and projecting into ``wcs_out``.
    The resulting array may have empty pixels which are initially filled
    with ``fill`` and then computed from the median value of surrounding
    pixels at a radius of ``pixel_radius``.

    Parameters
    ----------
    data_in : numpy 2D ndarray
        The array to be warped (projected)

    wcs_in : subclass of `~ginga.util.wcsmod.common.BaseWCS`
        Ginga WCS wrapper that is associated with the input array

    wcs_out : subclass of `~ginga.util.wcsmod.common.BaseWCS`
        Ginga WCS wrapper that is associated with the output

    fill : scalar value or `None` (optional, defaults to `None`)
        The value to initially fill the new output image array

    pixel_radius : `int` (optional, defaults to 1)
        The pixel radius to use for collecting values to fill empty pixels

    Returns
    -------
    Returns a 5-tuple (data_out, old_pts, coords, new_pts, out_pts).
    ``data_out`` is the warped image, with an alpha mask layer attached.
    ``out_pts`` is a Nx2 array describing the relocated pixels in data_out.
    See ``get_warp_indexes`` for discussion of the ``old_pts``, ``coords``
    and ``new_pts`` return values.
    """
    old_pts, coords, new_pts = get_warp_indexes(data_in.shape, wcs_in, wcs_out)

    # round to nearest int
    new_pts = np.rint(new_pts).astype(int)

    # get bounds of array values
    mn, mx = trcalc.get_bounds(new_pts)

    # subtract minimums to turn pixel coordinates into offsets
    out_pts = new_pts - mn

    # np fancy indexing to warp array as necessary
    x, y = old_pts.T
    nx, ny = out_pts.T

    if fill is None:
        # select a suitable fill value if one is not provided:
        # NaN for float arrays, 0 otherwise
        if issubclass(data_in.dtype.type, np.floating):
            fill = np.nan
        else:
            fill = 0

    # allocate new array: two planes--image data and alpha mask
    new_wd, new_ht = mx - mn + np.array((1, 1))
    data_out = np.full((new_ht, new_wd, 2), fill, dtype=data_in.dtype)

    # prepare mask, will be True where there are empty pixels in the image
    mask = np.full(data_out.shape[:2], True, dtype=bool)
    mask[ny, nx] = False

    # fill alpha layer with zeros where we have empty pixels and ones
    # otherwise
    data_out[..., 1][mask] = 0
    data_out[..., 1][~mask] = 1

    # warp the data into the destination image
    data_out[ny, nx, 0] = data_in[y, x]

    if pixel_radius > 0:
        # fill in holes in output image with median values of surrounding
        # pixels NOTE: this also fills in alpha layer values
        y, x = np.where(mask)
        pr = pixel_radius
        # all neighbor offsets within the radius, excluding the center;
        # NOTE: the x, y names inside this comprehension are scoped to
        # the comprehension and do not clobber the index arrays above
        offsets = [(x, y)
                   for x in range(-pr, pr + 1) for y in range(-pr, pr + 1)]
        offsets.remove((0, 0))
        # one shifted view per offset, clipped to stay inside the array
        _arrs = [data_out[_y, _x]
                 for _x, _y in [((x + offsets[i][0]).clip(0, new_wd - 1),
                                 (y + offsets[i][1]).clip(0, new_ht - 1))
                                for i in range(len(offsets))]]
        with warnings.catch_warnings():
            # we can get a "RuntimeWarning: All-NaN slice encountered"
            # which is ok as we simply let this resolve to NaN
            warnings.simplefilter("ignore")
            vals = np.nanmedian(np.dstack(_arrs), axis=2)
        _arrs = None  # deref arrays
        data_out[y, x] = vals

    # finally multipy alpha layer by mx_v to achieve full opacity where it
    # is wanted
    mn_v, mx_v = trcalc.get_minmax_dtype(data_out.dtype)
    data_out[..., 1] *= mx_v

    return (data_out, old_pts, coords, new_pts, out_pts)
def mosaic_inline(baseimage, imagelist, bg_ref=None, trim_px=None,
                  merge=False, allow_expand=True, expand_pad_deg=0.01,
                  max_expand_pct=None,
                  update_minmax=True, suppress_callback=False):
    """Drops new images into the image `baseimage` (if there is room),
    relocating them according the WCS between the two images.

    Each piece in `imagelist` is scaled, rotated and flipped as needed
    to match the mosaic's WCS orientation, then copied into the mosaic
    array.  If `allow_expand` is True the mosaic array is enlarged
    (padded by `expand_pad_deg`) to make room for pieces that fall
    outside it.  Returns a list of (xlo, ylo, xhi, yhi) placements,
    one per piece.
    """
    # Get our own (mosaic) rotation and scale
    header = baseimage.get_header()
    ((xrot_ref, yrot_ref),
     (cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)

    scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)

    # drop each image in the right place in the new data array
    mydata = baseimage._get_data()

    count = 1
    res = []
    for image in imagelist:
        name = image.get('name', 'image%d' % (count))
        count += 1

        data_np = image._get_data()
        if 0 in data_np.shape:
            baseimage.logger.info("Skipping image with zero length axis")
            continue

        # Calculate sky position at the center of the piece
        ctr_x, ctr_y = trcalc.get_center(data_np)
        ra, dec = image.pixtoradec(ctr_x, ctr_y, coords='data')

        # User specified a trim?  If so, trim edge pixels from each
        # side of the array
        ht, wd = data_np.shape[:2]
        if trim_px:
            xlo, xhi = trim_px, wd - trim_px
            ylo, yhi = trim_px, ht - trim_px
            data_np = data_np[ylo:yhi, xlo:xhi, ...]
            ht, wd = data_np.shape[:2]

        # If caller asked us to match background of pieces then
        # get the median of this piece
        if bg_ref is not None:
            bg = iqcalc.get_median(data_np)
            bg_inc = bg_ref - bg
            data_np = data_np + bg_inc

        # Determine max/min to update our values
        if update_minmax:
            maxval = np.nanmax(data_np)
            minval = np.nanmin(data_np)
            baseimage.maxval = max(baseimage.maxval, maxval)
            baseimage.minval = min(baseimage.minval, minval)

        # Get rotation and scale of piece
        header = image.get_header()
        ((xrot, yrot),
         (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
        baseimage.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
                               "cdelt2=%f" % (name, xrot, yrot,
                                              cdelt1, cdelt2))

        # scale if necessary
        # TODO: combine with rotation?
        if (not np.isclose(math.fabs(cdelt1), scale_x) or
                not np.isclose(math.fabs(cdelt2), scale_y)):
            nscale_x = math.fabs(cdelt1) / scale_x
            nscale_y = math.fabs(cdelt2) / scale_y
            baseimage.logger.debug("scaling piece by x(%f), y(%f)" % (
                nscale_x, nscale_y))
            data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
                data_np, 0, 0, wd - 1, ht - 1, nscale_x, nscale_y,
                logger=baseimage.logger)

        # Rotate piece into our orientation, according to wcs
        rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref

        flip_x = False
        flip_y = False

        # Optimization for 180 rotations: a double flip is cheaper
        # than a general rotation
        if (np.isclose(math.fabs(rot_dx), 180.0) or
                np.isclose(math.fabs(rot_dy), 180.0)):
            rotdata = trcalc.transform(data_np,
                                       flip_x=True, flip_y=True)
            rot_dx = 0.0
            rot_dy = 0.0
        else:
            rotdata = data_np

        # Finish with any necessary rotation of piece
        if not np.isclose(rot_dy, 0.0):
            rot_deg = rot_dy
            baseimage.logger.debug("rotating %s by %f deg" % (name, rot_deg))
            rotdata = trcalc.rotate(rotdata, rot_deg,
                                    #rotctr_x=ctr_x, rotctr_y=ctr_y
                                    logger=baseimage.logger)

        # Flip X due to negative CDELT1
        if np.sign(cdelt1) != np.sign(cdelt1_ref):
            flip_x = True

        # Flip Y due to negative CDELT2
        if np.sign(cdelt2) != np.sign(cdelt2_ref):
            flip_y = True

        if flip_x or flip_y:
            rotdata = trcalc.transform(rotdata,
                                       flip_x=flip_x, flip_y=flip_y)

        # Get size and data of new image
        ht, wd = rotdata.shape[:2]
        ctr_x, ctr_y = trcalc.get_center(rotdata)

        # Find location of image piece (center) in our array
        x0, y0 = baseimage.radectopix(ra, dec, coords='data')

        # Merge piece as closely as possible into our array
        # Unfortunately we lose a little precision rounding to the
        # nearest pixel--can't be helped with this approach
        x0, y0 = int(np.rint(x0)), int(np.rint(y0))
        baseimage.logger.debug("Fitting image '%s' into mosaic at %d,%d" % (
            name, x0, y0))

        # This is for useful debugging info only
        my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
        off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
        baseimage.logger.debug("centering offsets: %d,%d" % (off_x, off_y))

        # Sanity check piece placement
        xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
        ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
        assert (xhi - xlo == wd), \
            Exception("Width differential %d != %d" % (xhi - xlo, wd))
        assert (yhi - ylo == ht), \
            Exception("Height differential %d != %d" % (yhi - ylo, ht))

        mywd, myht = baseimage.get_size()
        if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
            if not allow_expand:
                raise Exception("New piece doesn't fit on image and "
                                "allow_expand=False")

            # <-- Resize our data array to allow the new image

            # determine amount to pad expansion by
            expand_x = max(int(expand_pad_deg / scale_x), 0)
            expand_y = max(int(expand_pad_deg / scale_y), 0)

            nx1_off, nx2_off = 0, 0
            if xlo < 0:
                nx1_off = abs(xlo) + expand_x
            if xhi > mywd:
                nx2_off = (xhi - mywd) + expand_x
            xlo, xhi = xlo + nx1_off, xhi + nx1_off

            ny1_off, ny2_off = 0, 0
            if ylo < 0:
                ny1_off = abs(ylo) + expand_y
            if yhi > myht:
                ny2_off = (yhi - myht) + expand_y
            ylo, yhi = ylo + ny1_off, yhi + ny1_off

            new_wd = mywd + nx1_off + nx2_off
            new_ht = myht + ny1_off + ny2_off

            # sanity check on new mosaic size
            old_area = mywd * myht
            new_area = new_wd * new_ht
            expand_pct = new_area / old_area
            if ((max_expand_pct is not None) and
                    (expand_pct > max_expand_pct)):
                raise Exception("New area exceeds current one by %.2f %%;"
                                "increase max_expand_pct (%.2f) to allow" %
                                (expand_pct * 100, max_expand_pct))

            # go for it!
            new_data = np.zeros((new_ht, new_wd))
            # place current data into new data
            new_data[ny1_off:ny1_off + myht, nx1_off:nx1_off + mywd] = \
                mydata
            baseimage._data = new_data
            mydata = new_data

            if (nx1_off > 0) or (ny1_off > 0):
                # Adjust our WCS for relocation of the reference pixel
                crpix1, crpix2 = baseimage.get_keywords_list('CRPIX1',
                                                             'CRPIX2')
                kwds = dict(CRPIX1=crpix1 + nx1_off,
                            CRPIX2=crpix2 + ny1_off,
                            NAXIS1=new_wd, NAXIS2=new_ht)
                baseimage.update_keywords(kwds)

        # fit image piece into our array; with merge=False only pixels
        # that are still zero in the mosaic are overwritten
        try:
            if merge:
                mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
            else:
                idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
                mydata[ylo:yhi, xlo:xhi, ...][idx] = \
                    rotdata[0:ht, 0:wd, ...][idx]

        except Exception as e:
            baseimage.logger.error("Error fitting tile: %s" % (str(e)))
            raise

        res.append((xlo, ylo, xhi, yhi))

    # TODO: recalculate min and max values
    # Can't use usual techniques because it adds too much time to the
    # mosaicing
    #baseimage._set_minmax()

    # Notify watchers that our data has changed
    if not suppress_callback:
        baseimage.make_callback('modified')

    return res
def mosaic(logger, itemlist, fov_deg=None):
    """Build a mosaic from a sequence of images or files.

    Parameters
    ----------
    logger : logger object
        a logger object passed to created AstroImage instances
    itemlist : sequence like
        a sequence of either filenames or AstroImage instances
    fov_deg : float or None (optional)
        field of view of the mosaic in degrees; if None, the mosaic
        is expanded as needed to fit each piece

    Returns
    -------
    img_mosaic : `~ginga.AstroImage.AstroImage`
        the mosaic image containing all the pieces
    """
    if isinstance(itemlist[0], AstroImage.AstroImage):
        image0 = itemlist[0]
        name = image0.get('name', 'image0')
    else:
        # Assume it is a file and load it
        filepath = itemlist[0]
        logger.info("Reading file '%s' ..." % (filepath))
        image0 = loader.load_data(filepath, logger=logger)
        name = filepath

    ra_deg, dec_deg = image0.get_keywords_list('CRVAL1', 'CRVAL2')
    header = image0.get_header()
    (rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
    logger.debug("image0 rot=%f cdelt1=%f cdelt2=%f" % (rot_deg,
                                                        cdelt1, cdelt2))

    px_scale = math.fabs(cdelt1)
    expand = False
    if fov_deg is None:
        # TODO: calculate fov?
        expand = True

    cdbase = [np.sign(cdelt1), np.sign(cdelt2)]
    img_mosaic = dp.create_blank_image(ra_deg, dec_deg,
                                       fov_deg, px_scale, rot_deg,
                                       cdbase=cdbase,
                                       logger=logger)
    header = img_mosaic.get_header()
    (rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
    logger.debug("mosaic rot=%f cdelt1=%f cdelt2=%f" % (rot, cdelt1, cdelt2))

    logger.debug("Processing '%s' ..." % (name))
    tup = mosaic_inline(img_mosaic, [image0], allow_expand=expand)
    logger.debug("placement %s" % (str(tup)))

    count = 1
    for item in itemlist[1:]:
        if isinstance(item, AstroImage.AstroImage):
            image = item
        else:
            # Create and load the image.  NOTE: use the generic loader
            # here for consistency with the first item (formerly this
            # path used io_fits.load_file directly, which limited the
            # remaining items to FITS files only)
            filepath = item
            logger.info("Reading file '%s' ..." % (filepath))
            image = loader.load_data(filepath, logger=logger)

        name = image.get('name', 'image%d' % (count))
        logger.debug("Inlining '%s' ..." % (name))
        tup = mosaic_inline(img_mosaic, [image])
        logger.debug("placement %s" % (str(tup)))
        count += 1

    logger.info("Done.")
    return img_mosaic
class ImageMosaicer(Callback.Callbacks):
"""Class for creating mosaics in a `~ginga.AstroImage.AstroImage`.
Individual tiles are transformed and inserted into the right place
in the image ndarray. The array can be automatically enlarged as
necessary to accomodate the new tiles.
Typical usage:
>>> mosaicer = ImageMosaicer(logger)
>>> mosaicer.mosaic(images)
where ``images`` is a list of `~ginga.AstroImage.AstroImage` that
should be plotted in ``viewer``.
"""
    def __init__(self, logger, settings=None):
        """Constructor.

        Parameters
        ----------
        logger : logger object
            logger used for progress/debug output
        settings : `~ginga.misc.Settings.SettingGroup` or None
            mosaicing options; a default group is created if None
        """
        super(ImageMosaicer, self).__init__()
        self.logger = logger
        self.ingest_count = 0
        # holds processed images to be inserted into mosaic image
        self.total_images = 0
        self.image_list = []

        # options
        if settings is None:
            settings = Settings.SettingGroup(name='mosaicer',
                                             logger=self.logger)
        self.t_ = settings
        self.t_.set_defaults(fov_deg=0.2,
                             match_bg=False, trim_px=0, merge=False,
                             mosaic_hdus=False, skew_limit=0.1,
                             allow_expand=True, expand_pad_deg=0.01,
                             reuse_image=False, mosaic_method='simple',
                             update_minmax=True, max_expand_pct=None,
                             annotate_images=False, annotate_color='pink',
                             annotate_fontsize=10.0, ann_fits_kwd=None,
                             ann_tag_pfx='ann_')

        # these are updated in prepare_mosaic() and represent measurements
        # on the reference image
        self.bg_ref = 0.0
        self.xrot_ref, self.yrot_ref = 0.0, 0.0
        self.cdelt1_ref, self.cdelt2_ref = 1.0, 1.0
        self.scale_x = 1.0
        self.scale_y = 1.0

        # the mosaic image under construction (see prepare_mosaic())
        self.baseimage = None

        for name in ['progress', 'finished']:
            self.enable_callback(name)
    def get_settings(self):
        """Return the SettingGroup holding the mosaicing options."""
        return self.t_
def prepare_mosaic(self, ref_image):
"""Prepare a new (blank) mosaic image based on the pointing of
the reference image ``ref_image``.
Returns a `~ginga.AstroImage.AstroImage`
This method is typically called internally.
"""
# if user requesting us to match backgrounds, then calculate
# median of root image and save it
data_np = ref_image.get_data()
dtype = data_np.dtype
#dtype = None
if issubclass(dtype.type, np.floating):
fill = np.nan
else:
fill = 0
fov_deg = self.t_['fov_deg']
if self.t_['match_bg']:
self.bg_ref = iqcalc.get_median(data_np)
header = ref_image.get_header()
ra_deg, dec_deg = header['CRVAL1'], header['CRVAL2']
(rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header,
skew_threshold=self.t_['skew_limit'])
self.logger.debug("ref_image rot=%f cdelt1=%f cdelt2=%f" % (rot_deg,
cdelt1, cdelt2))
# Prepare pixel scale for each axis
px_scale = (math.fabs(cdelt1), math.fabs(cdelt2))
cdbase = [np.sign(cdelt1), np.sign(cdelt2)]
if not self.t_['reuse_image'] or self.baseimage is None:
self.logger.debug("creating blank image to hold mosaic")
# GC old mosaic
self.baseimage = None
self.baseimage = dp.create_blank_image(ra_deg, dec_deg,
fov_deg, px_scale, rot_deg,
cdbase=cdbase,
logger=self.logger,
pfx='mosaic',
dtype=dtype,
alpha=0, fill=fill)
else:
# <-- reuse image (faster)
self.logger.debug("Reusing previous mosaic image")
dp.recycle_image(self.baseimage, ra_deg, dec_deg,
fov_deg, px_scale, rot_deg,
cdbase=cdbase,
logger=self.logger,
pfx='mosaic', alpha=0, fill=fill)
header = self.baseimage.get_header()
# TODO: handle skew (differing rotation for each axis)?
(rot_xy, cdelt_xy) = wcs.get_xy_rotation_and_scale(header)
self.logger.debug("ref image rot_x=%f rot_y=%f cdelt1=%f cdelt2=%f" % (
rot_xy[0], rot_xy[1], cdelt_xy[0], cdelt_xy[1]))
# Store base image rotation and scale
self.xrot_ref, self.yrot_ref = rot_xy
self.cdelt1_ref, self.cdelt2_ref = cdelt_xy
self.scale_x = math.fabs(cdelt_xy[0])
self.scale_y = math.fabs(cdelt_xy[1])
return self.baseimage
def ingest_image(self, image):
"""Ingest an image, transform it and merge it in the right place in
the image array.
This method is typically called internally.
"""
self.ingest_count += 1
count = self.ingest_count
tag = 'image{}'.format(count)
name = image.get('name', tag)
data_np = image._get_data()
if 0 in data_np.shape:
self.logger.info("Skipping image with zero length axis")
return
# Calculate sky position at the center of the piece
ctr_x, ctr_y = trcalc.get_center(data_np)
ra, dec = image.pixtoradec(ctr_x, ctr_y, coords='data')
self.image_list.append((name, tag, ra, dec))
# User specified a trim? If so, trim edge pixels from each
# side of the array
ht, wd = data_np.shape[:2]
if self.t_['trim_px'] is not None:
xlo, xhi = self.t_['trim_px'], wd - self.t_['trim_px']
ylo, yhi = self.t_['trim_px'], ht - self.t_['trim_px']
data_np = data_np[ylo:yhi, xlo:xhi, ...]
ht, wd = data_np.shape[:2]
# If caller asked us to match background of pieces then
# fix up this data
if self.t_['match_bg']:
bg = iqcalc.get_median(data_np)
bg_inc = self.bg_ref - bg
data_np = data_np + bg_inc
# Determine max/min to update our values
if self.t_['update_minmax']:
maxval = np.nanmax(data_np)
minval = np.nanmin(data_np)
self.baseimage.maxval = max(self.baseimage.maxval, maxval)
self.baseimage.minval = min(self.baseimage.minval, minval)
# Get rotation and scale of piece
header = image.get_header()
((xrot, yrot),
(cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
"cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))
# scale if necessary to scale of reference image
if (not np.isclose(math.fabs(cdelt1), self.scale_x) or
not np.isclose(math.fabs(cdelt2), self.scale_y)):
nscale_x = math.fabs(cdelt1) / self.scale_x
nscale_y = math.fabs(cdelt2) / self.scale_y
self.logger.debug("scaling piece by x(%f), y(%f)" % (
nscale_x, nscale_y))
data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
#data_np, 0, 0, wd - 1, ht - 1, nscale_x, nscale_y,
data_np, 0, 0, wd, ht, nscale_x, nscale_y,
logger=self.logger)
mydata = self.baseimage._get_data()
method = self.t_['mosaic_method']
if method == 'simple':
self.logger.debug("plotting by rotating/flipping image by WCS")
# CASE 1: simple rotation and flips
# Rotate piece into our orientation, according to wcs
rot_dx, rot_dy = xrot - self.xrot_ref, yrot - self.yrot_ref
flip_x = False
flip_y = False
# Optomization for 180 rotations
if (np.isclose(math.fabs(rot_dx), 180.0) or
np.isclose(math.fabs(rot_dy), 180.0)):
rotdata = trcalc.transform(data_np,
flip_x=True, flip_y=True)
rot_dx = 0.0
rot_dy = 0.0
else:
rotdata = data_np
# convert to same type as basedata
rotdata = rotdata.astype(mydata.dtype)
# add an alpha layer
minv, maxv = trcalc.get_minmax_dtype(rotdata.dtype)
rotdata = trcalc.add_alpha(rotdata, alpha=maxv)
# Finish with any necessary rotation of piece
if not np.isclose(rot_dy, 0.0):
rot_deg = rot_dy
self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
rotdata = trcalc.rotate(rotdata, rot_deg,
#rotctr_x=ctr_x, rotctr_y=ctr_y,
logger=self.logger, pad=0)
# Flip X due to negative CDELT1
if np.sign(cdelt1) != np.sign(self.cdelt1_ref):
flip_x = True
# Flip Y due to negative CDELT2
if np.sign(cdelt2) != np.sign(self.cdelt2_ref):
flip_y = True
if flip_x or flip_y:
rotdata = trcalc.transform(rotdata,
flip_x=flip_x, flip_y=flip_y)
# Get size and data of new image
ht, wd = rotdata.shape[:2]
ctr_x, ctr_y = trcalc.get_center(rotdata)
# Find location of image piece (center) in our array
x0, y0 = self.baseimage.radectopix(ra, dec, coords='data')
# Merge piece as closely as possible into our array
# Unfortunately we lose a little precision rounding to the
# nearest pixel--can't be helped with this approach
x0, y0 = int(np.rint(x0)), int(np.rint(y0))
self.logger.debug("Fitting image '%s' into mosaic at %f,%f" % (
name, x0, y0))
# This is for useful debugging info only
my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
self.logger.debug("centering offsets: %d,%d" % (off_x, off_y))
# Sanity check piece placement
xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
assert (xhi - xlo == wd), \
Exception("Width differential %d != %d" % (xhi - xlo, wd))
assert (yhi - ylo == ht), \
Exception("Height differential %d != %d" % (yhi - ylo, ht))
elif method == 'warp':
# convert to same type as basedata
data_np = data_np.astype(mydata.dtype)
self.logger.debug("plotting by warping image according to WCS")
# CASE 2: user wants precise transformation of image using WCS
dst, old_pts, coords, new_pts, dst_pts = warp_image(data_np,
image.wcs,
self.baseimage.wcs)
# Merge piece as closely as possible into our array
# Unfortunately we lose a little precision rounding to the
# nearest pixel--can't be helped with this approach
xlo, ylo = np.rint(new_pts[0] - dst_pts[0]).astype(int)
self.logger.debug("Fitting image '%s' into mosaic at %f,%f" % (
name, xlo, ylo))
ht, wd = dst.shape[:2]
xhi, yhi = xlo + wd, ylo + ht
rotdata = dst
else:
raise ValueError(f"don't understand mosaic method '{method}'")
#-----------
mywd, myht = self.baseimage.get_size()
if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
if not self.t_['allow_expand']:
raise Exception("New piece doesn't fit on image and "
"allow_expand=False")
# <-- Resize our data array to allow the new image
# determine amount to pad expansion by
expand_x = max(int(self.t_['expand_pad_deg'] / self.scale_x), 0)
expand_y = max(int(self.t_['expand_pad_deg'] / self.scale_y), 0)
nx1_off, nx2_off = 0, 0
if xlo < 0:
nx1_off = abs(xlo) + expand_x
if xhi > mywd:
nx2_off = (xhi - mywd) + expand_x
xlo, xhi = xlo + nx1_off, xhi + nx1_off
ny1_off, ny2_off = 0, 0
if ylo < 0:
ny1_off = abs(ylo) + expand_y
if yhi > myht:
ny2_off = (yhi - myht) + expand_y
ylo, yhi = ylo + ny1_off, yhi + ny1_off
new_wd = mywd + nx1_off + nx2_off
new_ht = myht + ny1_off + ny2_off
# sanity check on new mosaic size
old_area = mywd * myht
new_area = new_wd * new_ht
expand_pct = new_area / old_area
if ((self.t_['max_expand_pct'] is not None) and
(expand_pct > self.t_['max_expand_pct'])):
raise Exception("New area exceeds current one by %.2f %%;"
"increase max_expand_pct (%.2f) to allow" %
(expand_pct * 100, self.t_['max_expand_pct']))
# go for it!
#new_data = np.zeros((new_ht, new_wd))
new_data = np.full((new_ht, new_wd, 2), np.nan, dtype=mydata.dtype)
new_data[..., 1] = 0.0
# place current data into new data
new_data[ny1_off:ny1_off + myht, nx1_off:nx1_off + mywd] = mydata
self.baseimage._data = new_data
mydata = new_data
if (nx1_off > 0) or (ny1_off > 0):
# Adjust our WCS for relocation of the reference pixel
crpix1, crpix2 = self.baseimage.get_keywords_list('CRPIX1', 'CRPIX2')
kwds = dict(CRPIX1=crpix1 + nx1_off,
CRPIX2=crpix2 + ny1_off,
NAXIS1=new_wd, NAXIS2=new_ht)
self.baseimage.update_keywords(kwds)
# fit image piece into our array
try:
if self.t_['merge']:
mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
else:
mask = (mydata[ylo:yhi, xlo:xhi, 1] <= 0.0)
mydata[ylo:yhi, xlo:xhi, ...][mask] = rotdata[0:ht, 0:wd, ...][mask]
except Exception as e:
self.logger.error("Error fitting tile: %s" % (str(e)))
raise
return (xlo, ylo, xhi, yhi)
def ingest_one(self, image):
"""Ingest an image in the right place in the image array.
This method is typically called internally.
"""
llur = self.ingest_image(image)
self.make_callback('progress', 'fitting',
float(self.ingest_count) / self.total_images)
def reset(self):
"""Prepare for a new mosaic.
The next call to ```mosaic`` will create a new mosaic.
"""
self.baseimage = None
self.image_list = []
self.ingest_count = 0
self.total_images = 0
def annotate_images(self, canvas):
tagpfx = self.t_['ann_tag_pfx']
tags = canvas.get_tags_by_tag_pfx(tagpfx)
canvas.delete_objects_by_tag(tags, redraw=False)
if self.t_['annotate_images']:
dc = canvas.get_draw_classes()
for name, tag, ra, dec in self.image_list:
x, y = self.baseimage.radectopix(ra, dec)
text = dc.Text(x, y, name,
color=self.t_['annotate_color'],
fontsize=self.t_['annotate_fontsize'],
fontscale=True)
tag = tagpfx + tag
canvas.add(text, tag=tag, redraw=False)
canvas.update_canvas(whence=3)
def mosaic(self, images, ev_intr=None):
"""Create a mosaic of ``images``.
Returns a `~ginga.AstroImage.AstroImage`
"""
num_images = len(images)
if num_images == 0:
return
self.total_images += num_images
self.make_callback('progress', 'fitting', 0.0)
t1 = time.time()
# If there is no current mosaic then prepare a new one
if self.baseimage is None:
ref_image = images[0]
self.prepare_mosaic(ref_image)
self.logger.info("fitting tiles...")
for image in images:
if ev_intr is not None and ev_intr.is_set():
raise Exception("interrupted by user")
self.ingest_one(image)
self.logger.info("finishing...")
self.make_callback('progress', 'finishing', 0.0)
self.process_elapsed = time.time() - t1
self.logger.info("mosaic done. process=%.4f (sec)" % (
self.process_elapsed))
self.make_callback('finished', self.process_elapsed)
return self.baseimage
class CanvasMosaicer(Callback.Callbacks):
    """Class for creating collages on a Ginga canvas.
    A collage is sort of like a mosaic, except that instead of creating a
    large image array, individual tiles are transformed and plotted on a
    canvas.
    Typical usage:
    >>> collager = CanvasMosaicer(logger)
    >>> collager.mosaic(viewer, images)
    where ``images`` is a list of `~ginga.AstroImage.AstroImage` that
    should be plotted in ``viewer``.
    """
    def __init__(self, logger, settings=None):
        # logger: standard python-style logger used for all diagnostics
        # settings: optional ginga Settings.SettingGroup; a private one is
        #   created if none is passed
        super(CanvasMosaicer, self).__init__()
        self.logger = logger
        if settings is None:
            settings = Settings.SettingGroup(name='collager',
                                             logger=self.logger)
        # options
        self.t_ = settings
        self.t_.set_defaults(annotate_images=False, annotate_color='pink',
                             annotate_fontsize=10.0, ann_fits_kwd=None,
                             ann_tag_pfx='ann_',
                             match_bg=False, collage_method='simple',
                             center_image=False)
        self.ingest_count = 0
        # holds processed images to be inserted into mosaic image
        self.total_images = 0
        # these are updated in prepare_mosaic() and represent measurements
        # on the reference image
        self.bg_ref = 0.0
        self.xrot_ref, self.yrot_ref = 0.0, 0.0
        self.cdelt1_ref, self.cdelt2_ref = 1.0, 1.0
        self.scale_x = 1.0
        self.scale_y = 1.0
        # canvas limits ((xlo, ylo), (xhi, yhi)), grown as tiles arrive
        self.limits = None
        self.ref_image = None
        self.image_list = []
        # callbacks made during mosaic(): 'progress' and 'finished'
        for name in ['progress', 'finished']:
            self.enable_callback(name)
    def get_settings(self):
        """Return the SettingGroup holding this collager's options."""
        return self.t_
    def prepare_mosaic(self, ref_image):
        """Prepare for a new mosaic image based on the pointing of
        the reference image ``ref_image``.
        This method is typically called internally.
        """
        self.ref_image = ref_image
        # if user requesting us to match backgrounds, then calculate
        # median of root image and save it
        if self.t_['match_bg']:
            data_np = ref_image.get_data()
            self.bg_ref = iqcalc.get_median(data_np)
        header = ref_image.get_header()
        # TODO: handle skew (differing rotation for each axis)?
        (rot_xy, cdelt_xy) = wcs.get_xy_rotation_and_scale(header)
        self.logger.debug("ref image rot_x=%f rot_y=%f cdelt1=%f cdelt2=%f" % (
            rot_xy[0], rot_xy[1], cdelt_xy[0], cdelt_xy[1]))
        # Store base image rotation and scale
        self.xrot_ref, self.yrot_ref = rot_xy
        self.cdelt1_ref, self.cdelt2_ref = cdelt_xy
        self.scale_x = math.fabs(cdelt_xy[0])
        self.scale_y = math.fabs(cdelt_xy[1])
        self.limits = ((0, 0), (0, 0))
    def _get_name_tag(self, image):
        """Return a ``(name, tag)`` pair for ``image``.
        The name comes from the FITS keyword named by the ``ann_fits_kwd``
        setting when set, else from the image metadata.  Each call bumps
        ``ingest_count`` to generate a default tag.
        """
        self.ingest_count += 1
        tag = 'image{}'.format(self.ingest_count)
        ann_fits_kwd = self.t_['ann_fits_kwd']
        if ann_fits_kwd is not None:
            header = image.get_header()
            name = str(header[ann_fits_kwd])
        else:
            name = image.get('name', tag)
        tag = image.get('tag', tag)
        return (name, tag)
    def transform_image(self, image):
        """Prepare ``image`` to be plotted in the right place according to
        the reference image WCS. A new image is returned.
        This method is typically called internally.
        Returns `None` if the image was skipped (zero-length axis).
        """
        name, tag = self._get_name_tag(image)
        data_np = image._get_data()
        if 0 in data_np.shape:
            self.logger.info("Skipping image with zero length axis")
            # NOTE(review): returns None here; callers must tolerate that
            return
        ht, wd = data_np.shape
        # If caller asked us to match background of pieces then
        # fix up this data
        if self.t_['match_bg']:
            bg = iqcalc.get_median(data_np)
            bg_inc = self.bg_ref - bg
            data_np = data_np + bg_inc
        # Calculate sky position at the center of the piece
        ctr_x, ctr_y = trcalc.get_center(data_np)
        ra, dec = image.pixtoradec(ctr_x, ctr_y, coords='data')
        self.image_list.append((name, tag, ra, dec))
        # Get rotation and scale of piece
        header = image.get_header()
        ((xrot, yrot),
         (cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
        self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
                          "cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))
        # scale if necessary to scale of reference image
        if (not np.isclose(math.fabs(cdelt1), self.scale_x) or
            not np.isclose(math.fabs(cdelt2), self.scale_y)):
            nscale_x = math.fabs(cdelt1) / self.scale_x
            nscale_y = math.fabs(cdelt2) / self.scale_y
            self.logger.debug("scaling piece by x(%f), y(%f)" % (
                nscale_x, nscale_y))
            data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
                #data_np, 0, 0, wd - 1, ht - 1, nscale_x, nscale_y,
                data_np, 0, 0, wd, ht, nscale_x, nscale_y,
                logger=self.logger)
        method = self.t_['collage_method']
        if method == 'simple':
            self.logger.debug("plotting by rotating/flipping image by WCS")
            # CASE 1: simple rotation and flips
            # Rotate piece into our orientation, according to wcs
            rot_dx, rot_dy = xrot - self.xrot_ref, yrot - self.yrot_ref
            flip_x = False
            flip_y = False
            # Optimization for 180 rotations: a double flip is cheaper
            # than an interpolated rotate
            if (np.isclose(math.fabs(rot_dx), 180.0) or
                np.isclose(math.fabs(rot_dy), 180.0)):
                rotdata = trcalc.transform(data_np,
                                           flip_x=True, flip_y=True)
                rot_dx = 0.0
                rot_dy = 0.0
            else:
                rotdata = data_np
            # Finish with any necessary rotation of piece
            ignore_alpha = False
            if not np.isclose(rot_dy, 0.0):
                rot_deg = rot_dy
                minv, maxv = trcalc.get_minmax_dtype(rotdata.dtype)
                rotdata = trcalc.add_alpha(rotdata, alpha=maxv)
                self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
                rotdata = trcalc.rotate(rotdata, rot_deg,
                                        #rotctr_x=ctr_x, rotctr_y=ctr_y,
                                        logger=self.logger, pad=0)
                ignore_alpha = True
            # Flip X due to negative CDELT1
            if np.sign(cdelt1) != np.sign(self.cdelt1_ref):
                flip_x = True
            # Flip Y due to negative CDELT2
            if np.sign(cdelt2) != np.sign(self.cdelt2_ref):
                flip_y = True
            if flip_x or flip_y:
                rotdata = trcalc.transform(rotdata,
                                           flip_x=flip_x, flip_y=flip_y)
            # new wrapper for transformed image
            metadata = dict(header=header, ignore_alpha=ignore_alpha)
            new_image = AstroImage.AstroImage(data_np=rotdata, metadata=metadata)
            # Get size and data of new image
            ht, wd = rotdata.shape[:2]
            ctr_x, ctr_y = trcalc.get_center(rotdata)
            # Find location of image piece (center) in our array
            x0, y0 = self.ref_image.radectopix(ra, dec, coords='data')
            #x0, y0 = int(np.round(x0)), int(np.round(y0))
            self.logger.debug("Fitting image '%s' into mosaic at %f,%f" % (
                name, x0, y0))
            # update limits
            xlo, xhi = x0 - ctr_x, x0 + ctr_x
            ylo, yhi = y0 - ctr_y, y0 + ctr_y
        elif method == 'warp':
            # Need to convert to float for this type
            data_np = data_np.astype(np.float32)
            self.logger.debug("plotting by warping image according to WCS")
            # CASE 2: user wants precise transformation of image using WCS
            dst, old_pts, coords, new_pts, dst_pts = warp_image(data_np,
                                                                image.wcs,
                                                                self.ref_image.wcs)
            # new wrapper for transformed image
            metadata = dict(header=header, ignore_alpha=True)
            new_image = AstroImage.AstroImage(data_np=dst, metadata=metadata)
            # find x, y at which to plot image
            xlo, ylo = new_pts[0] - dst_pts[0]
            self.logger.debug("Fitting image '%s' into mosaic at %f,%f" % (
                name, xlo, ylo))
            new_ht, new_wd = dst.shape[:2]
            xhi, yhi = xlo + new_wd, ylo + new_ht
        else:
            raise ValueError(f"don't understand mosaic method '{method}'")
        # position at which plot_image() will place this tile
        new_image.set(xpos=xlo, ypos=ylo, name=name, tag=tag)
        # calculate new limits of canvas
        _xlo, _ylo, = self.limits[0]
        _xhi, _yhi, = self.limits[1]
        _xlo, _ylo = min(_xlo, xlo), min(_ylo, ylo)
        _xhi, _yhi = max(_xhi, xhi), max(_yhi, yhi)
        self.limits = ((_xlo, _ylo), (_xhi, _yhi))
        return new_image
    def annotate_images(self, canvas):
        """Add (or refresh) text labels on ``canvas`` naming each ingested
        tile, if the ``annotate_images`` setting is True.  Previous labels
        (identified by the ``ann_tag_pfx`` tag prefix) are removed first.
        """
        tagpfx = self.t_['ann_tag_pfx']
        tags = canvas.get_tags_by_tag_pfx(tagpfx)
        canvas.delete_objects_by_tag(tags, redraw=False)
        if self.t_['annotate_images']:
            dc = canvas.get_draw_classes()
            for name, tag, ra, dec in self.image_list:
                x, y = self.ref_image.radectopix(ra, dec)
                text = dc.Text(x, y, name,
                               color=self.t_['annotate_color'],
                               fontsize=self.t_['annotate_fontsize'],
                               fontscale=True)
                tag = tagpfx + tag
                canvas.add(text, tag=tag, redraw=False)
        canvas.update_canvas(whence=3)
    def plot_image(self, canvas, image):
        """Plot a new image created by ``transform_image()`` on ``canvas``.
        This is typically called internally.
        """
        dc = canvas.get_draw_classes()
        xpos, ypos, name, tag = image.get_list('xpos', 'ypos',
                                               'name', 'tag')
        img = dc.NormImage(xpos, ypos, image)
        img.is_data = True
        canvas.add(img, tag=tag, redraw=False)
    def reset(self):
        """Prepare for a new mosaic.
        The next call to ``mosaic`` will create a new mosaic.
        """
        self.ref_image = None
        self.ingest_count = 0
        self.total_images = 0
        self.image_list = []
    def ingest_one(self, canvas, image):
        """Plot ``image`` in the right place on the ``canvas``.
        This is typically called internally.
        """
        new_image = self.transform_image(image)
        self.plot_image(canvas, new_image)
        self.make_callback('progress', 'fitting',
                           float(self.ingest_count) / self.total_images)
    def mosaic(self, viewer, images, canvas=None, ev_intr=None):
        """Plot a mosaic of ``images`` in ``viewer`` on ``canvas``.
        If ``canvas`` is `None` the viewer's default canvas is used.
        ``ev_intr``, if given, is checked between tiles; setting it
        aborts the collage with an exception.
        """
        images = list(images)  # because we might pop(0)
        num_images = len(images)
        if num_images == 0:
            return
        self.total_images += num_images
        self.make_callback('progress', 'fitting', 0.0)
        t1 = time.time()
        if canvas is None:
            canvas = viewer.get_canvas()
        with viewer.suppress_redraw:
            # If there is no current mosaic then prepare a new one
            if self.ref_image is None:
                ref_image = images.pop(0)
                # NOTE(review): _get_name_tag() is called twice for the
                # reference image (here and below), so ingest_count is
                # bumped twice for it -- confirm this is intended
                name, tag = self._get_name_tag(ref_image)
                self.prepare_mosaic(ref_image)
                # TODO: delete only items we may have added
                canvas.delete_all_objects(redraw=False)
                # first image is loaded in the usual way
                viewer.set_image(ref_image)
                self.limits = viewer.get_limits()
                # save position of reference image for annotation
                name, tag = self._get_name_tag(ref_image)
                wd, ht = ref_image.get_size()
                ctr_x, ctr_y = wd * 0.5, ht * 0.5
                ctr_ra, ctr_dec = ref_image.pixtoradec(ctr_x, ctr_y)
                self.image_list.append((name, tag, ctr_ra, ctr_dec))
            self.logger.info("fitting tiles...")
            for image in images:
                # yield briefly to other threads between tiles
                time.sleep(0)
                if ev_intr is not None and ev_intr.is_set():
                    raise Exception("interrupted by user")
                self.ingest_one(canvas, image)
            self.logger.info("finishing...")
            self.annotate_images(canvas)
            self.make_callback('progress', 'finishing', 0.0)
            viewer.set_limits(self.limits)
            if self.t_['center_image']:
                viewer.center_image()
            canvas.update_canvas(whence=0)
        self.process_elapsed = time.time() - t1
        self.logger.info("collage done. process=%.4f (sec)" % (
            self.process_elapsed))
        self.make_callback('finished', self.process_elapsed)
| bsd-3-clause | 46bf48faa16495bdb693797bab69d167 | 37.040883 | 100 | 0.533875 | 3.520278 | false | false | false | false |
ejeschke/ginga | ginga/examples/matplotlib/example3_mpl.py | 2 | 13514 | #! /usr/bin/env python
#
# example3_mpl.py -- Copy attributes from a Ginga Qt widget into a Matplotlib
# figure.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
#
"""
$ ./example3_mpl.py [fits file]
example3 displays a native ginga widget beside a matplotlib figure as two
panes. A fits file can be dropped into the left pane and manipulated using
the standard Ginga interactive controls
see (http://ginga.readthedocs.io/en/latest/quickref.html).
Drop down boxes allow the color map to be changed.
The right pane has two buttons under it: pressing each button sets up a
different kind of plot in the mpl pane based on the current state of the
ginga pane.
You need Qt5/Qt6 with pyqt bindings (or pyside) installed to run this
example.
"""
import sys
import matplotlib
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from ginga.qtw.ImageViewQt import CanvasView
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
from ginga import cmap, imap
from ginga.misc import log
from ginga.util.loader import load_data
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class FitsViewer(QtGui.QMainWindow):
    """Main window pairing a Ginga image viewer pane (left) with a
    matplotlib figure pane (right).  Buttons under the matplotlib pane
    re-plot the current Ginga view into the figure in two different ways.
    """
    def __init__(self, logger):
        super(FitsViewer, self).__init__()
        self.logger = logger
        menubar = self.menuBar()
        # create a File pulldown menu, and add it to the menu bar
        filemenu = menubar.addMenu("File")
        item = QtGui.QAction("Open File", menubar)
        item.triggered.connect(self.open_file)
        filemenu.addAction(item)
        sep = QtGui.QAction(menubar)
        sep.setSeparator(True)
        filemenu.addAction(sep)
        item = QtGui.QAction("Quit", menubar)
        item.triggered.connect(self.close)
        filemenu.addAction(item)
        # Add matplotlib color maps to our built in ones
        cmap.add_matplotlib_cmaps()
        self.cmaps = cmap.get_names()
        self.imaps = imap.get_names()
        wd, ht = 500, 500
        # Create a Ginga widget
        fi = CanvasView(logger, render='widget')
        fi.enable_autocuts('on')
        fi.set_autocut_params('zscale')
        fi.enable_autozoom('on')
        fi.set_callback('drag-drop', self.drop_file_cb)
        fi.set_callback('cursor-changed', self.cursor_cb)
        fi.set_bg(0.2, 0.2, 0.2)
        fi.show_mode_indicator(True, corner='ur')
        fi.ui_set_active(True)
        self.fitsimage = fi
        # NOTE(review): show_mode_indicator() was already called above;
        # this second call looks redundant
        fi.show_mode_indicator(True, corner='ur')
        fi.show_color_bar(True)
        # enable various key and mouse controlled actions
        bd = fi.get_bindings()
        bd.enable_all(True)
        self.cp_tag = 'compass'
        # pack widget into layout
        gingaw = fi.get_widget()
        gingaw.resize(wd, ht)
        vbox1 = QtGui.QWidget()
        layout = QtGui.QVBoxLayout()
        layout.addWidget(gingaw, stretch=1)
        self.cm = cmap.get_cmap('gray')
        self.im = imap.get_imap('ramp')
        # color map selection widget
        wcmap = QtGui.QComboBox()
        for name in self.cmaps:
            wcmap.addItem(name)
        index = self.cmaps.index('gray')
        wcmap.setCurrentIndex(index)
        wcmap.activated.connect(self.set_cmap_cb)
        self.wcmap = wcmap
        # intensity map selection widget
        wimap = QtGui.QComboBox()
        for name in self.imaps:
            wimap.addItem(name)
        index = self.imaps.index('ramp')
        wimap.setCurrentIndex(index)
        wimap.activated.connect(self.set_cmap_cb)
        self.wimap = wimap
        #wopen = QtGui.QPushButton("Open File")
        #wopen.clicked.connect(self.open_file)
        # add buttons to layout
        hbox = QtGui.QHBoxLayout()
        hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
        hbox.addStretch(1)
        for w in (wcmap, wimap):
            hbox.addWidget(w, stretch=0)
        hw = QtGui.QWidget()
        hw.setLayout(hbox)
        layout.addWidget(hw, stretch=0)
        vbox1.setLayout(layout)
        # Create a matplotlib Figure
        #self.fig = matplotlib.figure.Figure(figsize=(wd, ht))
        self.fig = matplotlib.figure.Figure()
        self.canvas = FigureCanvas(self.fig)
        vbox2 = QtGui.QWidget()
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.canvas, stretch=1)
        # Add matplotlib buttons
        hbox = QtGui.QHBoxLayout()
        hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
        wgetimg = QtGui.QPushButton("Get Data")
        wgetimg.clicked.connect(self.get_image)
        wgetrgb = QtGui.QPushButton("Get RGB")
        wgetrgb.clicked.connect(self.get_rgb_image)
        #wquit = QtGui.QPushButton("Quit")
        #wquit.clicked.connect(self.close)
        hbox.addStretch(1)
        for w in (wgetimg, wgetrgb):
            hbox.addWidget(w, stretch=0)
        hw = QtGui.QWidget()
        hw.setLayout(hbox)
        layout.addWidget(hw, stretch=0)
        vbox2.setLayout(layout)
        vbox = QtGui.QVBoxLayout()
        vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
        vbox.setSpacing(1)
        w = QtGui.QWidget()
        layout = QtGui.QHBoxLayout()
        layout.addWidget(vbox1, stretch=1)
        layout.addWidget(vbox2, stretch=1)
        w.setLayout(layout)
        vbox.addWidget(w, stretch=1)
        # status line under both panes, showing cursor readout
        self.readout = QtGui.QLabel("")
        vbox.addWidget(self.readout, stretch=0,
                       alignment=QtCore.Qt.AlignCenter)
        vw = QtGui.QWidget()
        vw.setLayout(vbox)
        self.setCentralWidget(vw)
    def set_cmap_cb(self, kind):
        """Callback for either combo box: apply the currently selected
        color map and intensity map to the Ginga viewer.
        """
        index = self.wcmap.currentIndex()
        cmap_name = self.cmaps[index]
        self.cm = cmap.get_cmap(cmap_name)
        index = self.wimap.currentIndex()
        imap_name = self.imaps[index]
        self.im = imap.get_imap(imap_name)
        self.fitsimage.set_cmap(self.cm)
        self.fitsimage.set_imap(self.im)
    def clear_canvas(self):
        """Remove all overlay objects from the Ginga canvas."""
        canvas = self.fitsimage.get_canvas()
        canvas.delete_all_objects()
    def load_file(self, filepath):
        """Load ``filepath`` into the Ginga viewer and draw a WCS compass
        overlay centered on the image.
        """
        image = load_data(filepath, logger=self.logger)
        self.fitsimage.set_image(image)
        self.setWindowTitle(filepath)
        # create compass
        try:
            try:
                # remove any compass from a previously loaded image
                canvas = self.fitsimage.get_canvas()
                canvas.delete_object_by_tag(self.cp_tag)
            except KeyError:
                pass
            width, height = image.get_size()
            x, y = width / 2.0, height / 2.0
            # radius we want the arms to be (approx 1/4 the largest dimension)
            radius = float(max(width, height)) / 4.0
            canvas = self.fitsimage.get_canvas()
            Compass = canvas.get_draw_class('compass')
            canvas.add(Compass(x, y, radius, color='skyblue',
                               fontsize=14), tag=self.cp_tag)
        except Exception as e:
            # e.g. image has no usable WCS; viewing still works without it
            self.logger.warning("Can't calculate compass: %s" % (
                str(e)))
    def open_file(self):
        """Menu callback: prompt for a FITS file and load it."""
        res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
                                                ".", "FITS files (*.fits)")
        # PyQt returns a (filename, filter) tuple; older bindings a string
        if isinstance(res, tuple):
            fileName = res[0]
        else:
            fileName = str(res)
        if len(fileName) != 0:
            self.load_file(fileName)
    def drop_file_cb(self, viewer, paths):
        """Ginga 'drag-drop' callback: load the first dropped path."""
        filename = paths[0]
        self.load_file(filename)
    def closeEvent(self, ce):
        """Qt close event: shut the window down."""
        self.close()
    def cursor_cb(self, viewer, button, data_x, data_y):
        """This gets called when the data position relative to the cursor
        changes.
        """
        # Get the value under the data coordinates
        try:
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            value = viewer.get_data(int(data_x + viewer.data_off),
                                    int(data_y + viewer.data_off))
        except Exception:
            value = None
        # FITS pixel coordinates are 1-based
        fits_x, fits_y = data_x + 1, data_y + 1
        # Calculate WCS RA
        try:
            # NOTE: image function operates on DATA space coords
            image = viewer.get_image()
            if image is None:
                # No image loaded
                return
            ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
                                               format='str', coords='fits')
        except Exception as e:
            self.logger.warning("Bad coordinate conversion: %s" % (
                str(e)))
            ra_txt = 'BAD WCS'
            dec_txt = 'BAD WCS'
        text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
            ra_txt, dec_txt, fits_x, fits_y, value)
        self.readout.setText(text)
    def calculate_aspect(self, shape, extent):
        """Return the x/y aspect ratio of data pixels for the given array
        ``shape`` (rows, cols) and plot ``extent`` (x0, x1, y0, y1).
        """
        dx = abs(extent[1] - extent[0]) / float(shape[1])
        dy = abs(extent[3] - extent[2]) / float(shape[0])
        return dx / dy
    def make_mpl_colormap(self, fitsimage):
        # make the equivalent color map for matplotlib
        rgbmap = fitsimage.get_rgbmap()
        cm = rgbmap.get_cmap()
        mpl_cm = cmap.ginga_to_matplotlib_cmap(cm)
        return mpl_cm
    def get_wcs_extent(self, image, x0, y0, x1, y1):
        # WCS of the area: sky coordinates (deg) of the two corners,
        # packed as an (ra0, ra1, dec0, dec1) matplotlib extent
        ra0, dec0 = image.pixtoradec(x0, y0, format='deg', coords='data')
        ra1, dec1 = image.pixtoradec(x1, y1, format='deg', coords='data')
        extent = (ra0, ra1, dec0, dec1)
        return extent
    def get_rgb_image(self):
        """Button callback: render the Ginga viewer's current RGB output
        (pan/zoom/transforms applied) into the matplotlib figure.
        """
        fi = self.fitsimage
        # clear previous image
        self.fig.clf()
        # Grab the RGB array for the current image and place it in the
        # matplotlib figure axis
        arr = fi.getwin_array(order='RGB')
        # force aspect ratio of figure to match
        wd, ht = fi.get_window_size()
        # Get the data extents
        x0, y0 = fi.get_data_xy(0, 0)
        x1, y1 = fi.get_data_xy(wd - 1, ht - 1)
        flipx, flipy, swapxy = fi.get_transforms()
        if swapxy:
            # axes are swapped in the viewer, so swap labels/extents too
            x0, x1, y0, y1 = y0, y1, x0, x1
            xlabel = 'dec'
            ylabel = 'ra'
        else:
            xlabel = 'ra'
            ylabel = 'dec'
        #extent = (x0, x1, y1, y0)
        image = fi.get_image()
        extent = self.get_wcs_extent(image, x0, x1, y1, y0)
        #print "extent=%s" % (str(extent))
        # Calculate aspect ratio
        aspect = self.calculate_aspect(arr.shape, extent)
        #ax = self.fig.add_subplot(111, adjustable='box', aspect=aspect)
        ax = self.fig.add_subplot(111)
        ax.autoscale(True, tight=True)
        ax.set_anchor('C')
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # make the equivalent color map for matplotlib
        # NOTE(review): return value is discarded; the RGB array is
        # already colormapped, so this call appears to be a no-op
        self.make_mpl_colormap(fi)
        ax.imshow(arr, interpolation="nearest", origin="upper",
                  vmin=0, vmax=255,
                  extent=extent,
                  aspect=aspect)
        # force an update of the figure
        self.fig.canvas.draw()
    def get_image(self):
        """Button callback: plot the raw data of the visible region in the
        matplotlib figure, using the viewer's cut levels and color map.
        """
        fi = self.fitsimage
        # clear previous image
        self.fig.clf()
        ax = self.fig.add_subplot(111)
        ax.autoscale(True, tight=True)
        x0, y0, x1, y1 = tuple(map(int, fi.get_datarect()))
        #extent = (x0, x1, y0, y1)
        image = fi.get_image()
        arr = image.cutout_data(x0, y0, x1, y1)
        extent = self.get_wcs_extent(image, x0, y0, x1, y1)
        # get cut levels
        loval, hival = fi.get_cut_levels()
        # make the equivalent color map for matplotlib
        cm = self.make_mpl_colormap(fi)
        # add the image to the figure
        interp = 'nearest'
        img = ax.imshow(arr, interpolation=interp, origin="lower",
                        vmin=loval, vmax=hival, cmap=cm,
                        aspect="equal", extent=extent)
        # add a colorbar
        self.fig.colorbar(img, orientation='vertical')
        # force an update of the figure
        self.fig.canvas.draw()
def main(options, args):
    """Create the Qt application and viewer window, then run the event
    loop.  If a positional argument is present it is loaded as the
    initial FITS file.
    """
    # Qt6 wants the high-DPI rounding policy set before the QApplication
    if QtHelp.have_pyqt6 or QtHelp.have_pyside6:
        QtGui.QApplication.setHighDpiScaleFactorRoundingPolicy(
            QtCore.Qt.HighDpiScaleFactorRoundingPolicy.Floor)
    app = QtGui.QApplication(args)
    logger = log.get_logger(name="example3", options=options)
    viewer = FitsViewer(logger)
    viewer.resize(1024, 540)
    viewer.show()
    # bring the window to the front and give it focus
    app.setActiveWindow(viewer)
    viewer.raise_()
    viewer.activateWindow()
    if args:
        viewer.load_file(args[0])
    app.exec_()
if __name__ == "__main__":
    # Build the command-line option parser
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("--debug", dest="debug", default=False,
                        action="store_true",
                        help="Enter the pdb debugger on main()")
    parser.add_argument("--profile", dest="profile", action="store_true",
                        default=False,
                        help="Run the profiler on main()")
    # let ginga add its standard logging options
    log.addlogopts(parser)
    options, args = parser.parse_known_args(sys.argv[1:])
    if options.debug:
        # run under the pdb debugger
        import pdb
        pdb.run('main(options, args)')
    elif options.profile:
        # run under the profiler
        import profile
        print(("%s profile:" % sys.argv[0]))
        profile.run('main(options, args)')
    else:
        main(options, args)
# END
| bsd-3-clause | 2e8c0487dd0fd44b80301aa35ea58efc | 29.995413 | 100 | 0.578585 | 3.604695 | false | false | false | false |
ejeschke/ginga | ginga/modes/dist.py | 2 | 3696 | #
# dist.py -- mode for controlling color distribution
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""Dist Mode enables bindings that can adjust the color distribution
of an image in a Ginga image viewer.
These algorithms are similar to "curves" type profiles: "linear",
"log", "power", "sqrt", "squared", "asinh", "sinh", "histeq"
Enter the mode by
-----------------
* Space, then "d"
Exit the mode by
----------------
* Esc
Default bindings in mode
------------------------
* D : reset the color distribution algorithm to "linear"
* b, up arrow : select the previous distribution algorithm in the list
* n, down arrow : select the next distribution algorithm in the list
* scroll : select the color distribution algorithm by scrolling
"""
from ginga.modes.mode_base import Mode
class DistMode(Mode):
    def __init__(self, viewer, settings=None):
        """Set up the key/scroll action table for this mode.

        Each entry maps an action callback name to the event strings
        that trigger it (see the mode/bindings machinery).
        """
        super().__init__(viewer, settings=settings)
        self.actions = dict(
            # entering the mode itself (space, then 'd')
            dmod_dist=['__d', None, None],
            kp_dist_reset=['D', 'dist+d', 'dist+D'],
            kp_dist_prev=['dist+up', 'dist+b'],
            kp_dist_next=['dist+down', 'dist+n'],
            sc_dist=['dist+scroll'])
    def __str__(self):
        # name under which this mode is registered
        return 'dist'
    @property
    def cancmap(self):
        """True if the viewer's bindings currently allow color map operations."""
        bd = self.viewer.get_bindings()
        return bd.get_feature_allow('cmap')
    def start(self):
        # no setup is needed when entering this mode
        pass
    def stop(self):
        # clear any onscreen message left over from this mode
        self.onscreen_message(None)
def _cycle_dist(self, viewer, msg, direction='down'):
msg = self.settings.get('msg_dist', msg)
rgbmap = viewer.get_rgbmap()
algs = rgbmap.get_hash_algorithms()
algname = rgbmap.get_hash_algorithm()
idx = algs.index(algname)
if direction == 'down':
idx = (idx + 1) % len(algs)
else:
idx = idx - 1
if idx < 0:
idx = len(algs) - 1
algname = algs[idx]
rgbmap.set_hash_algorithm(algname)
if msg:
self.onscreen_message("Color dist: %s" % (algname),
delay=1.0)
def _reset_dist(self, viewer, msg):
msg = self.settings.get('msg_dist', msg)
rgbmap = viewer.get_rgbmap()
algname = 'linear'
rgbmap.set_hash_algorithm(algname)
if msg:
self.onscreen_message("Color dist: %s" % (algname),
delay=1.0)
##### KEYBOARD ACTION CALLBACKS #####
    def kp_dist(self, viewer, event, data_x, data_y, msg=True):
        """Key callback: cycle to the next color distribution algorithm."""
        if not self.cancmap:
            # color map operations are disabled for this viewer
            return False
        event.accept()
        self._cycle_dist(viewer, msg)
    def kp_dist_reset(self, viewer, event, data_x, data_y, msg=True):
        """Key callback: reset the color distribution algorithm to 'linear'."""
        if not self.cancmap:
            return False
        event.accept()
        self._reset_dist(viewer, msg)
    def kp_dist_prev(self, viewer, event, data_x, data_y, msg=True):
        """Key callback: select the previous color distribution algorithm."""
        if not self.cancmap:
            return False
        event.accept()
        self._cycle_dist(viewer, msg, direction='up')
    def kp_dist_next(self, viewer, event, data_x, data_y, msg=True):
        """Key callback: select the next color distribution algorithm."""
        if not self.cancmap:
            return False
        event.accept()
        self._cycle_dist(viewer, msg, direction='down')
##### SCROLL ACTION CALLBACKS #####
    def sc_dist(self, viewer, event, msg=True):
        """Interactively change the color distribution algorithm
        by scrolling.
        """
        if not self.cancmap:
            return False
        event.accept()
        # translate the scroll event direction for _cycle_dist()
        direction = self.get_direction(event.direction)
        self._cycle_dist(viewer, msg, direction=direction)
##### MOUSE ACTION CALLBACKS #####
| bsd-3-clause | a43e98fa73c8c597bed9261ba1bbc9db | 28.568 | 70 | 0.570617 | 3.710843 | false | false | false | false |
ejeschke/ginga | ginga/web/pgw/ipg.py | 2 | 17276 | #! /usr/bin/env python
#
# ipg.py -- Module for simple FITS viewer in an HTML5 canvas web browser.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
This example illustrates using a Ginga widget in a web browser.  All the
rendering is done on the server side and the browser only acts as a display
front end. Using this you could create an analysis type environment on a
server and view it via a browser.
See example usage with an ipython notebook at:
https://gist.github.com/ejeschke/6067409
You will need a reasonably modern web browser with HTML5 canvas support.
Tested with Chromium 41.0.2272.76, Firefox 37.0.2, Safari 7.1.6
"""
import sys
import os
import logging
import threading
import asyncio
import tornado.web
from ginga import AstroImage
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.misc import log, Bunch
from ginga.Bindings import ImageViewBindings
from ginga.misc.Settings import SettingGroup
from ginga.util.paths import ginga_home
from ginga.util import loader
from ginga.web.pgw import js, PgHelp, Widgets, Viewers
class BasicCanvasView(Viewers.CanvasView):
    """A Ginga canvas viewer rendered through a web (pgw) widget.

    Adds convenience methods for embedding the viewer in a Jupyter
    notebook, opening it in a browser, and loading FITS files, HDUs
    or raw numpy arrays.
    """
    def build_gui(self, container):
        """
        This is responsible for building the viewer's UI.  It should
        place the UI in `container`.  Override this to make a custom
        UI.
        """
        vbox = Widgets.VBox()
        vbox.set_border_width(0)
        w = Viewers.GingaViewerWidget(viewer=self)
        vbox.add_widget(w, stretch=1)
        # need to put this in an hbox with an expanding label or the
        # browser wants to resize the canvas, distorting it
        hbox = Widgets.HBox()
        hbox.add_widget(vbox, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        container.set_widget(hbox)
    def embed(self, width=600, height=650):
        """
        Embed a viewer into a Jupyter notebook.
        """
        from IPython.display import IFrame
        return IFrame(self.url, width, height)
    def open(self, new=1):
        """
        Open this viewer in a new browser window or tab.
        (requires `webbrowser` module)
        """
        import webbrowser
        webbrowser.open(self.url, new=new)
    def show(self, fmt=None):
        """
        Capture the window of a viewer and return it as an IPython Image.
        """
        # force any delayed redraws
        # TODO: this really needs to be addressed in get_rgb_image_as_bytes()
        # of the various superclasses, as it affects other backends as well
        self.redraw_now()
        from IPython.display import Image
        if fmt is None:
            # what format are we using for the HTML5 canvas--use that
            settings = self.get_settings()
            fmt = settings.get('html5_canvas_format', 'png')
        return Image(data=bytes(self.get_rgb_image_as_bytes(format=fmt)),
                     format=fmt, embed=True)
    def load_fits(self, filepath):
        """
        Load a FITS file into the viewer.
        """
        image = loader.load_data(filepath, logger=self.logger)
        self.set_image(image)
    # alias for load_fits
    load = load_fits
    def load_hdu(self, hdu):
        """
        Load an HDU into the viewer.
        """
        image = AstroImage.AstroImage(logger=self.logger)
        image.load_hdu(hdu)
        self.set_image(image)
    def load_data(self, data_np):
        """
        Load raw numpy data into the viewer.
        """
        image = AstroImage.AstroImage(logger=self.logger)
        image.set_data(data_np)
        self.set_image(image)
    def add_canvas(self, tag=None):
        """Add a new drawing canvas to the view and return it."""
        # add a canvas to the view
        my_canvas = self.get_canvas()
        DrawingCanvas = my_canvas.get_draw_class('drawingcanvas')
        canvas = DrawingCanvas()
        # enable drawing on the canvas
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_drawtype(None)
        canvas.ui_set_active(True)
        canvas.set_surface(self)
        canvas.register_for_cursor_drawing(self)
        # add the canvas to the view.
        my_canvas.add(canvas, tag=tag)
        canvas.set_draw_mode(None)
        return canvas
    def set_html5_canvas_format(self, fmt):
        """
        Sets the format used for rendering to the HTML5 canvas.
        'png' offers greater clarity, especially for small text, but
        does not have as good of performance as 'jpeg'.
        """
        fmt = fmt.lower()
        if fmt not in ('jpeg', 'png'):
            raise ValueError("Format must be one of {jpeg|png} not '%s'" % (
                fmt))
        settings = self.get_settings()
        settings.set(html5_canvas_format=fmt)
    def get_html5_canvas_format(self):
        """Return the format currently used for the HTML5 canvas."""
        settings = self.get_settings()
        return settings.get('html5_canvas_format')
class EnhancedCanvasView(BasicCanvasView):
    """
    Like BasicCanvasView, but includes a readout widget for when the
    cursor is moved over the canvas to display the coordinates.
    """
    def build_gui(self, container):
        """
        This is responsible for building the viewer's UI.  It should
        place the UI in `container`.
        """
        vbox = Widgets.VBox()
        vbox.set_border_width(2)
        vbox.set_spacing(1)
        w = Viewers.GingaViewerWidget(viewer=self)
        vbox.add_widget(w, stretch=1)
        # set up to capture cursor movement for reading out coordinates
        # coordinates reported in base 1 or 0?
        self.pixel_base = 1.0
        self.readout = Widgets.Label("")
        vbox.add_widget(self.readout, stretch=0)
        #self.set_callback('none-move', self.motion_cb)
        self.set_callback('cursor-changed', self.motion_cb)
        # need to put this in an hbox with an expanding label or the
        # browser wants to resize the canvas, distorting it
        hbox = Widgets.HBox()
        hbox.add_widget(vbox, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        container.set_widget(hbox)
    def motion_cb(self, viewer, button, data_x, data_y):
        """Cursor-motion callback: update the readout with pixel value,
        FITS coordinates and WCS RA/DEC under the cursor.
        """
        # Get the value under the data coordinates
        try:
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            value = viewer.get_data(int(data_x + 0.5), int(data_y + 0.5))
        except Exception:
            value = None
        pb = self.pixel_base
        fits_x, fits_y = data_x + pb, data_y + pb
        # Calculate WCS RA
        try:
            # NOTE: image function operates on DATA space coords
            image = viewer.get_image()
            if image is None:
                # No image loaded
                return
            ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
                                               format='str', coords='fits')
        except Exception as e:
            self.logger.warning("Bad coordinate conversion: %s" % (
                str(e)))
            ra_txt = 'BAD WCS'
            dec_txt = 'BAD WCS'
        text = "RA: %s  DEC: %s  X: %.2f  Y: %.2f  Value: %s" % (
            ra_txt, dec_txt, fits_x, fits_y, value)
        self.readout.set_text(text)
    def set_readout_text(self, text):
        """Set the text shown in the readout label directly."""
        self.readout.set_text(text)
class ViewerFactory(object):
    """
    This is a factory class that churns out web viewers for a web
    application.
    The most important method of interest is get_viewer().
    """
    def __init__(self, logger, app):
        """
        Parameters
        ----------
        logger : python compatible logger
            a logging-module compatible logger object
        app : ginga pgw web application object
        """
        self.logger = logger
        self.app = app
        self.dc = get_canvas_types()
        # dict of viewers, keyed by viewer id
        self.viewers = {}
    def make_viewer(self, window, viewer_class=None,
                    width=512, height=512):
        """Construct and configure a new viewer inside `window`;
        returns a Bunch with (url, viewer, top) items.
        """
        if viewer_class is None:
            viewer_class = EnhancedCanvasView
        # load binding preferences if available
        cfgfile = os.path.join(ginga_home, "ipg_bindings.cfg")
        bindprefs = SettingGroup(name='bindings', logger=self.logger,
                                 preffile=cfgfile)
        bindprefs.load(onError='silent')
        bd = ImageViewBindings(self.logger, settings=bindprefs)
        fi = viewer_class(self.logger, bindings=bd)
        fi.url = window.url
        # set up some reasonable defaults--user can change these later
        # if desired
        fi.set_autocut_params('zscale')
        fi.enable_autocuts('on')
        fi.enable_autozoom('on')
        fi.set_bg(0.2, 0.2, 0.2)
        fi.ui_set_active(True)
        fi.ipg_parent = self
        # enable most key/mouse operations
        bd = fi.get_bindings()
        bd.enable_all(True)
        # set up a non-private canvas for drawing
        canvas = self.dc.DrawingCanvas()
        canvas.set_surface(fi)
        # add canvas to view
        private_canvas = fi.get_canvas()
        private_canvas.add(canvas)
        canvas.ui_set_active(True)
        fi.set_canvas(canvas)
        fi.set_desired_size(width, height)
        # force allocation of a surface--may be resized later
        fi.configure_surface(width, height)
        # add little mode indicator that shows modal states in
        # the corner
        fi.show_mode_indicator(True)
        # Have the viewer build its UI into the container
        fi.build_gui(window)
        v_info = Bunch.Bunch(url=window.url, viewer=fi,
                             top=window)
        return v_info
    def get_viewer(self, v_id, viewer_class=None, width=512, height=512,
                   force_new=False):
        """
        Get an existing viewer by viewer id.  If the viewer does not yet
        exist, make a new one.
        """
        if not force_new:
            try:
                return self.viewers[v_id]
            except KeyError:
                pass
        # create top level window
        window = self.app.make_window("Viewer %s" % v_id, wid=v_id)
        # We get back a record with information about the viewer
        v_info = self.make_viewer(window, viewer_class=viewer_class,
                                  width=width, height=height)
        # Save it under this viewer id
        self.viewers[v_id] = v_info
        return v_info
    def delete_viewer(self, v_id):
        """Remove the viewer with id `v_id` from the factory's registry."""
        del self.viewers[v_id]
    def delete_all_viewers(self):
        """Forget all registered viewers."""
        self.viewers = {}
class FileHandler(tornado.web.RequestHandler):
    """
    This is a handler that is started to allow a REST-type web API to
    create and manipulate viewers.
    Currently it only allows the following commands:
        .../viewer?id=v1&cmd=get             Create/access a viewer
        .../viewer?id=v1&cmd=load&path=...   Load the viewer
    """
    def initialize(self, name, factory):
        # called by tornado with the arguments given at route registration
        self.name = name
        self.factory = factory
        self.logger = factory.logger
        self.logger.debug("filehandler initialize")
    def get(self):
        """Handle a GET request: dispatch on the 'cmd' query argument."""
        self.logger.debug("filehandler get")
        # Collect arguments
        # TODO: width, height?
        cmd = self.get_argument('cmd', 'get')
        v_id = self.get_argument('id', 'v1')
        v_info = self.factory.get_viewer(v_id)
        if cmd == 'get':
            self._do_get(v_info)
        elif cmd == 'load':
            self._do_load(v_info)
    def _do_get(self, v_info):
        # Get window
        window = v_info.top
        # render back to caller
        output = window.render()
        self.write(output)
    def _do_load(self, v_info):
        # load the file named by the 'path' query argument, if given
        path = self.get_argument('path', None)
        if path is not None:
            v_info.viewer.load_fits(path)
class WebServer(object):
    """Tornado-based web server hosting a Ginga pgw web application.

    Serves the javascript assets, the REST-ish viewer API (FileHandler)
    and the websocket application handlers.
    """
    def __init__(self, app, factory,
                 host='localhost', port=9909, ev_quit=None,
                 viewer_class=None):
        self.host = host
        self.port = port
        self.app = app
        self.logger = app.logger
        self.factory = factory
        if ev_quit is None:
            ev_quit = threading.Event()
        # event set when the server is asked to shut down
        self.ev_quit = ev_quit
        self.default_viewer_class = viewer_class
        self.server = None
        self.http_server = None
        # event loop used to run the server; assigned in start().
        # Initialized here so that stop() is safe even if start() was
        # never called (previously this raised AttributeError).
        self.t_ioloop = None
    def start(self, use_thread=True, no_ioloop=False):
        """Create the tornado application and begin listening.

        If `no_ioloop` is True, no event loop is set up (the caller is
        expected to be running one already, e.g. in a Jupyter kernel).
        """
        js_path = os.path.dirname(js.__file__)
        self.server = tornado.web.Application([
            (r"/js/(.*\.js)", tornado.web.StaticFileHandler,
             {"path": js_path}),
            (r"/viewer", FileHandler,
             dict(name='Ginga', factory=self.factory)),
            (r"/app", PgHelp.WindowHandler,
             dict(name='Application', url='/app', app=self.app)),
            (r"/app/socket", PgHelp.ApplicationHandler,
             dict(name='Ginga', app=self.app)),
        ], factory=self.factory, logger=self.logger)
        self.http_server = self.server.listen(self.port, self.host)
        if no_ioloop:
            self.t_ioloop = None
        else:
            try:
                # NOTE: tornado now uses the asyncio event loop
                self.t_ioloop = asyncio.get_running_loop()
            except RuntimeError:
                # no running loop--create and install one
                self.t_ioloop = asyncio.new_event_loop()
                asyncio.set_event_loop(self.t_ioloop)
    def stop(self):
        """Stop the event loop (if we made one) and shut down the server."""
        # how to stop tornado server?
        if self.t_ioloop is not None:
            self.t_ioloop.stop()
        self.ev_quit.set()
        # stop and dereference the tornado server
        if self.http_server is not None:
            self.http_server.stop()
            self.http_server = None
        self.server = None
    def get_viewer(self, v_id, viewer_class=None, width=512, height=512,
                   force_new=False):
        """Get (or create) a viewer by id; see ViewerFactory.get_viewer()."""
        if viewer_class is None:
            viewer_class = self.default_viewer_class
        v_info = self.factory.get_viewer(v_id, viewer_class=viewer_class,
                                         width=width, height=height,
                                         force_new=force_new)
        return v_info.viewer
def make_server(logger=None, basedir='.', numthreads=5,
                host='localhost', port=9909, viewer_class=None):
    """Convenience function: wire up an Application, ViewerFactory and
    WebServer and return the WebServer (not yet started).

    NOTE(review): `basedir`, `numthreads` and the local `ev_quit` are
    currently unused here--kept for API compatibility, it seems; verify
    before removing.
    """
    if logger is None:
        # use a null logger if the caller does not supply one
        logger = log.get_logger("ipg", null=True)
    ev_quit = threading.Event()
    base_url = "http://%s:%d/app" % (host, port)
    app = Widgets.Application(logger=logger, base_url=base_url,
                              host=host, port=port)
    factory = ViewerFactory(logger, app)
    server = WebServer(app, factory,
                       host=host, port=port, viewer_class=viewer_class)
    return server
def main(options, args):
    """Create a server with one viewer ('v1') and run it until
    interrupted with Ctrl-C.
    """
    logger = log.get_logger("ipg", options=options)
    server = make_server(logger=logger, basedir=options.basedir,
                         numthreads=options.numthreads, host=options.host,
                         port=options.port)
    # creating the viewer also creates its top-level window
    viewer = server.get_viewer('v1')
    logger.info("Starting server with one viewer, connect at %s" % viewer.url)
    try:
        # blocks until interrupted
        server.start(use_thread=False)
    except KeyboardInterrupt:
        logger.info("Interrupted!")
        server.stop()
    logger.info("Server terminating ...")
if __name__ == "__main__":
    # Parse command line options
    from argparse import ArgumentParser
    argprs = ArgumentParser()
    argprs.add_argument("-d", "--basedir", dest="basedir", metavar="DIR",
                        default=".",
                        help="Directory which is at the base of file open requests")
    argprs.add_argument("--debug", dest="debug", default=False, action="store_true",
                        help="Enter the pdb debugger on main()")
    argprs.add_argument("--host", dest="host", metavar="HOST",
                        default="localhost",
                        help="HOST used to decide which interfaces to listen on")
    argprs.add_argument("--log", dest="logfile", metavar="FILE",
                        help="Write logging output to FILE")
    argprs.add_argument("--loglevel", dest="loglevel", metavar="LEVEL",
                        type=int, default=logging.INFO,
                        help="Set logging level to LEVEL")
    argprs.add_argument("--numthreads", dest="numthreads", type=int,
                        default=5, metavar="NUM",
                        help="Start NUM threads in thread pool")
    argprs.add_argument("--stderr", dest="logstderr", default=False,
                        action="store_true",
                        help="Copy logging also to stderr")
    argprs.add_argument("-p", "--port", dest="port",
                        type=int, default=9909, metavar="PORT",
                        help="Default PORT to use for the web socket")
    argprs.add_argument("--profile", dest="profile", action="store_true",
                        default=False,
                        help="Run the profiler on main()")
    (options, args) = argprs.parse_known_args(sys.argv[1:])
    # Are we debugging this?
    if options.debug:
        import pdb
        pdb.run('main(options, args)')
    # Are we profiling this?
    elif options.profile:
        import profile
        print(("%s profile:" % sys.argv[0]))
        profile.run('main(options, args)')
    else:
        main(options, args)
# END
# END
| bsd-3-clause | d1fa424e2cd5b884f420b37d239d0a3a | 30.699083 | 84 | 0.580343 | 3.91391 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/Header.py | 3 | 8904 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
The ``Header`` plugin provides a listing of the metadata associated with the
image.
**Plugin Type: Global**
``Header`` is a global plugin. Only one instance can be opened.
**Usage**
The ``Header`` plugin shows the FITS keyword metadata from the image.
Initially only the Primary HDU metadata is shown. However, in
conjunction with the ``MultiDim`` plugin, the metadata for other HDUs will be
shown. See ``MultiDim`` for details.
If the "Sortable" checkbox has been checked in the lower left of the UI,
then clicking on a column header will sort the table by values in that
column, which may be useful for quickly locating a particular keyword.
If the "Include primary header" checkbox toggles the inclusion of the
primary HDU keywords or not. This option may be disabled if the image
was created with an option not to save the primary header.
"""
from collections import OrderedDict
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.gw import Widgets
__all__ = ['Header']
class Header(GingaPlugin.GlobalPlugin):
    """Global plugin that displays FITS keyword metadata for the image
    in the currently focused channel.  One table widget is kept per
    channel in a stack; the stack page follows channel focus.
    """
    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Header, self).__init__(fv)
        # last image whose header was rendered (cache key)
        self._image = None
        # name of the currently focused channel
        self.active = None
        # per-channel info bunch for the active channel
        self.info = None
        self.columns = [('Keyword', 'key'),
                        ('Value', 'value'),
                        ('Comment', 'comment'),
                        ]
        spec = self.fv.get_plugin_spec(str(self))
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_Header')
        self.settings.add_defaults(sortable=False,
                                   color_alternate_rows=True,
                                   max_rows_for_col_resize=5000,
                                   include_primary_header=False,
                                   closeable=not spec.get('hidden', False))
        self.settings.load(onError='silent')
        self.flg_sort = self.settings.get('sortable', False)
        self.flg_prihdr = self.settings.get('include_primary_header', False)
        # track channel lifecycle and focus changes
        fv.add_callback('add-channel', self.add_channel)
        fv.add_callback('delete-channel', self.delete_channel)
        fv.add_callback('channel-change', self.focus_cb)
        self.gui_up = False
    def build_gui(self, container):
        """Build the plugin UI (stack of per-channel tables + toggles)."""
        vbox = Widgets.VBox()
        vbox.set_border_width(1)
        vbox.set_spacing(1)
        nb = Widgets.StackWidget()
        vbox.add_widget(nb, stretch=1)
        self.nb = nb
        # create sort toggle
        hbox = Widgets.HBox()
        cb = Widgets.CheckBox("Sortable")
        cb.set_state(self.flg_sort)
        cb.add_callback('activated', lambda w, tf: self.set_sortable_cb(tf))
        hbox.add_widget(cb, stretch=0)
        cb = Widgets.CheckBox("Include primary header")
        cb.set_state(self.flg_prihdr)
        cb.add_callback('activated', lambda w, tf: self.set_prihdr_cb(tf))
        self.w.chk_prihdr = cb
        hbox.add_widget(cb, stretch=0)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(hbox, stretch=0)
        if self.settings.get('closeable', False):
            btns = Widgets.HBox()
            btns.set_border_width(4)
            btns.set_spacing(4)
            btn = Widgets.Button("Close")
            btn.add_callback('activated', lambda w: self.close())
            btns.add_widget(btn)
            btn = Widgets.Button("Help")
            btn.add_callback('activated', lambda w: self.help())
            btns.add_widget(btn, stretch=0)
            btns.add_widget(Widgets.Label(''), stretch=1)
            vbox.add_widget(btns, stretch=0)
        container.add_widget(vbox, stretch=1)
        self.gui_up = True
    def _create_header_window(self, info):
        """Create the keyword table widget for one channel; store it
        (and its container) in `info`.
        """
        vbox = Widgets.VBox()
        vbox.set_margins(2, 2, 2, 2)
        color_alternate = self.settings.get('color_alternate_rows', True)
        table = Widgets.TreeView(auto_expand=True,
                                 use_alt_row_color=color_alternate)
        self.table = table
        table.setup_table(self.columns, 1, 'key')
        vbox.add_widget(table, stretch=1)
        info.setvals(widget=vbox, table=table)
        return vbox
    def set_header(self, info, image):
        """Populate the channel's table with `image`'s header keywords."""
        if self._image == image:
            # we've already handled this header
            return
        self.logger.debug("setting header")
        if self.gui_up:
            has_prihdr = (hasattr(image, 'has_primary_header') and
                          image.has_primary_header())
            self.w.chk_prihdr.set_enabled(has_prihdr)
        header = image.get_header(include_primary_header=self.flg_prihdr)
        table = info.table
        is_sorted = self.flg_sort
        tree_dict = OrderedDict()
        keys = list(header.keys())
        if is_sorted:
            keys.sort()
        for key in keys:
            card = header.get_card(key)
            tree_dict[key] = card
        table.set_tree(tree_dict)
        # Resize column widths
        n_rows = len(tree_dict)
        if n_rows < self.settings.get('max_rows_for_col_resize', 5000):
            table.set_optimal_column_widths()
            self.logger.debug("Resized columns for {0} row(s)".format(n_rows))
        self.logger.debug("setting header done ({0})".format(is_sorted))
        # remember the image so repeated calls are cheap
        self._image = image
    def add_channel(self, viewer, channel):
        """Callback: create a table page for a newly added channel."""
        if not self.gui_up:
            return
        chname = channel.name
        info = Bunch.Bunch(chname=chname)
        sw = self._create_header_window(info)
        self.nb.add_widget(sw)
        info.setvals(widget=sw)
        channel.extdata._header_info = info
    def delete_channel(self, viewer, channel):
        """Callback: remove the table page of a deleted channel."""
        if not self.gui_up:
            return
        chname = channel.name
        self.logger.debug("deleting channel %s" % (chname))
        info = channel.extdata._header_info
        widget = info.widget
        self.nb.remove(widget, delete=True)
        self.active = None
        self.info = None
    def focus_cb(self, viewer, channel):
        """Callback: raise the page for the newly focused channel and
        refresh it from the channel's current image.
        """
        if not self.gui_up:
            return
        chname = channel.name
        if self.active != chname:
            if '_header_info' not in channel.extdata:
                self.add_channel(viewer, channel)
            info = channel.extdata._header_info
            widget = info.widget
            index = self.nb.index_of(widget)
            self.nb.set_index(index)
            self.active = chname
            self.info = info
        image = channel.get_current_image()
        if image is None:
            return
        self.set_header(self.info, image)
    def start(self):
        """Plugin start: build pages for all existing channels and show
        the header of the currently active one.
        """
        names = self.fv.get_channel_names()
        for name in names:
            channel = self.fv.get_channel(name)
            self.add_channel(self.fv, channel)
        channel = self.fv.get_channel_info()
        if channel is not None:
            viewer = channel.fitsimage
            image = viewer.get_image()
            if image is not None:
                self.redo(channel, image)
            self.focus_cb(viewer, channel)
    def stop(self):
        """Plugin stop: drop per-channel state and UI references."""
        names = self.fv.get_channel_names()
        for name in names:
            channel = self.fv.get_channel(name)
            channel.extdata._header_info = None
        self.gui_up = False
        self.nb = None
        self.active = None
        self.info = None
        return True
    def redo(self, channel, image):
        """This is called when image changes."""
        self._image = None  # Skip cache checking in set_header()
        info = channel.extdata._header_info
        self.set_header(info, image)
    def blank(self, channel):
        """This is called when image is cleared."""
        self._image = None
        info = channel.extdata._header_info
        info.table.clear()
    def set_sortable_cb(self, tf):
        """Callback for the "Sortable" checkbox; re-renders the header."""
        self.flg_sort = tf
        self._image = None
        if self.info is not None:
            info = self.info
            channel = self.fv.get_channel(info.chname)
            image = channel.get_current_image()
            self.set_header(info, image)
    def set_prihdr_cb(self, tf):
        """Callback for the "Include primary header" checkbox."""
        self.flg_prihdr = tf
        self._image = None
        if self.info is not None:
            info = self.info
            channel = self.fv.get_channel(info.chname)
            image = channel.get_current_image()
            self.set_header(info, image)
    def close(self):
        self.fv.stop_global_plugin(str(self))
        return True
    def __str__(self):
        # name under which this plugin is registered
        return 'header'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example # noqa
if __doc__ is not None:
__doc__ += generate_cfg_example('plugin_Header', package='ginga')
# END
| bsd-3-clause | ab3973b984760b7b678115ddb085c874 | 31.615385 | 78 | 0.58558 | 3.801879 | false | false | false | false |
ejeschke/ginga | ginga/gtk3w/Widgets.py | 2 | 77295 | #
# Widgets.py -- wrapped Gtk widgets and convenience functions
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os.path
from ginga.gtk3w import GtkHelp
import ginga.icons
from ginga.misc import Callback, Bunch, Settings, LineHistory
from functools import reduce
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GdkPixbuf
import gi
has_webkit = False
try:
# this is necessary to prevent a warning message on import
gi.require_version('WebKit2', '4.0')
from gi.repository import WebKit2 as WebKit # noqa
has_webkit = True
except Exception:
try:
gi.require_version('WebKit', '3.0')
from gi.repository import WebKit # noqa
except Exception:
pass
__all__ = ['WidgetError', 'WidgetBase', 'TextEntry', 'TextEntrySet',
'TextArea', 'Label', 'Button', 'ComboBox',
'SpinBox', 'Slider', 'Dial', 'ScrollBar', 'CheckBox', 'ToggleButton',
'RadioButton', 'Image', 'ProgressBar', 'StatusBar', 'TreeView',
'WebView', 'ContainerBase', 'Box', 'HBox', 'VBox', 'Frame',
'Expander', 'TabWidget', 'StackWidget', 'MDIWidget', 'ScrollArea',
'Splitter', 'GridBox', 'Toolbar', 'MenuAction',
'Menu', 'Menubar', 'TopLevelMixin', 'TopLevel', 'Application',
'Dialog', 'SaveDialog', 'DragPackage', 'WidgetMoveEvent',
'name_mangle', 'make_widget', 'hadjust', 'build_info', 'wrap',
'has_webkit']
# path to our icons
icondir = os.path.split(ginga.icons.__file__)[0]
class WidgetError(Exception):
    """For errors thrown in this module."""
    pass
# (see TabWidget)
_widget_move_event = None
_app = None
# BASE
class WidgetBase(Callback.Callbacks):
    """Base class for all wrapped Gtk widgets.

    Holds the native Gtk widget in ``self.widget`` and provides the
    toolkit-neutral interface used by the rest of Ginga.
    """
    def __init__(self):
        super(WidgetBase, self).__init__()
        self.widget = None
        # external data can be attached here
        self.extdata = Bunch.Bunch()
    def get_widget(self):
        """Return the wrapped native Gtk widget."""
        return self.widget
    def set_tooltip(self, text):
        self.widget.set_tooltip_text(text)
    def get_enabled(self):
        """Return True if the widget is enabled (sensitive).

        BUGFIX: previously the result of get_sensitive() was discarded
        and this method always returned None.
        """
        return self.widget.get_sensitive()
    def set_enabled(self, tf):
        self.widget.set_sensitive(tf)
    def get_size(self):
        """Return the (width, height) of the widget in pixels."""
        try:
            rect = self.widget.get_allocation()
            # x, y = rect.x, rect.y
            wd, ht = rect.width, rect.height
        except Exception:
            # window maybe isn't realized yet--try other ways
            min_req, nat_req = self.widget.get_preferred_size()
            wd, ht = nat_req.width, nat_req.height
            # req = self.widget.get_size_request()
            # wd, ht = req
            # wd, ht = max(1, wd), max(1, ht)
        return wd, ht
    def get_pos(self):
        """Return the (x, y) position of the widget's allocation."""
        rect = self.widget.get_allocation()
        x, y = rect.x, rect.y
        return x, y
    def get_app(self):
        """Return the global Application object."""
        return _app
    def delete(self):
        """Destroy the native widget and drop our reference to it."""
        self.widget.destroy()
        self.widget = None
    def show(self):
        # self.widget.show()
        self.widget.show_all()
    def hide(self):
        self.widget.hide()
    def is_visible(self):
        return self.widget.get_visible()
    def focus(self):
        self.widget.grab_focus()
    def resize(self, width, height):
        self.widget.set_size_request(width, height)
        # hackish way to allow the widget to be resized down again later
        # NOTE: this may cause some problems for sizing certain widgets
        if width > 0 and height > 0:
            GObject.idle_add(self.widget.set_size_request, -1, -1)
    def get_font(self, font_family, point_size):
        font = GtkHelp.get_font(font_family, point_size)
        return font
    def cfg_expand(self, horizontal='fixed', vertical='fixed'):
        # this is for compatibility with Qt widgets
        pass
# BASIC WIDGETS
class TextEntry(WidgetBase):
    """Single-line text entry with an input history navigable by the
    Up/Down arrow keys.  Makes the 'activated' callback on Enter.
    """
    def __init__(self, text='', editable=True):
        super(TextEntry, self).__init__()
        w = Gtk.Entry()
        w.set_text(text)
        w.set_editable(editable)
        w.connect('key-press-event', self._key_press_event)
        w.connect('activate', self._cb_redirect)
        self.widget = w
        # history of entered lines, for Up/Down recall
        self.history = LineHistory.LineHistory()
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        # record the entered line, then notify listeners
        self.history.append(self.get_text())
        self.make_callback('activated')
    def _key_press_event(self, widget, event):
        """Handle Up/Down keys to recall earlier/later history entries.

        Returns True (event consumed) for Up/Down, False otherwise.
        """
        keyname = Gdk.keyval_name(event.keyval)
        if keyname == 'Up':
            try:
                text = self.history.prev()
                self.set_text(text)
                # place the cursor at the end of the recalled text
                self.widget.set_position(len(text))
            except ValueError:
                # already at the oldest entry
                pass
            return True
        elif keyname == 'Down':
            try:
                text = self.history.next()
                self.set_text(text)
                self.widget.set_position(len(text))
            except ValueError:
                # already at the newest entry
                pass
            return True
        return False
    def get_text(self):
        return self.widget.get_text()
    def set_text(self, text):
        self.widget.set_text(text)
    def set_editable(self, tf):
        self.widget.set_editable(tf)
    def set_font(self, font, size=10):
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.widget.modify_font(font)
    def set_length(self, numchars):
        # this only sets the visible length of the widget
        self.widget.set_width_chars(numchars)
class TextEntrySet(WidgetBase):
    """A text entry paired with a "Set" button.  Both pressing Enter in
    the entry and clicking the button make the 'activated' callback.
    """
    def __init__(self, text='', editable=True):
        super(TextEntrySet, self).__init__()
        hbox = Gtk.HBox()
        hbox.set_spacing(4)
        w = Gtk.Entry()
        w.set_text(text)
        w.set_editable(editable)
        hbox.pack_start(w, True, True, 0)
        w.connect('activate', self._cb_redirect)
        self.entry = w
        w = Gtk.Button('Set')
        w.connect('clicked', self._cb_redirect)
        hbox.pack_start(w, False, False, 0)
        self.btn = w
        self.widget = hbox
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        self.make_callback('activated')
    def get_text(self):
        return self.entry.get_text()
    def set_text(self, text):
        self.entry.set_text(text)
    def set_editable(self, tf):
        self.entry.set_editable(tf)
    def set_font(self, font, size=10):
        if isinstance(font, str):
            font = self.get_font(font, size)
        # BUGFIX: apply the font to the entry widget itself; previously
        # this was applied to the containing HBox (self.widget), which
        # does not render the text (cf. TextArea.set_font -> self.tw)
        self.entry.modify_font(font)
    def set_length(self, numchars):
        # self.widget.set_width_chars(numchars)
        pass
    def set_enabled(self, tf):
        # disable both the container and the entry itself
        super(TextEntrySet, self).set_enabled(tf)
        self.entry.set_sensitive(tf)
class TextArea(WidgetBase):
    """Multi-line text display/editor wrapped in a built-in scrolled
    window (to match the Qt widget's behavior).  Supports an optional
    line-count limit for use as a log pane.
    """
    def __init__(self, wrap=False, editable=False):
        super(TextArea, self).__init__()
        tw = Gtk.TextView()
        if wrap:
            tw.set_wrap_mode(Gtk.WrapMode.WORD)
        else:
            tw.set_wrap_mode(Gtk.WrapMode.NONE)
        tw.set_editable(editable)
        self.tw = tw
        # this widget has a built in ScrollArea to match Qt functionality
        sw = Gtk.ScrolledWindow()
        sw.set_border_width(2)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        sw.add(self.tw)
        self.widget = sw
        # max number of lines kept; 0 means unlimited
        self.histlimit = 0
    def append_text(self, text, autoscroll=True):
        """Append `text` at the end, trimming history and optionally
        scrolling to keep the end visible.
        """
        buf = self.tw.get_buffer()
        end = buf.get_end_iter()
        buf.insert(end, text)
        if self.histlimit > 0:
            self._history_housekeeping()
        if not autoscroll:
            return
        end = buf.get_end_iter()
        mark = buf.get_insert()
        # self.tw.scroll_to_iter(end, 0.5)
        # NOTE: this was causing a segfault if the text widget is
        # not mapped yet!  Seems to be fixed in recent versions of
        # gtk
        buf.move_mark(mark, end)
        res = self.tw.scroll_to_mark(mark, 0.2, False, 0.0, 0.0)  # noqa
    def get_text(self):
        """Return the entire text content of the widget.

        BUGFIX: Gtk3 TextBuffer.get_text() requires explicit bounds and
        the include_hidden_chars flag; the argument-less call raised a
        TypeError under PyGObject.
        """
        buf = self.tw.get_buffer()
        return buf.get_text(buf.get_start_iter(), buf.get_end_iter(),
                            False)
    def _history_housekeeping(self):
        # remove some lines to keep us within our history limit
        buf = self.tw.get_buffer()
        numlines = buf.get_line_count()
        if numlines > self.histlimit:
            rmcount = int(numlines - self.histlimit)
            start = buf.get_iter_at_line(0)
            end = buf.get_iter_at_line(rmcount)
            buf.delete(start, end)
    def clear(self):
        """Delete all text in the widget."""
        buf = self.tw.get_buffer()
        start = buf.get_start_iter()
        end = buf.get_end_iter()
        buf.delete(start, end)
    def set_text(self, text):
        self.clear()
        self.append_text(text)
    def set_limit(self, numlines):
        """Limit the widget to the last `numlines` lines of text."""
        self.histlimit = numlines
        self._history_housekeeping()
    def set_editable(self, tf):
        self.tw.set_editable(tf)
    def set_font(self, font, size=10):
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.tw.modify_font(font)
    def set_wrap(self, tf):
        if tf:
            self.tw.set_wrap_mode(Gtk.WrapMode.WORD)
        else:
            self.tw.set_wrap_mode(Gtk.WrapMode.NONE)
class Label(WidgetBase):
    """Text label, optionally clickable (style='clickable') and with an
    optional right-click context menu.  Makes 'activated' on left press
    and 'released' on left release.
    """
    def __init__(self, text='', halign='left', style='normal', menu=None):
        super(Label, self).__init__()
        label = Gtk.Label(text)
        # wrap in an EventBox so we can receive button events
        evbox = Gtk.EventBox()
        evbox.set_border_width(0)
        evbox.props.visible_window = False
        evbox.add(label)
        if halign == 'left':
            label.set_justify(Gtk.Justification.LEFT)
        elif halign == 'center':
            label.set_justify(Gtk.Justification.CENTER)
        elif halign == 'right':
            label.set_justify(Gtk.Justification.RIGHT)
        evbox.connect("button_press_event", self._cb_redirect)
        self.enable_callback('activated')
        evbox.connect("button_release_event", self._cb_redirect2)
        self.enable_callback('released')
        self.label = label
        # optional context menu shown on right-click
        self.menu = menu
        self.evbox = evbox
        self.widget = evbox
        if style == 'clickable':
            # give the label a button-like raised frame
            fr = Gtk.Frame()
            fr.set_shadow_type(Gtk.ShadowType.OUT)
            evbox.props.visible_window = True
            fr.add(evbox)
            self.frame = fr
            self.widget = fr
    def _cb_redirect(self, widget, event):
        """Button-press handler: left click activates, right click pops
        up the context menu (if one was given).
        """
        # event.button, event.x, event.y
        if event.button == 1:
            self.make_callback('activated')
            return True
        elif event.button == 3 and self.menu is not None:
            menu_w = self.menu.get_widget()
            if menu_w.get_sensitive():
                return menu_w.popup(None, None, None, None,
                                    event.button, event.time)
        return False
    def _cb_redirect2(self, widget, event):
        # button-release handler: left release makes 'released'
        if event.button == 1:
            self.make_callback('released')
            return True
        return False
    def get_text(self):
        return self.label.get_text()
    def set_text(self, text):
        self.label.set_text(text)
    def set_font(self, font, size=10):
        if isinstance(font, str):
            font = self.get_font(font, size)
        self.label.modify_font(font)
    def set_color(self, fg=None, bg=None):
        """Set foreground and/or background color (color names/specs)."""
        if bg is not None:
            # background is set on the event box, not the label
            GtkHelp.modify_bg(self.evbox, bg)
        if fg is not None:
            self.label.modify_fg(Gtk.StateType.NORMAL, Gdk.color_parse(fg))
class Button(WidgetBase):
    """A simple push button with a text label.

    Emits the 'activated' callback when clicked.
    """

    def __init__(self, text=''):
        super(Button, self).__init__()
        btn = Gtk.Button(text)
        btn.connect('clicked', self._cb_redirect)
        self.widget = btn
        self.enable_callback('activated')

    def set_text(self, text):
        """Change the button label."""
        self.widget.set_label(text)

    def get_text(self):
        """Return the current button label."""
        return self.widget.get_label()

    def _cb_redirect(self, *args):
        # translate the native 'clicked' signal into our callback
        self.make_callback('activated')
class ComboBox(WidgetBase):
    """A drop-down selection list, optionally with an editable text entry.

    Emits 'activated' with the index of the newly selected item.
    """

    def __init__(self, editable=False):
        super(ComboBox, self).__init__()

        cb = GtkHelp.ComboBox(has_entry=editable)
        liststore = Gtk.ListStore(GObject.TYPE_STRING)
        cb.set_model(liststore)
        cell = Gtk.CellRendererText()
        cb.pack_start(cell, True)
        cb.add_attribute(cell, 'text', 0)
        if editable:
            cb.set_entry_text_column(0)
        self.widget = cb
        self.widget.sconnect('changed', self._cb_redirect)

        self.enable_callback('activated')

    def _cb_redirect(self, widget):
        idx = widget.get_active()
        self.make_callback('activated', idx)

    def insert_alpha(self, text):
        """Insert `text` keeping the list in ascending alphabetical order."""
        model = self.widget.get_model()
        tup = (text, )
        j = 0
        for i in range(len(model)):
            j = i
            if model[i][0] > text:
                model.insert(j, tup)
                return
        # `text` sorts after every existing item (or the list is empty);
        # an out-of-range position makes ListStore.insert() append
        model.insert(j + 1, tup)

    def append_text(self, text):
        """Append `text` at the end of the list."""
        model = self.widget.get_model()
        tup = (text, )
        idx = len(model)
        model.insert(idx, tup)

    def insert_text(self, idx, text):
        """Insert `text` at position `idx`."""
        model = self.widget.get_model()
        tup = (text, )
        model.insert(idx, tup)

    def delete_alpha(self, text):
        """Delete the first item whose text equals `text` (no-op if absent)."""
        model = self.widget.get_model()
        for i in range(len(model)):
            if model[i][0] == text:
                del model[i]
                return

    def get_alpha(self, idx):
        """Return the text of the item at position `idx`."""
        model = self.widget.get_model()
        text = model[idx][0]
        return text

    def clear(self):
        """Remove all items (and clear the entry, if editable)."""
        model = self.widget.get_model()
        model.clear()
        if self.widget.get_has_entry():
            # NOTE: fixed from get_entry(): Gtk.ComboBox has no get_entry()
            # method--the entry is the combo's child widget, as accessed in
            # set_text()/get_text() below
            entry = self.widget.get_child()
            entry.set_text('')

    def set_text(self, text):
        """Select the item matching `text`, or (if editable) show it
        in the entry."""
        model = self.widget.get_model()
        for i in range(len(model)):
            if model[i][0] == text:
                self.widget.set_active(i)
                return
        if self.widget.get_has_entry():
            entry = self.widget.get_child()
            entry.set_text(text)

    # to be deprecated someday
    show_text = set_text

    def set_index(self, index):
        """Select the item at position `index`."""
        self.widget.set_active(index)

    def get_index(self):
        """Return the index of the selected item (-1 if none)."""
        return self.widget.get_active()

    def get_text(self):
        """Return the entry text (if editable) or the selected item's text."""
        if self.widget.get_has_entry():
            entry = self.widget.get_child()
            return entry.get_text()
        idx = self.get_index()
        return self.get_alpha(idx)
class SpinBox(WidgetBase):
    """A numeric entry with up/down arrows.

    `dtype` converts the native float value into the type the caller
    wants (e.g. int or float) before it is returned or passed to the
    'value-changed' callback.
    """

    def __init__(self, dtype=int):
        super(SpinBox, self).__init__()
        self.dtype = dtype
        spin = GtkHelp.SpinButton()
        spin.sconnect('value-changed', self._cb_redirect)
        self.widget = spin
        self.enable_callback('value-changed')

    def _cb_redirect(self, w):
        self.make_callback('value-changed', self.dtype(w.get_value()))

    def get_value(self):
        """Return the current value, converted via dtype."""
        return self.dtype(self.widget.get_value())

    def set_value(self, val):
        """Set the current value."""
        self.widget.set_value(val)

    def set_decimals(self, num):
        """Set how many decimal places are displayed."""
        self.widget.set_digits(num)

    def set_limits(self, minval, maxval, incr_value=1):
        """Set the allowed range and the step increment."""
        adj = self.widget.get_adjustment()
        adj.configure(minval, minval, maxval, incr_value, incr_value, 0)
class Slider(WidgetBase):
    """A horizontal or vertical slider control.

    Emits 'value-changed' with the new value.
    """
    def __init__(self, orientation='horizontal', dtype=int, track=False):
        super(Slider, self).__init__()
        # NOTE: parameter dtype is ignored for now for gtk3
        if orientation == 'horizontal':
            w = GtkHelp.HScale()
            # TEMP: hack because scales don't seem to expand as expected
            w.set_size_request(200, -1)
        else:
            w = GtkHelp.VScale()
            w.set_size_request(-1, 200)
        self.widget = w
        w.set_draw_value(True)
        w.set_value_pos(Gtk.PositionType.BOTTOM)
        self.set_tracking(track)
        w.sconnect('value-changed', self._cb_redirect)
        self.enable_callback('value-changed')
    def _cb_redirect(self, range):
        val = range.get_value()
        self.make_callback('value-changed', val)
    def get_value(self):
        """Return the current slider value."""
        return self.widget.get_value()
    def set_value(self, val):
        """Set the slider position."""
        self.widget.set_value(val)
    def set_tracking(self, tf):
        # NOTE: currently a no-op under gtk3 -- the gtk2 update-policy
        # API that implemented tracking is commented out below
        if tf:
            # self.widget.set_update_policy(Gtk.UPDATE_CONTINUOUS)
            pass
        else:
            # self.widget.set_update_policy(Gtk.UPDATE_DISCONTINUOUS)
            pass
    def set_limits(self, minval, maxval, incr_value=1):
        """Set the slider range and step increment."""
        adj = self.widget.get_adjustment()
        adj.configure(minval, minval, maxval, incr_value, incr_value, 0)
class Dial(WidgetBase):
    """A rotary dial control (backed by the custom GtkHelp.ValueDial).

    Emits 'value-changed' with the value converted via `dtype`.
    """

    def __init__(self, dtype=float, wrap=False, track=False):
        super(Dial, self).__init__()
        dial = GtkHelp.ValueDial()
        dial.draw_value = False
        dial.wrap = wrap
        dial.set_tracking(track)
        dial.connect('value-changed', self._cb_redirect)
        self.widget = dial
        self.dtype = dtype
        self.enable_callback('value-changed')

    def _cb_redirect(self, dial, val):
        # convert the native value to the caller's requested type
        self.make_callback('value-changed', self.dtype(val))

    def get_value(self):
        """Return the current dial value, converted via dtype."""
        return self.dtype(self.widget.get_value())

    def set_value(self, val):
        """Set the dial position."""
        self.widget.set_value(val)

    def set_tracking(self, tf):
        """If True, callbacks fire continuously while dragging."""
        self.widget.set_tracking(tf)

    def set_limits(self, minval, maxval, incr_value=1):
        """Set the dial range and step increment."""
        self.widget.set_limits(minval, maxval, incr_value)
class ScrollBar(WidgetBase):
    """A stand-alone scroll bar.

    The external API works in fractions [0.0, 1.0]; internally the
    native widget's range is 0-100.
    """

    def __init__(self, orientation='horizontal'):
        super(ScrollBar, self).__init__()
        if orientation == 'horizontal':
            sbar = Gtk.HScrollbar()
        else:
            sbar = Gtk.VScrollbar()
        sbar.set_range(0.0, 100.0)
        sbar.connect('value-changed', self._cb_redirect)
        self.widget = sbar
        self.enable_callback('activated')

    def set_value(self, value):
        """Set the position; `value` is a fraction in [0, 1]."""
        self.widget.set_value(value * 100.0)

    def get_value(self):
        """Return the position as a fraction in [0, 1]."""
        return self.widget.get_value() / 100.0

    def _cb_redirect(self, range):
        self.make_callback('activated', range.get_value() / 100.0)
class CheckBox(WidgetBase):
    """A labeled check box.

    The 'activated' callback carries the new boolean state.
    """

    def __init__(self, text=''):
        super(CheckBox, self).__init__()
        cbox = GtkHelp.CheckButton(text)
        cbox.sconnect('toggled', self._cb_redirect)
        self.widget = cbox
        self.enable_callback('activated')

    def _cb_redirect(self, widget):
        self.make_callback('activated', widget.get_active())

    def set_state(self, tf):
        """Check (True) or uncheck (False) the box."""
        self.widget.set_active(tf)

    def get_state(self):
        """Return True if checked."""
        return self.widget.get_active()
class ToggleButton(WidgetBase):
    """A two-state (press/release) button.

    The 'activated' callback carries the new boolean state.
    """

    def __init__(self, text=''):
        super(ToggleButton, self).__init__()
        btn = GtkHelp.ToggleButton(text)
        btn.set_mode(True)
        btn.sconnect('toggled', self._cb_redirect)
        self.widget = btn
        self.enable_callback('activated')

    def _cb_redirect(self, widget):
        self.make_callback('activated', widget.get_active())

    def set_state(self, tf):
        """Press (True) or release (False) the button."""
        self.widget.set_active(tf)

    def get_state(self):
        """Return True if the button is currently pressed."""
        return self.widget.get_active()
class RadioButton(WidgetBase):
    """A radio button.

    Pass another RadioButton as `group` to make them mutually exclusive.
    The 'activated' callback carries the new boolean state.
    """
    def __init__(self, text='', group=None):
        super(RadioButton, self).__init__()
        if group is not None:
            # join the same exclusion group as the given button
            group = group.get_widget()
            self.widget = GtkHelp.RadioButton.new_with_label_from_widget(group,
                                                                         text)
        else:
            self.widget = GtkHelp.RadioButton.new_with_label(None, text)
        self.widget.connect('toggled', self._cb_redirect)
        self.enable_callback('activated')
    def _cb_redirect(self, widget):
        val = widget.get_active()
        self.make_callback('activated', val)
    def set_state(self, tf):
        """Select (True) or deselect (False) this button."""
        self.widget.set_active(tf)
    def get_state(self):
        """Return True if this button is selected."""
        return self.widget.get_active()
class Image(WidgetBase):
    """Displays a pixbuf image.

    'activated' fires on a completed left-click (press then release);
    right-click pops up `menu` if one was given.
    """
    def __init__(self, native_image=None, style='normal', menu=None):
        super(Image, self).__init__()
        if native_image is None:
            native_image = Gtk.Image()
        self.image = native_image
        self.image.set_property("has-tooltip", True)
        # wrap in an EventBox so that it can receive mouse events
        evbox = Gtk.EventBox()
        evbox.add(self.image)
        evbox.connect("button-press-event", self._cb_redirect1)
        evbox.connect("button-release-event", self._cb_redirect2)
        # tracks an in-progress click gesture (press seen, release pending)
        self._action = None
        self.menu = menu
        self.widget = evbox
        self.enable_callback('activated')
    def _cb_redirect1(self, widget, event):
        if event.type == Gdk.EventType.BUTTON_PRESS:
            if event.button == 1:
                # left press begins a click gesture
                self._action = 'click'
            elif event.button == 3 and self.menu is not None:
                menu_w = self.menu.get_widget()
                if menu_w.get_sensitive():
                    return menu_w.popup(None, None, None, None,
                                        event.button, event.time)
    def _cb_redirect2(self, widget, event):
        if event.type == Gdk.EventType.BUTTON_RELEASE:
            if (event.button == 1) and (self._action == 'click'):
                # press followed by release completes the click
                self._action = None
                self.make_callback('activated')
    def _set_image(self, native_image):
        # replace the displayed pixbuf with that of `native_image`
        self.image.set_from_pixbuf(native_image.get_pixbuf())
    def load_file(self, img_path, format=None):
        """Load the displayed image from a file on disk."""
        # format ignored at present
        pixbuf = GtkHelp.pixbuf_new_from_file(img_path)
        self.image.set_from_pixbuf(pixbuf)
class ProgressBar(WidgetBase):
    """A progress bar displaying a fraction as a percentage text."""

    def __init__(self):
        super(ProgressBar, self).__init__()
        bar = Gtk.ProgressBar()
        # GTK3
        # w.set_orientation(Gtk.Orientation.HORIZONTAL)
        # w.set_inverted(False)
        self.widget = bar

    def set_value(self, pct):
        """Set the progress; `pct` is a fraction in [0, 1]."""
        pct = float(pct)
        self.widget.set_fraction(pct)
        self.widget.set_text("%.2f %%" % (pct * 100.0))
class StatusBar(WidgetBase):
    """A message line (Gtk.Statusbar); messages can expire automatically
    after a timeout."""
    def __init__(self):
        super(StatusBar, self).__init__()
        sbar = Gtk.Statusbar()
        # context id of the currently pushed message, if any
        self.ctx_id = None
        self.widget = sbar
        # GObject timeout handle for a pending auto-clear, if any
        self.statustask = None
    def clear_message(self):
        """Remove the currently displayed message, if any."""
        self.statustask = None
        if self.ctx_id is not None:
            try:
                self.widget.remove_all(self.ctx_id)
            except Exception:
                pass
            self.ctx_id = None
    def set_message(self, msg_str, duration=10.0):
        """Show `msg_str`; auto-clear after about `duration` seconds
        (a duration <= 0 shows the message indefinitely)."""
        try:
            if self.ctx_id is not None:
                self.widget.remove_all(self.ctx_id)
        except Exception:
            pass
        self.ctx_id = self.widget.get_context_id('status')
        self.widget.push(self.ctx_id, msg_str)
        # remove message in about `duration` seconds
        if self.statustask is not None:
            # cancel any previously scheduled auto-clear
            GObject.source_remove(self.statustask)
            self.statustask = None
        if duration > 0.0:
            self.statustask = GObject.timeout_add(int(1000 * duration),
                                                  self.clear_message)
class TreeView(WidgetBase):
    """A hierarchical table of items, built on Gtk.TreeView inside a
    built-in Gtk.ScrolledWindow.

    The model stores one Python object per row (a str for interior
    nodes, a record indexable by data key for leaves); a parallel
    "shadow" index maps logical paths to model iters.

    Callbacks: 'selected', 'activated', 'drag-start'.
    """

    def __init__(self, auto_expand=False, sortable=False, selection='single',
                 use_alt_row_color=False, dragable=False):
        super(TreeView, self).__init__()

        self.auto_expand = auto_expand
        self.sortable = sortable
        self.selection = selection
        self.dragable = dragable
        self.levels = 1
        self.leaf_key = None
        self.leaf_idx = 0
        self.columns = []
        self.datakeys = []
        # shadow index
        self.shadow = {}

        # this widget has a built in ScrollArea to match Qt functionality
        sw = Gtk.ScrolledWindow()
        sw.set_border_width(2)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.widget = sw

        if self.dragable:
            tv = GtkHelp.MultiDragDropTreeView()
            # enable drag from this widget
            targets = [("text/plain", 0, GtkHelp.DND_TARGET_TYPE_TEXT),
                       ("text/uri-list", 0, GtkHelp.DND_TARGET_TYPE_URIS)]
            tv.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK,
                                        targets, Gdk.DragAction.COPY)
            tv.connect("drag-data-get", self._start_drag)
        else:
            tv = Gtk.TreeView()
        self.tv = tv
        sw.add(self.tv)

        tv.connect('cursor-changed', self._selection_cb)
        tv.connect('row-activated', self._cb_redirect)
        # needed to get alternating row colors
        if use_alt_row_color:
            tv.set_rules_hint(True)
        if self.selection == 'multiple':
            # enable multiple selection
            treeselection = tv.get_selection()
            treeselection.set_mode(Gtk.SelectionMode.MULTIPLE)

        for cbname in ('selected', 'activated', 'drag-start'):
            self.enable_callback(cbname)

    def setup_table(self, columns, levels, leaf_key):
        """Configure the columns.

        `columns` is either a list of key strings or a list of
        (header, key) pairs; `levels` is the tree depth and `leaf_key`
        the data key identifying a leaf record.
        """
        self.clear()
        self.columns = columns
        self.levels = levels
        self.leaf_key = leaf_key

        # create the column headers
        if not isinstance(columns[0], str):
            # columns specifies a mapping
            headers = [col[0] for col in columns]
            datakeys = [col[1] for col in columns]
        else:
            headers = datakeys = columns
        self.datakeys = datakeys
        self.leaf_idx = datakeys.index(self.leaf_key)

        # make sort functions
        self.cell_sort_funcs = []
        for kwd in self.datakeys:
            self.cell_sort_funcs.append(self._mksrtfnN(kwd))

        # Remove old columns, if any
        for col in list(self.tv.get_columns()):
            self.tv.remove_column(col)

        # Set up headers
        for n in range(0, len(self.columns)):
            kwd = self.datakeys[n]
            if kwd == 'icon':
                cell = Gtk.CellRendererPixbuf()
            else:
                cell = Gtk.CellRendererText()
            cell.set_padding(2, 0)
            header = headers[n]
            tvc = Gtk.TreeViewColumn(header, cell)
            tvc.set_resizable(True)
            if self.sortable:
                tvc.connect('clicked', self.sort_cb, n)
                tvc.set_clickable(True)
            if n == 0:
                fn_data = self._mkcolfn0(kwd)
                # cell.set_property('xalign', 1.0)
            else:
                fn_data = self._mkcolfnN(kwd)
            tvc.set_cell_data_func(cell, fn_data)
            self.tv.append_column(tvc)

        treemodel = Gtk.TreeStore(object)
        self.tv.set_fixed_height_mode(False)
        self.tv.set_model(treemodel)
        # This speeds up rendering of TreeViews
        self.tv.set_fixed_height_mode(True)

    def set_tree(self, tree_dict):
        """Replace the entire tree contents with `tree_dict`."""
        self.clear()
        model = Gtk.TreeStore(object)
        self._add_tree(model, tree_dict)

    def add_tree(self, tree_dict):
        """Merge `tree_dict` into the existing tree contents."""
        model = self.tv.get_model()
        self._add_tree(model, tree_dict)

    def _add_tree(self, model, tree_dict):
        # Hack to get around slow TreeView scrolling with large lists
        self.tv.set_fixed_height_mode(False)

        for key in tree_dict:
            self._add_subtree(1, self.shadow,
                              model, None, key, tree_dict[key])

        self.tv.set_model(model)
        self.tv.set_fixed_height_mode(True)

        # User wants auto expand?
        if self.auto_expand:
            self.tv.expand_all()

    def _add_subtree(self, level, shadow, model, parent_item, key, node):
        # recursively add `node` under `parent_item`, mirroring the
        # structure in the `shadow` index
        if level >= self.levels:
            # leaf node
            try:
                bnch = shadow[key]
                item_iter = bnch.item
                # TODO: update leaf item

            except KeyError:
                # new item
                item_iter = model.append(parent_item, [node])
                shadow[key] = Bunch.Bunch(node=node, item=item_iter,
                                          terminal=True)
        else:
            try:
                # node already exists
                bnch = shadow[key]
                item = bnch.item
                d = bnch.node

            except KeyError:
                # new node
                # NOTE: fixed from model.append(None, ...): interior nodes
                # must attach to their parent, otherwise trees deeper than
                # two levels are flattened to the root
                item = model.append(parent_item, [str(key)])
                d = {}
                shadow[key] = Bunch.Bunch(node=d, item=item, terminal=False)

            # recurse for non-leaf interior node
            for key in node:
                self._add_subtree(level + 1, d, model, item, key, node[key])

    def _selection_cb(self, treeview):
        path, column = treeview.get_cursor()
        if path is None:
            return
        model = treeview.get_model()
        item = model.get_iter(path)
        res_dict = {}
        self._get_item(res_dict, item)
        self.make_callback('selected', res_dict)

    def _cb_redirect(self, treeview, path, column):
        model = treeview.get_model()
        item = model.get_iter(path)
        res_dict = {}
        self._get_item(res_dict, item)
        self.make_callback('activated', res_dict)

    def _get_path(self, item):
        """Return the logical path (list of names) of model iter `item`."""
        if item is None:
            return []

        model = self.tv.get_model()
        if not model.iter_has_child(item):
            # child node, so append my name to parent's path
            path_rest = self._get_path(model.iter_parent(item))
            d = model.get_value(item, 0)
            if isinstance(d, str):
                myname = d
            else:
                myname = d[self.leaf_key]
            path_rest.append(myname)
            return path_rest

        # non-leaf node case
        myname = model.get_value(item, 0)
        path_rest = self._get_path(model.iter_parent(item))
        path_rest.append(myname)
        return path_rest

    def _get_item(self, res_dict, item):
        # from the model iter `item`, return the item via a path
        # in the dictionary `res_dict`
        path = self._get_path(item)
        d, s = res_dict, self.shadow
        for name in path[:-1]:
            d = d.setdefault(name, {})
            s = s[name].node

        dst_key = path[-1]
        d[dst_key] = s[dst_key].node

    def get_selected(self):
        """Return the selected items as a nested path dictionary."""
        treeselection = self.tv.get_selection()
        model, pathlist = treeselection.get_selected_rows()
        res_dict = {}
        for path in pathlist:
            item = model.get_iter(path)
            self._get_item(res_dict, item)
        return res_dict

    def clear(self):
        """Remove all rows and reset the shadow index."""
        model = Gtk.TreeStore(object)
        self.tv.set_model(model)
        self.shadow = {}

    def clear_selection(self):
        treeselection = self.tv.get_selection()
        treeselection.unselect_all()

    def select_path(self, path, state=True):
        """Select (or deselect, if state=False) the row at `path`."""
        treeselection = self.tv.get_selection()
        item = self._path_to_item(path)
        if state:
            treeselection.select_iter(item)
        else:
            treeselection.unselect_iter(item)

    def highlight_path(self, path, onoff, font_color='green'):
        item = self._path_to_item(path)  # noqa
        # TODO

    def _path_to_item(self, path):
        # look up a logical path in the shadow index to get the model iter
        s = self.shadow
        for name in path[:-1]:
            s = s[name].node
        item = s[path[-1]].item
        return item

    def scroll_to_path(self, path):
        """Scroll so that the row at `path` is centered in the view."""
        item = self._path_to_item(path)
        model = self.tv.get_model()
        treepath = model.get_path(item)
        self.tv.scroll_to_cell(treepath, use_align=True, row_align=0.5)

    def scroll_to_end(self):
        """Scroll so that the last top-level row is visible."""
        model = self.tv.get_model()
        # NOTE: iter_n_children() requires an explicit parent iter in
        # GTK 3; None means "count top-level rows"
        num_rows = model.iter_n_children(None)
        item = model.iter_nth_child(None, num_rows - 1)
        treepath = model.get_path(item)
        self.tv.scroll_to_cell(treepath, use_align=True, row_align=0.5)

    def sort_on_column(self, i):
        model = self.tv.get_model()
        model.set_sort_column_id(i, Gtk.SortType.ASCENDING)

    def set_column_width(self, i, width):
        col = self.tv.get_column(i)
        col.set_max_width(width)

    def set_column_widths(self, lwidths):
        """Set widths from a list; None entries are skipped."""
        for i, width in enumerate(lwidths):
            if width is not None:
                self.set_column_width(i, width)

    def set_optimal_column_widths(self):
        self.tv.columns_autosize()

    def get_column_widths(self):
        """Return the current width of each column, in column order."""
        res = []
        for i, _ in enumerate(self.columns):
            col = self.tv.get_column(i)
            res.append(col.get_width())
        return res

    def sort_cb(self, column, idx):
        # header clicked: install our sort function for that column
        treeview = column.get_tree_view()
        model = treeview.get_model()
        model.set_sort_column_id(idx, Gtk.SortType.ASCENDING)
        fn = self.cell_sort_funcs[idx]
        model.set_sort_func(idx, fn)
        return True

    def _mksrtfnN(self, idx):
        # make a sort function comparing rows on data key `idx`;
        # strings compare case-insensitively
        def fn(*args):
            model, iter1, iter2 = args[:3]
            bnch1 = model.get_value(iter1, 0)
            bnch2 = model.get_value(iter2, 0)
            if isinstance(bnch1, str):
                if isinstance(bnch2, str):
                    s1, s2 = bnch1.lower(), bnch2.lower()
                    if s1 < s2:
                        return -1
                    if s1 > s2:
                        return 1
                    return 0
            val1, val2 = bnch1[idx], bnch2[idx]
            if isinstance(val1, str):
                val1, val2 = val1.lower(), val2.lower()
            if val1 < val2:
                return -1
            if val1 > val2:
                return 1
            return 0
        return fn

    def _mkcolfn0(self, idx):
        # cell data function for column 0 (may show interior node names)
        def fn(*args):
            column, cell, model, iter = args[:4]
            bnch = model.get_value(iter, 0)
            if isinstance(bnch, str):
                cell.set_property('text', bnch)
            elif isinstance(bnch, GdkPixbuf.Pixbuf):
                cell.set_property('pixbuf', bnch)
            elif isinstance(bnch[idx], GdkPixbuf.Pixbuf):
                cell.set_property('pixbuf', bnch[idx])
            else:
                cell.set_property('text', bnch[idx])
        return fn

    def _mkcolfnN(self, idx):
        # cell data function for columns > 0 (blank for interior nodes)
        def fn(*args):
            column, cell, model, iter = args[:4]
            bnch = model.get_value(iter, 0)
            if isinstance(bnch, str):
                cell.set_property('text', '')
            elif isinstance(bnch, GdkPixbuf.Pixbuf):
                cell.set_property('text', '')
            elif isinstance(bnch[idx], GdkPixbuf.Pixbuf):
                cell.set_property('pixbuf', bnch[idx])
            else:
                cell.set_property('text', str(bnch[idx]))
        return fn

    def _start_drag(self, treeview, context, selection,
                    info, timestamp):
        # hand the current selection to the drag-and-drop machinery
        res_dict = self.get_selected()
        drag_pkg = DragPackage(self.tv, selection)
        self.make_callback('drag-start', drag_pkg, res_dict)
        drag_pkg.start_drag()
class WebView(WidgetBase):
    """A minimal embedded web browser backed by WebKit.

    Raises NotImplementedError if the webkit bindings are unavailable.
    """
    def __init__(self):
        if not has_webkit:
            raise NotImplementedError("Missing webkit")
        super(WebView, self).__init__()
        self.widget = WebKit.WebView()
    def load_url(self, url):
        """Navigate to `url`."""
        self.widget.open(url)
    def load_html_string(self, html_string):
        """Render the given HTML string directly."""
        self.widget.load_string(html_string, 'text/html', 'utf-8', 'file://')
    def go_back(self):
        self.widget.go_back()
    def go_forward(self):
        self.widget.go_forward()
    def reload_page(self):
        self.widget.reload()
    def stop_loading(self):
        self.widget.stop_loading()
# CONTAINERS
class ContainerBase(WidgetBase):
    """Common base class for widgets that hold child widgets.

    Tracks wrapped children in `self.children`, parallel to the native
    container's own child list.  Callbacks: 'widget-added',
    'widget-removed'.
    """
    def __init__(self):
        super(ContainerBase, self).__init__()
        self.children = []
        for name in ['widget-added', 'widget-removed']:
            self.enable_callback(name)
    def add_ref(self, ref):
        # TODO: should this be a weakref?
        self.children.append(ref)
    def _remove(self, childw, delete=False):
        # `childw` is a *native* Gtk widget here, not a wrapper
        self.widget.remove(childw)
        if delete:
            childw.destroy()
    def remove(self, child, delete=False):
        """Remove wrapped widget `child`; destroy the native widget too
        if `delete` is True.  Raises KeyError if not a child."""
        if child not in self.children:
            raise KeyError("Widget is not a child of this container")
        self.children.remove(child)
        self._remove(child.get_widget(), delete=delete)
        self.make_callback('widget-removed', child)
    def remove_all(self, delete=False):
        """Remove every child (see remove())."""
        for child in list(self.children):
            self.remove(child, delete=delete)
    def get_children(self):
        """Return the list of wrapped child widgets."""
        return self.children
    def num_children(self):
        return len(self.children)
    def _get_native_children(self):
        return [child.get_widget() for child in self.children]
    def _get_native_index(self, nchild):
        # raises ValueError if `nchild` is not one of our children
        l = self._get_native_children()
        return l.index(nchild)
    def _native_to_child(self, nchild):
        # map a native Gtk widget back to its wrapper
        idx = self._get_native_index(nchild)
        return self.children[idx]
    def set_margins(self, left, right, top, bottom):
        # TODO: can this be made more accurate?
        self.widget.set_border_width(left)
    def set_border_width(self, pix):
        self.widget.set_border_width(pix)
class Box(ContainerBase):
    """A container that packs its children in a single row or column."""

    def __init__(self, orientation='horizontal'):
        super(Box, self).__init__()
        if orientation == 'horizontal':
            self.widget = Gtk.HBox()
        else:
            self.widget = Gtk.VBox()

    def set_spacing(self, val):
        """Set pixel spacing between children."""
        self.widget.set_spacing(val)

    def insert_widget(self, idx, child, stretch=0.0):
        """Insert `child` at position `idx` in the box."""
        child_w = child.get_widget()
        # TODO: can this be made more accurate?
        self.widget.pack_start(child_w, float(stretch) > 0.0, True, 0)
        self.widget.reorder_child(child_w, idx)
        self.children.insert(idx, child)
        self.widget.show_all()
        self.make_callback('widget-added', child)

    def add_widget(self, child, stretch=0.0):
        """Append `child` at the end of the box."""
        self.add_ref(child)
        child_w = child.get_widget()
        # TODO: can this be made more accurate?
        self.widget.pack_start(child_w, float(stretch) > 0.0, True, 0)
        self.widget.show_all()
        self.make_callback('widget-added', child)
class VBox(Box):
    """Convenience subclass: a Box fixed to vertical orientation."""

    def __init__(self):
        super(VBox, self).__init__(orientation='vertical')
class HBox(Box):
    """Convenience subclass: a Box fixed to horizontal orientation."""

    def __init__(self):
        super(HBox, self).__init__(orientation='horizontal')
class Frame(ContainerBase):
    """A single-child container drawn with an etched border and an
    optional title label."""
    def __init__(self, title=None):
        super(Frame, self).__init__()
        fr = Gtk.Frame(label=title)
        fr.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        fr.set_label_align(0.10, 0.5)
        self.widget = fr
    def set_widget(self, child):
        """Replace the frame's content with `child`."""
        self.remove_all()
        self.add_ref(child)
        self.widget.add(child.get_widget())
        self.widget.show_all()
    def set_text(self, text):
        """Change the frame's title label."""
        w = self.get_widget()
        lbl = w.get_label_widget()
        lbl.set_text(text)
class Expander(ContainerBase):
    """A collapsible container: a toggle button with an arrow icon that
    shows or hides a single content widget.

    Callbacks: 'opened' and 'closed'.
    """

    # arrow pixbufs are shared (lazily loaded) by all Expander instances
    r_arrow = None
    d_arrow = None

    def __init__(self, title=None, notoggle=False):
        super(Expander, self).__init__()

        vbox = VBox()
        vbox.set_margins(0, 0, 0, 0)
        vbox.set_spacing(0)
        self.widget = vbox.get_widget()
        self._vbox = vbox

        if Expander.r_arrow is None:
            iconpath = os.path.join(icondir, 'triangle-right-48.png')
            Expander.r_arrow = GtkHelp.pixbuf_new_from_file_at_size(iconpath,
                                                                    12, 12)
        if Expander.d_arrow is None:
            iconpath = os.path.join(icondir, 'triangle-down-48.png')
            Expander.d_arrow = GtkHelp.pixbuf_new_from_file_at_size(iconpath,
                                                                    12, 12)
        self._d_arrow = Gtk.Image.new_from_pixbuf(Expander.d_arrow)
        self._r_arrow = Gtk.Image.new_from_pixbuf(Expander.r_arrow)

        self.toggle = None
        if not notoggle:
            toggle = ToggleButton(title)
            self.toggle = toggle
            toggle_w = toggle.get_widget()
            toggle_w.set_always_show_image(True)
            r_arrow = Gtk.Image.new_from_pixbuf(Expander.r_arrow)
            toggle_w.set_image(r_arrow)
            toggle.add_callback('activated', self._toggle_widget)
            vbox.add_widget(toggle, stretch=0)

        self.content = None

        for name in ('opened', 'closed'):
            self.enable_callback(name)

    def set_widget(self, child, stretch=1):
        """Set the widget shown/hidden when the expander is toggled."""
        if (self.content is not None and
                self.content in self._vbox.get_children()):
            # NOTE: fixed to use the wrapper container's remove(), as
            # expand() does; the previous code passed the wrapped widget
            # to the native Gtk remove(), which expects a native widget
            self._vbox.remove(self.content)
        self.content = child

    def expand(self, tf):
        """Show (True) or hide (False) the content widget."""
        children = self._vbox.get_children()
        if tf:
            if self.content is None or self.content in children:
                # nothing to show, or already shown
                return
            if self.toggle is not None:
                self.toggle.get_widget().set_image(self._d_arrow)
            self._vbox.add_widget(self.content, stretch=1)
            self.make_callback('opened')
        else:
            if self.content is None or self.content not in children:
                return
            if self.toggle is not None:
                self.toggle.get_widget().set_image(self._r_arrow)
            self._vbox.remove(self.content)
            self.make_callback('closed')

    def _toggle_widget(self, w, tf):
        self.expand(tf)
class TabWidget(ContainerBase):
    """A notebook of tabbed pages, optionally reorderable/detachable.

    Callbacks:
    - 'page-switch': the visible page changed
    - 'page-close': a page was closed
    - 'page-move': a page was dragged here from another TabWidget
    - 'page-detach': a page was dragged out to its own window
    """
    def __init__(self, tabpos='top', reorderable=False, detachable=True,
                 group=0):
        super(TabWidget, self).__init__()
        self.reorderable = reorderable
        self.detachable = detachable
        nb = GtkHelp.Notebook()
        # nb = Gtk.Notebook()
        nb.set_show_border(False)
        nb.set_scrollable(True)
        # Allows drag-and-drop between notebooks
        # nb.set_group_id(group)  # in gtk3?
        if self.detachable:
            nb.connect("create-window", self._tab_detach_cb)
        nb.connect("page-added", self._tab_insert_cb)
        nb.connect("page-removed", self._tab_remove_cb)
        # contrary to some other widgets, we want the "tab changed" event
        # when the index is switched programmatically as well as by user
        ## nb.sconnect("switch-page", self._cb_redirect)
        nb.connect("switch-page", self._cb_redirect)
        self.widget = nb
        self.set_tab_position(tabpos)
        for name in ('page-switch', 'page-close', 'page-move', 'page-detach'):
            self.enable_callback(name)
    def set_tab_position(self, tabpos):
        """Place the tab strip at 'top', 'bottom', 'left' or 'right'."""
        nb = self.widget
        if tabpos == 'top':
            nb.set_tab_pos(Gtk.PositionType.TOP)
        elif tabpos == 'bottom':
            nb.set_tab_pos(Gtk.PositionType.BOTTOM)
        elif tabpos == 'left':
            nb.set_tab_pos(Gtk.PositionType.LEFT)
        elif tabpos == 'right':
            nb.set_tab_pos(Gtk.PositionType.RIGHT)
    def _tab_detach_cb(self, source, nchild_w, x, y):
        # a page was dragged out of the notebook to become its own window
        child = self._native_to_child(nchild_w)
        # remove child
        # (native widget already has been removed by gtk)
        self.children.remove(child)
        # nchild_w.unparent()
        self.make_callback('page-detach', child)
    def _tab_insert_cb(self, nbw, nchild_w, page_num):
        # a native page was added; if it came from another TabWidget (as
        # recorded in the module-global _widget_move_event), adopt it
        global _widget_move_event
        if _widget_move_event is not None:
            event, _widget_move_event = _widget_move_event, None
            already_here = nchild_w in self._get_native_children()
            if not already_here and event.child.get_widget() == nchild_w:
                child = event.child
                # remove child from src tab
                # (native widget already has been removed by gtk)
                event.src_widget.children.remove(child)
                # add child to us
                # (native widget already has been added by gtk)
                self.add_ref(child)
                self.make_callback('page-move', event.src_widget, child)
    def _tab_remove_cb(self, nbw, nchild_w, page_num):
        # a native page was removed; record a possible cross-notebook
        # move so _tab_insert_cb on the destination can complete it
        global _widget_move_event
        try:
            child = self._native_to_child(nchild_w)
            _widget_move_event = WidgetMoveEvent(self, child)
        except ValueError:
            # we were triggered by a removal that is not a move
            pass
    def _cb_redirect(self, nbw, gptr, index):
        child = self.index_to_widget(index)
        self.make_callback('page-switch', child)
    def _cb_select(self, widget, event, child):
        # clicking a tab label also reports a page switch
        self.make_callback('page-switch', child)
    def add_widget(self, child, title=''):
        """Append `child` as a new page labeled `title`."""
        self.add_ref(child)
        child_w = child.get_widget()
        label = Gtk.Label(title)
        # wrap the label in an EventBox so we can catch clicks on the tab
        evbox = Gtk.EventBox()
        evbox.props.visible_window = True
        evbox.add(label)
        evbox.show_all()
        evbox.connect("button-press-event", self._cb_select, child)
        self.widget.append_page(child_w, evbox)
        if self.reorderable:
            self.widget.set_tab_reorderable(child_w, True)
        if self.detachable:
            self.widget.set_tab_detachable(child_w, True)
        self.widget.show_all()
        # attach title to child
        child.extdata.tab_title = title
        self.make_callback('widget-added', child)
    def get_index(self):
        """Return the index of the currently shown page."""
        return self.widget.get_current_page()
    def set_index(self, idx):
        """Show the page at index `idx`."""
        self.widget.set_current_page(idx)
    def index_of(self, child):
        """Return the page index of `child`, or -1 if not found."""
        widget = child.get_widget()
        if widget is None:
            return -1
        return self.widget.page_num(widget)
    def index_to_widget(self, idx):
        """Returns child corresponding to `idx`"""
        nchild = self.widget.get_nth_page(idx)
        return self._native_to_child(nchild)
    def highlight_tab(self, idx, tf):
        """Highlight (or unhighlight) the tab label at index `idx`."""
        nchild = self.widget.get_nth_page(idx)
        evbox = self.widget.get_tab_label(nchild)
        if tf:
            GtkHelp.modify_bg(evbox, 'palegreen')
        else:
            GtkHelp.modify_bg(evbox, None)
class StackWidget(TabWidget):
    """A TabWidget with its tab strip hidden: exactly one child is
    visible at a time, switched programmatically via set_index()."""

    def __init__(self):
        super(StackWidget, self).__init__()
        nb = self.widget
        nb.set_show_tabs(False)
        nb.set_show_border(False)
class MDIWidget(ContainerBase):
    """A multiple-document-interface workspace holding movable/resizable
    sub-windows (see MDIWindow).

    Callbacks: 'page-switch', 'page-close'.
    """
    def __init__(self, tabpos='top', mode='tabs'):
        super(MDIWidget, self).__init__()
        self.mode = 'mdi'
        self.true_mdi = True
        # TODO: currently scrollbars are only partially working
        sw = Gtk.ScrolledWindow()
        sw.set_border_width(2)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.widget = sw
        w = GtkHelp.MDIWidget()
        self.mdi_w = w
        # Monkey patching the internal callbacks so that we can make
        # the correct callbacks
        w._move_page = w.move_page
        w.move_page = self._window_moved
        w._resize_page = w.resize_page
        w.resize_page = self._window_resized
        w._set_current_page = w.set_current_page
        w.set_current_page = self._set_current_page
        sw.set_hadjustment(self.mdi_w.get_hadjustment())
        sw.set_vadjustment(self.mdi_w.get_vadjustment())
        sw.add(self.mdi_w)
        for name in ('page-switch', 'page-close'):
            self.enable_callback(name)
    def get_mode(self):
        return self.mode
    def set_mode(self, mode):
        # mode switching not supported; this workspace is always MDI
        pass
    def add_widget(self, child, title=''):
        """Add `child` in a new sub-window titled `title`; returns the
        MDIWindow wrapper."""
        self.add_ref(child)
        subwin = MDIWindow(self, child, title=title)
        subwin.add_callback('close', self._window_close, child)
        self.make_callback('widget-added', child)
        return subwin
    def _remove(self, childw, delete=False):
        # remove from the inner MDI area, not the scrolled window
        self.mdi_w.remove(childw)
        if delete:
            childw.destroy()
    def _window_resized(self, subwin, wd, ht):
        # intercepts the native resize (see monkey patch in __init__)
        self.mdi_w._resize_page(subwin, wd, ht)
        # save size
        nchild = subwin.widget
        child = self._native_to_child(nchild)
        child.extdata.mdi_size = (wd, ht)
        return True
    def _window_moved(self, subwin, x, y):
        # intercepts the native move (see monkey patch in __init__)
        self.mdi_w._move_page(subwin, x, y)
        # save position
        nchild = subwin.widget
        child = self._native_to_child(nchild)
        child.extdata.mdi_pos = (x, y)
        return True
    def _window_close(self, subwin, child):
        return self.make_callback('page-close', child)
    def _set_current_page(self, idx):
        # intercepts the native page switch; only fires the callback
        # when the index actually changes
        _idx = self.mdi_w.get_current_page()
        self.mdi_w._set_current_page(idx)
        if _idx != idx:
            child = self.index_to_widget(idx)
            self.make_callback('page-switch', child)
    def get_index(self):
        """Return the index of the current sub-window."""
        return self.mdi_w.get_current_page()
    def set_index(self, idx):
        """Bring the sub-window at `idx` to the front."""
        self.mdi_w.set_current_page(idx)
    def index_of(self, child):
        """Return the index of `child`'s sub-window."""
        return self.mdi_w.page_num(child.get_widget())
    def index_to_widget(self, idx):
        """Returns child corresponding to `idx`"""
        nchild = self.mdi_w.get_nth_page(idx)
        return self._native_to_child(nchild)
    def tile_panes(self):
        """Arrange all sub-windows in a tiled layout."""
        self.mdi_w.tile_pages()
    def cascade_panes(self):
        """Arrange all sub-windows in a cascading layout."""
        self.mdi_w.cascade_pages()
    def use_tabs(self, tf):
        # tab mode not supported by this MDI implementation
        pass
class MDIWindow(WidgetBase):
    """A single movable/resizable sub-window within an MDIWidget."""
    def __init__(self, parent, child, title=''):
        """NOTE: this widget is not meant to be instantiated except *inside*
        of MDIWidget implementation.
        """
        WidgetBase.__init__(self)
        self.parent = parent
        mdi_w = parent.mdi_w
        # does child have a previously saved size?
        size = child.extdata.get('mdi_size', None)
        if size is not None:
            wd, ht = size
            child.resize(wd, ht)
        child_w = child.get_widget()
        label = Gtk.Label(title)
        subwin = GtkHelp.MDISubWindow(child_w, label)
        self.widget = subwin
        # attach title to child
        child.extdata.tab_title = title
        self.enable_callback('close')
        subwin.add_callback('close', self._window_close)
        # does child have a previously saved position?
        pos = child.extdata.get('mdi_pos', None)
        if pos is not None:
            subwin.x, subwin.y = pos
        mdi_w.add_subwin(subwin)
    def get_pos(self):
        """Return the (x, y) position within the MDI area."""
        return self.widget.x, self.widget.y
    def raise_(self):
        self.widget.raise_()
    def lower(self):
        self.widget.lower()
    def focus(self):
        self.widget.focus()
    def move(self, x, y):
        """Move the sub-window to (x, y) within the MDI area."""
        self.parent.mdi_w.move_page(self.widget, x, y)
    def resize(self, wd, ht):
        """Resize the sub-window to (wd, ht) pixels."""
        self.parent.mdi_w.resize_page(self.widget, wd, ht)
    def maximize(self):
        self.parent.mdi_w.maximize_page(self.widget)
    def unmaximize(self):
        raise WidgetError("this call not available for MDIWindow")
    def fullscreen(self):
        raise WidgetError("this call not available for MDIWindow")
    def unfullscreen(self):
        raise WidgetError("this call not available for MDIWindow")
    def is_fullscreen(self):
        raise WidgetError("this call not available for MDIWindow")
    def iconify(self):
        self.parent.mdi_w.minimize_page(self.widget)
    def uniconify(self):
        raise WidgetError("this call not available for MDIWindow")
    def set_title(self, title):
        """Change the sub-window's title label."""
        self.widget.label.set_text(title)
    def _window_close(self, subwin):
        return self.make_callback('close')
class ScrollArea(ContainerBase):
    """A scrollable container holding a single child widget."""

    def __init__(self):
        super(ScrollArea, self).__init__()

        sw = Gtk.ScrolledWindow()
        sw.set_border_width(2)
        # show scrollbars only when the content exceeds the viewport
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.widget = sw

        self.enable_callback('configure')
        sw.connect("size_allocate", self._resize_cb)

    def _resize_cb(self, widget, allocation):
        # report the new viewport size via the 'configure' callback
        rect = widget.get_allocation()
        # x, y = rect.x, rect.y
        width, height = rect.width, rect.height
        self.make_callback('configure', width, height)
        return True

    def set_widget(self, child):
        """Replace the current content (if any) with `child`."""
        self.remove_all()
        self.add_ref(child)
        self.widget.add_with_viewport(child.get_widget())
        self.widget.show_all()

    def scroll_to_end(self, vertical=True, horizontal=False):
        """Scroll to the maximum extent in the requested direction(s)."""
        if vertical:
            adj_w = self.widget.get_vadjustment()
            maxv = adj_w.get_upper()
            adj_w.set_value(maxv)
        if horizontal:
            adj_w = self.widget.get_hadjustment()
            maxv = adj_w.get_upper()
            adj_w.set_value(maxv)
class Splitter(ContainerBase):
    """A multi-pane splitter built from a chain of nested Gtk.Paned widgets.

    Gtk.Paned only holds two children, so N children are supported by
    nesting: each additional child after the second creates a new Paned
    packed into slot 2 of the previous one.  `self.panes` records the
    chain of Paned widgets, outermost first.
    """

    def __init__(self, orientation='horizontal', thumb_px=8):
        super(Splitter, self).__init__()
        # thumb_px ignored in this version
        self.orientation = orientation
        self.widget = self._get_pane()
        self.panes = [self.widget]

    def _get_pane(self):
        # create a new Paned of the proper orientation
        if self.orientation == 'horizontal':
            w = Gtk.HPaned()
        else:
            w = Gtk.VPaned()
        w.set_wide_handle(True)
        return w

    def add_widget(self, child):
        """Append `child` as a new resizable pane."""
        self.add_ref(child)
        child_w = child.get_widget()
        # without a Frame it can be difficult to see the divider
        frame_w = Gtk.Frame()
        #frame_w.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        frame_w.set_shadow_type(Gtk.ShadowType.NONE)
        frame_w.add(child_w)
        if len(self.children) == 1:
            # first child goes directly into slot 1 of the root pane
            self.widget.pack1(frame_w)
        else:
            # subsequent children: nest a fresh Paned into the chain
            last = self.widget
            if len(self.panes) > 0:
                last = self.panes[-1]
            w = self._get_pane()
            self.panes.append(w)
            w.pack1(frame_w)
            last.pack2(w)
        self.widget.show_all()
        self.make_callback('widget-added', child)

    def _get_sizes(self, pane):
        # return (divider position, total extent) for one Paned,
        # measured along this splitter's orientation
        rect = pane.get_allocation()
        if self.orientation == 'horizontal':
            total = rect.width
        else:
            total = rect.height
        pos = pane.get_position()
        return (pos, total)

    def get_sizes(self):
        """Return a list of pane sizes (pixels), one entry per child."""
        res = []
        if len(self.panes) > 0:
            # divider position of each pane gives the size of its first child
            for pane in self.panes[:-1]:
                pos, total = self._get_sizes(pane)
                res.append(pos)
            # the last entry is the full extent of the innermost pane
            pane = self.panes[-1]
            pos, total = self._get_sizes(pane)
            res.append(total)
        return res

    def set_sizes(self, sizes):
        """Set divider positions from a list of pixel sizes."""
        for i, pos in enumerate(sizes):
            pane = self.panes[i]
            pane.set_position(pos)
class Splitter2(ContainerBase):
    """Alternative splitter implementation backed by GtkHelp.Splitter.

    Unlike `Splitter`, the pane bookkeeping is delegated entirely to the
    GtkHelp.Splitter native widget.
    """

    def __init__(self, orientation='horizontal', thumb_px=8):
        # BUG FIX: was `super(Splitter, self).__init__()`, which raises
        # TypeError because Splitter is not in Splitter2's MRO.
        super(Splitter2, self).__init__()
        self.orientation = orientation
        self.widget = GtkHelp.Splitter(orientation=self.orientation,
                                       thumb_px=thumb_px)

    def add_widget(self, child):
        """Append `child` as a new resizable pane."""
        self.add_ref(child)
        child_w = child.get_widget()
        # without a Frame it can be difficult to see the divider
        frame_w = Gtk.Frame()
        #frame_w.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        frame_w.set_shadow_type(Gtk.ShadowType.NONE)
        frame_w.add(child_w)
        self.widget.add_widget(frame_w)
        self.widget.show_all()
        self.make_callback('widget-added', child)

    def get_sizes(self):
        """Return the list of pane sizes (pixels)."""
        return self.widget.get_sizes()

    def set_sizes(self, sizes):
        """Set pane sizes from a list of pixel sizes."""
        self.widget.set_sizes(sizes)
class GridBox(ContainerBase):
    """A container laying out children in a grid, backed by Gtk.Table."""

    def __init__(self, rows=1, columns=1):
        super(GridBox, self).__init__()

        w = Gtk.Table(rows=rows, columns=columns)
        self.widget = w
        self.num_rows = rows
        self.num_cols = columns

    def resize_grid(self, rows, columns):
        """Resize the underlying table to `rows` x `columns`."""
        self.num_rows = rows
        self.num_cols = columns
        self.widget.resize(rows, columns)

    def set_row_spacing(self, val):
        """Set the spacing (pixels) between rows."""
        self.widget.set_row_spacings(val)

    def set_column_spacing(self, val):
        """Set the spacing (pixels) between columns."""
        self.widget.set_col_spacings(val)

    def set_spacing(self, val):
        """Set both row and column spacing to `val`."""
        self.set_row_spacing(val)
        self.set_column_spacing(val)

    def add_widget(self, child, row, col, stretch=0):
        """Place `child` at (row, col); grow the grid if necessary.

        If stretch > 0 the cell expands with the container, otherwise it
        only fills its allocated cell.
        """
        resize = False
        # BUG FIX: rows/cols are 0-indexed, so attaching at row == num_rows
        # requires growing the table to row + 1 rows (previously the check
        # was `row > self.num_rows` and the new size `row`, leaving the
        # table one row/column short).
        if row >= self.num_rows:
            resize = True
            self.num_rows = row + 1
        if col >= self.num_cols:
            resize = True
            self.num_cols = col + 1
        if resize:
            self.resize_grid(self.num_rows, self.num_cols)

        self.add_ref(child)
        w = child.get_widget()
        if stretch > 0:
            xoptions = (Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.SHRINK |
                        Gtk.AttachOptions.FILL)
            yoptions = (Gtk.AttachOptions.EXPAND | Gtk.AttachOptions.SHRINK |
                        Gtk.AttachOptions.FILL)
        else:
            xoptions = (Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK)
            yoptions = (Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK)
        self.widget.attach(w, col, col + 1, row, row + 1,
                           xoptions=xoptions, yoptions=yoptions,
                           xpadding=0, ypadding=0)
        self.widget.show_all()
        self.make_callback('widget-added', child)
class Toolbar(ContainerBase):
    """An icon toolbar that can host buttons, toggles, menus and widgets."""

    def __init__(self, orientation='horizontal'):
        super(Toolbar, self).__init__()

        w = Gtk.Toolbar()
        w.set_style(Gtk.ToolbarStyle.ICONS)
        if orientation == 'horizontal':
            w.set_orientation(Gtk.Orientation.HORIZONTAL)
        else:
            w.set_orientation(Gtk.Orientation.VERTICAL)
        self.widget = w

    def add_action(self, text, toggle=False, iconpath=None, iconsize=None):
        """Add a (toggle) button, optionally with an icon; return the child.

        If `iconsize` is not given, the icon is scaled relative to the
        screen resolution recorded by the module-level Application (_app).
        """
        if toggle:
            child = ToggleButton(text)
        else:
            child = Button(text)

        if iconpath is not None:
            if iconsize is not None:
                wd, ht = iconsize
            else:
                # scale a nominal 24px icon by the screen DPI (96 dpi == 1.0)
                scale_f = _app.screen_res / 96.0
                px = int(scale_f * 24)
                wd, ht = px, px
            pixbuf = GtkHelp.pixbuf_new_from_file_at_size(iconpath, wd, ht)
            if pixbuf is not None:
                image = Gtk.Image.new_from_pixbuf(pixbuf)
                child.get_widget().set_image(image)

        self.add_widget(child)
        return child

    def add_widget(self, child):
        """Wrap `child` in a Gtk.ToolItem and append it to the toolbar."""
        # gtk3 says to add a generic widget using ToolItem.new()
        tool_w = Gtk.ToolItem.new()
        w = child.get_widget()
        tool_w.add(w)
        w.show()
        tool = ContainerBase()
        tool.widget = tool_w
        tool_w.show()
        tool.add_ref(child)
        self.add_ref(tool)
        self.widget.insert(tool_w, -1)
        self.make_callback('widget-added', child)
        return tool

    def add_menu(self, text, menu=None, mtype='tool'):
        """Add a drop-down menu to the toolbar; return the Menu object."""
        if menu is None:
            menu = Menu()
        if mtype == 'tool':
            child = self.add_action(text)
        else:
            child = Label(text, style='clickable', menu=menu)
            self.add_widget(child)
        child.add_callback('released', lambda w: menu.hide())
        child.add_callback('activated', lambda w: menu.popup())
        return menu

    def add_separator(self):
        """Append a visual separator to the toolbar."""
        sep_w = Gtk.SeparatorToolItem()
        sep = wrap(sep_w)
        self.widget.insert(sep_w, -1)
        self.add_ref(sep)
class MenuAction(WidgetBase):
    """A single menu entry, optionally checkable.

    Fires the 'activated' callback when chosen; checkable entries pass
    their new boolean state to the callback.
    """

    def __init__(self, text=None, checkable=False):
        super(MenuAction, self).__init__()
        self.text = text
        self.checkable = checkable

        if checkable:
            item = Gtk.CheckMenuItem(label=text)
            item.connect('toggled', self._cb_redirect)
        else:
            item = Gtk.MenuItem(label=text)
            item.connect('activate', self._cb_redirect)
        self.widget = item
        item.show()

        self.enable_callback('activated')

    def set_state(self, tf):
        """Set the checked state; only valid for checkable items."""
        if not self.checkable:
            raise ValueError("Not a checkable menu item")
        self.widget.set_active(tf)

    def get_state(self):
        """Return the current checked state."""
        return self.widget.get_active()

    def _cb_redirect(self, *args):
        # checkable items report their new state with the callback
        if self.checkable:
            self.make_callback('activated', self.widget.get_active())
        else:
            self.make_callback('activated')
class Menu(ContainerBase):
    """A popup menu holding MenuAction items, separators and sub-menus."""

    def __init__(self):
        super(Menu, self).__init__()

        self.widget = Gtk.Menu()
        # registry of named sub-menus; caseless name lookup
        self.menus = Bunch.Bunch(caseless=True)
        self.widget.show()

    def add_widget(self, child):
        """Append a ginga menu-item widget to this menu."""
        menuitem_w = child.get_widget()
        self.widget.append(menuitem_w)
        self.add_ref(child)
        # self.widget.show_all()
        self.make_callback('widget-added', child)

    def add_name(self, name, checkable=False):
        """Create and append a MenuAction labeled `name`; return it."""
        child = MenuAction(text=name, checkable=checkable)
        self.add_widget(child)
        return child

    def add_menu(self, name):
        """Create and append a sub-menu labeled `name`; return the Menu."""
        item_w = Gtk.MenuItem(label=name)
        child = Menu()
        self.add_ref(child)
        self.menus[name] = child
        item_w.set_submenu(child.get_widget())
        self.widget.append(item_w)
        item_w.show()
        return child

    def get_menu(self, name):
        """Return the sub-menu previously added under `name`."""
        return self.menus[name]

    def add_separator(self):
        """Append a visual separator to the menu."""
        sep = Gtk.SeparatorMenuItem()
        self.widget.append(sep)
        sep.show()

    def popup(self, widget=None):
        """Pop the menu up at the current pointer position."""
        menu = self.widget
        menu.show_all()
        now = int(0)
        if menu.get_sensitive():
            menu.popup(None, None, None, None, 0, now)
class Menubar(ContainerBase):
    """A horizontal menu bar of named Menu objects."""

    def __init__(self):
        super(Menubar, self).__init__()

        self.widget = Gtk.MenuBar()
        # registry of menus by name; caseless lookup
        self.menus = Bunch.Bunch(caseless=True)

    def add_widget(self, child, name):
        """Attach an existing Menu under label `name`; return the Menu."""
        if not isinstance(child, Menu):
            raise ValueError("child widget needs to be a Menu object")
        item_w = Gtk.MenuItem(label=name)
        item_w.set_submenu(child.get_widget())
        self.add_ref(child)
        self.widget.append(item_w)
        self.menus[name] = child
        item_w.show()
        self.make_callback('widget-added', child)
        return child

    def add_name(self, name):
        """Create a new Menu under label `name`; return the Menu."""
        item_w = Gtk.MenuItem(label=name)
        child = Menu()
        self.add_ref(child)
        self.menus[name] = child
        item_w.set_submenu(child.get_widget())
        self.widget.append(item_w)
        item_w.show()
        return child

    def get_menu(self, name):
        """Return the menu previously registered under `name`."""
        return self.menus[name]
class TopLevelMixin(object):
    """Mixin providing window-manager operations for top-level windows.

    Expects `self.widget` to be a Gtk top-level window and `self.extdata`
    to be available for caching geometry.
    """

    def __init__(self, title=None):
        self._fullscreen = False

        self.widget.connect("destroy", self._quit)
        self.widget.connect("delete_event", self._close_event)
        self.widget.connect("window_state_event", self._window_event)
        self.widget.connect("configure-event", self._configure_event)
        if title is not None:
            self.widget.set_title(title)

        self.enable_callback('close')

    def show(self):
        """Show the window and all its children."""
        self.widget.show_all()

    def hide(self):
        """Hide the window."""
        self.widget.hide()

    def _quit(self, *args):
        self.close()

    def _close_event(self, widget, event):
        # NOTE(review): `return` inside `finally` swallows any exception
        # raised by close() -- presumably intentional here so the window
        # is never auto-destroyed, but worth confirming.
        try:
            self.close()
        finally:
            # don't automatically destroy window
            return True

    def _window_event(self, widget, event):
        # NOTE(review): this tests changed_mask only, not the new window
        # state, so _fullscreen is set True whenever the fullscreen or
        # maximized bit *changes* (including when leaving those states) --
        # confirm whether event.new_window_state should be consulted.
        if ((event.changed_mask & Gdk.WindowState.FULLSCREEN) or
                (event.changed_mask & Gdk.WindowState.MAXIMIZED)):
            self._fullscreen = True
        else:
            self._fullscreen = False

    def _configure_event(self, widget, event):
        # cache window geometry so it can be reported even when the
        # native window is not realized
        x, y, width, height = event.x, event.y, event.width, event.height
        x, y = self.widget.translate_coordinates(self.widget, x, y)
        self.extdata.setvals(x=x, y=y, width=width, height=height)
        return False

    def close(self):
        """Invoke the 'close' callback; does not destroy the native window."""
        # try:
        #     self.widget.destroy()
        # except Exception as e:
        #     pass
        # self.widget = None

        self.make_callback('close')

    def get_size(self):
        """Return (width, height), falling back to cached values if
        the window is not yet realized."""
        try:
            rect = self.widget.get_allocation()
            # x, y = rect.x, rect.y
            wd, ht = rect.width, rect.height
        except Exception as e:
            # window maybe isn't realized yet--try other ways
            # req = self.widget.get_size_request()
            # wd, ht = req
            min_req, nat_req = self.widget.get_preferred_size()
            wd, ht = nat_req.width, nat_req.height
            ed = self.extdata
            wd, ht = ed.get('width', wd), ed.get('height', ht)
        return wd, ht

    def get_pos(self):
        """Return (x, y), falling back to cached values if the native
        window is not available."""
        res = None
        window = self.widget.get_window()
        if window is not None:
            res = window.get_origin()
            if isinstance(res, tuple) and len(res) == 2:
                return res
        ed = self.extdata
        x, y = ed.get('x', None), ed.get('y', None)
        return x, y

    def raise_(self):
        window = self.widget.get_window()
        if window is not None:
            window.raise_()

    def lower(self):
        window = self.widget.get_window()
        if window is not None:
            window.lower()

    def focus(self):
        window = self.widget.get_window()
        if window is not None:
            window.focus()

    def move(self, x, y):
        window = self.widget.get_window()
        if window is not None:
            window.move(x, y)

    def maximize(self):
        window = self.widget.get_window()
        if window is not None:
            window.maximize()

    def unmaximize(self):
        window = self.widget.get_window()
        if window is not None:
            window.unmaximize()

    def is_maximized(self):
        # NOTE(review): unlike the other methods, this does not guard
        # against get_window() returning None -- confirm callers only
        # invoke it on realized windows.
        window = self.widget.get_window()
        mask = Gdk.WindowState.MAXIMIZED
        return window.get_state() & mask != 0

    def fullscreen(self):
        window = self.widget.get_window()
        if window is not None:
            window.fullscreen()

    def unfullscreen(self):
        window = self.widget.get_window()
        if window is not None:
            window.unfullscreen()

    def is_fullscreen(self):
        return self._fullscreen

    def iconify(self):
        window = self.widget.get_window()
        if window is not None:
            window.iconify()

    def uniconify(self):
        window = self.widget.get_window()
        if window is not None:
            window.deiconify()

    def set_title(self, title):
        """Set the window title."""
        self.widget.set_title(title)
class TopLevel(TopLevelMixin, ContainerBase):
    """A top-level application window holding a single child widget."""

    def __init__(self, title=None):
        ContainerBase.__init__(self)

        self._fullscreen = False

        widget = GtkHelp.TopLevel()
        # set the ginga application icon on the window
        ginga_icon = os.path.join(icondir, "ginga.svg")
        widget.set_icon(GtkHelp.get_icon(ginga_icon))

        self.widget = widget
        widget.set_border_width(0)

        TopLevelMixin.__init__(self, title=title)

    def set_widget(self, child):
        """Set `child` as the window's single content widget."""
        self.add_ref(child)
        child_w = child.get_widget()
        self.widget.add(child_w)
class Application(Callback.Callbacks):
    """Gtk application object: owns the event loop, screen metrics and
    the registry of top-level windows.

    Also publishes itself via the module-level `_app` global, which other
    widgets (e.g. Toolbar icon scaling) consult.
    """

    def __init__(self, logger=None, settings=None):
        global _app
        super(Application, self).__init__()
        self.logger = logger
        if settings is None:
            settings = Settings.SettingGroup(logger=self.logger)
        self.settings = settings
        self.settings.add_defaults(font_scaling_factor=None)

        self.window_list = []
        self.window_dict = {}
        self.wincnt = 0

        try:
            # probe the monitor under the active window for geometry/DPI
            display = Gdk.Display.get_default()
            screen = display.get_default_screen()
            window = screen.get_active_window()
            monitor = screen.get_monitor_at_window(window)
            g = screen.get_monitor_geometry(monitor)
            self.screen_ht = g.height
            self.screen_wd = g.width
            self.screen_res = screen.get_resolution()

            scale = self.settings.get('font_scaling_factor', None)
            if scale is None:
                # hack for Gtk--scale fonts on HiDPI displays
                scale = self.screen_res / 72.0
                self.logger.debug("setting default font_scaling_factor={}".format(scale))

            from ginga.fonts import font_asst
            font_asst.default_scaling_factor = scale

        except Exception as e:
            # no display information available; assume sane defaults
            self.screen_wd = 1600
            self.screen_ht = 1200
            self.screen_res = 96
        # self.logger.debug("screen dimensions %dx%d" % (
        #     self.screen_wd, self.screen_ht))

        _app = self

        # supposedly needed for GObject < 3.10.2
        GObject.threads_init()

        # self._time_save = time.time()

        for name in ('shutdown', ):
            self.enable_callback(name)

        # Set up Gtk style
        GtkHelp.set_default_style()

    def get_screen_size(self):
        """Return (width, height) of the screen in pixels."""
        return (self.screen_wd, self.screen_ht)

    def process_events(self):
        """Drain all pending Gtk events, logging (not raising) errors."""
        while Gtk.events_pending():
            try:
                Gtk.main_iteration()
                # TEMP: to help solve the issue of gtk3 events getting
                # lost--we want to know whether the process_event loop
                # is running, so ping periodically if events are showing
                # up
                # cur_time = time.time()
                # if cur_time - self._time_save > 10.0:
                #     self.logger.info("process_events ping!")
                #     self._time_save = cur_time
            except Exception as e:
                self.logger.error("Exception in main_iteration() loop: %s" %
                                  (str(e)))

    def process_end(self):
        pass

    def add_window(self, window, wid=None):
        """Register `window` under id `wid` (auto-generated if None)."""
        if wid is None:
            wid = 'win%d' % (self.wincnt)
            self.wincnt += 1
        window.wid = wid
        window.url = ''
        window.app = self

        self.window_dict[wid] = window

    def get_window(self, wid):
        """Return the window registered under `wid` (KeyError if absent)."""
        return self.window_dict[wid]

    def has_window(self, wid):
        """Return True if a window is registered under `wid`."""
        return wid in self.window_dict

    def get_wids(self):
        """Return the list of registered window ids."""
        return list(self.window_dict.keys())

    def make_window(self, title=None):
        """Create, register and return a new TopLevel window."""
        w = TopLevel(title=title)
        self.add_window(w)
        return w

    def make_timer(self):
        """Return a new backend timer object."""
        return GtkHelp.Timer()

    def mainloop(self):
        """Enter the Gtk main loop (blocks)."""
        Gtk.main()

    def quit(self):
        """Exit the Gtk main loop."""
        Gtk.main_quit()
class Dialog(TopLevelMixin, WidgetBase):
    """A (possibly modal) dialog window with a VBox content area.

    `buttons` is a sequence of (name, value) pairs; pressing a button
    fires the 'activated' callback with its value.
    """

    def __init__(self, title='', flags=0, buttons=[],
                 parent=None, modal=False):
        WidgetBase.__init__(self)

        if parent is not None:
            self.parent = parent.get_widget()
        else:
            self.parent = None

        # Gtk.Dialog expects a flat tuple: (name1, val1, name2, val2, ...)
        button_list = []
        for name, val in buttons:
            button_list.extend([name, val])

        self.widget = Gtk.Dialog(title=title, flags=flags,
                                 buttons=tuple(button_list))
        self.widget.set_modal(modal)

        TopLevelMixin.__init__(self, title=title)

        self.content = VBox()
        self.content.set_border_width(0)
        content = self.widget.get_content_area()
        content.pack_start(self.content.get_widget(), True, True, 0)

        self.widget.connect("response", self._cb_redirect)
        self.enable_callback('activated')

    def _cb_redirect(self, w, val):
        # translate the Gtk "response" signal into an 'activated' callback
        self.make_callback('activated', val)

    def get_content_area(self):
        """Return the VBox into which dialog content should be packed."""
        return self.content
class SaveDialog(object):
    """A blocking native file-save dialog.

    `selectedfilter` is a glob pattern (e.g. '*.png'); when given, a
    matching Gtk file filter is installed and the chosen path is forced
    to carry the corresponding extension.
    """

    def __init__(self, title='Save File', selectedfilter=None):
        action = Gtk.FileChooserAction.SAVE
        buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                   Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
        self.widget = Gtk.FileChooserDialog(title=title, action=action,
                                            buttons=buttons)
        self.selectedfilter = selectedfilter
        if selectedfilter is not None:
            self._add_filter(selectedfilter)

    def _add_filter(self, selectedfilter):
        # install a Gtk file filter for the pattern and remember the
        # extension to append to the chosen file name
        filtr = Gtk.FileFilter()
        filtr.add_pattern(selectedfilter)
        if 'png' in selectedfilter:
            filtr.set_name('Image (*.png)')
            self.selectedfilter = '.png'
        elif 'avi' in selectedfilter:
            filtr.set_name('Movie (*.avi)')
            self.selectedfilter = '.avi'
        elif 'npz' in selectedfilter:
            filtr.set_name('Numpy Compressed Archive (*.npz)')
            self.selectedfilter = '.npz'
        self.widget.add_filter(filtr)

    def get_path(self):
        """Run the dialog; return the chosen path, or None if cancelled.

        The selected filter's extension is appended if missing.
        """
        response = self.widget.run()
        try:
            if response == Gtk.ResponseType.OK:
                path = self.widget.get_filename()
                if (self.selectedfilter is not None and
                        not path.endswith(self.selectedfilter)):
                    path += self.selectedfilter
                return path
            # CANCEL, DELETE_EVENT (window closed), etc.
            return None
        finally:
            # BUG FIX: previously the dialog was destroyed only for the
            # OK and CANCEL responses; closing the dialog via the window
            # manager (DELETE_EVENT) leaked the widget.  Destroy it on
            # every response path.
            self.widget.destroy()
class DragPackage(object):
    """Payload holder for an in-progress drag-and-drop operation.

    Wraps a native selection object and records the source widget.
    """

    def __init__(self, src_widget, selection):
        self.src_widget = src_widget
        self._selection = selection

    def set_urls(self, urls):
        """Attach a list of URIs to the drag selection."""
        self._selection.set_uris(urls)

    def set_text(self, text):
        """Attach plain text to the drag selection."""
        self._selection.set_text(text, len(text))

    def start_drag(self):
        """No-op in this backend; the native machinery starts the drag."""
        pass
class WidgetMoveEvent(object):
    """Event describing a request to move `child` out of `src_widget`.

    Handlers call accept() or reject() to record their decision in the
    internal result flag.
    """

    def __init__(self, src_widget, child):
        self.src_widget = src_widget
        self.child = child
        # rejected until a handler explicitly accepts
        self._result = False

    def accept(self):
        """Mark the move as accepted."""
        self._result = True

    def reject(self):
        """Mark the move as rejected."""
        self._result = False
# MODULE FUNCTIONS
def name_mangle(name, pfx=''):
    """Lowercase `name` and replace every character that is not a letter,
    digit or underscore with '_'; prepend `pfx`.

    Used to derive safe attribute keys from widget titles.
    """
    cleaned = ''.join(ch if (ch.isalpha() or ch.isdigit() or ch == '_')
                      else '_'
                      for ch in name.lower())
    return pfx + cleaned
def make_widget(title, wtype):
    """Factory: create and return a ginga widget for symbolic type `wtype`.

    `title` is used as the label/text where the widget type displays one.
    Raises ValueError for an unrecognized `wtype`.
    """
    if wtype == 'label':
        w = Label(title)
        # right-justify the label text
        w.label.set_alignment(0.95, 0.5)
    elif wtype == 'llabel':
        w = Label(title)
        # left-justify the label text
        w.label.set_alignment(0.05, 0.95)
    elif wtype == 'entry':
        w = TextEntry()
        # w.get_widget().set_width_chars(12)
    elif wtype == 'entryset':
        w = TextEntrySet()
    elif wtype == 'combobox':
        w = ComboBox()
    elif wtype == 'spinbutton':
        w = SpinBox(dtype=int)
    elif wtype == 'spinfloat':
        w = SpinBox(dtype=float)
    elif wtype == 'vbox':
        w = VBox()
    elif wtype == 'hbox':
        w = HBox()
    elif wtype in ('hslider', 'hscale'):
        w = Slider(orientation='horizontal')
    elif wtype in ('vslider', 'vscale'):
        w = Slider(orientation='vertical')
    elif wtype in ('checkbox', 'checkbutton'):
        w = CheckBox(title)
    elif wtype == 'radiobutton':
        w = RadioButton(title)
    elif wtype == 'togglebutton':
        w = ToggleButton(title)
    elif wtype == 'button':
        w = Button(title)
    elif wtype == 'spacer':
        w = Label('')
    elif wtype == 'textarea':
        w = TextArea(editable=True)
    elif wtype == 'toolbar':
        w = Toolbar()
    elif wtype == 'progress':
        w = ProgressBar()
    elif wtype == 'menubar':
        w = Menubar()
    elif wtype == 'dial':
        w = Dial()
    else:
        raise ValueError("Bad wtype=%s" % wtype)
    return w
def hadjust(w, orientation):
    """Ostensibly, a function to reduce the vertical footprint of a widget
    that is normally used in a vertical stack (usually a Splitter), when it
    is instead used in a horizontal orientation.

    Currently a pass-through for both orientations: the original approach
    (wrapping the widget in a vertical Splitter with a spacer) did not seem
    to be needed for most plugins and reduced the visual aesthetic, so the
    widget is returned unchanged.
    """
    return w
def build_info(captions, orientation='vertical'):
    """Build a grid of widgets from a caption specification.

    `captions` is a sequence of rows; each row is a flat tuple of
    (title, wtype, title, wtype, ...) pairs.  Returns (wrapper_widget, wb)
    where `wb` is a Bunch mapping mangled titles to the created widgets.
    Raises ValueError if any row has an odd number of entries.
    """
    vbox = Gtk.VBox(spacing=2)

    numrows = len(captions)
    # widest row determines the number of (title, wtype) column pairs
    numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
    if (numcols % 2) != 0:
        raise ValueError("Column spec is not an even number")
    numcols = int(numcols // 2)

    table = Gtk.Table(rows=numrows, columns=numcols)
    table.set_row_spacings(2)
    table.set_col_spacings(4)
    vbox.pack_start(table, False, False, 0)

    wb = Bunch.Bunch()
    row = 0
    for tup in captions:
        col = 0
        while col < numcols:
            idx = col * 2
            if idx < len(tup):
                title, wtype = tup[idx:idx + 2]
                # titles ending in ':' are labels; key them with a 'lbl_' prefix
                if not title.endswith(':'):
                    name = name_mangle(title)
                else:
                    name = name_mangle('lbl_' + title[:-1])
                w = make_widget(title, wtype)
                table.attach(w.get_widget(), col, col + 1, row, row + 1,
                             xoptions=Gtk.AttachOptions.FILL,
                             yoptions=Gtk.AttachOptions.FILL,
                             xpadding=1, ypadding=1)
                wb[name] = w
            col += 1
        row += 1
    vbox.show_all()

    w = wrap(vbox)
    w = hadjust(w, orientation=orientation)

    return w, wb
def wrap(native_widget):
    """Wrap a native Gtk widget in a generic WidgetBase shim so it can
    be used anywhere a ginga widget is expected."""
    shim = WidgetBase()
    shim.widget = native_widget
    return shim
# END
| bsd-3-clause | f8586c9995f3d0da2db9d27632d16034 | 29.075875 | 85 | 0.56384 | 3.689323 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/Zoom.py | 3 | 13813 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
The ``Zoom`` plugin shows an enlarged image of a cutout region centered
under the cursor position in the associated channel image. As the
cursor is moved around the image, the zoom image updates to allow close
inspection of the pixels or precise control in conjunction with other
plugin operations.
**Plugin Type: Global**
``Zoom`` is a global plugin. Only one instance can be opened.
**Usage**
The magnification of the zoom window can be changed by adjusting the
"Zoom Amount" slider.
Two modes of operation are possible -- absolute and relative zoom:
* In absolute mode, the zoom amount controls exactly the zoom level
shown in the cutout; For example, the channel image may be zoomed into
10X, but the zoom image will only show a 3X image if the zoom amount
is set to 3X.
* In relative mode, the zoom amount setting is interpreted as relative
to the zoom setting of the channel image. If the zoom amount is set
to 3X and the channel image is zoomed to 10X then the zoom image shown
will be 13X (10X + 3X). Note that the zoom amount setting can be < 1,
so a setting of 1/3X with a 3X zoom in the channel image will produce
a 1X zoom image.
The "Refresh Interval" setting controls how quickly the ``Zoom`` plugin
responds to the movement of the cursor in updating the zoom image. The
value is specified in milliseconds.
.. tip:: Usually setting a small refresh interval *improves* the overall
responsiveness of the zoom image, and the default value of 20 is
a reasonable one. You can experiment with the value if the zoom
image seems too jerky or out of sync with the mouse movement in
the channel image window.
The "Defaults" button restores the default settings of the controls.
"""
import time
from ginga.gw import Widgets, Viewers
from ginga import GingaPlugin
__all__ = ['Zoom']
class Zoom(GingaPlugin.GlobalPlugin):
    """Global plugin showing a magnified cutout of the channel image
    centered under the cursor (see module docstring for usage)."""

    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Zoom, self).__init__(fv)

        self.zoomimage = None
        self.default_radius = 30
        self.default_zoom = 3
        self.zoom_x = 0
        self.zoom_y = 0
        # True: zoom amount is absolute; False: relative to channel zoom
        self.t_abszoom = True
        # timer that throttles zoom-image refreshes to refresh_interval
        self.zoomtask = fv.get_backend_timer()
        self.zoomtask.set_callback('expired', self.showzoom_timer_cb)
        # the channel viewer currently mirrored by the zoom image
        self.fitsimage_focus = None
        self.layer_tag = 'shared-canvas'
        self.update_time = time.time()

        spec = self.fv.get_plugin_spec(str(self))

        # read preferences for this plugin
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_Zoom')
        self.settings.add_defaults(zoom_amount=self.default_zoom,
                                   closeable=not spec.get('hidden', False),
                                   refresh_interval=0.02)
        self.settings.load(onError='silent')

        self.zoom_amount = self.settings.get('zoom_amount', self.default_zoom)
        self.refresh_interval = self.settings.get('refresh_interval', 0.02)

        # viewer attributes mirrored from the focused channel viewer
        self.copy_attrs = ['transforms', 'cutlevels', 'rotation', 'rgbmap',
                           'icc']  # , 'interpolation']
        self._wd = 300
        self._ht = 300
        _sz = max(self._wd, self._ht)
        # hack to set a reasonable starting position for the splitter
        self._split_sizes = [_sz, _sz]

        fv.add_callback('add-channel', self.add_channel)
        fv.add_callback('channel-change', self.focus_cb)

        self.gui_up = False

    def build_gui(self, container):
        """Construct the plugin GUI (zoom viewer + controls)."""
        vtop = Widgets.VBox()
        vtop.set_border_width(4)

        box, sw, orientation = Widgets.get_oriented_box(container,
                                                        orientation=self.settings.get('orientation', None))

        # Uncomment to debug; passing parent logger generates too
        # much noise in the main logger
        #zi = Viewers.CanvasView(logger=self.logger)
        zi = Viewers.CanvasView(logger=None)
        zi.set_desired_size(self._wd, self._ht)
        zi.enable_autozoom('off')
        zi.enable_autocuts('off')
        zi.zoom_to(self.default_zoom)
        settings = zi.get_settings()
        settings.get_setting('zoomlevel').add_callback(
            'set', self.zoomset, zi)
        zi.set_bg(0.4, 0.4, 0.4)
        zi.show_pan_mark(True)
        # for debugging
        zi.set_name('zoomimage')
        self.zoomimage = zi

        bd = zi.get_bindings()
        bd.enable_zoom(False)
        bd.enable_pan(False)
        bd.enable_cmap(False)

        iw = Viewers.GingaViewerWidget(zi)
        iw.resize(self._wd, self._ht)
        paned = Widgets.Splitter(orientation=orientation)
        paned.add_widget(iw)
        self.w.splitter = paned

        vbox2 = Widgets.VBox()
        captions = (("Zoom Amount:", 'label', 'Zoom Amount', 'hscale'),
                    )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        vbox2.add_widget(w, stretch=0)

        self.w.zoom_amount.set_limits(-20, 30, incr_value=1)
        self.w.zoom_amount.set_value(self.zoom_amount)
        self.w.zoom_amount.add_callback('value-changed', self.set_amount_cb)
        self.w.zoom_amount.set_tracking(True)

        captions = (("Zoom:", 'label', 'Zoom', 'label'),
                    ("Relative Zoom", 'checkbutton'),
                    ("Refresh Interval", 'label',
                     'Refresh Interval', 'spinbutton'),
                    ("Defaults", 'button'),
                    )

        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.zoom.set_text(self.fv.scale2text(zi.get_scale()))
        b.relative_zoom.set_state(not self.t_abszoom)
        b.relative_zoom.add_callback("activated", self.set_absrel_cb)
        b.defaults.add_callback("activated", lambda w: self.set_defaults())
        b.refresh_interval.set_limits(0, 200, incr_value=1)
        b.refresh_interval.set_value(int(self.refresh_interval * 1000))
        b.refresh_interval.add_callback('value-changed', self.set_refresh_cb)

        row = Widgets.HBox()
        row.add_widget(w, stretch=0)
        row.add_widget(Widgets.Label(''), stretch=1)
        vbox2.add_widget(row, stretch=0)

        # stretch
        spacer = Widgets.Label('')
        vbox2.add_widget(spacer, stretch=1)

        box.add_widget(vbox2, stretch=1)

        paned.add_widget(sw)
        paned.set_sizes(self._split_sizes)

        vtop.add_widget(paned, stretch=5)

        if self.settings.get('closeable', False):
            btns = Widgets.HBox()
            btns.set_border_width(4)
            btns.set_spacing(4)

            btn = Widgets.Button("Close")
            btn.add_callback('activated', lambda w: self.close())
            btns.add_widget(btn)
            btn = Widgets.Button("Help")
            btn.add_callback('activated', lambda w: self.help())
            btns.add_widget(btn, stretch=0)
            btns.add_widget(Widgets.Label(''), stretch=1)
            vtop.add_widget(btns, stretch=0)

        container.add_widget(vtop, stretch=5)
        self.gui_up = True

    def prepare(self, fitsimage):
        """Hook our callbacks into a channel viewer."""
        fitssettings = fitsimage.get_settings()
        fitsimage.add_callback('cursor-changed', self.motion_cb)
        fitsimage.add_callback('redraw', self.redraw_cb)
        fitssettings.get_setting('zoomlevel').add_callback(
            'set', self.zoomset_cb, fitsimage)

    def add_channel(self, viewer, chinfo):
        """Callback: a new channel was added to the application."""
        if not self.gui_up:
            return
        self.prepare(chinfo.fitsimage)

    def start(self):
        """Plugin start: wire up all existing channels and the focused one."""
        names = self.fv.get_channel_names()
        for name in names:
            channel = self.fv.get_channel(name)
            self.add_channel(self.fv, channel)

        # set up for currently focused channel
        channel = self.fv.get_channel_info()
        if channel is not None:
            self.focus_cb(self.fv, channel)

    def stop(self):
        """Plugin stop: remember splitter geometry for next start."""
        self.gui_up = False
        self._split_sizes = self.w.splitter.get_sizes()
        return True

    # CALLBACKS

    def update_zoomviewer(self, channel):
        """Point the zoom viewer at `channel`'s viewer and mirror its look."""
        fitsimage = channel.fitsimage
        self.fitsimage_focus = fitsimage
        # Reflect transforms, colormap, etc.
        fitsimage.copy_attributes(self.zoomimage, self.copy_attrs)

        # share the channel's canvas so overlays appear in the zoom image
        p_canvas = self.zoomimage.get_private_canvas()
        try:
            p_canvas.delete_object_by_tag(self.layer_tag)
        except Exception:
            pass

        canvas = fitsimage.get_canvas()
        p_canvas.add(canvas, tag=self.layer_tag)

        # NOTE: necessary for zoom viewer to correctly handle some settings
        # TODO: see if there is a cleaner way to do this
        self.zoomimage._imgobj = fitsimage._imgobj

        self.zoomimage.redraw(whence=0)

    def redo(self, channel, image):
        """Callback: new image loaded in a channel."""
        if not self.gui_up:
            return
        fitsimage = channel.fitsimage
        # only track the viewer that currently has the focus
        if fitsimage != self.fv.getfocus_viewer():
            return True

        self.update_zoomviewer(channel)

    def focus_cb(self, viewer, channel):
        """Callback: channel focus changed."""
        if not self.gui_up:
            return
        self.update_zoomviewer(channel)

    def _zoomset(self, fitsimage, zoomlevel):
        # apply the configured zoom amount (absolute or relative) to
        # the zoom viewer
        if fitsimage != self.fitsimage_focus:
            return True
        if self.t_abszoom:
            # Did user set to absolute zoom?
            myzoomlevel = self.zoom_amount

        else:
            # Amount of zoom is a relative amount
            myzoomlevel = zoomlevel + self.zoom_amount

        self.logger.debug("zoomlevel=%d myzoom=%d" % (
            zoomlevel, myzoomlevel))
        self.zoomimage.zoom_to(myzoomlevel)
        return True

    def zoomset_cb(self, setting, zoomlevel, fitsimage):
        """This method is called when a main FITS widget changes zoom level.
        """
        if not self.gui_up:
            return
        fac_x, fac_y = fitsimage.get_scale_base_xy()
        fac_x_me, fac_y_me = self.zoomimage.get_scale_base_xy()
        if (fac_x != fac_x_me) or (fac_y != fac_y_me):
            # keep the zoom viewer's scale basis in sync with the channel
            alg = fitsimage.get_zoom_algorithm()
            self.zoomimage.set_zoom_algorithm(alg)
            self.zoomimage.set_scale_base_xy(fac_x, fac_y)
        return self._zoomset(self.fitsimage_focus, zoomlevel)

    # LOGIC

    def magnify_xy(self, fitsimage, data_x, data_y):
        # Show zoom image in zoom window
        self.zoom_x, self.zoom_y = data_x, data_y

        # If this is a new source, then update our widget with the
        # attributes of the source
        if self.fitsimage_focus != fitsimage:
            chname = self.fv.get_channel_name(fitsimage)
            channel = self.fv.get_channel(chname)
            self.focus_cb(self.fv, channel)

        # If the refresh interval has expired then update the zoom image;
        # otherwise (re)set the timer until the end of the interval.
        cur_time = time.time()
        elapsed = cur_time - self.update_time
        if elapsed > self.refresh_interval:
            # cancel timer
            self.zoomtask.clear()
            self.showzoom(data_x, data_y)
        else:
            # store needed data into the timer
            self.zoomtask.data.setvals(data_x=data_x, data_y=data_y)
            # calculate delta until end of refresh interval
            period = self.refresh_interval - elapsed
            # set timer
            self.zoomtask.cond_set(period)
        return True

    def motion_cb(self, fitsimage, button, data_x, data_y):
        """Callback: cursor moved in a channel viewer."""
        if not self.gui_up:
            return
        self.magnify_xy(fitsimage, data_x, data_y)
        return False

    def redraw_cb(self, fitsimage, whence):
        """Callback: channel viewer redrew; refresh our view at the
        last known cursor position."""
        if not self.gui_up:
            return
        if self.fitsimage_focus != fitsimage:
            return
        # force an update of attributes by clearing the focus tracker
        self.fitsimage_focus = None
        data_x, data_y = fitsimage.get_last_data_xy()[:2]
        self.magnify_xy(fitsimage, data_x, data_y)
        return False

    def showzoom_timer_cb(self, timer):
        # throttle timer expired; show the pending position
        if not self.gui_up:
            return
        data = timer.data
        self._zoom_data(self.zoomimage, data.data_x, data.data_y)

    def _zoom_data(self, fitsimage, data_x, data_y):
        # center the zoom viewer on the given data coordinates
        fitsimage.set_pan(data_x, data_y)

    def showzoom(self, data_x, data_y):
        # set the zoom image
        self.fv.gui_do(self._zoom_data, self.zoomimage, data_x, data_y)

    def set_amount_cb(self, widget, val):
        """This method is called when 'Zoom Amount' control is adjusted.
        """
        self.zoom_amount = val
        # NOTE(review): assumes a channel viewer has already been focused;
        # fitsimage_focus may still be None early on -- confirm.
        zoomlevel = self.fitsimage_focus.get_zoom()
        self._zoomset(self.fitsimage_focus, zoomlevel)

    def set_absrel_cb(self, w, val):
        """Callback: 'Relative Zoom' checkbox toggled."""
        self.t_abszoom = not val
        zoomlevel = self.fitsimage_focus.get_zoom()
        return self._zoomset(self.fitsimage_focus, zoomlevel)

    def set_defaults(self):
        """Restore default settings of the zoom controls."""
        self.t_abszoom = True
        self.w.relative_zoom.set_state(not self.t_abszoom)
        self.w.zoom_amount.set_value(self.default_zoom)
        self.zoomimage.zoom_to(self.default_zoom)

    def zoomset(self, setting, zoomlevel, fitsimage):
        """Callback: the zoom viewer's own zoom level changed; update
        the readout label."""
        text = self.fv.scale2text(self.zoomimage.get_scale())
        self.w.zoom.set_text(text)

    def set_refresh_cb(self, w, val):
        """Callback: 'Refresh Interval' spinner changed (value in ms)."""
        self.refresh_interval = val / 1000.0
        self.logger.debug("Setting refresh time to %.4f sec" % (
            self.refresh_interval))

    def close(self):
        """Close this plugin."""
        self.fv.stop_global_plugin(str(self))
        return True

    def __str__(self):
        # name under which the plugin is registered
        return 'zoom'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example  # noqa
if __doc__ is not None:
    # __doc__ is None when run with `python -OO`; skip in that case
    __doc__ += generate_cfg_example('plugin_Zoom', package='ginga')
# END
| bsd-3-clause | 03b1b6089f9950f3fc9c28a004a0b476 | 34.784974 | 107 | 0.612032 | 3.59901 | false | false | false | false |
ejeschke/ginga | ginga/examples/matplotlib/example1_mpl.py | 2 | 5580 | #! /usr/bin/env python
#
# example1_mpl.py -- Simple, configurable FITS viewer using a matplotlib
# QtAgg backend for Ginga and embedded in a Qt program.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
Usage:
example1_mpl.py [fits file]
You need Qt5 or Qt6 with pyqt bindings (or pyside) installed to run this
example.
"""
import sys
from matplotlib.figure import Figure
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
from ginga.mplw.ImageViewMpl import CanvasView
from ginga.mplw.FigureCanvasQt import FigureCanvas
from ginga.misc import log
from ginga.util.loader import load_data
class FitsViewer(QtGui.QMainWindow):
    """Simple Qt main window embedding a Ginga viewer that renders
    through a matplotlib figure canvas.

    Parameters
    ----------
    logger : :py:class:`~logging.Logger`
        Logger used by the embedded Ginga viewer.
    """

    def __init__(self, logger):
        super(FitsViewer, self).__init__()
        self.logger = logger

        # matplotlib figure + Qt canvas that hosts the Ginga rendering
        fig = Figure()
        w = FigureCanvas(fig)

        fi = CanvasView(logger=self.logger)
        fi.enable_autocuts('on')
        fi.set_autocut_params('zscale')
        fi.enable_auto_orient(True)
        fi.enable_autozoom('on')
        #fi.set_callback('drag-drop', self.drop_file)
        fi.set_callback('none-move', self.motion)
        fi.set_bg(0.2, 0.2, 0.2)
        fi.ui_set_active(True)
        fi.set_figure(fig)
        # FIX: this call was duplicated (both before and after
        # set_figure()); calling it once after the figure is attached
        # is sufficient
        fi.show_mode_indicator(True, corner='ur')
        self.fitsimage = fi

        fi.get_bindings().enable_all(True)

        vbox = QtGui.QVBoxLayout()
        vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
        vbox.setSpacing(1)
        vbox.addWidget(w, stretch=1)

        # readout line for cursor position / WCS / data value
        self.readout = QtGui.QLabel("")
        vbox.addWidget(self.readout, stretch=0,
                       alignment=QtCore.Qt.AlignCenter)

        # button row
        hbox = QtGui.QHBoxLayout()
        hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
        wopen = QtGui.QPushButton("Open File")
        wopen.clicked.connect(self.open_file)
        wquit = QtGui.QPushButton("Quit")
        wquit.clicked.connect(self.close)
        hbox.addStretch(1)
        for w in (wopen, wquit):
            hbox.addWidget(w, stretch=0)
        hw = QtGui.QWidget()
        hw.setLayout(hbox)
        vbox.addWidget(hw, stretch=0)

        vw = QtGui.QWidget()
        self.setCentralWidget(vw)
        vw.setLayout(vbox)

        fi.configure(512, 512)

    def get_fitsimage(self):
        """Return the embedded Ginga viewer object."""
        return self.fitsimage

    def load_file(self, filepath):
        """Load the file at `filepath` into the viewer."""
        image = load_data(filepath, logger=self.logger)
        self.fitsimage.set_image(image)

    def open_file(self):
        """Prompt for a FITS file and load it."""
        res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
                                                ".", "FITS files (*.fits)")
        # newer Qt bindings return (filename, filter); older return a string
        if isinstance(res, tuple):
            fileName = res[0]
        else:
            fileName = str(res)
        if len(fileName) != 0:
            self.load_file(fileName)

    def drop_file(self, fitsimage, paths):
        """Drag-and-drop callback: load the first dropped path."""
        fileName = paths[0]
        self.load_file(fileName)

    def motion(self, fitsimage, button, data_x, data_y):
        """Cursor-motion callback: update the readout with FITS position,
        WCS coordinates and the data value under the cursor.
        """
        # Get the value under the data coordinates
        try:
            #value = fitsimage.get_data(data_x, data_y)
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            value = fitsimage.get_data(int(data_x + 0.5), int(data_y + 0.5))
        except Exception:
            value = None

        # convert 0-based data coords to 1-based FITS coords
        fits_x, fits_y = data_x + 1, data_y + 1

        # Calculate WCS RA
        try:
            # NOTE: image function operates on DATA space coords
            image = fitsimage.get_image()
            if image is None:
                # No image loaded
                return
            ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
                                               format='str', coords='fits')
        except Exception as e:
            self.logger.warning("Bad coordinate conversion: %s" % (
                str(e)))
            ra_txt = 'BAD WCS'
            dec_txt = 'BAD WCS'

        text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
            ra_txt, dec_txt, fits_x, fits_y, value)
        self.readout.setText(text)
def main(options, args):
    """Create the Qt application and viewer window, optionally loading a
    file given as the first positional argument, then run the event loop
    (blocks until quit).
    """
    if QtHelp.have_pyqt6 or QtHelp.have_pyside6:
        # pin the high-DPI scale-factor rounding policy under Qt6 bindings
        QtGui.QApplication.setHighDpiScaleFactorRoundingPolicy(
            QtCore.Qt.HighDpiScaleFactorRoundingPolicy.Floor)
    app = QtGui.QApplication(args)
    logger = log.get_logger(name="example1", options=options)
    viewer = FitsViewer(logger)
    viewer.resize(524, 540)
    viewer.show()
    app.setActiveWindow(viewer)
    if len(args) > 0:
        viewer.load_file(args[0])
    # NOTE(review): exec_() is the legacy PyQt spelling; PyQt6/PySide6
    # prefer exec() -- presumably the QtHelp shim keeps this working;
    # verify under Qt6 bindings
    app.exec_()
if __name__ == "__main__":
    # Parse command line options
    from argparse import ArgumentParser
    argprs = ArgumentParser()
    argprs.add_argument("--debug", dest="debug", default=False,
                        action="store_true",
                        help="Enter the pdb debugger on main()")
    argprs.add_argument("--profile", dest="profile", action="store_true",
                        default=False,
                        help="Run the profiler on main()")
    log.addlogopts(argprs)
    # parse_known_args: tolerate extra Qt-style arguments in sys.argv
    (options, args) = argprs.parse_known_args(sys.argv[1:])
    # Are we debugging this?
    if options.debug:
        import pdb
        pdb.run('main(options, args)')
    # Are we profiling this?
    elif options.profile:
        import profile
        print(("%s profile:" % sys.argv[0]))
        profile.run('main(options, args)')
    else:
        main(options, args)
# END
| bsd-3-clause | 4f8039c17a79fbdef59a724cc7d5f1a5 | 27.911917 | 76 | 0.583333 | 3.625731 | false | false | false | false |
ejeschke/ginga | ginga/table/TableView.py | 2 | 5949 | #
# TableView.py -- Table viewer for Ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import logging
from collections import OrderedDict
from ginga.table import AstroTable
from ginga.misc import Callback, Settings
from ginga.misc import Bunch
class TableViewBase(Callback.Callbacks):
    """An abstract base class for displaying tables represented by
    astropy table objects.
    Parameters
    ----------
    logger : :py:class:`~logging.Logger` or `None`
        Logger for tracing and debugging. If not given, one will be created.
    settings : `~ginga.misc.Settings.SettingGroup` or `None`
        Viewer preferences. If not given, one will be created.
    """
    # registry metadata: display name and data types this viewer handles
    vname = 'Ginga Table'
    vtypes = [AstroTable.AstroTable]
    @classmethod
    def viewable(cls, dataobj):
        """Test whether `dataobj` is viewable by this viewer."""
        if not isinstance(dataobj, AstroTable.AstroTable):
            return False
        return True
    def __init__(self, logger=None, settings=None):
        Callback.Callbacks.__init__(self)
        if logger is not None:
            self.logger = logger
        else:
            # NOTE(review): constructs a bare Logger not attached to the
            # logging hierarchy; logging.getLogger() would integrate with
            # application logging config -- confirm this is intentional
            self.logger = logging.Logger('TableViewBase')
        # Create settings and set defaults
        if settings is None:
            settings = Settings.SettingGroup(logger=self.logger)
        self.settings = settings
        # for debugging
        self.name = str(self)
        self.settings.add_defaults(color_alternate_rows=True,
                                   max_rows_for_col_resize=5000)
        # For callbacks
        for name in ('table-set', 'configure', ):
            self.enable_callback(name)
    def get_settings(self):
        """Get the settings used by this instance.
        Returns
        -------
        settings : `~ginga.misc.Settings.SettingGroup`
            Settings.
        """
        return self.settings
    def get_logger(self):
        """Get the logger used by this instance.
        Returns
        -------
        logger : :py:class:`~logging.Logger`
            Logger.
        """
        return self.logger
    def set_table(self, table):
        """Set the table to display; fires the 'table-set' callback.

        Raises ValueError if `table` is not an AstroTable.
        """
        if not isinstance(table, AstroTable.AstroTable):
            raise ValueError("Wrong type of object to load: %s" % (
                str(type(table))))
        self._table = table
        self.make_callback('table-set', table)
    def get_table(self):
        """Return the currently loaded table object."""
        return self._table
    # for compatibility with other Ginga viewers
    get_dataobj = get_table
    set_dataobj = set_table
    def initialize_channel(self, fv, channel):
        """The reference viewer calls this method with itself and the channel
        when it is inserted into a channel.
        """
        self.logger.warning("subclass should override this method")
class TableViewGw(TableViewBase):
    """A Ginga viewer for displaying tables of FITS data.
    """
    def __init__(self, logger=None, settings=None):
        super(TableViewGw, self).__init__(logger=logger, settings=settings)
        self.add_callback('table-set', self.set_table_cb)
        from ginga.gw import Widgets
        # Create the viewer as a Treeview widget
        color_alternate = self.settings.get('color_alternate_rows', True)
        self.widget = Widgets.TreeView(auto_expand=True,
                                       sortable=True,
                                       use_alt_row_color=color_alternate)
    def get_widget(self):
        """Return the toolkit widget wrapping this viewer."""
        return self.widget
    def set_table_cb(self, viewer, table):
        """Display the given table object."""
        self.clear()
        tree_dict = OrderedDict()
        # Extract data as astropy table
        a_tab = table.get_data()
        # first column shows the (1-based) row number
        columns = [('Row', '_DISPLAY_ROW')]
        # This is to get around table widget not sorting numbers properly:
        # row numbers become zero-padded fixed-width strings
        i_fmt = '{{0:0{0}d}}'.format(len(str(table.rows)))
        if table.kind == 'table-astropy':
            # Fill masked values, if applicable
            try:
                a_tab = a_tab.filled()
            except Exception:  # Just use original table
                pass
            # Table header with units
            for c in a_tab.columns.values():
                col_str = '{0:^s}\n{1:^s}'.format(c.name, str(c.unit))
                columns.append((col_str, c.name))
            # Table contents
            for i, row in enumerate(a_tab, 1):
                bnch = Bunch.Bunch(zip(row.colnames, row.as_void()))
                i_str = i_fmt.format(i)
                bnch['_DISPLAY_ROW'] = i_str
                tree_dict[i_str] = bnch
        elif table.kind == 'table-fitsio':
            colnames = table.colnames
            # Table header
            for c_name in colnames:
                col_str = '{0:^s}'.format(c_name)
                columns.append((col_str, c_name))
            # Table contents
            for i, row in enumerate(a_tab, 1):
                bnch = Bunch.Bunch(zip(colnames, row))
                i_str = i_fmt.format(i)
                bnch['_DISPLAY_ROW'] = i_str
                tree_dict[i_str] = bnch
        else:
            raise ValueError(f"I don't know how to display tables of type '{table.kind}'")
        self.widget.setup_table(columns, 1, '_DISPLAY_ROW')
        self.widget.set_tree(tree_dict)
        # Resize column widths
        n_rows = len(tree_dict)
        if n_rows < self.settings.get('max_rows_for_col_resize', 5000):
            self.widget.set_optimal_column_widths()
            self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
        tablename = table.get('name', 'NoName')
        self.logger.debug('Displayed {0}'.format(tablename))
    def clear(self):
        """Remove all rows from the tree widget."""
        self.widget.clear()
    def initialize_channel(self, fv, channel):
        # no housekeeping to do (for now) on our part, just override to
        # suppress the logger warning
        pass
| bsd-3-clause | cb3e6a3ce1843ceea64983bf4baa9da9 | 29.507692 | 90 | 0.575895 | 4.057981 | false | false | false | false |
ejeschke/ginga | ginga/rv/plugins/MultiDim.py | 3 | 25480 | #
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
A plugin to navigate HDUs in a FITS file or planes in a 3D cube or
higher dimension dataset.
**Plugin Type: Local**
``MultiDim`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
``MultiDim`` is a plugin designed to handle data cubes and multi-HDU FITS
files. If you have opened such an image in Ginga, starting this plugin
will enable you to browse to other slices of the cube or view other
HDUs.
For a data cube, you can save a slice as an image using the "Save Slice"
button or create a movie using the "Save Movie" button by entering the
"Start" and "End" slice indices. This feature requires ``mencoder`` to be
installed.
For a FITS table, its data are read in using Astropy table.
Column units are displayed right under the main header ("None" if no unit).
For masked columns, masked values are replaced with pre-defined fill values.
**Browsing HDUs**
Use the HDU drop down list in the upper part of the UI to browse and
select an HDU to open in the channel.
**Navigating Cubes**
Use the controls in the lower part of the UI to select the axis and
to step through the planes in that axis.
**User Configuration**
"""
import time
import re
import os
import shutil
from contextlib import contextmanager
# NOTE: distutils was removed from the stdlib in Python 3.12 (PEP 632);
# shutil.which is the supported replacement for spawn.find_executable
from distutils import spawn

import numpy as np

from ginga.gw import Widgets
from ginga.misc import Future
from ginga import GingaPlugin
from ginga.util import iohelper
from ginga.util.videosink import VideoSink
# Detect the external "mencoder" tool, which enables the movie-saving UI.
# shutil.which replaces distutils.spawn.find_executable, which was removed
# together with distutils in Python 3.12 (PEP 632).
have_mencoder = shutil.which("mencoder") is not None
__all__ = ['MultiDim']
class MultiDim(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Initialize plugin state; `fv` is the reference viewer shell,
        `fitsimage` the channel viewer this local plugin serves.
        """
        # superclass defines some variables for us, like logger
        super(MultiDim, self).__init__(fv, fitsimage)
        # format of one entry in the HDU chooser; the first 4 characters
        # hold the numeric index, which set_hdu_cb() parses back out
        self._toc_fmt = "%(index)4d %(name)-12.12s (%(extver)3d) %(htype)-12.12s %(dtype)-8.8s"
        self.curhdu = 0
        self.naxispath = []
        self.name_pfx = 'NONAME'
        self.img_path = None
        self.img_name = None
        self.file_obj = None
        self.orientation = 'vertical'
        # For animation feature
        self.play_image = None
        self.play_axis = 2
        self.play_idx = 0
        self.play_max = 0
        self.play_int_sec = 0.1
        self.play_min_sec = 1.0 / 30
        self.play_last_time = 0.0
        self.play_fps = 0
        self.timer = fv.get_timer()
        self.timer.set_callback('expired', self._play_next_cb)
        # Load plugin preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.create_category('plugin_MultiDim')
        self.settings.add_defaults(sort_keys=['index'],
                                   sort_reverse=False)
        self.settings.load(onError='silent')
        self.gui_up = False
    def build_gui(self, container):
        """Construct the plugin UI inside `container`."""
        top = Widgets.VBox()
        top.set_border_width(4)
        vbox, sw, orientation = Widgets.get_oriented_box(container,
                                                         scrolled=True,
                                                         aspect=3.0,
                                                         orientation=self.settings.get('orientation', None))
        self.orientation = orientation
        vbox.set_border_width(4)
        vbox.set_spacing(2)
        # HDU chooser section
        fr = Widgets.Frame("HDU")
        vb1 = Widgets.VBox()
        captions = [("Num HDUs:", 'label', "Num HDUs", 'llabel'),
                    ]
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.numhdu = b.num_hdus
        self.w.update(b)
        vb1.add_widget(w)
        captions = [("Choose HDU", 'combobox'),
                    ]
        w, b = Widgets.build_info(captions, orientation=orientation)
        vb1.add_widget(w)
        self.w.hdu = b.choose_hdu
        self.w.hdu.set_tooltip("Choose which HDU to view")
        self.w.hdu.add_callback('activated', self.set_hdu_cb)
        fr.set_widget(vb1)
        vbox.add_widget(fr, stretch=0)
        # this frame is (re)populated later by build_naxis()
        fr = Widgets.Frame("NAXIS (data cubes)")
        self.naxisfr = fr
        vbox.add_widget(fr, stretch=0)
        # playback transport buttons; enabled only for data cubes
        tbar = Widgets.Toolbar(orientation='horizontal')
        for name, actn, cb in (
                ('first', 'first', lambda w: self.first_slice()),
                ('last', 'last', lambda w: self.last_slice()),
                ('reverse', 'prev', lambda w: self.prev_slice()),
                ('forward', 'next', lambda w: self.next_slice()),
                ('play', 'play', lambda w: self.play_start()),
                ('stop', 'stop', lambda w: self.play_stop()), ):
            iconpath = os.path.join(self.fv.iconpath, "%s_48.png" % name)
            btn = tbar.add_action(None, iconpath=iconpath)
            self.w[actn] = btn
            btn.set_enabled(False)
            btn.set_tooltip(actn)
            btn.add_callback('activated', cb)
        vbox.add_widget(tbar, stretch=0)
        captions = [("Interval:", 'label', "Interval", 'spinfloat',
                     "fps", 'llabel'),
                    ]
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        lower, upper = self.play_min_sec, 8.0
        b.interval.set_limits(lower, upper, incr_value=0.01)
        b.interval.set_value(self.play_int_sec)
        b.interval.set_decimals(2)
        b.interval.add_callback('value-changed', self.play_int_cb)
        b.interval.set_enabled(False)
        vbox.add_widget(w, stretch=0)
        captions = [("Slice:", 'label', "Slice", 'llabel'),
                    # ("Value:", 'label', "Value", 'llabel'),
                    ("Save Slice", 'button'),
                    ]
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.save_slice.add_callback('activated', lambda w: self.save_slice_cb())
        b.save_slice.set_enabled(False)
        b.save_slice.set_tooltip("Save current slice as RGB image")
        vbox.add_widget(w, stretch=0)
        # movie controls appear only if mencoder is available
        fr = Widgets.Frame("Movie")
        if have_mencoder:
            captions = [("Start:", 'label', "Start Slice", 'entry',
                         "End:", 'label', "End Slice", 'entry',
                         'Save Movie', 'button')]
            w, b = Widgets.build_info(captions, orientation=orientation)
            self.w.update(b)
            b.start_slice.set_tooltip("Starting slice")
            b.end_slice.set_tooltip("Ending slice")
            b.start_slice.set_length(6)
            b.end_slice.set_length(6)
            b.save_movie.add_callback(
                'activated', lambda w: self.save_movie_cb())
            b.save_movie.set_enabled(False)
            fr.set_widget(w)
        else:
            infolbl = Widgets.Label()
            infolbl.set_text("Please install 'mencoder' to save as movie")
            fr.set_widget(infolbl)
        vbox.add_widget(fr, stretch=0)
        # spacer = Widgets.Label('')
        # vbox.add_widget(spacer, stretch=1)
        top.add_widget(sw, stretch=1)
        # bottom button row
        btns = Widgets.HBox()
        btns.set_spacing(4)
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn)
        btn = Widgets.Button("Help")
        btn.add_callback('activated', lambda w: self.help())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)
        container.add_widget(top, stretch=1)
        self.gui_up = True
    def set_hdu_cb(self, w, val):
        """Combobox callback: load the HDU selected in the chooser."""
        # Get actual HDU index, which might be different from combobox order.
        # The first 4 characters of the TOC entry hold the index (_toc_fmt).
        toc_ent = w.get_alpha(val)
        idx = max(0, int(toc_ent[:4]))
        try:
            self.set_hdu(idx)
        except Exception as e:
            self.logger.error("Error loading HDU #{}: {}".format(
                idx + 1, str(e)))
def set_naxis_cb(self, widget, idx, n):
play_idx = int(idx) - 1
self.set_naxis(play_idx, n)
    def build_naxis(self, dims, image):
        """(Re)build the NAXIS slider controls for `image`.

        `dims` holds the axis lengths; dims[0]/dims[1] are the image
        width/height (see redo()), higher entries are cube dimensions.
        """
        self.naxispath = list(image.naxispath)
        ndims = len(dims)
        # build a vbox of NAXIS controls
        captions = [("NAXIS1:", 'label', 'NAXIS1', 'llabel'),
                    ("NAXIS2:", 'label', 'NAXIS2', 'llabel')]
        for n in range(2, ndims):
            key = 'naxis%d' % (n + 1)
            title = key.upper()
            maxn = int(dims[n])
            self.logger.debug("NAXIS%d=%d" % (n + 1, maxn))
            if maxn <= 1:
                # degenerate axis: label only, no slider
                captions.append((title + ':', 'label', title, 'llabel'))
            else:
                captions.append((title + ':', 'label', title, 'llabel',
                                 "Choose %s" % (title), 'hscale'))
        # Remove old naxis widgets
        for key in self.w:
            if key.startswith('choose_'):
                self.w[key] = None
        hbox = Widgets.HBox()
        if ndims > 3:  # only add radiobuttons if we have more than 3 dim
            group = None
            for i in range(2, ndims):
                title = 'AXIS%d' % (i + 1)
                btn = Widgets.RadioButton(title, group=group)
                if group is None:
                    group = btn
                hbox.add_widget(btn)
                self.w[title.lower()] = btn
        w, b = Widgets.build_info(captions, orientation=self.orientation)
        self.w.update(b)
        vbox = Widgets.VBox()
        vbox.add_widget(w)
        vbox.add_widget(hbox)
        for n in range(0, ndims):
            key = 'naxis%d' % (n + 1)
            lbl = b[key]
            maxn = int(dims[n])
            lbl.set_text("%d" % maxn)
            slkey = 'choose_' + key
            if slkey in b:
                # configure the slider for this axis (1-based for the user)
                slider = b[slkey]
                lower = 1
                upper = maxn
                slider.set_limits(lower, upper, incr_value=1)
                text = self.naxispath[n - 2] + 1
                if np.isscalar(text):
                    slider.set_value(text)
                else:
                    slider.set_value(text[n - 2])
                slider.set_tracking(True)
                # slider.set_digits(0)
                # slider.set_wrap(True)
                slider.add_callback('value-changed', self.set_naxis_cb, n)
            # Disable playback if there is only 1 slice in the higher dimension
            if n > 2 and dims[n] == 1:
                radiobutton = self.w['axis%d' % (n + 1)]
                radiobutton.set_enabled(False)
        # Add vbox of naxis controls to gui
        self.naxisfr.set_widget(vbox)
        # for storing play_idx for each dim of image. used for going back to
        # the idx where you left off.
        self.play_indices = ([0 for i in range(ndims - 2)] if ndims > 3
                             else None)
        if ndims > 3:
            # dims only exists in here, hence this function exists here
            def play_axis_change_func_creator(n):
                # widget callable needs (widget, value) args
                def play_axis_change():
                    # remember where we were on the old axis, then switch
                    self.play_indices[self.play_axis - 2] = self.play_idx % dims[self.play_axis]  # noqa
                    self.play_axis = n
                    self.logger.debug("play_axis changed to %d" % n)
                    if self.play_axis < ndims:
                        self.play_max = dims[self.play_axis] - 1
                    self.play_idx = self.play_indices[n - 2]
                    self.fv.gui_do(self.set_naxis, self.play_idx,
                                   self.play_axis)
                def check_if_we_need_change(w, v):
                    if self.play_axis is not n:
                        play_axis_change()
                return check_if_we_need_change
            for n in range(2, ndims):
                key = 'axis%d' % (n + 1)
                self.w[key].add_callback(
                    'activated', play_axis_change_func_creator(n))
                if n == 2:
                    self.w[key].set_state(True)
        # is_dc: True if this is a data cube (3 or more dimensions)
        is_dc = ndims > 2
        self.play_axis = 2
        if self.play_axis < ndims:
            self.play_max = dims[self.play_axis] - 1
        if is_dc:
            self.play_idx = self.naxispath[self.play_axis - 2]
        else:
            self.play_idx = 0
        if self.play_indices:
            text = [i + 1 for i in self.naxispath]
        else:
            text = self.play_idx + 1
        self.w.slice.set_text(str(text))
        # Enable or disable NAXIS animation controls
        self.w.next.set_enabled(is_dc)
        self.w.prev.set_enabled(is_dc)
        self.w.first.set_enabled(is_dc)
        self.w.last.set_enabled(is_dc)
        self.w.play.set_enabled(is_dc)
        self.w.stop.set_enabled(is_dc)
        self.w.interval.set_enabled(is_dc)
        self.w.save_slice.set_enabled(is_dc)
        if have_mencoder:
            self.w.save_movie.set_enabled(is_dc)
    def close(self):
        """Close this plugin instance (Close button callback)."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True
    def start(self):
        """Plugin hook: called when the plugin is started."""
        self.resume()
def pause(self):
self.play_stop()
pass
    def resume(self):
        """Plugin hook: re-sync the GUI with the current image."""
        self.redo()
    def stop(self):
        """Plugin hook: tear down state when the plugin is stopped."""
        self.gui_up = False
        self.play_stop()
        if self.file_obj is not None:
            try:
                self.file_obj.close()
            except Exception:
                # best-effort close; the opener may already be closed
                pass
            self.file_obj = None
        self.img_path = None
        self.img_name = None
        self.fv.show_status("")
    def set_hdu(self, idx):
        """Load HDU number `idx` (0-based) into the channel, reusing the
        channel's cached copy if one exists.
        """
        self.logger.debug("Loading index #%d" % (idx))
        # determine canonical index of this HDU
        info_dct = self.file_obj.get_directory()
        info = self.file_obj.get_info_idx(idx)
        aidx = (info.name, info.extver)
        if aidx not in info_dct:
            aidx = idx
        sfx = iohelper.get_hdu_suffix(aidx)
        # See if this HDU is still in the channel's datasrc
        imname = self.name_pfx + sfx
        chname = self.chname
        chinfo = self.channel
        if imname in chinfo.datasrc:
            # still cached -- just switch the channel to it
            self.curhdu = idx
            image = chinfo.datasrc[imname]
            self.fv.switch_name(chname, imname)
            return
        # Nope, we'll have to load it
        self.logger.debug("Index %d not in memory; refreshing from file" % (idx))
        def _load_idx(image):
            # completion callback: register the freshly loaded HDU
            try:
                # create a future for reconstituting this HDU
                future = Future.Future()
                future.freeze(self.fv.load_image, self.img_path, idx=aidx)
                image.set(path=self.img_path, idx=aidx, name=imname,
                          image_future=future)
                self.fv.add_image(imname, image, chname=chname)
                self.curhdu = idx
                self.logger.debug("HDU #%d loaded." % (idx))
            except Exception as e:
                errmsg = "Error loading FITS HDU #%d: %s" % (
                    idx, str(e))
                self.logger.error(errmsg)
                self.fv.show_error(errmsg, raisetab=True)
        self.file_obj.load_idx_cont(idx, _load_idx)
    def set_naxis(self, idx, n):
        """Change the slice shown in the channel viewer.
        `idx` is the slice index (0-based); `n` is the axis (0-based)
        """
        self.play_idx = idx
        self.logger.debug("naxis %d index is %d" % (n + 1, idx + 1))
        image = self.fitsimage.get_image()
        slidername = 'choose_naxis%d' % (n + 1)
        try:
            if image is None:
                raise ValueError("Please load an image cube")
            # naxispath stores only the cube axes, so axis n maps to m=n-2
            m = n - 2
            self.naxispath[m] = idx
            self.logger.debug("m=%d naxispath=%s" % (m, str(self.naxispath)))
            image.set_naxispath(self.naxispath)
            self.logger.debug("NAXIS%d slice %d loaded." % (n + 1, idx + 1))
            if self.play_indices:
                # save play index
                self.play_indices[m] = idx
                text = [i + 1 for i in self.naxispath]
                if slidername in self.w:
                    self.w[slidername].set_value(text[m])
            else:
                text = idx + 1
                if slidername in self.w:
                    self.w[slidername].set_value(text)
            self.w.slice.set_text(str(text))
            # schedule a redraw
            self.fitsimage.redraw(whence=0)
        except Exception as e:
            errmsg = "Error loading NAXIS%d slice %d: %s" % (
                n + 1, idx + 1, str(e))
            self.logger.error(errmsg)
            self.fv.error(errmsg)
    def play_start(self):
        """Begin animated playback along the current play axis."""
        image = self.fitsimage.get_image()
        if image is None:
            return
        self.play_image = image
        self._isplaying = True
        # suppress per-slice 'modified' callbacks during playback;
        # play_stop() re-enables them
        image.block_callback('modified')
        self.play_last_time = time.time()
        self.play_next(self.timer)
    def _play_next_cb(self, timer):
        # this is the playback timer callback
        # timer is run in a non-gui thread
        self.fv.gui_do(self.play_next, timer)
    def play_next(self, timer):
        """Advance playback one slice and re-arm the timer."""
        if not self._isplaying:
            # callback after user stopped playback
            return
        image = self.fitsimage.get_image()
        if image is None:
            self.play_stop()
            return
        time_start = time.time()
        deadline = time_start + self.play_int_sec
        # calculate fps
        dt = abs(time_start - self.play_last_time)
        fps = 1.0 / dt if not np.isclose(dt, 0) else 1.0
        self.play_last_time = time_start
        # update the fps readout only when the integer part changes
        if int(fps) != int(self.play_fps):
            self.play_fps = fps
            self.w.fps.set_text(str("%.2f fps" % fps))
        self.next_slice()
        # set timer for next turnaround
        delta = deadline - time.time()
        timer.set(max(delta, 0.001))
    def play_stop(self):
        """Stop playback and restore the image's 'modified' callback."""
        self._isplaying = False
        if self.play_image is not None:
            self.play_image.unblock_callback('modified')
            self.play_image = None
    def first_slice(self):
        """Jump to the first slice along the current play axis."""
        play_idx = 0
        self.fv.gui_do(self.set_naxis, play_idx, self.play_axis)
    def last_slice(self):
        """Jump to the last slice along the current play axis."""
        play_idx = self.play_max
        self.fv.gui_do(self.set_naxis, play_idx, self.play_axis)
    def prev_slice(self):
        """Step one slice backward, wrapping to the last slice at 0."""
        if np.isscalar(self.play_idx):
            play_idx = self.play_idx - 1
        else:
            # multi-axis case: play_idx is a list; pick the current axis
            m = self.play_axis - 2
            play_idx = self.play_idx[m] - 1
        if play_idx < 0:
            play_idx = self.play_max
        self.fv.gui_do(self.set_naxis, play_idx, self.play_axis)
    def next_slice(self):
        """Step one slice forward, wrapping to slice 0 past the end."""
        if np.isscalar(self.play_idx):
            play_idx = self.play_idx + 1
        else:
            # multi-axis case: play_idx is a list; pick the current axis
            m = self.play_axis - 2
            play_idx = self.play_idx[m] + 1
        if play_idx > self.play_max:
            play_idx = 0
        self.fv.gui_do(self.set_naxis, play_idx, self.play_axis)
    def play_int_cb(self, w, val):
        """Interval spinner callback; `val` is seconds per frame."""
        # force at least play_min_sec, otherwise playback is untenable
        self.play_int_sec = max(self.play_min_sec, val)
    def prep_hdu_menu(self, w, hdu_info):
        """Repopulate the HDU chooser `w` from `hdu_info`, sorted per the
        user's 'sort_keys'/'sort_reverse' settings.
        """
        # clear old TOC
        w.clear()
        # User sort settings
        sort_keys = self.settings.get('sort_keys', ['index'])
        sort_reverse = self.settings.get('sort_reverse', False)
        sorted_hdu_info = sorted(hdu_info,
                                 key=lambda x: [x[key] for key in sort_keys],
                                 reverse=sort_reverse)
        for idx, d in enumerate(sorted_hdu_info):
            toc_ent = self._toc_fmt % d
            w.append_text(toc_ent)
        # idx = w.get_index()
        # if idx < 0:
        #     idx = 0
        # if idx >= len(hdu_info):
        #     idx = len(hdu_info) - 1
        # w.set_index(idx)
    def redo(self):
        """Called when an image is set in the channel."""
        image = self.channel.get_current_image()
        if image is None:
            return True
        path = image.get('path', None)
        if path is None:
            self.fv.show_error(
                "Cannot open image: no value for metadata key 'path'")
            return
        # TODO: How to properly reset GUI components?
        # They are still showing info from prev FITS.
        # No-op for ASDF
        if path.endswith('.asdf'):
            return True
        if path != self.img_path:
            # <-- New file is being looked at
            self.img_path = path
            # close previous file opener, if any
            if self.file_obj is not None:
                try:
                    self.file_obj.close()
                except Exception:
                    pass
            self.file_obj = image.io
            # TODO: specify 'readonly' somehow?
            self.file_obj.open_file(path)
            hdu_dct = self.file_obj.get_directory()
            upper = len(self.file_obj) - 1
            # NOTE: make a set of values, because some values will be in
            # multiple times if known by several indexes
            self.prep_hdu_menu(self.w.hdu, list(set(hdu_dct.values())))
            self.num_hdu = upper
            self.logger.debug("there are %d hdus" % (upper + 1))
            self.w.numhdu.set_text("%d" % (upper + 1))
            self.w.hdu.set_enabled(len(self.file_obj) > 0)
        else:
            # same file as before; just refresh the directory
            hdu_dct = self.file_obj.get_directory()
        name = image.get('name', iohelper.name_image_from_path(path))
        idx = image.get('idx', None)
        # remove index designation from root of name, if any
        match = re.match(r'^(.+)\[(.+)\]$', name)
        if match:
            name = match.group(1)
        self.name_pfx = name
        htype = None
        if idx is not None:
            # set the HDU in the drop down if known
            info = hdu_dct.get(idx, None)
            if info is not None:
                htype = info.htype.lower()
                toc_ent = self._toc_fmt % info
                self.w.hdu.show_text(toc_ent)
        # rebuild the NAXIS controls, if necessary
        wd, ht = image.get_size()
        # No two images in the same channel can have the same name.
        # Here we keep track of the name to decide if we need to rebuild
        if self.img_name != name:
            self.img_name = name
            dims = [wd, ht]
            data = image.get_data()
            if data is None:
                # <- empty data part to this HDU
                self.logger.warning("Empty data part in HDU %s" % (str(idx)))
            elif htype in ('bintablehdu', 'tablehdu',):
                pass
            elif htype not in ('imagehdu', 'primaryhdu', 'compimagehdu'):
                self.logger.warning("HDU %s is not an image (%s)" % (
                    str(idx), htype))
            else:
                # true image HDU: use its full dimensionality
                dims = list(image.axisdim)
                dims.reverse()
            self.build_naxis(dims, image)
    def save_slice_cb(self):
        """Save the currently displayed slice as a grayscale PNG."""
        import matplotlib.pyplot as plt
        w = Widgets.SaveDialog(title='Save slice',
                               selectedfilter='*.png')
        target = w.get_path()
        if target is None:
            # save canceled
            return
        with open(target, 'wb') as target_file:
            # clip display to the current high cut level
            hival = self.fitsimage.get_cut_levels()[1]
            image = self.fitsimage.get_image()
            curr_slice_data = image.get_data()
            plt.imsave(target_file, curr_slice_data, vmax=hival,
                       cmap=plt.get_cmap('gray'), origin='lower')
        self.fv.show_status("Successfully saved slice")
    def save_movie_cb(self):
        """Validate the 1-based start/end slice entries and save a movie.

        NOTE(review): int('') raises an uncaught ValueError when a field
        is left empty, and since save_movie() iterates range(start, end)
        the user's chosen end slice appears to be excluded -- confirm
        whether the end bound is intended to be inclusive.
        """
        start = int(self.w.start_slice.get_text())
        end = int(self.w.end_slice.get_text())
        if not start or not end:
            return
        elif start < 1 or end > (self.play_max + 1):
            self.fv.show_status("Wrong slice index")
            return
        elif start > end:
            self.fv.show_status("Wrong slice order")
            return
        # convert to 0-based indices
        start, end = start - 1, end - 1
        w = Widgets.SaveDialog(title='Save Movie',
                               selectedfilter='*.avi')
        target = w.get_path()
        if target is not None:
            self.save_movie(start, end, target)
    def save_movie(self, start, end, target_file):
        """Write slices [start, end) of the cube to `target_file` as a
        movie, rescaled to 8-bit using the current cut levels.
        """
        image = self.fitsimage.get_image()
        loval, hival = self.fitsimage.get_cut_levels()
        data = np.array(image.get_mddata()).clip(loval, hival)
        # http://stackoverflow.com/questions/7042190/plotting-directly-to-movie-with-numpy-and-mencoder
        data_rescaled = ((data - loval) * 255 / (hival - loval)).astype(
            np.uint8, copy=False)
        W, H = image.get_data_size()
        with self.video_writer(VideoSink((H, W), target_file)) as video:
            for i in range(start, end):
                # flip vertically: FITS row order vs. video frame order
                video.write(np.flipud(data_rescaled[i]))
        self.fv.show_status("Successfully saved movie")
@contextmanager
def video_writer(self, v):
v.open()
try:
yield v
finally:
v.close()
return
    def __str__(self):
        # name under which the reference viewer registers this plugin;
        # must be lowercase
        return 'multidim'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example # noqa
if __doc__ is not None:
__doc__ += generate_cfg_example('plugin_MultiDim', package='ginga')
# END
| bsd-3-clause | 30d2acfc009c8ff92f7cb6684b896b39 | 32.837981 | 108 | 0.533713 | 3.615723 | false | false | false | false |
ejeschke/ginga | ginga/opengl/geometry_helper.py | 3 | 8388 | #
# geometry_helper.py -- help module for Ginga OpenGL camera
#
# Credit:
# Modified from code written by M. McGuffin
# http://profs.etsmtl.ca/mmcguffin/code/python/example-3D_Python-Qt-OpenGL/
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
class Point3D(object):
    """A location in 3-space.

    Coordinates are kept in a length-3 float32 numpy array; the x/y/z
    properties expose the individual components.
    """

    def __init__(self, x=0, y=0, z=0):
        self.coord = np.array([x, y, z], dtype=np.float32)

    @property
    def x(self):
        return self.coord[0]

    @property
    def y(self):
        return self.coord[1]

    @property
    def z(self):
        return self.coord[2]

    def __repr__(self):
        return "Point3D(%f, %f, %f)" % tuple(self.coord)

    def __str__(self):
        return "P(%f, %f, %f)" % tuple(self.coord)

    def get(self):
        """Return the underlying numpy array (shared, not a copy)."""
        return self.coord

    def copy(self):
        """Return an independent point with the same coordinates."""
        return Point3D(*self.coord)

    def as_Vector3D(self):
        """Reinterpret this point as a displacement vector."""
        return Vector3D(*self.coord)

    def distance(self, other):
        """Euclidean distance from this point to `other`."""
        return (other - self).length()

    def average(self, other):
        """Return the midpoint of this point and `other`."""
        return Point3D((self.x + other.x) * 0.5,
                       (self.y + other.y) * 0.5,
                       (self.z + other.z) * 0.5)

    def __add__(self, other):
        # point + (point or vector) -> point
        return Point3D(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        # point - vector -> point; point - point -> vector
        dx, dy, dz = (self.x - other.x, self.y - other.y, self.z - other.z)
        if isinstance(other, Vector3D):
            return Point3D(dx, dy, dz)
        return Vector3D(dx, dy, dz)

    def __eq__(self, other):
        return (self.x == other.x and self.y == other.y
                and self.z == other.z)

    def __ne__(self, other):
        return not (self == other)
class Vector3D(object):
    """A displacement vector in 3-space, stored as a length-3 float32
    numpy array.

    Supports + and - (with vectors or points), unary -, * (dot product
    with another Vector3D, or scalar scaling), / (scalar division) and
    ^ (cross product).
    """

    def __init__(self, x=0, y=0, z=0):
        self.coord = np.array([x, y, z], dtype=np.float32)

    @property
    def x(self):
        return self.coord[0]

    @property
    def y(self):
        return self.coord[1]

    @property
    def z(self):
        return self.coord[2]

    def __repr__(self):
        return "Vector3D(%f, %f, %f)" % tuple(self.coord)

    def __str__(self):
        return "V(%f, %f, %f)" % tuple(self.coord)

    def get(self):
        """Return the underlying numpy array (shared, not a copy)."""
        return self.coord

    def copy(self):
        """Return an independent vector with the same components."""
        return Vector3D(self.x, self.y, self.z)

    def as_Point3D(self):
        """Reinterpret this vector as a point."""
        return Point3D(self.x, self.y, self.z)

    def length_squared(self):
        return self.x * self.x + self.y * self.y + self.z * self.z

    def length(self):
        return np.sqrt(self.length_squared())

    def normalized(self):
        """Return a unit-length copy; a zero vector is returned unchanged."""
        mag = self.length()
        if mag > 0:
            return Vector3D(self.x / mag, self.y / mag, self.z / mag)
        return self.copy()

    def __neg__(self):
        return Vector3D(-self.x, -self.y, -self.z)

    def __add__(self, other):
        # vector + point -> point; vector + vector -> vector
        if isinstance(other, Point3D):
            return Point3D(self.x + other.x, self.y + other.y, self.z + other.z)
        return Vector3D(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        return Vector3D(self.x - other.x, self.y - other.y, self.z - other.z)

    def __mul__(self, other):
        if isinstance(other, Vector3D):
            # dot product
            return self.x * other.x + self.y * other.y + self.z * other.z
        # scalar product
        return Vector3D(self.x * other, self.y * other, self.z * other)

    def __rmul__(self, other):
        return self * other

    def __truediv__(self, other):
        # BUG FIX: this was defined as __div__, the Python 2 protocol
        # name, which Python 3 never calls -- so `v / s` raised
        # TypeError.  __truediv__ is the Python 3 protocol; __div__ is
        # kept below as an alias for any direct callers.
        return Vector3D(self.x / other, self.y / other, self.z / other)

    __div__ = __truediv__

    def __xor__(self, other):  # cross product
        return Vector3D(
            self.y * other.z - self.z * other.y,
            self.z * other.x - self.x * other.z,
            self.x * other.y - self.y * other.x)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z

    def __ne__(self, other):
        return not (self == other)
class Matrix4x4(object):
    def __init__(self, arr4x4=None):
        """Create a 4x4 matrix.

        With no argument, the identity matrix; otherwise `arr4x4` may be
        any 16-element (flat or 4x4) array-like, which is reshaped.
        Note that np.asarray does not copy an existing ndarray.
        """
        if arr4x4 is None:
            self.m = np.eye(4)
        else:
            self.m = np.asarray(arr4x4).reshape((4, 4))
    def __str__(self):
        # delegate to numpy's array formatting
        return str(self.m)
    @property
    def f(self):
        """Flat (length-16) view of the matrix; supports indexed and
        slice assignment (e.g. M.f[12:15] = ...).
        """
        return self.m.flat
    def get(self):
        """Return the underlying 4x4 numpy array (shared, not a copy)."""
        return self.m
def copy(self):
return Matrix4x4(self.m)
    def set_to_identity(self):
        """Reset this matrix to the identity in place."""
        self.m = np.eye(4)
    @staticmethod
    def translation(vector3D):
        """Return a matrix translating by `vector3D`.

        The translation components are written into flat slots 12-14,
        i.e. the last row of the row-major storage -- the matrices here
        are kept OpenGL-style, transposed from the textbook convention.
        """
        M = Matrix4x4()
        M.f[12:15] = vector3D.coord
        return M
@staticmethod
def rotation_around_origin(angle_rad, axis_vector):
# Note: assumes axis_vector is normalized
c = np.cos(angle_rad)
s = np.sin(angle_rad)
one_minus_c = 1 - c
c0 = c + one_minus_c * axis_vector.x * axis_vector.x
c1 = one_minus_c * axis_vector.x * axis_vector.y
c2 = one_minus_c * axis_vector.x * axis_vector.z
c3 = c + one_minus_c * axis_vector.y * axis_vector.y
c4 = one_minus_c * axis_vector.y * axis_vector.z
c5 = c + one_minus_c * axis_vector.z * axis_vector.z
xs = axis_vector.x * s
ys = axis_vector.y * s
zs = axis_vector.z * s
M = Matrix4x4((c0, c1 + zs, c2 - ys, 0.0,
c1 - zs, c3, c4 + xs, 0.0,
c2 + ys, c4 - xs, c5, 0.0,
0.0, 0.0, 0.0, 1.0))
return M
@staticmethod
def rotation(angle_rad, axis_vector, origin_point):
v = origin_point.as_Vector3D()
return Matrix4x4.translation(v) * Matrix4x4.rotation_around_origin(angle_rad, axis_vector) * Matrix4x4.translation(- v)
@staticmethod
def uniform_scale_around_origin(scale_factor):
M = Matrix4x4()
M.m *= scale_factor
M.f[15] = 1.0
return M
@staticmethod
def uniform_scale(scale_factor, origin_point):
v = origin_point.as_Vector3D()
return Matrix4x4.translation(v) * Matrix4x4.uniform_scale_around_origin(scale_factor) * Matrix4x4.translation(- v)
@staticmethod
def look_at(eye_point, target_point, up_vector, is_inverted):
# step one: generate a rotation matrix
z = (eye_point - target_point).normalized()
y = up_vector
x = y ^ z # cross product
y = z ^ x # cross product
# Cross product gives area of parallelogram, which is < 1 for
# non-perpendicular unit-length vectors; so normalize x and y.
x = x.normalized()
y = y.normalized()
if is_inverted:
# the rotation matrix
M = Matrix4x4((x.x, x.y, x.z, 0.0,
y.x, y.y, y.z, 0.0,
z.x, z.y, z.z, 0.0,
0.0, 0.0, 0.0, 1.0))
# step two: premultiply by a translation matrix
return Matrix4x4.translation(eye_point.as_Vector3D()) * M
else:
# the rotation matrix
M = Matrix4x4((x.x, y.x, z.x, 0.0,
x.y, y.y, z.y, 0.0,
x.z, y.z, z.z, 0.0,
0.0, 0.0, 0.0, 1.0))
# step two: postmultiply by a translation matrix
return M * Matrix4x4.translation(- eye_point.as_Vector3D())
def __mul__(self, b):
a = self
if isinstance(b, Matrix4x4):
M = Matrix4x4(np.dot(b.m, a.m))
return M
elif isinstance(b, Vector3D):
# We treat the vector as if its (homogeneous) 4th component were zero.
return Vector3D(
a.f[0] * b.x + a.f[4] * b.y + a.f[8] * b.z, # + a.f[12]*b.w(),
a.f[1] * b.x + a.f[5] * b.y + a.f[9] * b.z, # + a.f[13]*b.w(),
a.f[2] * b.x + a.f[6] * b.y + a.f[10] * b.z # + a.f[14]*b.w(),
# a.f[3] * b.x + a.f[7] * b.y + a.f[11] * b.z + a.f[15]*b.w()
)
elif isinstance(b, Point3D):
# We treat the point as if its (homogeneous) 4th component were one.
return Point3D(
a.f[0] * b.x + a.f[4] * b.y + a.f[8] * b.z + a.f[12],
a.f[1] * b.x + a.f[5] * b.y + a.f[9] * b.z + a.f[13],
a.f[2] * b.x + a.f[6] * b.y + a.f[10] * b.z + a.f[14]
)
# END
| bsd-3-clause | 93c610e8f31a7793d553cfc6d8526625 | 28.639576 | 127 | 0.517763 | 3.008608 | false | false | false | false |
ejeschke/ginga | ginga/util/rgb_cms.py | 3 | 13759 | #
# rgb_cms.py -- RGB color management handling.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os
import glob
import hashlib
import tempfile
import numpy as np
from ginga import trcalc
from ginga.misc import Bunch
from ginga.util.toolbox import PIL_LT_9_1
from . import paths
from PIL import Image
# How about color management (ICC profile) support?
have_cms = False
have_pil_lcms = False
try:
import PIL.ImageCms as ImageCms
have_pil_lcms = True
have_cms = True
except ImportError:
pass
basedir = paths.ginga_home
# Holds profiles
profile_dict = {}
rendering_intent = 'perceptual'
intents = dict(perceptual=0)
# The working profile, if there is one
working_profile = None
# Holds transforms
icc_transform_dict = {}
class ColorManagerError(Exception):
pass
class ColorManager:
def __init__(self, logger):
self.logger = logger
def can_profile(self):
return have_cms
def profile_to_working_numpy(self, image_np, kwds, intent=None):
# If we have a working color profile then handle any embedded
# profile or color space information, if possible
if not have_cms:
self.logger.info(
"No CMS is installed; leaving image unprofiled.")
return image_np
if not have_profile(working_profile):
self.logger.info(
"No working profile defined; leaving image unprofiled.")
return image_np
out_profile = profile_dict[working_profile].name
if not os.path.exists(profile_dict[out_profile].path):
self.logger.info(
"Working profile '%s' (%s) not found; leaving image "
"unprofiled." % (out_profile, profile_dict[out_profile].path))
return image_np
if image_np.dtype != np.uint8:
## image_np = trcalc.array_convert(image_np, np.dtype(np.uint8))
self.logger.info(
"Cannot profile >8 bpp images; leaving image unprofiled.")
return image_np
if intent is None:
intent = rendering_intent
# Assume sRGB image, unless we learn to the contrary
in_profile = 'sRGB'
try:
if 'icc_profile' in kwds:
self.logger.info("image has embedded color profile")
buf_profile = kwds['icc_profile']
# NOTE: is there a way to get a name for this embedded profile?
# make up a unique name
in_profile = hashlib.md5(buf_profile).hexdigest() # nosec
# Write out embedded profile (if needed)
if in_profile not in profile_dict:
_fd, path = tempfile.mkstemp(suffix=".icc",
prefix="_image_{}_".format(in_profile))
with os.fdopen(_fd, 'wb') as icc_f:
icc_f.write(buf_profile)
profile_dict[in_profile] = Bunch.Bunch(name=in_profile,
path=os.path.abspath(path))
# see if there is any EXIF tag about the colorspace
elif 'ColorSpace' in kwds:
csp = kwds['ColorSpace']
#iop = kwds.get('InteroperabilityIndex', None)
if (csp == 0x2) or (csp == 0xffff):
# NOTE: 0xffff is really "undefined" and should be
# combined with a test of EXIF tag 0x0001
# ('InteropIndex') == 'R03', but PIL _getexif()
# does not return the InteropIndex
in_profile = 'AdobeRGB'
self.logger.debug("hmm..this looks like an AdobeRGB image")
elif csp == 0x1:
self.logger.debug("hmm..this looks like a sRGB image")
in_profile = 'sRGB'
else:
self.logger.debug("no color space metadata, assuming this is an sRGB image")
# check if image has an alpha channel; if so we need to remove
# it before ICC transform and tack it back on afterwards
image_rgb, alpha = image_np, None
if 'A' in trcalc.guess_order(image_np.shape):
image_rgb, alpha = trcalc.remove_alpha(image_np)
if have_pil_lcms:
# fallback to LCMS bundled with pillow, if available
# if we have a valid profile, try the conversion
tr_key = get_transform_key(in_profile, out_profile, intent,
None, None, 0)
# convert numpy image to PIL format
image_pil = to_image(image_rgb)
if tr_key in icc_transform_dict:
# We have an in-core transform already for this (faster)
image_pil = convert_profile_pil_transform(image_pil,
icc_transform_dict[tr_key],
inPlace=True)
else:
# Convert using profiles on disk (slower)
if in_profile in profile_dict:
in_profile = profile_dict[in_profile].path
image_pil = convert_profile_pil(image_pil, in_profile,
profile_dict[out_profile].path,
intent)
# convert PIL image to numpy format
image_rgb = from_image(image_pil)
# reattach alpha channel if there was one
if alpha is not None:
image_rgb = trcalc.add_alpha(image_rgb, alpha=alpha)
image_np = image_rgb
self.logger.info("converted from profile (%s) to profile (%s)" % (
in_profile, profile_dict[out_profile].name))
except Exception as e:
self.logger.error("Error converting from embedded color profile: {!r}".format(e),
exc_info=True)
self.logger.warning("Leaving image unprofiled.")
return image_np
def profile_to_working_pil(self, image_pil, kwds, intent=None):
image_np = from_image(image_pil)
image_np = self.profile_to_working_numpy(image_np, kwds, intent=intent)
return to_image(image_np)
# --- Color Management conversion functions ---
def to_image(image_np, flip_y=True):
if flip_y:
image_np = np.flipud(image_np)
return Image.fromarray(image_np)
def from_image(image_pil, flip_y=True):
image_np = np.array(image_pil)
if flip_y:
image_np = np.flipud(image_np)
return image_np
def convert_profile_pil(image_pil, inprof_path, outprof_path, intent_name,
inPlace=False):
if not have_cms:
return image_pil
image_out = ImageCms.profileToProfile(image_pil, inprof_path,
outprof_path,
renderingIntent=intents[intent_name],
outputMode='RGB', inPlace=inPlace,
flags=0)
if inPlace:
return image_pil
return image_out
def convert_profile_pil_transform(image_pil, transform, inPlace=False):
if not have_cms:
return image_pil
image_out = ImageCms.applyTransform(image_pil, transform, inPlace)
if inPlace:
return image_pil
return image_out
def convert_profile_numpy(image_np, inprof_path, outprof_path, intent_name):
if not have_cms:
return image_np
in_image_pil = to_image(image_np)
out_image_pil = convert_profile_pil(in_image_pil,
inprof_path, outprof_path, intent_name)
image_out = from_image(out_image_pil)
return image_out
def convert_profile_numpy_transform(image_np, transform):
if not have_cms:
return image_np
in_image_pil = to_image(image_np)
convert_profile_pil_transform(in_image_pil, transform, inPlace=True)
image_out = from_image(in_image_pil)
return image_out
def get_transform_key(from_name, to_name, to_intent, proof_name,
proof_intent, flags):
return (from_name, to_name, to_intent, proof_name, proof_intent, flags)
def get_transform(from_name, to_name, to_intent='perceptual',
proof_name=None, proof_intent=None,
use_black_pt=False):
global icc_transform_dict
if not have_cms:
return ColorManagerError("Either pillow is not installed, or there is "
"no ICC support in this version of pillow")
flags = 0
if proof_name is not None:
if hasattr(ImageCms, 'FLAGS'):
# supporting multiple versions of lcms...sigh..
flags |= ImageCms.FLAGS['SOFTPROOFING']
else:
flags |= ImageCms.SOFTPROOFING
if use_black_pt:
if hasattr(ImageCms, 'FLAGS'):
flags |= ImageCms.FLAGS['BLACKPOINTCOMPENSATION']
else:
flags |= ImageCms.BLACKPOINTCOMPENSATION
key = get_transform_key(from_name, to_name, to_intent, proof_name,
proof_intent, flags)
try:
output_transform = icc_transform_dict[key]
except KeyError:
# try to build transform on the fly
try:
if proof_name is not None:
output_transform = ImageCms.buildProofTransform(
profile_dict[from_name].path,
profile_dict[to_name].path,
profile_dict[proof_name].path,
'RGB', 'RGB',
renderingIntent=intents[to_intent],
proofRenderingIntent=intents[proof_intent],
flags=flags)
else:
output_transform = ImageCms.buildTransform(
profile_dict[from_name].path,
profile_dict[to_name].path,
'RGB', 'RGB',
renderingIntent=intents[to_intent],
flags=flags)
# cache it so we don't have to build it later
icc_transform_dict[key] = output_transform
except Exception as e:
raise ColorManagerError("Failed to build profile transform: {!r}".format(e))
return output_transform
def convert_profile_fromto(image_np, from_name, to_name,
to_intent='perceptual',
proof_name=None, proof_intent=None,
use_black_pt=False, logger=None):
if image_np.dtype != np.uint8:
## image_np = trcalc.array_convert(image_np, np.dtype(np.uint8))
if logger is not None:
logger.info(
"Cannot profile >8 bpp images; leaving image unprofiled.")
return image_np
alpha = None
ht, wd, dp = image_np.shape
if dp > 3:
# color profile conversion does not handle an alpha layer
image_np, alpha = trcalc.remove_alpha(image_np)
try:
output_transform = get_transform(from_name, to_name,
to_intent=to_intent,
proof_name=proof_name,
proof_intent=proof_intent,
use_black_pt=use_black_pt)
out_np = convert_profile_numpy_transform(image_np, output_transform)
if alpha is not None:
# reattach alpha layer if there was one
out_np = trcalc.add_alpha(out_np, alpha)
return out_np
except Exception as e:
if logger is not None:
logger.warning("Error converting profile from '{}' to '{}': {!r}".format(
from_name, to_name, e))
logger.warning("Leaving image unprofiled")
return image_np
def set_rendering_intent(intent):
"""
Sets the color management attribute rendering intent.
Parameters
----------
intent: integer
0: perceptual, 1: relative colorimetric, 2: saturation,
3: absolute colorimetric
"""
global rendering_intent
rendering_intent = intent
# Look up all the profiles the user has available
glob_pat = os.path.join(basedir, "profiles", "*.icc")
for path in glob.glob(glob_pat):
dirname, filename = os.path.split(path)
profname, ext = os.path.splitext(filename)
profile_dict[profname] = Bunch.Bunch(name=profname,
path=os.path.abspath(path))
if have_pil_lcms:
if PIL_LT_9_1:
d = dict(absolute_colorimetric=ImageCms.INTENT_ABSOLUTE_COLORIMETRIC,
perceptual=ImageCms.INTENT_PERCEPTUAL,
relative_colorimetric=ImageCms.INTENT_RELATIVE_COLORIMETRIC,
saturation=ImageCms.INTENT_SATURATION)
else:
d = dict(absolute_colorimetric=ImageCms.Intent.ABSOLUTE_COLORIMETRIC,
perceptual=ImageCms.Intent.PERCEPTUAL,
relative_colorimetric=ImageCms.Intent.RELATIVE_COLORIMETRIC,
saturation=ImageCms.Intent.SATURATION)
intents.update(d)
# Build transforms for profile conversions for which we have profiles
def have_profile(name):
return name in profile_dict.keys()
def get_profiles():
names = list(profile_dict.keys())
names.sort()
return names
def get_intents():
names = list(intents.keys())
names.sort()
return names
def set_profile_alias(alias, profname):
global profile_dict
profile_dict[alias] = profile_dict[profname]
| bsd-3-clause | 2e6b48a79795405d9b805e7b5d5a83dd | 33.483709 | 96 | 0.56305 | 3.991587 | false | false | false | false |
ejeschke/ginga | ginga/pilw/PilHelp.py | 3 | 5627 | #
# PilHelp.py -- help classes for the PIL drawing
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import numpy as np
from PIL import Image, ImageFont, ImageDraw
from ginga import colors
from ginga.fonts import font_asst
def get_cached_font(fontname, fontsize):
key = ('pil', fontname, fontsize)
try:
return font_asst.get_cache(key)
except KeyError:
# see if we can build the font
info = font_asst.get_font_info(fontname, subst_ok=True)
font = ImageFont.truetype(info.font_path, fontsize)
font_asst.add_cache(key, font)
return font
def load_font(font_name, font_file):
if not font_asst.have_font(font_name):
font_asst.add_font(font_file, font_name=font_name)
return font_name
def text_size(text, font):
f = get_cached_font(font.fontname, font.fontsize)
i = Image.new('RGBA', (1, 1))
d = ImageDraw.Draw(i, 'RGBA')
return d.textsize(text)
def text_to_array(text, font, rot_deg=0.0):
wd, ht = text_size(text, font)
f = get_cached_font(font.fontname, font.fontsize)
color = get_color(font.color)
i = Image.new('RGBA', (wd, ht))
d = ImageDraw.Draw(i, 'RGBA')
d.text((0, 0), text, font=f, fill=color)
i.rotate(rot_deg, expand=1)
arr8 = np.frombuffer(i.tobytes(), dtype=np.uint8)
arr8 = arr8.reshape((ht, wd, 4))
return arr8
def get_color(color, alpha=1.0):
if color is not None:
r, g, b = colors.resolve_color(color)
else:
r, g, b = 1.0, 1.0, 1.0
return (int(r * 255), int(g * 255), int(b * 255), int(alpha * 255))
class Pen(object):
def __init__(self, color='black', linewidth=1, alpha=1.0):
self.color = color
self.linewidth = linewidth
self.alpha = alpha
class Brush(object):
def __init__(self, color='black', fill=False, alpha=1.0):
self.color = color
self.fill = fill
self.alpha = alpha
class Font(object):
def __init__(self, fontname='Roboto', fontsize=12.0, color='black',
linewidth=1, alpha=1.0):
fontname = font_asst.resolve_alias(fontname, fontname)
self.fontname = fontname
self.fontsize = int(fontsize)
self.color = color
self.linewidth = linewidth
# scale relative to a 12pt font
self.scale = fontsize / 12.0
self.alpha = alpha
# TODO: currently there is only support for some simple built-in
# fonts. What kind of fonts/lookup can we use for this?
self.font = get_cached_font(self.fontname, self.fontsize)
class PilContext(object):
def __init__(self, surface):
self.set_canvas(surface)
def set_canvas(self, surface):
self.surface = surface
self.ctx = ImageDraw.Draw(surface, 'RGBA')
def get_color(self, color, alpha=1.0):
if color is not None:
r, g, b = colors.resolve_color(color)
else:
r, g, b = 1.0, 1.0, 1.0
return (int(r * 255), int(g * 255), int(b * 255), int(alpha * 255))
def get_pen(self, color, linewidth=1, alpha=1.0):
# if hasattr(self, 'linestyle'):
# if self.linestyle == 'dash':
# cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
#op = int(alpha * 255)
color = self.get_color(color, alpha=alpha)
return Pen(color=color, linewidth=linewidth, alpha=alpha)
def get_brush(self, color, alpha=1.0):
color = self.get_color(color, alpha=alpha)
return Brush(color=color, fill=True, alpha=alpha)
def get_font(self, name, size, color, linewidth=1, alpha=1.0):
color = self.get_color(color, alpha=alpha)
return Font(fontname=name, fontsize=size, color=color,
linewidth=linewidth, alpha=alpha)
def _cvt_points(self, points):
# PIL seems to have trouble with numpy arrays as sequences
# of points, so just convert to a list
return [(p[0], p[1]) for p in points]
def text_extents(self, text, font):
retval = self.ctx.textsize(text, font.font)
wd, ht = retval
return wd, ht
def image(self, pt, rgb_arr):
p_image = Image.fromarray(rgb_arr)
self.surface.paste(p_image)
def text(self, pt, text, font, pen):
x, y = pt
self.ctx.text((x, y), text, fill=pen.color, font=font.font)
def line(self, pt1, pt2, pen):
x1, y1 = int(np.round(pt1[0])), int(np.round(pt1[1]))
x2, y2 = int(np.round(pt2[0])), int(np.round(pt2[1]))
self.ctx.line(((x1, y1), (x2, y2)), fill=pen.color,
width=pen.linewidth)
def circle(self, pt, radius, pen, brush):
x, y = pt
radius = int(radius)
if (brush is not None) and brush.fill:
self.ctx.ellipse(((x - radius, y - radius),
(x + radius, y + radius)),
fill=brush.color, outline=pen.color)
else:
self.ctx.ellipse(((x - radius, y - radius),
(x + radius, y + radius)),
outline=pen.color)
def polygon(self, points, pen, brush):
points = self._cvt_points(points)
if (brush is not None) and brush.fill:
self.ctx.polygon(points, fill=brush.color, outline=pen.color)
else:
self.ctx.polygon(points, outline=pen.color)
def path(self, points, pen):
points = self._cvt_points(points)
p0 = points[0]
for pt in points[1:]:
self.line(p0, pt, pen)
p0 = pt
#END
| bsd-3-clause | ca52e224bcc8b386bdb99e67e0e012e0 | 30.088398 | 75 | 0.578461 | 3.267712 | false | false | false | false |
awesto/django-shop | shop/serializers/auth.py | 1 | 2730 | from django.conf import settings
from django.template.loader import select_template
from django.urls import NoReverseMatch, reverse
from django.utils.translation import get_language_from_request
from cms.models.pagemodel import Page
from rest_framework.serializers import CharField, BooleanField
from rest_auth import serializers
from shop.conf import app_settings
from shop.forms.auth import PasswordResetRequestForm
from rest_auth.serializers import LoginSerializer as DefaultLoginSerializer
class LoginSerializer(DefaultLoginSerializer):
stay_logged_in = BooleanField(required=False)
class PasswordResetRequestSerializer(serializers.PasswordResetSerializer):
password_reset_form_class = PasswordResetRequestForm
invalid_password_reset_confirm_url = '/cms-page_or_view_with__reverse_id=password-reset-confirm__does-not-exist/'
def save(self):
subject_template = select_template([
'{}/email/password-reset-subject.txt'.format(app_settings.APP_LABEL),
'shop/email/password-reset-subject.txt',
])
body_text_template = select_template([
'{}/email/password-reset-body.txt'.format(app_settings.APP_LABEL),
'shop/email/password-reset-body.txt',
])
body_html_template = select_template([
'{}/email/password-reset-body.html'.format(app_settings.APP_LABEL),
'shop/email/password-reset-body.html',
])
try:
page = Page.objects.select_related('node').get(reverse_id='password-reset-confirm', publisher_is_draft=False)
except Page.DoesNotExist:
try:
password_reset_confirm_url = reverse('password-reset-confirm')
except NoReverseMatch:
password_reset_confirm_url = self.invalid_password_reset_confirm_url
else:
language = get_language_from_request(self.context['request'])
password_reset_confirm_url = page.get_absolute_url(language)
opts = {
'use_https': self.context['request'].is_secure(),
'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
'request': self.context['request'],
'subject_template_name': subject_template.template.name,
'email_template_name': body_text_template.template.name,
'html_email_template_name': body_html_template.template.name,
'extra_email_context': {'password_reset_confirm_url': password_reset_confirm_url}
}
self.reset_form.save(**opts)
class PasswordResetConfirmSerializer(serializers.PasswordResetConfirmSerializer):
new_password1 = CharField(min_length=6, max_length=128)
new_password2 = CharField(min_length=6, max_length=128)
| bsd-3-clause | 096944895a74235409003cc9bf538f63 | 46.068966 | 121 | 0.69011 | 4.086826 | false | false | false | false |
awesto/django-shop | shop/views/auth.py | 1 | 8484 | from django.contrib.auth import logout, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_auth.views import LoginView as OriginalLoginView, PasswordChangeView as OriginalPasswordChangeView
from shop.models.cart import CartModel
from shop.models.customer import CustomerModel
from shop.rest.renderers import CMSPageRenderer
from shop.serializers.auth import PasswordResetRequestSerializer, PasswordResetConfirmSerializer
from shop.signals import email_queued
class AuthFormsView(GenericAPIView):
"""
Generic view to handle authentication related forms such as user registration
"""
serializer_class = None
form_class = None
def post(self, request, *args, **kwargs):
if request.customer.is_visitor:
customer = CustomerModel.objects.get_or_create_from_request(request)
else:
customer = request.customer
form_data = request.data.get(self.form_class.scope_prefix, {})
form = self.form_class(data=form_data, instance=customer)
if form.is_valid():
form.save(request=request)
response_data = {form.form_name: {
'success_message': _("Successfully registered yourself."),
}}
return Response(response_data, status=status.HTTP_200_OK)
errors = dict(form.errors)
if 'email' in errors:
errors.update({NON_FIELD_ERRORS: errors.pop('email')})
return Response({form.form_name: errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
class LoginView(OriginalLoginView):
form_name = 'login_form'
def login(self):
"""
Logs in as the given user, and moves the items from the current to the new cart.
"""
try:
anonymous_cart = CartModel.objects.get_from_request(self.request)
except CartModel.DoesNotExist:
anonymous_cart = None
if self.request.customer.user.is_anonymous or self.request.customer.is_registered:
previous_user = None
else:
previous_user = self.request.customer.user
super().login() # this rotates the session_key
if not self.serializer.data.get('stay_logged_in'):
self.request.session.set_expiry(0) # log out when the browser is closed
authenticated_cart = CartModel.objects.get_from_request(self.request)
if anonymous_cart:
# an anonymous customer logged in, now merge his current cart with a cart,
# which previously might have been created under his account.
authenticated_cart.merge_with(anonymous_cart)
if previous_user and previous_user.is_active is False and previous_user != self.request.user:
# keep the database clean and remove this anonymous entity
if previous_user.customer.orders.count() == 0:
previous_user.delete()
def post(self, request, *args, **kwargs):
self.request = request
if request.user.is_anonymous:
form_data = request.data.get('form_data', {})
self.serializer = self.get_serializer(data=form_data)
if self.serializer.is_valid():
self.login()
return self.get_response()
exc = ValidationError({self.form_name: self.serializer.errors})
else:
message = ErrorDetail("Please log out before signing in again.")
exc = ValidationError({self.form_name: {api_settings.NON_FIELD_ERRORS_KEY: [message]}})
response = self.handle_exception(exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
class LogoutView(APIView):
"""
Calls Django logout method and delete the auth Token assigned to the current User object.
"""
permission_classes = (AllowAny,)
form_name = 'logout_form'
def post(self, request):
try:
request.user.auth_token.delete()
except:
pass
logout(request)
request.user = AnonymousUser()
response_data = {self.form_name: {'success_message': _("Successfully logged out.")}}
return Response(response_data)
class PasswordChangeView(OriginalPasswordChangeView):
form_name = 'password_change_form'
def post(self, request, *args, **kwargs):
form_data = request.data.get('form_data', {})
serializer = self.get_serializer(data=form_data)
if serializer.is_valid():
serializer.save()
response_data = {self.form_name: {
'success_message': _("Password has been changed successfully."),
}}
return Response(response_data)
return Response({self.form_name: serializer.errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
class PasswordResetRequestView(GenericAPIView):
"""
Calls Django Auth PasswordResetRequestForm save method.
Accepts the following POST parameters: email
Returns the success/fail message.
"""
serializer_class = PasswordResetRequestSerializer
permission_classes = (AllowAny,)
form_name = 'password_reset_request_form'
def post(self, request, *args, **kwargs):
form_data = request.data.get('form_data', {})
serializer = self.get_serializer(data=form_data)
if not serializer.is_valid():
return Response({self.form_name: serializer.errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
# send email containing a reset link
serializer.save()
# trigger async email queue
email_queued()
# Return the success message with OK HTTP status
msg = _("Instructions on how to reset the password have been sent to '{email}'.")
response_data = {self.form_name: {
'success_message': msg.format(**serializer.data),
}}
return Response(response_data)
class PasswordResetConfirmView(GenericAPIView):
"""
Password reset e-mail link points onto a CMS page with the Page ID = 'password-reset-confirm'.
This page then shall render the CMS plugin as provided by the **ShopAuthenticationPlugin** using
the form "Confirm Password Reset".
"""
renderer_classes = (CMSPageRenderer, JSONRenderer, BrowsableAPIRenderer)
serializer_class = PasswordResetConfirmSerializer
permission_classes = (AllowAny,)
token_generator = default_token_generator
form_name = 'password_reset_confirm_form'
def get(self, request, uidb64=None, token=None):
data = {'uid': uidb64, 'token': token}
serializer_class = self.get_serializer_class()
password = get_user_model().objects.make_random_password()
data.update(new_password1=password, new_password2=password)
serializer = serializer_class(data=data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response({'validlink': False})
return Response({
'validlink': True,
'user_name': force_str(serializer.user),
'form_name': 'password_reset_form',
})
def post(self, request, uidb64=None, token=None):
try:
data = dict(request.data['form_data'], uid=uidb64, token=token)
except (KeyError, TypeError, ValueError):
errors = {'non_field_errors': [_("Invalid POST data.")]}
else:
serializer = self.get_serializer(data=data)
if serializer.is_valid():
serializer.save()
response_data = {self.form_name: {
'success_message': _("Password has been reset with the new password."),
}}
return Response(response_data)
else:
errors = serializer.errors
return Response({self.form_name: errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
| bsd-3-clause | c150e4a48d2a0f709f14d1805920ed04 | 41.208955 | 109 | 0.660655 | 4.289181 | false | false | false | false |
lektor/lektor | lektor/imagetools.py | 1 | 18162 | # -*- coding: utf-8 -*-
import decimal
import os
import posixpath
import re
import struct
import warnings
from datetime import datetime
from enum import Enum
from xml.etree import ElementTree as etree
import exifread
import filetype
from lektor.reporter import reporter
from lektor.utils import get_dependent_url
from lektor.utils import locate_executable
from lektor.utils import portable_popen
# yay shitty library
datetime.strptime("", "")
class ThumbnailMode(Enum):
    """The supported thumbnail scaling modes.

    The member values ("fit", "crop", "stretch") are the textual names
    used in templates and URLs.
    """

    FIT = "fit"
    CROP = "crop"
    STRETCH = "stretch"

    # Re-uses the value "fit", so DEFAULT is an alias for FIT
    # (``ThumbnailMode.DEFAULT is ThumbnailMode.FIT``).
    DEFAULT = "fit"

    @property
    def label(self):
        """The mode's label as used in templates."""
        message = (
            "ThumbnailMode.label is deprecated. "
            "(Use ThumbnailMode.value instead.)"
        )
        warnings.warn(message, DeprecationWarning)
        return self.value

    @classmethod
    def from_label(cls, label):
        """Looks up the thumbnail mode by its textual representation."""
        message = (
            "ThumbnailMode.from_label is deprecated. "
            "Use the ThumbnailMode constructor, "
            'e.g. "ThumbnailMode(label)", instead.'
        )
        warnings.warn(message, DeprecationWarning)
        return cls(label)
def _convert_gps(coords, hem):
deg, min, sec = [float(x.num) / float(x.den) for x in coords]
sign = -1 if hem in "SW" else 1
return sign * (deg + min / 60.0 + sec / 3600.0)
def _combine_make(make, model):
make = make or ""
model = model or ""
if make and model.startswith(make):
return make
return " ".join([make, model]).strip()
_parse_svg_units_re = re.compile(
r"(?P<value>[+-]?(?:\d+)(?:\.\d+)?)\s*(?P<unit>\D+)?", flags=re.IGNORECASE
)
def _parse_svg_units_px(length):
    """Parse an SVG length attribute, returning its value in pixels.

    Only a bare number or an explicit "px" unit is understood; any
    other unit — or unparseable input — results in ``None``.
    """
    match = _parse_svg_units_re.match(length)
    if match is None:
        return None
    unit = match.group("unit")
    if unit and unit != "px":
        # A unit other than pixels was given; we cannot convert it.
        return None
    try:
        return float(match.group("value"))
    except ValueError:
        return None
class EXIFInfo:
def __init__(self, d):
self._mapping = d
def __bool__(self):
return bool(self._mapping)
__nonzero__ = __bool__
def to_dict(self):
rv = {}
for key, value in self.__class__.__dict__.items():
if key[:1] != "_" and isinstance(value, property):
rv[key] = getattr(self, key)
return rv
def _get_string(self, key):
try:
value = self._mapping[key].values
except KeyError:
return None
if isinstance(value, str):
return value
return value.decode("utf-8", "replace")
def _get_int(self, key):
try:
return self._mapping[key].values[0]
except LookupError:
return None
def _get_float(self, key, precision=4):
try:
val = self._mapping[key].values[0]
if isinstance(val, int):
return float(val)
return round(float(val.num) / float(val.den), precision)
except LookupError:
return None
def _get_frac_string(self, key):
try:
val = self._mapping[key].values[0]
return "%s/%s" % (val.num, val.den)
except LookupError:
return None
@property
def artist(self):
return self._get_string("Image Artist")
@property
def copyright(self):
return self._get_string("Image Copyright")
@property
def camera_make(self):
return self._get_string("Image Make")
@property
def camera_model(self):
return self._get_string("Image Model")
@property
def camera(self):
return _combine_make(self.camera_make, self.camera_model)
@property
def lens_make(self):
return self._get_string("EXIF LensMake")
@property
def lens_model(self):
return self._get_string("EXIF LensModel")
@property
def lens(self):
return _combine_make(self.lens_make, self.lens_model)
@property
def aperture(self):
return self._get_float("EXIF ApertureValue")
@property
def f_num(self):
return self._get_float("EXIF FNumber")
@property
def f(self):
return "ƒ/%s" % self.f_num
@property
def exposure_time(self):
    """The exposure time as a "num/den" fraction string, or None."""
    return self._get_frac_string("EXIF ExposureTime")

@property
def shutter_speed(self):
    """The shutter speed rendered as "1/N" seconds, or None.

    EXIF stores ShutterSpeedValue as an APEX value; the exposure duration
    in seconds is 2**-value, hence the formula below.
    """
    val = self._get_float("EXIF ShutterSpeedValue")
    if val is not None:
        return "1/%d" % round(
            1 / (2**-val)  # pylint: disable=invalid-unary-operand-type
        )
    return None

@property
def focal_length(self):
    """The focal length rendered with an "mm" suffix, or None."""
    val = self._get_float("EXIF FocalLength")
    if val is not None:
        return "%smm" % val
    return None

@property
def focal_length_35mm(self):
    """The 35mm-equivalent focal length rendered with an "mm" suffix, or None."""
    val = self._get_float("EXIF FocalLengthIn35mmFilm")
    if val is not None:
        return "%dmm" % val
    return None

@property
def flash_info(self):
    """The printable representation of the Flash tag, or None.

    Decoded from UTF-8 bytes when necessary (strict decoding here, unlike
    _get_string which replaces errors).
    """
    try:
        value = self._mapping["EXIF Flash"].printable
    except KeyError:
        return None
    if isinstance(value, str):
        return value
    return value.decode("utf-8")
@property
def iso(self):
    """The ISO speed rating, or None when the tag is absent.

    ``_get_int`` already returns None for a missing tag, so the previous
    ``if val is not None: return val; return None`` dance was redundant.
    """
    return self._get_int("EXIF ISOSpeedRatings")
@property
def created_at(self):
    """The capture timestamp as a naive datetime, or None.

    Several tags are tried in decreasing order of preference; the first
    one that parses with the EXIF "%Y:%m:%d %H:%M:%S" format wins.
    """
    date_tags = (
        "GPS GPSDate",
        "Image DateTimeOriginal",
        "EXIF DateTimeOriginal",
        "EXIF DateTimeDigitized",
        "Image DateTime",
    )
    for tag in date_tags:
        try:
            return datetime.strptime(
                self._mapping[tag].printable, "%Y:%m:%d %H:%M:%S"
            )
        except (KeyError, ValueError):
            # Tag missing or unparsable — fall through to the next one.
            continue
    return None

@property
def longitude(self):
    """The GPS longitude converted via _convert_gps (defined elsewhere), or None."""
    try:
        return _convert_gps(
            self._mapping["GPS GPSLongitude"].values,
            self._mapping["GPS GPSLongitudeRef"].printable,
        )
    except KeyError:
        return None

@property
def latitude(self):
    """The GPS latitude converted via _convert_gps (defined elsewhere), or None."""
    try:
        return _convert_gps(
            self._mapping["GPS GPSLatitude"].values,
            self._mapping["GPS GPSLatitudeRef"].printable,
        )
    except KeyError:
        return None
@property
def altitude(self):
    """The GPS altitude, negated when below the reference level, or None."""
    altitude = self._get_float("GPS GPSAltitude")
    if altitude is None:
        return None
    try:
        ref = self._mapping["GPS GPSAltitudeRef"].values[0]
    except LookupError:
        ref = 0
    # A reference value of 1 marks the altitude as below sea level.
    return -altitude if ref == 1 else altitude
@property
def location(self):
    """A (latitude, longitude) tuple, or None if either coordinate is unknown."""
    coords = (self.latitude, self.longitude)
    if coords[0] is None or coords[1] is None:
        return None
    return coords
@property
def documentname(self):
    """The DocumentName tag as a string, or None."""
    return self._get_string("Image DocumentName")

@property
def description(self):
    """The ImageDescription tag as a string, or None."""
    return self._get_string("Image ImageDescription")
@property
def is_rotated(self):
    """Return if the image is rotated according to the Orientation header.

    The Orientation header in EXIF stores an integer value between
    1 and 8, where the values 5-8 represent "portrait" orientations
    (rotated 90deg left, right, and mirrored versions of those), i.e.,
    the image is rotated.
    """
    orientation = self._get_int("Image Orientation")
    return orientation in (5, 6, 7, 8)
def get_suffix(width, height, mode, quality=None):
    """Build the filename suffix encoding the thumbnail parameters.

    The result looks like "640", "640x480", "640x480_crop" or
    "640x480_crop_q80" depending on which parameters are given.
    """
    pieces = []
    if width is not None:
        pieces.append(str(width))
    if height is not None:
        pieces.append("x%s" % height)
    if mode != ThumbnailMode.DEFAULT:
        pieces.append("_%s" % mode.value)
    if quality is not None:
        pieces.append("_q%s" % quality)
    return "".join(pieces)
def get_svg_info(fp):
    """Return ("svg", width, height) for an SVG file object.

    Dimensions are None unless the root element is an SVG tag with
    pixel-unit width/height attributes; the stream is rewound afterwards.
    """
    _, root = next(etree.iterparse(fp, ["start"]), (None, None))
    fp.seek(0)
    width = height = None
    if root is not None and root.tag == "{http://www.w3.org/2000/svg}svg":
        width = _parse_svg_units_px(root.attrib.get("width", ""))
        height = _parse_svg_units_px(root.attrib.get("height", ""))
    return "svg", width, height
# see http://www.w3.org/Graphics/JPEG/itu-t81.pdf
# Table B.1 – Marker code assignments (page 32/36)
# Second byte of every JPEG "start of frame" (SOFn) marker; get_image_info()
# scans for one of these to locate the frame dimensions.
_JPEG_SOF_MARKERS = (
    # non-differential, Hufmann-coding
    0xC0,
    0xC1,
    0xC2,
    0xC3,
    # differential, Hufmann-coding
    0xC5,
    0xC6,
    0xC7,
    # non-differential, arithmetic-coding
    0xC9,
    0xCA,
    0xCB,
    # differential, arithmetic-coding
    0xCD,
    0xCE,
    0xCF,
)
def get_image_info(fp):
    """Reads some image info from a file descriptor.

    Returns a ``(format, width, height)`` tuple where format is one of
    "svg", "png", "gif", "jpeg", "unknown" or None, and the dimensions
    may be None when they cannot be determined.  The stream position is
    consumed; callers should not rely on it afterwards.
    """
    head = fp.read(32)
    fp.seek(0)
    if len(head) < 24:
        # Too short to contain any of the headers we know how to parse.
        return "unknown", None, None
    magic_bytes = b"<?xml", b"<svg"
    if any(map(head.strip().startswith, magic_bytes)):
        return get_svg_info(fp)
    _type = filetype.image_match(bytearray(head))
    fmt = _type.mime.split("/")[1] if _type else None
    width = None
    height = None
    if fmt == "png":
        # Verify the fixed \r\n\x1a\n portion of the PNG signature before
        # trusting the IHDR dimensions.
        check = struct.unpack(">i", head[4:8])[0]
        if check == 0x0D0A1A0A:
            width, height = struct.unpack(">ii", head[16:24])
    elif fmt == "gif":
        # GIF logical screen descriptor: little-endian 16-bit dimensions.
        width, height = struct.unpack("<HH", head[6:10])
    elif fmt == "jpeg":
        # specification available under
        # http://www.w3.org/Graphics/JPEG/itu-t81.pdf
        # Annex B (page 31/35)
        # we are looking for a SOF marker ("start of frame").
        # skip over the "start of image" marker
        # (filetype detection took care of that).
        fp.seek(2)
        while True:
            byte = fp.read(1)
            # "All markers are assigned two-byte codes: an X’FF’ byte
            # followed by a byte which is not equal to 0 or X’FF’."
            if not byte or ord(byte) != 0xFF:
                raise Exception("Malformed JPEG image.")
            # "Any marker may optionally be preceded by any number
            # of fill bytes, which are bytes assigned code X’FF’."
            while ord(byte) == 0xFF:
                byte = fp.read(1)
            if ord(byte) not in _JPEG_SOF_MARKERS:
                # header length parameter takes 2 bytes for all markers
                length = struct.unpack(">H", fp.read(2))[0]
                fp.seek(length - 2, 1)
                continue
            # else...
            # see Figure B.3 – Frame header syntax (page 35/39) and
            # Table B.2 – Frame header parameter sizes and values
            # (page 36/40)
            fp.seek(3, 1)  # skip header length and precision parameters
            height, width = struct.unpack(">HH", fp.read(4))
            if height == 0:
                # "Value 0 indicates that the number of lines shall be
                # defined by the DNL marker [...]"
                #
                # DNL is not supported by most applications,
                # so we won't support it either.
                raise Exception("JPEG with DNL not supported.")
            break
        # if the file is rotated, we want, for all intents and purposes,
        # to return the dimensions swapped. (all client apps will display
        # the image rotated, and any template computations are likely to want
        # to make decisions based on the "visual", not the "real" dimensions.
        # thumbnail code also depends on this behaviour.)
        fp.seek(0)
        if is_rotated(fp):
            width, height = height, width
    else:
        fmt = None
    return fmt, width, height
def read_exif(fp):
    """Reads exif data from a file pointer of an image and returns it."""
    exif = exifread.process_file(fp)
    return EXIFInfo(exif)


def is_rotated(fp):
    """Fast version of read_exif(fp).is_rotated, using an exif header subset."""
    # Only parse up to the Orientation tag and skip detailed tag processing;
    # this avoids reading the full EXIF block just to answer one question.
    exif = exifread.process_file(fp, stop_tag="Orientation", details=False)
    return EXIFInfo(exif).is_rotated
def find_imagemagick(im=None):
    """Finds imagemagick and returns the path to it.

    An explicitly provided path wins if it points at an existing file;
    otherwise the executable is looked up on the PATH.  Raises
    RuntimeError when nothing can be found.
    """
    # If it's provided explicitly and it's valid, we go with that one.
    if im is not None and os.path.isfile(im):
        return im
    # On windows, imagemagick was renamed to magick, because
    # convert is system utility for fs manipulation.
    executable = "magick" if os.name == "nt" else "convert"
    located = locate_executable(executable)
    if located is None:
        # Give up.
        raise RuntimeError("Could not locate imagemagick.")
    return located
def get_thumbnail_ext(source_filename):
    """Return the extension the thumbnail should get, or None to keep it.

    Browser-native formats (png/jpg/jpeg/gif) keep their extension; anything
    else is converted to JPEG.
    """
    ext = source_filename.rsplit(".", 1)[-1].lower()
    # `ext` is already lower-cased above; the old code lower-cased it twice.
    if ext in ("png", "jpg", "jpeg", "gif"):
        return None
    # Otherwise we roll with JPEG as default.
    return ".jpeg"
def get_quality(source_filename):
    """Return the default thumbnail quality for the given source file.

    PNG sources get a quality of 75, everything else 85.
    """
    ext = source_filename.rsplit(".", 1)[-1].lower()
    # `ext` is already lower-cased above; the old code lower-cased it twice.
    if ext == "png":
        return 75
    return 85
def compute_dimensions(width, height, source_width, source_height):
    """computes the bounding dimensions

    Returns a (width, height) pair that fits the source aspect ratio:
    a missing dimension is derived from the ratio, and when both are
    given the one that would overflow the ratio is recomputed.
    """
    result_width, result_height = width, height
    width = None if width is None else float(width)
    height = None if height is None else float(height)
    source_width = None if source_width is None else float(source_width)
    source_height = None if source_height is None else float(source_height)
    source_ratio = source_width / source_height

    def _round_half_up(value):
        # make sure things get top-rounded, to be consistent with imagemagick
        return int(decimal.Decimal(value).to_integral(decimal.ROUND_HALF_UP))

    if width is None or (height is not None and width / height > source_ratio):
        result_width = _round_half_up(height * source_ratio)
    else:
        result_height = _round_half_up(width / source_ratio)
    return result_width, result_height
def process_image(
    ctx,
    source_image,
    dst_filename,
    width=None,
    height=None,
    mode=ThumbnailMode.DEFAULT,
    quality=None,
):
    """Build image from source image, optionally compressing and resizing.

    "source_image" is the absolute path of the source in the content directory,
    "dst_filename" is the absolute path of the target in the output directory.

    Raises ValueError when neither width nor height is given.  The conversion
    itself is delegated to the external imagemagick binary.
    """
    if width is None and height is None:
        raise ValueError("Must specify at least one of width or height.")
    im = find_imagemagick(ctx.build_state.config["IMAGEMAGICK_EXECUTABLE"])
    if quality is None:
        quality = get_quality(source_image)
    # Assemble imagemagick's geometry argument, e.g. "640", "x480", "640x480".
    resize_key = ""
    if width is not None:
        resize_key += str(width)
    if height is not None:
        resize_key += "x" + str(height)
    if mode == ThumbnailMode.STRETCH:
        # "!" forces the exact geometry, ignoring the aspect ratio.
        resize_key += "!"
    # -auto-orient bakes the EXIF orientation into the pixel data.
    cmdline = [im, source_image, "-auto-orient"]
    if mode == ThumbnailMode.CROP:
        # Fill the bounding box ("^"), then crop the overflow around the center.
        cmdline += [
            "-resize",
            resize_key + "^",
            "-gravity",
            "Center",
            "-extent",
            resize_key,
        ]
    else:
        cmdline += ["-resize", resize_key]
    # Strip metadata and normalize the colorspace for web output.
    cmdline += ["-strip", "-colorspace", "sRGB"]
    cmdline += ["-quality", str(quality), dst_filename]
    reporter.report_debug_info("imagemagick cmd line", cmdline)
    portable_popen(cmdline).wait()
def make_image_thumbnail(
    ctx,
    source_image,
    source_url_path,
    width=None,
    height=None,
    mode=ThumbnailMode.DEFAULT,
    upscale=None,
    quality=None,
):
    """Helper method that can create thumbnails from within the build process
    of an artifact.

    Returns a :class:`Thumbnail`.  SVG sources and cases where the thumbnail
    would upscale (with upscaling disabled) are returned unprocessed; for
    everything else a sub-artifact is registered that renders the thumbnail
    via :func:`process_image`.
    """
    if width is None and height is None:
        raise ValueError("Must specify at least one of width or height.")
    # temporarily fallback to "fit" in case of erroneous arguments
    # to preserve backward-compatibility.
    # this needs to change to an exception in the future.
    if mode != ThumbnailMode.FIT and (width is None or height is None):
        warnings.warn(
            f'"{mode.value}" mode requires both `width` and `height` '
            'to be specified. Falling back to "fit" mode.'
        )
        mode = ThumbnailMode.FIT
    # Crop/stretch imply a fixed output size, so upscaling defaults to on.
    if upscale is None and mode in (ThumbnailMode.CROP, ThumbnailMode.STRETCH):
        upscale = True
    with open(source_image, "rb") as f:
        format, source_width, source_height = get_image_info(f)
        if format is None:
            raise RuntimeError("Cannot process unknown images")
    # If we are dealing with an actual svg image, we do not actually
    # resize anything, we just return it. This is not ideal but it's
    # better than outright failing.
    if format == "svg":
        return Thumbnail(source_url_path, width, height)
    if mode == ThumbnailMode.FIT:
        computed_width, computed_height = compute_dimensions(
            width, height, source_width, source_height
        )
    else:
        computed_width, computed_height = width, height
    # Without upscaling, a thumbnail larger than the source is pointless —
    # just serve the source dimensions instead.
    would_upscale = computed_width > source_width or computed_height > source_height
    if would_upscale and not upscale:
        return Thumbnail(source_url_path, source_width, source_height)
    suffix = get_suffix(width, height, mode, quality=quality)
    dst_url_path = get_dependent_url(
        source_url_path, suffix, ext=get_thumbnail_ext(source_image)
    )

    def build_thumbnail_artifact(artifact):
        # Deferred builder: runs when the artifact is actually produced.
        artifact.ensure_dir()
        process_image(
            ctx,
            source_image,
            artifact.dst_filename,
            width,
            height,
            mode,
            quality=quality,
        )

    ctx.sub_artifact(artifact_name=dst_url_path, sources=[source_image])(
        build_thumbnail_artifact
    )
    return Thumbnail(dst_url_path, computed_width, computed_height)
class Thumbnail:
    """Holds information about a thumbnail."""

    def __init__(self, url_path, width, height=None):
        #: the `width` of the thumbnail in pixels.
        self.width = width
        #: the `height` of the thumbnail in pixels.
        self.height = height
        #: the URL path of the image.
        self.url_path = url_path

    def __str__(self):
        # The string form is just the file name part of the URL path.
        return posixpath.basename(self.url_path)
| bsd-3-clause | f2b04180a15808b2cb1dc3843bb31752 | 27.571654 | 84 | 0.586893 | 3.873399 | false | false | false | false |
lektor/lektor | lektor/sourcesearch.py | 1 | 3790 | import sqlite3
from lektor.constants import PRIMARY_ALT
def _iter_parents(path):
path = path.strip("/")
if path:
pieces = path.split("/")
for x in range(len(pieces)):
yield "/" + "/".join(pieces[:x])
def _find_info(infos, alt, lang):
for info in infos:
if info["alt"] == alt and info["lang"] == lang:
return info
return None
def _id_from_path(path):
try:
return path.strip("/").split("/")[-1]
except IndexError:
return ""
def _mapping_from_cursor(cur):
    """Group cursor rows (path, alt, lang, type, title) into a dict by path.

    Each path maps to a list of info dicts, one per (alt, lang) variant.
    """
    mapping = {}
    for path, alt, lang, type, title in cur.fetchall():
        info = {
            "id": _id_from_path(path),
            "path": path,
            "alt": alt,
            "type": type,
            "lang": lang,
            "title": title,
        }
        mapping.setdefault(path, []).append(info)
    return mapping
def _find_best_info(infos, alt, lang):
    """Pick the most specific info dict, degrading over alt then language.

    Tries (alt, lang), then the primary alt, then the English fallbacks.
    """
    candidates = (
        (alt, lang),
        (PRIMARY_ALT, lang),
        (alt, "en"),
        (PRIMARY_ALT, "en"),
    )
    for candidate_alt, candidate_lang in candidates:
        info = _find_info(infos, candidate_alt, candidate_lang)
        if info is not None:
            return info
    return None
def _build_parent_path(path, mapping, alt, lang):
    """Build breadcrumb entries ({id, path, title}) for all parents of *path*.

    Titles come from the best-matching info in *mapping*; parents without
    info fall back to their id, or "(Index)" for the root.
    """
    breadcrumbs = []
    for parent in _iter_parents(path):
        parent_id = _id_from_path(parent)
        info = _find_best_info(mapping.get(parent) or [], alt, lang)
        if info is not None:
            title = info.get("title")
        else:
            title = parent_id or "(Index)"
        breadcrumbs.append({"id": parent_id, "path": parent, "title": title})
    return breadcrumbs
def _process_search_results(builder, cur, alt, lang, limit):
    """Turn raw search rows into result dicts with breadcrumb parents.

    Picks the best (alt, lang) variant per path, truncates to *limit*, then
    fetches any parent paths missing from the first query so breadcrumbs
    can be built for every result.
    """
    mapping = _mapping_from_cursor(cur)
    rv = []
    files_needed = set()
    for path, infos in mapping.items():
        info = _find_best_info(infos, alt, lang)
        if info is None:
            # No variant of this path matches the requested alt/lang.
            continue
        for parent in _iter_parents(path):
            if parent not in mapping:
                # Parent title must be looked up with a second query below.
                files_needed.add(parent)
        rv.append(info)
        if len(rv) == limit:
            break
    if files_needed:
        # Placeholders are generated per parent path; only "?" markers are
        # interpolated into the SQL, the values go through parameters.
        cur.execute(
            """
            select path, alt, lang, type, title
            from source_info
            where path in (%s)
            """
            % ", ".join(["?"] * len(files_needed)),
            list(files_needed),
        )
        mapping.update(_mapping_from_cursor(cur))
    for info in rv:
        info["parents"] = _build_parent_path(info["path"], mapping, alt, lang)
    return rv
def find_files(builder, query, alt=PRIMARY_ALT, lang=None, limit=50, types=None):
    """Search the build-state database for sources matching *query*.

    Matches on title or path substring, restricted to the given alt,
    language and record types (default: pages).  Returns a list of info
    dicts including breadcrumb parents; at most *limit* entries.
    """
    if types is None:
        types = ["page"]
    else:
        types = list(types)
    # English is always searched as a fallback language.
    languages = ["en"]
    if lang not in ("en", None):
        languages.append(lang)
    else:
        lang = "en"
    # The primary alt is always searched as a fallback as well.
    alts = [PRIMARY_ALT]
    if alt != PRIMARY_ALT:
        alts.append(alt)

    query = query.strip()
    title_like = "%" + query + "%"
    path_like = "/%" + query.rstrip("/") + "%"

    con = sqlite3.connect(builder.buildstate_database_filename, timeout=10)
    try:
        cur = con.cursor()
        # Only "?" placeholder lists are interpolated into the SQL text;
        # all user-provided values are bound as parameters.
        # Fetch twice the limit: _process_search_results may drop rows
        # that have no acceptable alt/lang variant.
        cur.execute(
            """
            select path, alt, lang, type, title
            from source_info
            where (title like ? or path like ?)
            and lang in (%s)
            and alt in (%s)
            and type in (%s)
            order by title
            collate nocase
            limit ?
            """
            % (
                ", ".join(["?"] * len(languages)),
                ", ".join(["?"] * len(alts)),
                ", ".join(["?"] * len(types)),
            ),
            [title_like, path_like] + languages + alts + types + [limit * 2],
        )
        return _process_search_results(builder, cur, alt, lang, limit)
    finally:
        con.close()
| bsd-3-clause | adbd1151c896dfee4765e128d2f0e0c7 | 24.436242 | 81 | 0.483641 | 3.7749 | false | false | false | false |
awesto/django-shop | shop/models/fields.py | 1 | 4239 | import enum
from django.conf import settings
from django.db import models
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
postgresql_engine_names = [
'django.db.backends.postgresql',
'django.db.backends.postgresql_psycopg2',
]
if settings.DATABASES['default']['ENGINE'] in postgresql_engine_names:
from django.contrib.postgres.fields import JSONField as _JSONField
else:
from jsonfield.fields import JSONField as _JSONField
class JSONField(_JSONField):
    """JSON field that always defaults to an empty dict.

    The forced default is stripped again in deconstruct() so migrations
    stay stable regardless of the underlying backend implementation.
    """

    def __init__(self, *args, **kwargs):
        # Force the default unconditionally; any caller-supplied default
        # is overridden, matching the original behavior.
        kwargs['default'] = dict
        super().__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # __init__ re-adds the default, so it must not appear in migrations.
        del kwargs['default']
        return name, path, args, kwargs
class ChoiceEnumMeta(enum.EnumMeta):
    """Metaclass for :class:`ChoiceEnum`.

    Adds two behaviors on top of ``enum.EnumMeta``: members may be looked
    up by *name* string when calling the enum, and a member may be defined
    as a ``(value, label)`` pair, with the label attached to the member.
    """

    def __call__(cls, value, *args, **kwargs):
        # Allow lookup by member name string in addition to by value.
        if isinstance(value, str):
            try:
                value = cls.__members__[value]
            except KeyError:
                pass  # let the super method complain
        return super().__call__(value, *args, **kwargs)

    def __new__(metacls, classname, bases, classdict):
        labels = {}
        for key in classdict._member_names:
            source_value = classdict[key]
            if isinstance(source_value, (list, tuple)):
                # Member declared as (value, label).
                try:
                    val, labels[key] = source_value
                except ValueError:
                    raise ValueError("Invalid ChoiceEnum member '{}'".format(key))
            else:
                # Member declared as a bare value; derive a label from the name.
                val = source_value
                labels[key] = key.replace("_", " ").title()
            # Use dict.__setitem__() to suppress defenses against
            # double assignment in enum's classdict
            dict.__setitem__(classdict, key, val)
        cls = super().__new__(metacls, classname, bases, classdict)
        # Attach the collected labels to the created members.
        for key, label in labels.items():
            getattr(cls, key).label = label
        return cls

    @property
    def choices(cls):
        # Shape expected by Django's `choices` field option.
        return [(k.value, k.label) for k in cls]

    @property
    def default(cls):
        # The first declared member, or None for an empty enum.
        try:
            return next(iter(cls))
        except StopIteration:
            return None
class ChoiceEnum(enum.Enum, metaclass=ChoiceEnumMeta):
    """
    Utility class to handle choices in Django model and/or form fields.
    Usage:

    class Color(ChoiceEnum):
        WHITE = 0, "White"
        RED = 1, "Red"
        GREEN = 2, "Green"
        BLUE = 3, "Blue"

    green = Color.GREEN

    color = forms.ChoiceField(
        choices=Color.choices,
        default=Color.default,
    )

    Members may also be declared as bare values, in which case the label
    is derived from the member name (see ChoiceEnumMeta).
    """
    def __str__(self):
        # Render as the human-readable label (lazy translations resolved).
        return force_str(self.label)
class ChoiceEnumField(models.PositiveSmallIntegerField):
    """Model field storing a :class:`ChoiceEnum` member as a small integer.

    Values are converted to/from the enum transparently; choices and the
    default are derived from the enum type.
    """
    description = _("Customer recognition state")

    def __init__(self, *args, **kwargs):
        self.enum_type = kwargs.pop('enum_type', ChoiceEnum)  # fallback is required form migrations
        if not issubclass(self.enum_type, ChoiceEnum):
            raise ValueError("enum_type must be a subclass of `ChoiceEnum`.")
        kwargs.update(choices=self.enum_type.choices)
        kwargs.setdefault('default', self.enum_type.default)
        super().__init__(*args, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super().deconstruct()
        # `choices` and the enum default are recreated in __init__, so they
        # must not be serialized into migrations.
        if 'choices' in kwargs:
            del kwargs['choices']
        if kwargs['default'] is self.enum_type.default:
            del kwargs['default']
        elif isinstance(kwargs['default'], self.enum_type):
            kwargs['default'] = kwargs['default'].value
        return name, path, args, kwargs

    def from_db_value(self, value, expression, connection):
        # Unknown integers are passed through unchanged rather than failing.
        try:
            return self.enum_type(value)
        except ValueError:
            return value

    def get_prep_value(self, state):
        if isinstance(state, self.enum_type):
            return state.value
        return state

    def to_python(self, state):
        return self.enum_type(state)

    def value_to_string(self, obj):
        # Serialize as the member *name* (used by dumpdata/fixtures).
        value = getattr(obj, self.name, obj)
        if not isinstance(value, self.enum_type):
            raise ValueError("Value must be of type {}".format(self.enum_type))
        return value.name
awesto/django-shop | shop/migrations/0010_auto_20191224_0727.py | 2 | 1248 | # Generated by Django 2.2.9 on 2019-12-24 07:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.9).

    Re-declares three foreign keys with explicit ``on_delete`` behavior and
    ``limit_choices_to`` constraints; no data is migrated.
    """

    dependencies = [
        ('shop', '0009_delete_email'),
    ]

    operations = [
        # Restrict selectable mail templates to those without a language set.
        migrations.AlterField(
            model_name='notification',
            name='mail_template',
            field=models.ForeignKey(limit_choices_to=models.Q(('language__isnull', True), ('language', ''), _connector='OR'), on_delete=django.db.models.deletion.CASCADE, to='post_office.EmailTemplate', verbose_name='Template'),
        ),
        # Notification recipients must be staff users.
        migrations.AlterField(
            model_name='notification',
            name='recipient',
            field=models.ForeignKey(limit_choices_to={'is_staff': True}, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Recipient'),
        ),
        # Attachments survive deletion of the underlying filer file (SET_NULL).
        migrations.AlterField(
            model_name='notificationattachment',
            name='attachment',
            field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='email_attachment', to='filer.File'),
        ),
    ]
| bsd-3-clause | eca77163b5f58759b3e841b04259b648 | 39.258065 | 228 | 0.652244 | 4.025806 | false | false | false | false |
lektor/lektor | lektor/types/flow.py | 1 | 7145 | import re
from jinja2 import is_undefined
from jinja2 import TemplateNotFound
from markupsafe import Markup
from lektor.constants import PRIMARY_ALT
from lektor.context import get_ctx
from lektor.metaformat import tokenize
from lektor.types.base import Type
# Matches a flow-block header line of the form "#### name ####".
_block_re = re.compile(r"^####\s*([^#]*?)\s*####\s*$")
# Matches an escaped header line ("##### ... #####") so it can be unescaped
# back to four hashes inside block bodies.
_line_unescape_re = re.compile(r"^#####(.*?)#####(\s*)$")
def discover_relevant_flowblock_models(flow, pad, record, alt):
    """Returns a dictionary of all relevant flow blocks.  If no list of
    flow block names is provided all flow blocks are returned.  Otherwise
    only flow blocks that are in the list or are children of flowblocks
    in the list are returned.
    """
    flow_blocks = flow.flow_blocks
    all_blocks = pad.db.flowblocks
    if flow_blocks is None:
        return dict((k, v.to_json(pad, record, alt)) for k, v in all_blocks.items())

    # Breadth-first expansion: start with the explicitly listed blocks and
    # pull in every block reachable through nested FlowType fields.
    wanted_blocks = set()
    to_process = flow_blocks[:]
    while to_process:
        block_name = to_process.pop()
        flowblock = all_blocks.get(block_name)
        if block_name in wanted_blocks or flowblock is None:
            # Already seen, or unknown block name — skip.
            continue
        wanted_blocks.add(block_name)
        for field in flowblock.fields:
            if isinstance(field.type, FlowType):
                if field.type.flow_blocks is None:
                    # An unbounded nested flow would force discovering every
                    # block, so it must be restricted explicitly.
                    raise RuntimeError(
                        "Nested flow-blocks require explicit "
                        "list of involved blocks."
                    )
                to_process.extend(field.type.flow_blocks)

    rv = {}
    for block_name in wanted_blocks:
        rv[block_name] = all_blocks[block_name].to_json(pad, record, alt)
    return rv
class BadFlowBlock(Exception):
    """Raised when raw flow data is malformed (no block header found)."""
    pass
class FlowBlock:
    """Represents a flowblock for the template.

    Wraps the raw field data of one block, lazily binding descriptor
    values to the owning record and tracking template dependencies.
    """

    def __init__(self, data, pad, record):
        self._data = data
        # Cache for values already bound via a descriptor's __get__.
        self._bound_data = {}
        self.pad = pad
        self.record = record

    @property
    def flowblockmodel(self):
        """The flowblock model that created this flow block."""
        return self.pad.db.flowblocks[self._data["_flowblock"]]

    def __contains__(self, name):
        return name in self._data and not is_undefined(self._data[name])

    def __getitem__(self, name):
        # If any data of a flowblock is accessed, we record that we need
        # this dependency.
        ctx = get_ctx()
        if ctx is not None:
            ctx.record_dependency(self.flowblockmodel.filename)
        rv = self._bound_data.get(name, Ellipsis)
        if rv is not Ellipsis:
            return rv
        rv = self._data[name]
        if hasattr(rv, "__get__"):
            # Descriptor values (e.g. nested flows) are bound to the record.
            rv = rv.__get__(self.record)
        self._bound_data[name] = rv
        return rv

    def __html__(self):
        ctx = get_ctx()
        # If we're in a nested render, we disable the rendering here or we
        # risk a recursion error.
        if ctx is None or self in ctx.flow_block_render_stack:
            return Markup.escape(repr(self))
        ctx.flow_block_render_stack.append(self)
        try:
            try:
                # Try the block-specific template first, then the generic one.
                return self.pad.db.env.render_template(
                    [
                        "blocks/%s.html" % self._data["_flowblock"],
                        "blocks/default.html",
                    ],
                    pad=self.pad,
                    this=self,
                    alt=self.record.alt,
                    values={"record": self.record},
                )
            except TemplateNotFound:
                return Markup("[could not find snippet template]")
        finally:
            ctx.flow_block_render_stack.pop()

    def __repr__(self):
        return "<%s %r>" % (
            self.__class__.__name__,
            self["_flowblock"],
        )
class Flow:
    """An ordered collection of FlowBlocks belonging to one record."""

    def __init__(self, blocks, record):
        self.blocks = blocks
        self.record = record

    def __html__(self):
        # Render each block and join them with blank lines between.
        rendered = (block.__html__() for block in self.blocks)
        return Markup("\n\n".join(rendered))

    def __bool__(self):
        return bool(self.blocks)

    # Python 2 spelling kept for backwards compatibility.
    __nonzero__ = __bool__

    def __repr__(self):
        return "<%s %r>" % (
            self.__class__.__name__,
            self.blocks,
        )
class FlowDescriptor:
    """Descriptor that materializes raw block data into a Flow on access."""

    def __init__(self, blocks, pad):
        self._blocks = blocks
        self._pad = pad

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        bound_blocks = [
            FlowBlock(block_data, self._pad, obj) for block_data in self._blocks
        ]
        return Flow(bound_blocks, obj)
def process_flowblock_data(raw_value):
    """Split raw flow field text into ``(block_name, lines)`` tuples.

    A block starts at a "#### name ####" header line; subsequent lines
    belong to that block, with escaped "#####...#####" lines unescaped.
    Raises BadFlowBlock for content before the first header.
    """
    # Same patterns as the module-level _block_re/_line_unescape_re,
    # compiled locally so the function is self-contained (re caches them).
    header_re = re.compile(r"^####\s*([^#]*?)\s*####\s*$")
    unescape_re = re.compile(r"^#####(.*?)#####(\s*)$")

    blocks = []
    current_name = None
    current_lines = []
    for line in raw_value.splitlines(True):
        # Until we found the first block, we ignore leading whitespace.
        if current_name is None and not line.strip():
            continue
        header = header_re.match(line)
        if header is not None:
            # A new block starts; flush the previous one.
            if current_name is not None:
                blocks.append((current_name, current_lines))
                current_lines = []
            current_name = header.group(1)
            continue
        if current_name is None:
            raise BadFlowBlock("Did not find beginning of flow block")
        current_lines.append(unescape_re.sub("####\\1####\\2", line))
    if current_name is not None:
        blocks.append((current_name, current_lines))
    return blocks
class FlowType(Type):
    """Field type holding a sequence of flow blocks.

    The optional ``flow_blocks`` option restricts which block models may
    appear in this field (comma-separated list in the model definition).
    """
    widget = "flow"

    def __init__(self, env, options):
        Type.__init__(self, env, options)
        # Parse the comma-separated restriction list; None means "any block".
        self.flow_blocks = [
            x.strip() for x in options.get("flow_blocks", "").split(",") if x.strip()
        ] or None

    def value_from_raw(self, raw):
        """Parse raw text into a FlowDescriptor, or a missing/bad value."""
        if raw.value is None:
            return raw.missing_value("Missing flow")
        if raw.pad is None:
            return raw.missing_value(
                "Flow value was technically present "
                "but used in a place where it cannot "
                "be used."
            )
        db = raw.pad.db
        rv = []
        try:
            for block, block_lines in process_flowblock_data(raw.value):
                # Unknown flow blocks are skipped for the moment
                if self.flow_blocks is not None and block not in self.flow_blocks:
                    continue
                flowblock = db.flowblocks.get(block)
                if flowblock is None:
                    continue

                # Parse the block body's key/value metaformat sections.
                d = {}
                for key, lines in tokenize(block_lines):
                    d[key] = "".join(lines)
                rv.append(flowblock.process_raw_data(d, pad=raw.pad))
        except BadFlowBlock as e:
            return raw.bad_value(str(e))
        return FlowDescriptor(rv, raw.pad)

    def to_json(self, pad, record=None, alt=PRIMARY_ALT):
        """Extend the base JSON with block models and their display order."""
        rv = Type.to_json(self, pad, record, alt)
        rv["flowblocks"] = discover_relevant_flowblock_models(self, pad, record, alt)

        block_order = self.flow_blocks
        if block_order is None:
            # No restriction: order all known blocks by their model order.
            block_order = [
                k
                for k, v in sorted(pad.db.flowblocks.items(), key=lambda x: x[1].order)
            ]
        rv["flowblock_order"] = block_order

        return rv
| bsd-3-clause | 14ccb183f2375f2f631dcf18e8d73cf2 | 29.021008 | 87 | 0.544437 | 4.061967 | false | false | false | false |
awesto/django-shop | shop/views/checkout.py | 1 | 7082 | from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.utils.module_loading import import_string
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from cms.plugin_pool import plugin_pool
from shop import messages
from shop.conf import app_settings
from shop.exceptions import ProductNotAvailable
from shop.models.cart import CartModel
from shop.serializers.checkout import CheckoutSerializer
from shop.serializers.cart import CartSerializer
from shop.modifiers.pool import cart_modifiers_pool
class CheckoutViewSet(GenericViewSet):
    """
    View for our REST endpoint to communicate with the various forms used during the checkout.

    Exposes three extra actions: ``upload`` (persist dialog form data into
    the cart), ``digest`` (read-only summaries) and ``purchase`` (convert
    the cart into a payment request).
    """
    serializer_label = 'checkout'
    serializer_class = CheckoutSerializer
    cart_serializer_class = CartSerializer

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Collect all dialog form classes configured in the settings …
        self.dialog_forms = set([import_string(fc) for fc in app_settings.SHOP_DIALOG_FORMS])
        try:
            from shop.cascade.plugin_base import DialogFormPluginBase
        except ImproperlyConfigured:
            # cmsplugins_cascade has not been installed
            pass
        else:
            # gather form classes from Cascade plugins for our checkout views
            for p in plugin_pool.get_all_plugins():
                if issubclass(p, DialogFormPluginBase):
                    if hasattr(p, 'form_classes'):
                        self.dialog_forms.update([import_string(fc) for fc in p.form_classes])
                    if hasattr(p, 'form_class'):
                        self.dialog_forms.add(import_string(p.form_class))

    @action(methods=['put'], detail=False, url_path='upload')
    def upload(self, request):
        """
        Use this REST endpoint to upload the payload of all forms used to setup the checkout
        dialogs. This method takes care to dispatch the uploaded payload to each corresponding
        form.

        Returns the per-form response data on success, or a 422 with the
        per-form error dicts when any form fails validation.
        """
        # sort posted form data by plugin order
        cart = CartModel.objects.get_from_request(request)
        dialog_data = []
        for form_class in self.dialog_forms:
            if form_class.scope_prefix in request.data:
                if 'plugin_order' in request.data[form_class.scope_prefix]:
                    dialog_data.append((form_class, request.data[form_class.scope_prefix]))
                else:
                    # Multiple instances of the same form: one entry each.
                    for data in request.data[form_class.scope_prefix].values():
                        dialog_data.append((form_class, data))
        dialog_data = sorted(dialog_data, key=lambda tpl: int(tpl[1]['plugin_order']))

        # save data, get text representation and collect potential errors
        errors, response_data, set_is_valid = {}, {}, True
        with transaction.atomic():
            for form_class, data in dialog_data:
                form = form_class.form_factory(request, data, cart)
                if form.is_valid():
                    # empty error dict forces revalidation by the client side validation
                    errors[form_class.form_name] = {}
                else:
                    errors[form_class.form_name] = form.errors
                    set_is_valid = False

                # by updating the response data, we can override the form's content
                update_data = form.get_response_data()
                if isinstance(update_data, dict):
                    response_data[form_class.form_name] = update_data

            # persist changes in cart
            if set_is_valid:
                cart.save()

        # add possible form errors for giving feedback to the customer
        if set_is_valid:
            return Response(response_data)
        else:
            return Response(errors, status=status.HTTP_422_UNPROCESSABLE_ENTITY)

    @action(methods=['get'], detail=False, url_path='digest')
    def digest(self, request):
        """
        Returns the summaries of the cart and various checkout forms to be rendered in non-editable fields.
        """
        cart = CartModel.objects.get_from_request(request)
        cart.update(request)
        context = self.get_serializer_context()
        checkout_serializer = self.serializer_class(cart, context=context, label=self.serializer_label)
        cart_serializer = self.cart_serializer_class(cart, context=context, label='cart')
        response_data = {
            'checkout_digest': checkout_serializer.data,
            'cart_summary': cart_serializer.data,
        }
        return Response(data=response_data)

    @action(methods=['post'], detail=False, url_path='purchase')
    def purchase(self, request):
        """
        This is the final step on converting a cart into an order object. It normally is used in
        combination with the plugin :class:`shop.cascade.checkout.ProceedButtonPlugin` to render
        a button labeled "Purchase Now".
        """
        cart = CartModel.objects.get_from_request(request)
        try:
            cart.update(request, raise_exception=True)
        except ProductNotAvailable as exc:
            # A product vanished between adding it to the cart and checkout.
            message = _("The product '{product_name}' ({product_code}) suddenly became unavailable, "\
                        "presumably because someone else has been faster purchasing it.\n Please "\
                        "recheck the cart or add an alternative product and proceed with the checkout.").\
                format(product_name=exc.product.product_name, product_code=exc.product.product_code)
            messages.error(request, message, title=_("Product Disappeared"), delay=10)
            message = _("The product '{product_name}' ({product_code}) suddenly became unavailable.").\
                format(product_name=exc.product.product_name, product_code=exc.product.product_code)
            response_data = {'purchasing_error_message': message}
            return Response(data=response_data, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        cart.save()

        response_data = {}
        try:
            # Iterate over the registered modifiers, and search for the active payment service provider
            for modifier in cart_modifiers_pool.get_payment_modifiers():
                if modifier.is_active(cart.extra.get('payment_modifier')):
                    expression = modifier.payment_provider.get_payment_request(cart, request)
                    response_data.update(expression=expression)
                    break
        except ValidationError as err:
            message = _("Please select a valid payment method.")
            messages.warning(request, message, title=_("Choose Payment Method"), delay=5)
            response_data = {'purchasing_error_message': '. '.join(err.detail)}
            return Response(data=response_data, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
        return Response(data=response_data)
| bsd-3-clause | 99b5fdc0cd2a81937cefca232b8272e7 | 48.873239 | 107 | 0.639791 | 4.507957 | false | false | false | false |
awesto/django-shop | shop/admin/product.py | 1 | 5823 | from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.contrib import admin
from django.contrib.sites.models import Site
from django.utils.translation import gettext_lazy as _
try:
from django_elasticsearch_dsl.registries import registry as elasticsearch_registry
except ImportError:
elasticsearch_registry = type('DocumentRegistry', (), {'get_documents': lambda *args: []})()
from adminsortable2.admin import SortableInlineAdminMixin
from cms.models import Page
from shop.models.related import ProductPageModel, ProductImageModel
class ProductImageInline(SortableInlineAdminMixin, admin.StackedInline):
    """Sortable stacked inline for product images, ordered by `order`."""
    model = ProductImageModel
    extra = 1
    ordering = ['order']
def _find_catalog_list_apphook():
    """Return the name of the first registered CatalogListCMSApp apphook.

    Raises ImproperlyConfigured when no such apphook is registered.
    """
    from shop.cms_apphooks import CatalogListCMSApp
    from cms.apphook_pool import apphook_pool

    for name, app in apphook_pool.apps.items():
        if isinstance(app, CatalogListCMSApp):
            return name
    raise ImproperlyConfigured("You must register a CMS apphook of type `CatalogListCMSApp`.")
class CategoryModelMultipleChoiceField(forms.ModelMultipleChoiceField):
    """Page multi-select whose labels include the site name on multi-site setups."""

    def label_from_instance(self, obj):
        if Site.objects.count() >=2 :
            # NOTE(review): `.first()` may return None when no Site matches
            # obj.node_id, which would raise AttributeError on `.name` —
            # confirm every page node is always tied to a Site.
            page_sitename=str(Site.objects.filter(djangocms_nodes=obj.node_id).first().name)
            return '{} | {}'.format(str(obj), page_sitename)
        else:
            return str(obj)
class CMSPageAsCategoryMixin:
    """
    Add this mixin class to the ModelAdmin class for products wishing to be assigned to djangoCMS
    pages when used as categories.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not hasattr(self.model, 'cms_pages'):
            raise ImproperlyConfigured("Product model requires a field named `cms_pages`")

    def get_fieldsets(self, request, obj=None):
        # Append a dedicated "Categories" fieldset holding the page selector.
        fieldsets = list(super().get_fieldsets(request, obj=obj))
        fieldsets.append((_("Categories"), {'fields': ('cms_pages',)}),)
        return fieldsets

    def get_fields(self, request, obj=None):
        # In ``get_fieldsets()``, ``cms_pages`` is added, so remove it from ``fields`` to
        # avoid showing it twice.
        fields = list(super().get_fields(request, obj))
        try:
            fields.remove('cms_pages')
        except ValueError:
            pass
        return fields

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        if db_field.name == 'cms_pages':
            # restrict many-to-many field for cms_pages to ProductApp only
            limit_choices_to = {
                'publisher_is_draft': False,
                'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook()),
            }
            queryset = Page.objects.filter(**limit_choices_to)
            widget = admin.widgets.FilteredSelectMultiple(_("CMS Pages"), False)
            required = not db_field.blank
            field = CategoryModelMultipleChoiceField(queryset=queryset, widget=widget, required=required)
            return field
        return super().formfield_for_manytomany(db_field, request, **kwargs)

    def save_related(self, request, form, formsets, change):
        # Manually sync the through model: drop unselected pages, add new ones.
        old_cms_pages = form.instance.cms_pages.all()
        new_cms_pages = form.cleaned_data.pop('cms_pages')

        # remove old
        for page in old_cms_pages:
            if page not in new_cms_pages:
                for pp in ProductPageModel.objects.filter(product=form.instance, page=page):
                    pp.delete()

        # add new
        for page in new_cms_pages:
            if page not in old_cms_pages:
                ProductPageModel.objects.create(product=form.instance, page=page)
        return super().save_related(request, form, formsets, change)
class SearchProductIndexMixin:
    """
    Mixin for the ``ModelAdmin`` of a product model whose full text search
    index is maintained through Elasticsearch; it keeps that index in sync
    whenever a product is saved or deleted through the admin backend.
    """
    def save_model(self, request, product, form, change):
        super().save_model(request, product, form, change)
        if not change:
            return
        # only updates of existing products trigger a re-index here
        product.update_search_index()

    def delete_model(self, request, product):
        # deactivate the product before re-indexing, then delete it
        product.active = False
        product.update_search_index()
        super().delete_model(request, product)
class InvalidateProductCacheMixin:
    """
    Mixin for the ``ModelAdmin`` of a product model whose rendered HTML
    snippets are cached in Redis; it drops the cached snippets whenever the
    product is modified or removed through the admin backend.
    """
    def delete_model(self, request, product):
        product.invalidate_cache()
        super().delete_model(request, product)

    def save_model(self, request, product, form, change):
        if change:
            # an edited product renders differently, so its cached snippet is stale
            product.invalidate_cache()
        return super().save_model(request, product, form, change)
class UnitPriceMixin:
    """
    Mixin for ``ModelAdmin`` classes which appends a read-only column showing
    the product's unit price to the change list.
    """
    def get_list_display(self, request):
        # extend whatever columns the other admin classes already provide
        return [*super().get_list_display(request), 'get_unit_price']

    def get_unit_price(self, obj):
        return str(obj.unit_price)
    get_unit_price.short_description = _("Unit Price")
class CMSPageFilter(admin.SimpleListFilter):
    """
    Admin list filter narrowing the product change list down to products
    assigned to a given CMS page acting as category.
    """
    title = _("Category")
    parameter_name = 'category'

    def lookups(self, request, model_admin):
        # offer all published pages belonging to the catalog list apphook
        limit_choices_to = {
            'publisher_is_draft': False,
            'application_urls': getattr(self, 'limit_to_cmsapp', _find_catalog_list_apphook())
        }
        queryset = Page.objects.filter(**limit_choices_to)
        return [(page.id, page.get_title()) for page in queryset]

    def queryset(self, request, queryset):
        # returning None (no value selected) leaves the queryset unfiltered
        if self.value():
            return queryset.filter(cms_pages__id=self.value())
| bsd-3-clause | a9bc6766c33f2e5b355f9a312a4bc175 | 35.85443 | 105 | 0.65413 | 4.112288 | false | false | false | false |
justquick/django-activity-stream | actstream/templatetags/activity_tags.py | 3 | 8314 | from django.contrib.contenttypes.models import ContentType
from django.template import Variable, Library, Node, TemplateSyntaxError
from django.template.loader import render_to_string
from django.urls import reverse
from actstream.models import Follow, Action
register = Library()
class DisplayActivityFollowUrl(Node):
    """
    Template node rendering the follow/unfollow URL for an actor instance.

    If the current user already follows the actor (for the given flag), the
    unfollow URL is returned; otherwise the follow URL (or the follow-all URL
    when ``actor_only`` is False).
    """
    def __init__(self, actor, actor_only=True, flag=''):
        self.actor = Variable(actor)
        self.actor_only = actor_only
        self.flag = flag

    def render(self, context):
        actor_instance = self.actor.resolve(context)
        content_type = ContentType.objects.get_for_model(actor_instance).pk
        kwargs = {
            'content_type_id': content_type,
            'object_id': actor_instance.pk
        }
        # the flag is only part of the URL when one was given
        if self.flag:
            kwargs['flag'] = self.flag
        if Follow.objects.is_following(context.get('user'), actor_instance, flag=self.flag):
            return reverse('actstream_unfollow', kwargs=kwargs)
        if self.actor_only:
            return reverse('actstream_follow', kwargs=kwargs)
        return reverse('actstream_follow_all', kwargs=kwargs)
class DisplayActivityActorUrl(Node):
    """Template node rendering the URL of the activity stream of one actor."""

    def __init__(self, actor):
        self.actor = Variable(actor)

    def render(self, context):
        instance = self.actor.resolve(context)
        ctype_pk = ContentType.objects.get_for_model(instance).pk
        url_kwargs = {'content_type_id': ctype_pk, 'object_id': instance.pk}
        return reverse('actstream_actor', kwargs=url_kwargs)
class AsNode(Node):
    """
    Base template Node class for template tags that takes a predefined number
    of arguments, ending in an optional 'as var' section.
    """
    args_count = 1  # subclasses override this with their fixed argument count

    @classmethod
    def handle_token(cls, parser, token):
        """
        Class method to parse and return a Node.

        Raises ``TemplateSyntaxError`` when the number of arguments does not
        match ``cls.args_count`` (excluding the optional trailing "as var").
        """
        tag_error = "Accepted formats {%% %(tagname)s %(args)s %%} or " \
                    "{%% %(tagname)s %(args)s as [var] %%}"
        bits = token.split_contents()
        args_count = len(bits) - 1
        if args_count >= 2 and bits[-2] == 'as':
            as_var = bits[-1]
            args_count -= 2
        else:
            as_var = None
        if args_count != cls.args_count:
            # BUGFIX: the old code joined a single repeated string
            # (' '.join(['[arg]' * n])), producing "[arg][arg]" instead of
            # "[arg] [arg]" in the error message; repeat the list instead.
            arg_list = ' '.join(['[arg]'] * cls.args_count)
            raise TemplateSyntaxError(tag_error % {'tagname': bits[0],
                                                   'args': arg_list})
        args = [parser.compile_filter(tkn)
                for tkn in bits[1:args_count + 1]]
        return cls(args, varname=as_var)

    def __init__(self, args, varname=None):
        self.args = args
        self.varname = varname

    def render(self, context):
        # when "as var" was given, store the result and render nothing
        result = self.render_result(context)
        if self.varname is not None:
            context[self.varname] = result
            return ''
        return result

    def render_result(self, context):
        raise NotImplementedError("Must be implemented by a subclass")
class DisplayAction(AsNode):
    """
    Node rendering a single action through a verb-specific template, falling
    back to the generic ``actstream/action.html``.
    """
    def render_result(self, context):
        # expose the action to the rendered template as ``action``
        action_instance = context['action'] = self.args[0].resolve(context)
        templates = [
            # e.g. verb "started following" -> actstream/started_following/action.html
            'actstream/%s/action.html' % action_instance.verb.replace(' ', '_'),
            'actstream/action.html',
        ]
        return render_to_string(templates, context.flatten())
def display_action(parser, token):
    """
    Renders the template for the action description

    ::

        {% display_action action %}
    """
    # parsing (including the optional "as var" suffix) is shared via AsNode
    return DisplayAction.handle_token(parser, token)
def is_following(user, actor):
    """
    Returns true if the given user is following the actor

    ::

        {% if request.user|is_following:another_user %}
            You are already following {{ another_user }}
        {% endif %}
    """
    # template filter variant without flag support; see ``is_following_tag``
    return Follow.objects.is_following(user, actor)
class IsFollowing(AsNode):
    """
    Node resolving whether ``user`` follows ``actor`` with a given flag.
    """
    args_count = 3  # user, actor, flag

    def render_result(self, context):
        user = self.args[0].resolve(context)
        actor = self.args[1].resolve(context)
        flag = self.args[2].resolve(context)
        return Follow.objects.is_following(user, actor, flag=flag)
def is_following_tag(parser, token):
    """
    Returns true if the given user is following the actor marked by a flag, such as 'liking', 'watching' etc..
    You can also save the returned value to a template variable by as syntax.
    If you don't want to specify a flag, pass an empty string or use `is_following` template filter.

    ::

        {% is_following user group "liking" %}
        {% is_following user group "liking" as is_liking %}
        {% is_following user group "" as is_following %}
    """
    # parsing (three arguments plus optional "as var") is shared via AsNode
    return IsFollowing.handle_token(parser, token)
def follow_url(parser, token):
    """
    Renders the URL of the follow view for a particular actor instance

    ::

        <a href="{% follow_url other_user %}">
            {% if request.user|is_following:other_user %}
                stop following
            {% else %}
                follow
            {% endif %}
        </a>

        <a href="{% follow_url other_user 'watching' %}">
            {% is_following user group "watching" as is_watching %}
            {% if is_watching %}
                stop watching
            {% else %}
                watch
            {% endif %}
        </a>
    """
    bits = token.split_contents()
    if len(bits) > 3:
        raise TemplateSyntaxError("Accepted format {% follow_url [instance] %} or {% follow_url [instance] [flag] %}")
    if len(bits) == 2:
        return DisplayActivityFollowUrl(bits[1])
    # three tokens: the second argument is a quoted flag; strip the quotes
    return DisplayActivityFollowUrl(bits[1], flag=bits[2][1:-1])
def follow_all_url(parser, token):
    """
    Renders the URL to follow an object as both actor and target

    ::

        <a href="{% follow_all_url other_user %}">
            {% if request.user|is_following:other_user %}
                stop following
            {% else %}
                follow
            {% endif %}
        </a>

        <a href="{% follow_all_url other_user 'watching' %}">
            {% is_following user group "watching" as is_watching %}
            {% if is_watching %}
                stop watching
            {% else %}
                watch
            {% endif %}
        </a>
    """
    bits = token.split_contents()
    if len(bits) > 3:
        # BUGFIX: the message previously referenced ``follow_url`` for the
        # flagged variant of this tag
        raise TemplateSyntaxError(
            "Accepted format {% follow_all_url [instance] %} or {% follow_all_url [instance] [flag] %}"
        )
    elif len(bits) == 2:
        return DisplayActivityFollowUrl(bits[1], actor_only=False)
    else:
        # the second argument is a quoted flag; strip the quotes
        flag = bits[2][1:-1]
        return DisplayActivityFollowUrl(bits[1], actor_only=False, flag=flag)
def actor_url(parser, token):
    """
    Renders the URL for a particular actor instance

    ::

        <a href="{% actor_url request.user %}">View your actions</a>
        <a href="{% actor_url another_user %}">{{ another_user }}'s actions</a>
    """
    bits = token.split_contents()
    if len(bits) == 2:
        return DisplayActivityActorUrl(bits[1])
    raise TemplateSyntaxError("Accepted format "
                              "{% actor_url [actor_instance] %}")
def activity_stream(context, stream_type, *args, **kwargs):
    """
    Renders an activity stream as a list into the template's context.
    Streams loaded by stream_type can be the default ones (eg user, actor, etc.) or a user defined stream.
    Extra args/kwargs are passed into the stream call.

    ::

        {% activity_stream 'actor' user %}
        {% for action in stream %}
            {% display_action action %}
        {% endfor %}
    """
    if stream_type == 'model':
        # 'model' is an alias; the actual manager method is ``model_actions``
        stream_type = 'model_actions'
    if not hasattr(Action.objects, stream_type):
        raise TemplateSyntaxError('Action manager has no attribute: %s' % stream_type)
    # the 'as' keyword argument selects the context variable, default 'stream'
    ctxvar = kwargs.pop('as', 'stream')
    context[ctxvar] = getattr(Action.objects, stream_type)(*args, **kwargs)
    return ''
# Register filters and tags with the template library. Note that
# ``activity_stream`` is exposed both as a filter and (with context access)
# as a simple tag; ``is_following`` exists as a filter and as a flagged tag.
register.filter(activity_stream)
register.filter(is_following)
register.tag(name='is_following', compile_function=is_following_tag)
register.tag(display_action)
register.tag(follow_url)
register.tag(follow_all_url)
register.tag(actor_url)
register.simple_tag(takes_context=True)(activity_stream)
| bsd-3-clause | 49619c014a9059ce232c4c3c5a6c9efa | 29.907063 | 118 | 0.589488 | 4.010613 | false | false | false | false |
awesto/django-shop | shop/cascade/processbar.py | 1 | 3541 | from django.forms import fields, widgets
from django.utils.translation import gettext_lazy as _, ngettext_lazy
from django.utils.text import Truncator
from django.utils.html import format_html
from django.forms.fields import IntegerField
from django.template.loader import select_template
from entangled.forms import EntangledModelFormMixin
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.widgets import NumberInputWidget
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from shop.conf import app_settings
from shop.cascade.plugin_base import ShopPluginBase
class ProcessBarFormMixin(ManageChildrenFormMixin, EntangledModelFormMixin):
    """
    Editor form for the process bar plugin, asking for the number of steps.
    """
    num_children = IntegerField(
        min_value=1,
        initial=1,
        widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em;'}),
        label=_("Steps"),
        help_text=_("Number of steps for this proceed bar."))

    class Meta:
        # stored as a plain model field, not inside the glossary
        untangled_fields = ['num_children']
class ProcessBarPlugin(TransparentWrapper, ShopPluginBase):
    """
    Cascade plugin rendering a horizontal bar of checkout steps; each child
    is a ``ProcessStepPlugin`` holding the content of one step.
    """
    name = _("Process Bar")
    form = ProcessBarFormMixin
    parent_classes = ('BootstrapColumnPlugin',)
    direct_child_classes = ('ProcessStepPlugin',)
    require_parent = True
    allow_children = True

    @classmethod
    def get_identifier(cls, instance):
        # e.g. "with 3 pages", appended to the default identifier
        identifier = super().get_identifier(instance)
        num_cols = instance.get_children().count()
        content = ngettext_lazy('with {} page', 'with {} pages', num_cols).format(num_cols)
        return format_html('{0}{1}', identifier, content)

    def get_render_template(self, context, instance, placeholder):
        # allow the project to override the shipped template
        template_names = [
            '{}/checkout/process-bar.html'.format(app_settings.APP_LABEL),
            'shop/checkout/process-bar.html',
        ]
        return select_template(template_names)

    def render(self, context, instance, placeholder):
        self.super(ProcessBarPlugin, self).render(context, instance, placeholder)
        num_children = instance.get_num_children()
        if num_children > 0:
            # distribute the available width evenly across the steps
            context['step_css_width'] = '{:3.2f}%'.format(100. / num_children)
        return context

    def save_model(self, request, obj, form, change):
        # create/remove step children to match the requested number of steps
        wanted_children = int(form.cleaned_data.get('num_children'))
        super().save_model(request, obj, form, change)
        self.extend_children(obj, wanted_children, ProcessStepPlugin)

plugin_pool.register_plugin(ProcessBarPlugin)
class ProcessStepFormMixin(EntangledModelFormMixin):
    """
    Editor form for one checkout step, asking for an optional step title.
    """
    step_title = fields.CharField(
        widget=widgets.TextInput(attrs={'size': 50}),
        label=_("Step Title"),
        required=False,
    )

    class Meta:
        # the title is stored inside the plugin's glossary JSON field
        entangled_fields = {'glossary': ['step_title']}
class ProcessStepPlugin(TransparentContainer, ShopPluginBase):
    """
    Child plugin of ``ProcessBarPlugin``, wrapping the content of one step.
    """
    name = _("Process Step")
    direct_parent_classes = parent_classes = ['ProcessBarPlugin']
    require_parent = True
    allow_children = True
    alien_child_classes = True
    form = ProcessStepFormMixin
    render_template = 'cascade/generic/wrapper.html'

    @classmethod
    def get_identifier(cls, obj):
        identifier = super().get_identifier(obj)
        content = obj.glossary.get('step_title', '')
        if content:
            # show a truncated step title in the structure tree
            content = Truncator(content).words(3, truncate=' ...')
        else:
            # untitled steps fall back to their position number
            content = obj.get_position_in_placeholder()
        return format_html('{0}{1}', identifier, content)

plugin_pool.register_plugin(ProcessStepPlugin)
| bsd-3-clause | 6c2f4cc4f167e6d2e3fefea73cdf867e | 36.273684 | 91 | 0.690765 | 4.042237 | false | false | false | false |
awesto/django-shop | shop/modifiers/pool.py | 1 | 2625 | from django.core.exceptions import ImproperlyConfigured
from shop.conf import app_settings
class CartModifiersPool:
    """
    Pool collecting all cart modifiers configured through the setting
    ``SHOP_CART_MODIFIERS``, giving access to them filtered by kind.
    """

    USE_CACHE = True  # set to False to rebuild the modifier list on each access

    def __init__(self):
        self._modifiers_list = []

    def get_all_modifiers(self):
        """
        Returns all registered modifiers of this shop instance.
        """
        if not self.USE_CACHE or not self._modifiers_list:
            self._modifiers_list = []
            for modifiers_class in app_settings.CART_MODIFIERS:
                if issubclass(modifiers_class, (list, tuple)):
                    # a nested entry groups several modifier classes together
                    self._modifiers_list.extend([mc() for mc in modifiers_class()])
                else:
                    self._modifiers_list.append(modifiers_class())

            # check for uniqueness of the modifier's `identifier` attribute
            ModifierException = ImproperlyConfigured("Each modifier requires a unique attribute 'identifier'.")
            try:
                identifiers = [m.identifier for m in self._modifiers_list]
            except AttributeError:
                raise ModifierException
            # set comparison replaces the former quadratic `identifiers.count()` loop
            if len(identifiers) != len(set(identifiers)):
                raise ModifierException
        return self._modifiers_list

    def get_shipping_modifiers(self):
        """
        Returns all registered shipping modifiers of this shop instance.
        """
        from shop.shipping.modifiers import ShippingModifier
        return [m for m in self.get_all_modifiers() if isinstance(m, ShippingModifier)]

    def get_payment_modifiers(self):
        """
        Returns all registered payment modifiers of this shop instance.
        """
        from shop.payment.modifiers import PaymentModifier
        return [m for m in self.get_all_modifiers() if isinstance(m, PaymentModifier)]

    def get_active_shipping_modifier(self, shipping_modifier):
        """
        Return the shipping modifier object for the given string.

        Implicitly returns None when no registered modifier matches.
        """
        from shop.shipping.modifiers import ShippingModifier
        for modifier in self.get_all_modifiers():
            if isinstance(modifier, ShippingModifier) and modifier.is_active(shipping_modifier):
                return modifier

    def get_active_payment_modifier(self, payment_modifier):
        """
        Return the payment modifier object for the given string.

        Implicitly returns None when no registered modifier matches.
        """
        from shop.payment.modifiers import PaymentModifier
        for modifier in self.get_all_modifiers():
            if isinstance(modifier, PaymentModifier) and modifier.is_active(payment_modifier):
                return modifier

# module level singleton used throughout the shop framework
cart_modifiers_pool = CartModifiersPool()
| bsd-3-clause | be8a556ddf5d8642d8741579e9c61e22 | 36.5 | 111 | 0.632 | 4.888268 | false | false | false | false |
awesto/django-shop | shop/serializers/checkout.py | 1 | 2307 | from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
from rest_framework import serializers
from shop.conf import app_settings
class SerializeFormAsTextField(serializers.SerializerMethodField):
    """
    Serializer method field which resolves a form class, configured through
    the settings directive ``SHOP_CASCADE_FORMS``, and delegates rendering to
    the parent serializer's ``get_<field_name>`` method.
    """
    def __init__(self, form_class_name, **kwargs):
        try:
            self.form_class = import_string(app_settings.SHOP_CASCADE_FORMS[form_class_name])
        except ImportError:
            msg = "Can not import Form class. Please check your settings directive SHOP_CASCADE_FORMS['{}']."
            raise ImproperlyConfigured(msg.format(form_class_name))
        super().__init__(**kwargs)

    def to_representation(self, value):
        method = getattr(self.parent, self.method_name)
        try:
            return method(self.form_class, value)
        except AttributeError:
            # best effort: render as null when the cart lacks the attribute
            # the form method accesses — presumably intentional; TODO confirm
            return
class CheckoutSerializer(serializers.Serializer):
    """
    Serializer to digest a summary of data required for the checkout.

    Each field renders one checkout form as plain text, using the form
    classes configured through ``SHOP_CASCADE_FORMS``; the ``get_*`` methods
    receive the resolved form class and the cart instance.
    """
    customer_tag = SerializeFormAsTextField('CustomerForm')
    shipping_address_tag = SerializeFormAsTextField('ShippingAddressForm')
    billing_address_tag = SerializeFormAsTextField('BillingAddressForm')
    shipping_method_tag = SerializeFormAsTextField('ShippingMethodForm')
    payment_method_tag = SerializeFormAsTextField('PaymentMethodForm')
    extra_annotation_tag = SerializeFormAsTextField('ExtraAnnotationForm')

    def get_customer_tag(self, form_class, cart):
        form = form_class(instance=cart.customer)
        return form.as_text()

    def get_shipping_address_tag(self, form_class, cart):
        form = form_class(instance=cart.shipping_address, cart=cart)
        return form.as_text()

    def get_billing_address_tag(self, form_class, cart):
        form = form_class(instance=cart.billing_address, cart=cart)
        return form.as_text()

    def get_shipping_method_tag(self, form_class, cart):
        form = form_class(initial=cart.extra, cart=cart)
        return form.as_text()

    def get_payment_method_tag(self, form_class, cart):
        form = form_class(initial=cart.extra, cart=cart)
        return form.as_text()

    def get_extra_annotation_tag(self, form_class, cart):
        form = form_class(initial=cart.extra, cart=cart)
        return form.as_text()
| bsd-3-clause | ba6ab378246a2e0852d572506a5e0e12 | 39.473684 | 109 | 0.695709 | 3.963918 | false | false | false | false |
awesto/django-shop | shop/cascade/checkout.py | 1 | 16437 | from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.forms import fields, widgets
from django.template import engines
from django.template.loader import select_template
from django.utils.html import format_html
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from cms.plugin_pool import plugin_pool
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cmsplugin_cascade.bootstrap4.buttons import ButtonFormMixin
from cmsplugin_cascade.strides import strides_plugin_map, strides_element_map, TextStridePlugin, TextStrideElement
from cmsplugin_cascade.icon.forms import IconFormMixin
from cmsplugin_cascade.link.config import LinkPluginBase, LinkFormMixin
from cmsplugin_cascade.link.plugin_base import LinkElementMixin
from cmsplugin_cascade.plugin_base import TransparentContainer
from cmsplugin_cascade.bootstrap4.buttons import BootstrapButtonMixin
from shop.cascade.plugin_base import ShopPluginBase, DialogFormPluginBase, DialogPluginBaseForm
from shop.conf import app_settings
from shop.models.cart import CartModel
from shop.modifiers.pool import cart_modifiers_pool
class ProceedButtonFormMixin(LinkFormMixin, IconFormMixin, ButtonFormMixin):
    """
    Editor form for the proceed button, extending the generic link form with
    checkout specific link targets.
    """
    require_icon = False

    # special, non-CMS link targets handled by ``ShopProceedButton``
    LINK_TYPE_CHOICES = [
        ('cmspage', _("CMS Page")),
        ('NEXT_STEP', _("Next Step")),
        ('RELOAD_PAGE', _("Reload Page")),
        ('PURCHASE_NOW', _("Purchase Now")),
        ('DO_NOTHING', _("Do nothing")),
    ]

    disable_invalid = fields.BooleanField(
        label=_("Disable if invalid"),
        required=False,
        help_text=_("Disable button if any form in this set is invalid."),
    )

    class Meta:
        entangled_fields = {'glossary': ['disable_invalid']}
class ShopProceedButton(BootstrapButtonMixin, LinkPluginBase):
    """
    Button used to proceed from one checkout step to the next one; the chosen
    link type selects which button template gets rendered.
    """
    name = _("Proceed Button")
    parent_classes = ['BootstrapColumnPlugin', 'ProcessStepPlugin', 'ValidateSetOfFormsPlugin']
    form = ProceedButtonFormMixin
    model_mixins = (LinkElementMixin,)
    ring_plugin = 'ProceedButtonPlugin'

    class Media:
        js = ['admin/js/jquery.init.js', 'shop/js/admin/proceedbuttonplugin.js']

    @classmethod
    def get_identifier(cls, instance):
        return mark_safe(instance.glossary.get('link_content', ''))

    def get_render_template(self, context, instance, placeholder):
        # dispatch table mapping the special link types onto their templates;
        # everything else (e.g. a CMS page link) uses the generic proceed button
        leaf_by_link = {
            'NEXT_STEP': 'next-step-button',
            'RELOAD_PAGE': 'reload-button',
            'PURCHASE_NOW': 'purchase-button',
            'DO_NOTHING': 'noop-button',
        }
        button_template = leaf_by_link.get(instance.link, 'proceed-button')
        return select_template([
            '{}/checkout/{}.html'.format(app_settings.APP_LABEL, button_template),
            'shop/checkout/{}.html'.format(button_template),
        ])

plugin_pool.register_plugin(ShopProceedButton)
class CustomerFormPluginBase(DialogFormPluginBase):
    """
    Base class for CustomerFormPlugin and GuestFormPlugin to share common methods.
    """
    template_leaf_name = 'customer-{}.html'
    cache = False  # rendered per request: the form is bound to the current customer

    def get_form_data(self, context, instance, placeholder):
        # bind the form to the customer attached to the current request
        form_data = self.super(CustomerFormPluginBase, self).get_form_data(context, instance, placeholder)
        form_data.update(instance=context['request'].customer)
        return form_data

    def get_render_template(self, context, instance, placeholder):
        # subclasses set 'error_message' when the customer type does not match
        if 'error_message' in context:
            return engines['django'].from_string('<p class="text-danger">{{ error_message }}</p>')
        return self.super(CustomerFormPluginBase, self).get_render_template(context, instance, placeholder)
class CustomerFormPlugin(CustomerFormPluginBase):
    """
    Provides the form to edit specific data stored in :class:`shop.model.customer.CustomerModel`,
    if customer declared himself as registered.
    """
    name = _("Customer Form")
    form_class = app_settings.SHOP_CASCADE_FORMS['CustomerForm']

    def render(self, context, instance, placeholder):
        # guests and anonymous visitors get an error text instead of the form
        if not context['request'].customer.is_registered:
            context['error_message'] = _("Only registered customers can access this form.")
            return context
        return self.super(CustomerFormPlugin, self).render(context, instance, placeholder)

DialogFormPluginBase.register_plugin(CustomerFormPlugin)
class GuestFormPlugin(CustomerFormPluginBase):
    """
    Provides the form to edit specific data stored in model `Customer`, if customer declared
    himself as guest.
    """
    name = _("Guest Form")
    form_class = app_settings.SHOP_CASCADE_FORMS['GuestForm']

    def render(self, context, instance, placeholder):
        # Formerly an ``assert``, which is stripped when Python runs with -O;
        # raise explicitly so this misconfiguration is always reported.
        if 'customer' not in context:
            raise ImproperlyConfigured("Please add 'shop.context_processors.customer' to your TEMPLATES 'context_processor' settings.")
        if not context['customer'].is_guest:
            # registered or anonymous customers get an error text instead
            context['error_message'] = _("Only guest customers can access this form.")
            return context
        return self.super(GuestFormPlugin, self).render(context, instance, placeholder)

DialogFormPluginBase.register_plugin(GuestFormPlugin)
class CheckoutAddressPluginForm(DialogPluginBaseForm):
    """
    Editor form selecting whether this plugin edits the shipping or the
    billing address, and which extra address features are offered.
    """
    ADDRESS_CHOICES = [
        ('shipping', _("Shipping")),
        ('billing', _("Billing")),
    ]

    address_form = fields.ChoiceField(
        choices=ADDRESS_CHOICES,
        widget=widgets.RadioSelect,
        label=_("Address Form"),
        initial=ADDRESS_CHOICES[0][0]
    )

    allow_multiple = fields.BooleanField(
        label=_("Multiple Addresses"),
        initial=False,
        required=False,
        help_text=_("Allow the customer to add and edit multiple addresses."),
    )

    allow_use_primary = fields.BooleanField(
        label=_("Use primary address"),
        initial=False,
        required=False,
        help_text=_("Allow the customer to use the primary address, if this is the secondary form."),
    )

    class Meta:
        entangled_fields = {'glossary': ['address_form', 'allow_multiple', 'allow_use_primary']}
class CheckoutAddressPlugin(DialogFormPluginBase):
    """
    Dialog plugin rendering either the shipping or the billing address form,
    depending on its ``address_form`` glossary setting.
    """
    name = _("Checkout Address Form")
    form = CheckoutAddressPluginForm
    # glossary_field_order = ['address_form', 'render_type', 'allow_multiple', 'allow_use_primary', 'headline_legend']
    form_classes = [app_settings.SHOP_CASCADE_FORMS['ShippingAddressForm'], app_settings.SHOP_CASCADE_FORMS['BillingAddressForm']]

    def get_form_class(self, instance):
        # index 0: shipping form, index 1: billing form
        if instance.glossary.get('address_form') == 'shipping':
            return import_string(self.form_classes[0])
        else:  # address_form == billing
            return import_string(self.form_classes[1])

    def get_address(self, cart, instance):
        """
        Return the address attached to the cart, falling back to another
        existing address of the customer when the cart has none yet.
        """
        if instance.glossary.get('address_form') == 'shipping':
            if cart.shipping_address:
                address = cart.shipping_address
            else:
                # fallback to another existing shipping address
                FormClass = self.get_form_class(instance)
                address = FormClass.get_model().objects.get_fallback(customer=cart.customer)
        else:  # address_form == billing
            if cart.billing_address:
                address = cart.billing_address
            else:
                # fallback to another existing billing address
                FormClass = self.get_form_class(instance)
                address = FormClass.get_model().objects.get_fallback(customer=cart.customer)
        return address

    def get_form_data(self, context, instance, placeholder):
        form_data = self.super(CheckoutAddressPlugin, self).get_form_data(context, instance, placeholder)

        if form_data.get('cart') is None:
            raise PermissionDenied("Can not proceed to checkout without cart")

        address = self.get_address(form_data['cart'], instance)
        if instance.glossary.get('allow_multiple'):
            form_data.update(multi_addr=True)
        else:
            form_data.update(multi_addr=False)

        form_data.update(
            instance=address,
            # 'add' selects the empty "add a new address" form variant
            initial={'active_priority': address.priority if address else 'add'},
            allow_use_primary=instance.glossary.get('allow_use_primary', False)
        )
        return form_data

    @classmethod
    def get_identifier(cls, instance):
        identifier = super().get_identifier(instance)
        address_form = instance.glossary.get('address_form')
        address_form = dict(cls.form.ADDRESS_CHOICES).get(address_form, '')
        return format_html(pgettext_lazy('get_identifier', "for {} {}"), address_form, identifier)

    def get_render_template(self, context, instance, placeholder):
        addr_form = instance.glossary.get('address_form')
        if addr_form not in ['shipping', 'billing']:  # validate
            addr_form = 'shipping'
        render_type = instance.glossary.get('render_type')
        if render_type not in ['form', 'summary']:  # validate
            render_type = 'form'
        template_names = [
            '{0}/checkout/{1}-address-{2}.html'.format(app_settings.APP_LABEL, addr_form, render_type),
            'shop/checkout/{0}-address-{1}.html'.format(addr_form, render_type),
        ]
        return select_template(template_names)

DialogFormPluginBase.register_plugin(CheckoutAddressPlugin)
class MethodPluginForm(DialogPluginBaseForm):
    """
    Shared editor form for the payment and shipping method plugins.
    """
    show_additional_charge = fields.BooleanField(
        label=_("Show additional charge"),
        initial=True,
        required=False,
        help_text=_("Add an extra line showing the additional charge depending on the chosen payment/shipping method."),
    )

    class Meta:
        entangled_fields = {'glossary': ['show_additional_charge']}
class PaymentMethodFormPlugin(DialogFormPluginBase):
    """
    Dialog plugin rendering the form to choose among the configured payment
    methods.
    """
    name = _("Payment Method Form")
    form = MethodPluginForm
    form_class = app_settings.SHOP_CASCADE_FORMS['PaymentMethodForm']
    template_leaf_name = 'payment-method-{}.html'

    def get_form_data(self, context, instance, placeholder):
        # preselect the payment modifier already stored with the cart
        form_data = self.super(PaymentMethodFormPlugin, self).get_form_data(context, instance, placeholder)
        cart = form_data.get('cart')
        if cart:
            form_data.update(initial={'payment_modifier': cart.extra.get('payment_modifier')})
        return form_data

    def render(self, context, instance, placeholder):
        self.super(PaymentMethodFormPlugin, self).render(context, instance, placeholder)
        # let every payment modifier contribute to the rendering context
        for payment_modifier in cart_modifiers_pool.get_payment_modifiers():
            payment_modifier.update_render_context(context)
        context['show_additional_charge'] = instance.glossary.get('show_additional_charge', False)
        return context

if cart_modifiers_pool.get_payment_modifiers():
    # Plugin is registered only if at least one payment modifier exists
    DialogFormPluginBase.register_plugin(PaymentMethodFormPlugin)
class ShippingMethodFormPlugin(DialogFormPluginBase):
    """
    Dialog plugin rendering the form to choose among the configured shipping
    methods.
    """
    name = _("Shipping Method Form")
    form = MethodPluginForm
    form_class = app_settings.SHOP_CASCADE_FORMS['ShippingMethodForm']
    template_leaf_name = 'shipping-method-{}.html'

    def get_form_data(self, context, instance, placeholder):
        # preselect the shipping modifier already stored with the cart
        form_data = self.super(ShippingMethodFormPlugin, self).get_form_data(context, instance, placeholder)
        cart = form_data.get('cart')
        if cart:
            form_data.update(initial={'shipping_modifier': cart.extra.get('shipping_modifier')})
        return form_data

    def render(self, context, instance, placeholder):
        self.super(ShippingMethodFormPlugin, self).render(context, instance, placeholder)
        # let every shipping modifier contribute to the rendering context
        for shipping_modifier in cart_modifiers_pool.get_shipping_modifiers():
            shipping_modifier.update_render_context(context)
        context['show_additional_charge'] = instance.glossary.get('show_additional_charge', False)
        return context

if cart_modifiers_pool.get_shipping_modifiers():
    # Plugin is registered only if at least one shipping modifier exists
    DialogFormPluginBase.register_plugin(ShippingMethodFormPlugin)
class ExtraAnnotationFormPlugin(DialogFormPluginBase):
    """
    Dialog plugin rendering a free-text field the customer may use to attach
    an annotation to the order.
    """
    name = _("Extra Annotation Form")
    form_class = app_settings.SHOP_CASCADE_FORMS['ExtraAnnotationForm']
    template_leaf_name = 'extra-annotation-{}.html'

    def get_form_data(self, context, instance, placeholder):
        # prefill with the annotation already stored with the cart, if any
        form_data = self.super(ExtraAnnotationFormPlugin, self).get_form_data(context, instance, placeholder)
        cart = form_data.get('cart')
        if cart:
            form_data.update(initial={'annotation': cart.extra.get('annotation', '')})
        return form_data

DialogFormPluginBase.register_plugin(ExtraAnnotationFormPlugin)
class AcceptConditionMixin:
    """
    Mixin shared by the editable and the "strides" accept-condition plugins;
    it turns the plugin's rendered text body into the label of a checkbox
    which the customer must tick to accept the terms and conditions.
    """
    render_template = 'shop/checkout/accept-condition.html'

    def render(self, context, instance, placeholder):
        """
        Return the context to render a checkbox used to accept the terms and conditions
        """
        request = context['request']
        try:
            cart = CartModel.objects.get_from_request(request)
            cart.update(request)
        except CartModel.DoesNotExist:
            cart = None
        # each accept-condition form on the page gets a distinct order number
        request._plugin_order = getattr(request, '_plugin_order', 0) + 1
        try:
            FormClass = import_string(app_settings.SHOP_CASCADE_FORMS['AcceptConditionForm'])
        except ImportError:
            msg = "Can not import Form class. Please check your settings directive SHOP_CASCADE_FORMS['AcceptConditionForm']."
            raise ImproperlyConfigured(msg)
        form_data = {'cart': cart, 'initial': dict(plugin_id=instance.pk, plugin_order=request._plugin_order)}
        bound_form = FormClass(**form_data)
        context[bound_form.form_name] = bound_form
        # super().render() fills context['body'] with the plugin's text content
        super().render(context, instance, placeholder)
        accept_condition_form = context['accept_condition_form.plugin_{}'.format(instance.pk)]
        # transfer the stored HTML content into the widget's label
        accept_condition_form['accept'].field.label = mark_safe(context['body'])
        accept_condition_form['accept'].field.widget.choice_label = accept_condition_form['accept'].field.label  # Django < 1.11
        context['accept_condition_form'] = accept_condition_form
        return context
class AcceptConditionPlugin(AcceptConditionMixin, TextPlugin):
    """
    Text plugin whose content is rendered as the label of the
    accept-condition checkbox.
    """
    name = _("Accept Condition")
    module = "Shop"

    def get_admin_url_name(self, name):
        # admin URLs for this plugin are routed through the shop's
        # ``acceptcondition`` model
        return 'shop_acceptcondition_{}'.format(name)
class AcceptConditionMinion(AcceptConditionMixin, TextStridePlugin):
    # lightweight "strides" counterpart of ``AcceptConditionPlugin``
    pass

plugin_pool.register_plugin(AcceptConditionPlugin)
# route the strides rendering of this plugin type through its minion classes
strides_plugin_map['AcceptConditionPlugin'] = AcceptConditionMinion
strides_element_map['AcceptConditionPlugin'] = TextStrideElement
class RequiredFormFieldsPlugin(ShopPluginBase):
    """
    Renders a short hint telling the customer that fields marked with a star
    must be filled in.
    """
    name = _("Required Form Fields")
    template_leaf_name = 'required-form-fields.html'
    parent_classes = ('BootstrapColumnPlugin',)

    def get_render_template(self, context, instance, placeholder):
        # prefer a project-specific template, fall back to the shipped one
        leaf = self.template_leaf_name
        return select_template([
            '{0}/checkout/{1}'.format(app_settings.APP_LABEL, leaf),
            'shop/checkout/{}'.format(leaf),
        ])

plugin_pool.register_plugin(RequiredFormFieldsPlugin)
class ValidateSetOfFormsPlugin(TransparentContainer, ShopPluginBase):
    """
    Wrap arbitrary child forms into the Angular directive ``shopFormsSet``, so
    that all of them are validated together; otherwise a proceed button stays
    disabled.
    """
    name = _("Manage Set of Forms")
    allow_children = True
    alien_child_classes = True

    def get_render_template(self, context, instance, placeholder):
        # Project template first, stock template as fallback.
        template_names = [
            '{}/checkout/forms-set.html'.format(app_settings.APP_LABEL),
            'shop/checkout/forms-set.html',
        ]
        return select_template(template_names)
# Make the plugin available in the django-CMS plugin picker.
plugin_pool.register_plugin(ValidateSetOfFormsPlugin)
| bsd-3-clause | d5d9d541629eee837f1c52f344b8d955 | 40.612658 | 133 | 0.683945 | 4.123683 | false | false | false | false |
awesto/django-shop | tests/test_enum.py | 1 | 2379 | import pytest
from django.db import models
from shop.models.fields import ChoiceEnum, ChoiceEnumField
class MyChoices(ChoiceEnum):
    # Each member is a (database value, human readable label) pair.
    A = 0, "My choice A"
    B = 1, "My choice B"
class MyColor(ChoiceEnum):
    # String-valued variant: (database value, human readable label) pairs.
    RED = '#ff0000', "Pure red"
    BLUE = '#0000ff', "Pure blue"
class MyModel(models.Model):
    # Minimal unmanaged model used only to exercise ChoiceEnumField in tests.
    f = ChoiceEnumField(enum_type=MyChoices)
    class Meta:
        app_label = 'shop'
        managed = False
def test_int_enum():
    """Int-valued ChoiceEnum members expose name/value/label and round-trip by name."""
    member = MyChoices.A
    assert isinstance(member, MyChoices)
    assert MyChoices.B.name == 'B'
    assert MyChoices.B.value == 1
    assert MyChoices.B.label == "My choice B"
    by_name = MyChoices('B')
    assert str(by_name) == "My choice B"
    assert MyChoices.default == MyChoices.A
    assert MyChoices.choices == [(0, "My choice A"), (1, "My choice B")]
def test_str_enum():
    """String-valued ChoiceEnum members behave like their int counterparts."""
    assert isinstance(MyColor.RED, MyColor)
    blue = MyColor.BLUE
    assert blue.name == 'BLUE'
    assert blue.value == '#0000ff'
    assert blue.label == "Pure blue"
    assert blue == MyColor('#0000ff')
    assert str(blue) == "Pure blue"
    assert MyColor.choices == [('#ff0000', "Pure red"), ('#0000ff', "Pure blue")]
def test_to_python():
    """to_python accepts raw values and member names; anything else raises ValueError."""
    field = ChoiceEnumField(enum_type=MyChoices)
    assert field.to_python(0) == MyChoices.A
    assert field.to_python('A') == MyChoices.A
    assert field.to_python(1) == MyChoices.B
    for bad in (None, 3):
        with pytest.raises(ValueError):
            field.to_python(bad)
def test_deconstruct():
    """Field deconstruction (for migrations) yields the bare field path with no args."""
    f = ChoiceEnumField(enum_type=MyChoices)
    name, path, args_, kwargs_ = f.deconstruct()
    assert name is None
    assert path == 'shop.models.fields.ChoiceEnumField'
    assert args_ == []
    assert kwargs_ == {}
def test_from_db_value():
    """Database values map to enum members; unknown values pass through unchanged."""
    f = ChoiceEnumField(enum_type=MyChoices)
    assert f.from_db_value(0, None, None) is MyChoices.A
    assert f.from_db_value(1, None, None) is MyChoices.B
    # Compare with ``==`` rather than ``is``: identity of int literals is a
    # CPython interning detail and ``x is 2`` raises SyntaxWarning on 3.8+.
    assert f.from_db_value(2, None, None) == 2
def test_get_prep_value():
    """Enum members serialize back to their underlying database values."""
    f = ChoiceEnumField(enum_type=MyChoices)
    # Compare with ``==`` rather than ``is``: identity of int literals is a
    # CPython interning detail and ``x is 0`` raises SyntaxWarning on 3.8+.
    assert f.get_prep_value(MyChoices.A) == 0
    assert f.get_prep_value(MyChoices.B) == 1
def test_value_to_string():
    """Serialization uses the member *name*; non-model arguments are rejected."""
    obj = MyModel(f=MyChoices.A)
    assert ChoiceEnumField(name='f').value_to_string(obj) == 'A'
    with pytest.raises(ValueError):
        ChoiceEnumField(name='f').value_to_string(0)
| bsd-3-clause | 9ab708f7cc3449fde6c19fda5bc77a36 | 27.321429 | 81 | 0.651955 | 3.193289 | false | true | false | false |
lektor/lektor | tests/test_utils.py | 1 | 5284 | # coding: utf-8
import pytest
from lektor.utils import build_url
from lektor.utils import is_path_child_of
from lektor.utils import join_path
from lektor.utils import magic_split_ext
from lektor.utils import make_relative_url
from lektor.utils import parse_path
from lektor.utils import slugify
from lektor.utils import unique_everseen
def test_join_path():
    """join_path joins Lektor db paths, honoring ``@`` virtual-path markers."""
    assert join_path("a", "b") == "a/b"
    assert join_path("/a", "b") == "/a/b"
    assert join_path("a@b", "c") == "a@b/c"
    assert join_path("a@b", "") == "a@b"
    assert join_path("a@b", "@c") == "a@c"
    assert join_path("a@b/c", "a@b") == "a/a@b"
    assert join_path("blog@archive", "2015") == "blog@archive/2015"
    assert join_path("blog@archive/2015", "..") == "blog@archive"
    assert join_path("blog@archive/2015", "@archive") == "blog@archive"
    assert join_path("blog@archive", "..") == "blog"
    assert join_path("blog@archive", ".") == "blog@archive"
    assert join_path("blog@archive", "") == "blog@archive"
    # special behavior: parent of pagination paths is always the actual
    # page parent.
    assert join_path("/blog@1", "..") == "/"
    assert join_path("/blog@2", "..") == "/"
    # But joins on the same level keep the path
    assert join_path("/blog@1", ".") == "/blog@1"
    assert join_path("/blog@2", ".") == "/blog@2"
    assert join_path("/blog@1", "") == "/blog@1"
    assert join_path("/blog@2", "") == "/blog@2"
def test_is_path_child_of():
    """Strict vs. non-strict child checks, including ``@`` virtual paths."""
    assert not is_path_child_of("a/b", "a/b")
    assert is_path_child_of("a/b", "a/b", strict=False)
    assert is_path_child_of("a/b/c", "a")
    assert not is_path_child_of("a/b/c", "b")
    assert is_path_child_of("a/b@foo/bar", "a/b@foo")
    assert is_path_child_of("a/b@foo", "a/b@foo", strict=False)
    assert not is_path_child_of("a/b@foo/bar", "a/c@foo")
    assert not is_path_child_of("a/b@foo/bar", "a/c")
    assert is_path_child_of("a/b@foo", "a/b")
    assert is_path_child_of("a/b@foo/bar", "a/b@foo")
    assert not is_path_child_of("a/b@foo/bar", "a/b@bar")
def test_magic_split_ext():
    """Extensions containing spaces are only split when ext_check is disabled."""
    assert magic_split_ext("wow") == ("wow", "")
    assert magic_split_ext("aaa.jpg") == ("aaa", "jpg")
    assert magic_split_ext("aaa. jpg") == ("aaa. jpg", "")
    assert magic_split_ext("aaa.j pg") == ("aaa.j pg", "")
    assert magic_split_ext("aaa.j pg", ext_check=False) == ("aaa", "j pg")
def test_slugify():
    """Slugs are ASCII-transliterated and space-collapsed; slashes and extensions survive."""
    assert slugify("w o w") == "w-o-w"
    assert slugify("Șö prĕtty") == "so-pretty"
    assert slugify("im age.jpg") == "im-age.jpg"
    assert slugify("slashed/slug") == "slashed/slug"
def test_url_builder():
    """build_url joins URL pieces, skipping empty/None parts and honoring trailing_slash."""
    assert build_url([]) == "/"
    assert build_url(["a", "b/c"]) == "/a/b/c/"
    assert build_url(["a", "b/c"], trailing_slash=False) == "/a/b/c"
    assert build_url(["a", "b/c.html"]) == "/a/b/c.html"
    assert build_url(["a", "b/c.html"], trailing_slash=True) == "/a/b/c.html/"
    assert build_url(["a", None, "b", "", "c"]) == "/a/b/c/"
def test_parse_path():
    """parse_path splits a path into segments, resolving ``..`` components."""
    assert parse_path("") == []
    assert parse_path("/") == []
    assert parse_path("/foo") == ["foo"]
    assert parse_path("/foo/") == ["foo"]
    assert parse_path("/foo/bar") == ["foo", "bar"]
    assert parse_path("/foo/bar/") == ["foo", "bar"]
    assert parse_path("/foo/bar/../stuff") == ["foo", "stuff"]
@pytest.mark.parametrize(
    "source, target, expected",
    [
        ("/", "./a/", "a/"),
        ("/", "./a", "a"),
        ("/fr/blog/2015/11/a/", "/fr/blog/2015/11/a/a.jpg", "a.jpg"),
        ("/fr/blog/2015/11/a/", "/fr/blog/", "../../../"),
        ("/fr/blog/2015/11/a.php", "/fr/blog/", "../../"),
        ("/fr/blog/2015/11/a/", "/fr/blog/2016/", "../../../2016/"),
        ("/fr/blog/2015/11/a/", "/fr/blog/2016/c.jpg", "../../../2016/c.jpg"),
        ("/fr/blog/2016/", "/fr/blog/2015/a/", "../2015/a/"),
        ("/fr/blog/2016/", "/fr/blog/2015/a/d.jpg", "../2015/a/d.jpg"),
        ("/fr/blog/2015/11/a/", "/images/b.svg", "../../../../../images/b.svg"),
        ("/fr/blog/", "2015/11/", "2015/11/"),
        ("/fr/blog/x", "2015/11/", "2015/11/"),
        ("", "./a/", "a/"),
        ("", "./a", "a"),
        ("fr/blog/2015/11/a/", "fr/blog/2015/11/a/a.jpg", "a.jpg"),
        ("fr/blog/2015/11/a/", "fr/blog/", "../../../"),
        ("fr/blog/2015/11/a.php", "fr/blog/", "../../"),
        ("fr/blog/2015/11/a/", "fr/blog/2016/", "../../../2016/"),
        ("fr/blog/2015/11/a/", "fr/blog/2016/c.jpg", "../../../2016/c.jpg"),
        ("fr/blog/2016/", "fr/blog/2015/a/", "../2015/a/"),
        ("fr/blog/2016/", "fr/blog/2015/a/d.jpg", "../2015/a/d.jpg"),
        ("fr/blog/2015/11/a/", "images/b.svg", "../../../../../images/b.svg"),
        ("fr/blog/", "2015/11/", "../../2015/11/"),
        ("fr/blog/x", "2015/11/", "../../2015/11/"),
    ],
)
def test_make_relative_url(source, target, expected):
    """make_relative_url covers absolute, relative, file-like and mixed path pairs."""
    assert make_relative_url(source, target) == expected
def test_make_relative_url_relative_source_absolute_target():
    """A relative source cannot be made relative to an absolute target."""
    with pytest.raises(ValueError):
        make_relative_url("rel/a/tive/", "/abs/o/lute")
@pytest.mark.parametrize(
    "seq, expected",
    [
        (iter(()), ()),
        ((2, 1, 1, 2, 1), (2, 1)),
        ((1, 2, 1, 2, 1), (1, 2)),
    ],
)
def test_unique_everseen(seq, expected):
    """unique_everseen keeps the first occurrence of each element, in order."""
    assert tuple(unique_everseen(seq)) == expected
| bsd-3-clause | 7825aac5500c18c29f0ea8322b5667b8 | 36.190141 | 80 | 0.528877 | 2.763475 | false | true | false | false |
lektor/lektor | lektor/metaformat.py | 1 | 3252 | def _line_is_dashes(line):
line = line.strip()
return line == "-" * len(line) and len(line) >= 3
def _process_buf(buf):
    """Un-escape dash-ruler lines in-place (dropping the escaping leading dash),
    strip the trailing newline from the final line, and return a shallow copy
    of the buffer."""
    for idx, line in enumerate(buf):
        if _line_is_dashes(line):
            buf[idx] = line[1:]
    if buf and buf[-1].endswith("\n"):
        buf[-1] = buf[-1][:-1]
    return list(buf)
def tokenize(iterable, interesting_keys=None, encoding=None):
    """This tokenizes an iterable of newlines as bytes into key value
    pairs out of the lektor bulk format. By default it will process all
    fields, but optionally it can skip values of uninteresting keys and
    will instead yield `None`. The values are left as list of decoded
    lines with their endings preserved.
    This will not perform any other processing on the data other than
    decoding and basic tokenizing.
    """
    # ``key`` is a 0- or 1-element list holding the current field name; a list
    # is used (rather than a plain name) so _flush_item can clear it in place.
    key = []
    buf = []
    # True right after a ``key:`` header with an empty inline value: exactly
    # one following blank line is swallowed before the value starts.
    want_newline = False
    is_interesting = True
    def _flush_item():
        # Emit the accumulated (key, value) pair and reset the accumulators.
        the_key = key[0]
        if not is_interesting:
            value = None
        else:
            value = _process_buf(buf)
        del key[:], buf[:]
        return the_key, value
    if encoding is not None:
        iterable = (x.decode(encoding, "replace") for x in iterable)
    for line in iterable:
        # Normalize all line endings to a single "\n".
        line = line.rstrip("\r\n") + "\n"
        if line.rstrip() == "---":
            # Field separator: flush the pending item, if any.
            want_newline = False
            if key:
                yield _flush_item()
        elif key:
            # Inside a value block for the current key.
            if want_newline:
                want_newline = False
                if not line.strip():
                    continue
            if is_interesting:
                buf.append(line)
        else:
            # Expecting a new ``key: value`` header line.
            bits = line.split(":", 1)
            if len(bits) == 2:
                key = [bits[0].strip()]
                if interesting_keys is None:
                    is_interesting = True
                else:
                    is_interesting = key[0] in interesting_keys
                if is_interesting:
                    first_bit = bits[1].strip("\t ")
                    if first_bit.strip():
                        # Single-line value given right after the colon.
                        buf = [first_bit]
                    else:
                        # Multi-line value follows on subsequent lines.
                        buf = []
                        want_newline = True
    if key:
        yield _flush_item()
def serialize(iterable, encoding=None):
    """Serializes an iterable of key value pairs into a stream of
    string chunks. If an encoding is provided, it will be encoded into that.
    This is primarily used by the editor to write back data to a source file.
    """
    def _produce(item, escape=False):
        # Content lines that would look like the "---" separator are escaped
        # by prefixing an extra dash (undone again by ``tokenize``).
        if escape:
            if _line_is_dashes(item):
                item = "-" + item
        if encoding is not None:
            item = item.encode(encoding)
        return item
    for idx, (key, value) in enumerate(iterable):
        # Normalize all line endings to "\n".
        value = value.replace("\r\n", "\n").replace("\r", "\n")
        if idx > 0:
            yield _produce("---\n")
        if "\n" in value or value.strip("\t ") != value:
            # Multi-line (or whitespace-padded) values use the block form.
            yield _produce(key + ":\n")
            yield _produce("\n")
            for line in value.splitlines(True):
                yield _produce(line, escape=True)
            yield _produce("\n")
        else:
            yield _produce("%s: %s\n" % (key, value))
| bsd-3-clause | a918a3054a2ff00d02255e41193c6dd4 | 29.971429 | 77 | 0.511993 | 4.196129 | false | false | false | false |
awesto/django-shop | shop/migrations/0001_initial.py | 2 | 2558 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
import filer.fields.file
class Migration(migrations.Migration):
    """Initial shop schema: Notification and NotificationAttachment tables plus
    the CustomerProxy and Email proxy models.

    The user model this migration depends on differs based on whether the
    optional ``email_auth`` app is installed (see the conditional below).
    """
    dependencies = [
        ('filer', '0002_auto_20150606_2003'),
        ('post_office', '0002_add_i18n_and_backend_alias'),
    ]
    # Pick the concrete user model at migration time: email_auth.User when
    # available, otherwise Django's stock auth.User.
    if 'email_auth' in settings.INSTALLED_APPS:
        dependencies.append(('email_auth', '0001_initial'))
        customer_bases = ('email_auth.user',)
    else:
        dependencies.append(('auth', '0001_initial'))
        customer_bases = ('auth.user',)
    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
                ('transition_target', models.CharField(max_length=50, verbose_name='Event')),
                ('mail_to', models.PositiveIntegerField(default=None, null=True, verbose_name='Mail to', blank=True)),
                ('mail_template', models.ForeignKey(verbose_name='Template', to='post_office.EmailTemplate', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name': 'Notification',
                'verbose_name_plural': 'Notifications',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='NotificationAttachment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('attachment', filer.fields.file.FilerFileField(on_delete=models.CASCADE, related_name='email_attachment', blank=True, to='filer.File', null=True)),
                ('notification', models.ForeignKey(to='shop.Notification', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Proxy model over the active user model (no db table of its own).
        migrations.CreateModel(
            name='CustomerProxy',
            fields=[
            ],
            options={
                'verbose_name': 'Customer',
                'proxy': True,
                'verbose_name_plural': 'Customers',
            },
            bases=customer_bases,
        ),
        # Proxy model over post_office.Email (no db table of its own).
        migrations.CreateModel(
            name='Email',
            fields=[
            ],
            options={
                'proxy': True,
            },
            bases=('post_office.email',),
        ),
    ]
| bsd-3-clause | 3a29dbf0a35a613d960fffcc40a1b30a | 36.617647 | 164 | 0.54183 | 4.503521 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/defs/iOS15/recurrent.py | 1 | 19876 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.input_type import (DefaultInputs,
InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
@register_op
class gru(Operation):
    r"""
    Gated recurrent unit (GRU).

    .. math::
       r_t = \rm{recurrent\_activation}(W_{ir} x_t + b_{ir} + W_{hr} h_{t-1} + b_{hr})

    .. math::
       z_t = \rm{recurrent\_activation}(W_{iz} x_t + b_{iz} + W_{hz} h_(t−1) + b_{hz})

    .. math::
       o_t = activation(W_{io} x_t + b_{io} + r_t * W_{ho} h_(t−1) + b_{ho})

    .. math::
       h_t = (1 − z_t) * o_t + z_t * h_{(t−1)}

    Where:

    * ``W_{ir}``, ``W_{io}``, and ``W_{iz}`` state input-hidden weight for reset, output
      and update gate, respectively.
    * ``W_{h[r|o|z]}`` are recurrent weights on hidden state to reset, output, update gate.
    * ``h_t`` is the hidden state at time ``t``.
    * ``x_t`` is the input at time ``t``.
    * ``h_(t-1)`` is the hidden state of the layer at time ``t-1`` or the initial
      hidden state at time ``0``.
    * ``r_t``, ``o_t``, and ``z_t`` are the reset, new, and update gates, respectively.
    * ``*`` is elementwise product.

    Parameters
    ----------
    x: <s, b, I, T> (Required)
        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
          input dimension.
    initial_h: <b, H, T> (Required)
        * ``H`` denotes hidden size.
    weight_ih: const<3*H, I, T> (Required) - Weight matrix
        * ``weigh_ih = [W_{ir} | W_{io} | W_{iz}]`` where ``[a|b]`` denotes column
          concatenation and ``[a, b]`` denotes row concatenation. ``W_{ir}``,
          ``W_{io}``, and ``W_{iz}`` have shape ``(H, I)``.
        * This is used when direction="forward" or "reverse".
    weight_hh: const<3*H, H, T> (Required) - Weight matrix
        * ``weight_hh = [W_{hr} | W_{ho} | W_{hz}]``: ``W_{hr}``, ``W_{ho}``, and
          ``W_{hz}`` have shape ``(H, H)``.
        * This is used when direction="forward" or "reverse".
    bias: const<3*H, T> (Optional) [Default all 0s]
        * ``bias[0]`` are input-hidden and hidden-hidden bias.
        * ``3*H`` are biases for ``[b_{ir} | b_{io} | b_{hz}]``.
        * This is used when direction="forward" or "reverse".
    direction: const<str> (Optional) [Default=forward]
        * Either ``forward`` or ``reverse``.
    output_sequence: const<bool> (Optional) [Default=False]
        * Outputs every step if ``True``.
    recurrent_activation: const<str> (Optional) [Default=sigmoid]
        * Activation applied on update and reset gate.
    activation: const<str> (Optional) [Default=tanh]
        * Activation applied on output gate.

    Returns
    -------
    <s, b, H, T> or <1, b, H, T>
        * If ``output_sequence == True`` (hidden states from every step):
          ``<s, b, H, T>``.
        * Else ``<1, b, H, T>`` (hidden states of the final step).
    <b, H, T>
        * Hidden states of the final step.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        initial_h=TensorInputType(type_domain="T"),
        weight_ih=TensorInputType(const=True, type_domain="T"),
        weight_hh=TensorInputType(const=True, type_domain="T"),
        bias=TensorInputType(const=True, optional=True, type_domain="T"),
        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
        recurrent_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
        activation=TensorInputType(const=True, optional=True, type_domain=types.str)
    )

    type_domains = {
        "T": (types.fp32,),
    }

    def default_inputs(self):
        return DefaultInputs(
            bias=None,
            direction="forward",
            output_sequence=False,
            recurrent_activation="sigmoid",
            activation="tanh",
        )

    def type_inference(self):
        # ``rank`` is an int; report it directly.  The previous code called
        # ``len(self.x.rank)``, which raised TypeError while formatting the
        # intended ValueError message.
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    self.x.rank
                )
            )
        sequence_length, batch_size, input_size = self.x.shape
        if self.weight_ih.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    self.weight_ih.rank
                )
            )
        if self.weight_hh.rank != 2:
            raise ValueError(
                "Invalid weight shape. Expecting Rank 2 input, got {}".format(
                    self.weight_hh.rank
                )
            )
        hidden_dim, hidden_size = self.weight_hh.shape
        direction = self.direction.val
        valid_directions = {"forward", "reverse"}
        if direction not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    direction, valid_directions
                )
            )
        # GRU weights stack 3 gates (reset | output | update) along dim 0.
        dim_factor = 3
        if hidden_size != (hidden_dim // dim_factor):
            raise ValueError(
                "Incorrect weight matrix: hidden dim size mismatch. \
                Provided weight_ih {}, weight_hh {}. Expecting <b, 3*H>".format(
                    self.weight_ih.shape, self.weight_hh.shape
                )
            )
        out_seq_len = sequence_length if self.output_sequence.val else 1
        output_shape = [out_seq_len, batch_size, hidden_size]
        output_h_shape = [batch_size, hidden_size]
        return (
            types.tensor(self.x.dtype, tuple(output_shape)),
            types.tensor(self.x.dtype, tuple(output_h_shape)),
        )
@register_op
class lstm(Operation):
    r"""
    Single long short-term memory (LSTM) sequence.

    .. math::
       i_t = \rm{recurrent\_activation}(W_{ii} x_t + B_{ii} + W_{hi} h_(t-1) + B_{hi})

    .. math::
       f_t = \rm{recurrent\_activation}(W_{if} x_t + B_{if} + W_{hf} h_(t-1) + B_{hf})

    .. math::
       z_t = cell_activation(W_{iz} x_t + B_{iz} + W_{hz} h_(t-1) + B_{hz})

    .. math::
       o_t = \rm{recurrent\_activation}(W_{io} x_t + B_{io} + W_{ho} h_(t-1) + B_{ho})

    .. math::
       c_t = f_t * c_(t-1) + i_t * z_t

    .. math::
       h_t = o_t * activation(c_t)

    Where:

    * ``i_t``, ``f_t``, ``o_t``, and ``z_t`` are input, forget, output, and cell gates,
      respectively, at time ``t``.
    * ``c_t`` is cell state at time ``t``.
    * ``h_t`` is the hidden state at time ``t``.
    * ``W_{ii}``, ``W_{if}``, ``W_{io}``, and ``W_{iz}`` are input weights for input,
      forget, output and cell gate, respectively.
    * ``W_{hi}``, ``W_{hf}``, ``W_{ho}``, and ``W_{hz}`` are recurrent weights for input,
      forget, output and cell gate, respectively.

    Parameters
    ----------
    x: <s, b, I, T> (Required)
        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
          input dimension.
    initial_h: <b, DIRECTION*H, T> (Required)
        * Initial hidden state. ``DIRECTION = 1`` for uni-directional, ``2`` for
          bi-directional LSTM.
        * ``H`` denotes hidden size.
        * ``[b, :H]`` and ``[b, H:]`` represents forward and reverse direction
          values, respectively.
    initial_c: <b, DIRECTION*H, T> (Required)
        * Initial cell state.
        * Format is same as ``initial_h``.
    weight_ih: const<4*H, I, T> (Required)
        * Input-hidden weight matrix
        * Weight tensor should be in order of
          ``[input_gate, forget_gate, output_gate, cell_gate]``.
        * If direction=="bidirectional", this is applied in forward direction.
        * If direction=="forward" or "backward" these weights are used.
    weight_hh: const<4*H, H, T> (Required)
        * Hidden-hidden weight matrix.
        * Weight tensor should be in order of
          ``[input_gate, forget_gate, output_gate, cell_gate]``.
        * If direction=="bidirectional", this is applied in forward direction.
        * If direction=="forward" or "backward" these weights are used.
    bias: const<4*H, T> (Optional) [Default all 0s]
        * bias = input-hidden bias + hidden-hidden bias
        * If direction=="bidirectional", this is applied in forward direction.
        * If direction=="forward" or "backward" this bias are used.
    peephole: const<3*H, T> (Optional, default to 0)
        * Weight tensor for peephole.
        * Order is ``[input_gate, forget_gate, output_gate]``.
        * Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
        * If direction=="bidirectional", this is applied in forward direction.
        * If direction=="forward" or "backward" these weights are used.
    weight_ih_back: const<4*H, I, T> (Optional) -
        * Input-hidden weight matrix for backward direction for `bidirectinal LSTM`.
        * Weight tensor should be in order of
          ``[input_gate, forget_gate, output_gate, cell_gate]``.
        * Must be provided for `bidirectional LSTM`.
        * This is only used when `direction` is "bidirectional".
        * For direction="reverse" use `weight_ih` instead.
    weight_hh_back: const<4*H, H, T> (Optional) - Hidden-hidden weight matrix
        * Hidden-hidden weight matrix for backward direction for `bidirectinal LSTM`.
        * Weight tensor should be in order of
          ``[input_gate, forget_gate, output_gate, cell_gate]``.
        * Must be provided for `bidirectional LSTM`.
        * This is only used when `direction` is "bidirectional".
        * For direction="reverse" use `weight_hh` instead.
    bias_back: const<4*H, T> (Optional) [Default all 0s]
        * bias = input-hidden bias + hidden-hidden bias.
        * Bias of backward direction for `bidirectional lstm`
        * This is only used when `direction` is "bidirectional".
        * For direction="reverse" use `bias` instead.
    peephole_back: const<3*H, T> (Optional, default to 0)
        * Weight tensor for peephole in backward direction for `bidirectional LSTM`.
        * Order is ``[input_gate, forget_gate, output_gate]``.
        * Shape of each peephole vector is ``(H,)`` (``H`` is hidden size).
        * Peephole of backward direction for `bidirectional lstm`
        * Bias of backward direction for `bidirectional lstm`
        * This is only used when `direction` is "bidirectional".
        * For direction="reverse" use `peephole` instead.
    direction: const<str> (Optional) [Default=forward]
        * One of the following: ``forward``, ``reverse``, or ``bidirectional``.
        * Must match ``DIRECTIONAL`` in initial states and weight parameters.
    output_sequence: const<bool> (Optional) [Default=False]
        * Outputs every step if ``True``.
    recurrent_activation: const<str> (Optional) [Default=sigmoid]
        * Activation applied on input, forget, and output gates.
    cell_activation: const<str> (Optional) [Default=tang]
        * Activation applied on cell gate.
    activation: const<str> (Optional) [Default=tanh]
        * Activation applied on output gate.
    clip: const<T> (optional) [Default=None]
        * Cell gate is clipped to ``[-clip, +clip]``.

    Returns
    -------
    <s, b, DIRECTION*H, T> or <1, b, DIRECTION*H, T>
        * If ``output_sequence == True`` (hidden states from every step):
          ``<s, b, DIRECTION*H, T>``.
        * Else ``<1, b, DIRECTION*H, T>`` (hidden states of the final step).
    <b, DIRECTION*H, T>
        * Hidden states of the final step.
    <b, DIRECTION*H, T>
        * Memory state of the final step.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        initial_h=TensorInputType(type_domain="T"),
        initial_c=TensorInputType(type_domain="T"),
        weight_ih=TensorInputType(const=True, type_domain="T"),  # ifoz layout,
        weight_hh=TensorInputType(const=True, type_domain="T"),  # ifoz layout
        bias=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
        peephole=TensorInputType(const=True, optional=True, type_domain="T"),  # ifo layout
        weight_ih_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout,
        weight_hh_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
        bias_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifoz layout
        peephole_back=TensorInputType(const=True, optional=True, type_domain="T"),  # ifo layout
        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
        recurrent_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
        cell_activation=TensorInputType(const=True, optional=True, type_domain=types.str),
        activation=TensorInputType(const=True, optional=True, type_domain=types.str),
        clip=TensorInputType(const=True, optional=True, type_domain="T"),
    )

    type_domains = {
        "T": (types.fp32,),
    }

    def default_inputs(self):
        return DefaultInputs(
            bias=None,
            direction="forward",
            output_sequence=False,
            recurrent_activation="sigmoid",
            cell_activation="tanh",
            activation="tanh",
            peephole=None,
            clip=None)

    def type_inference(self):
        # ``rank`` is an int; report it directly.  The previous code called
        # ``len(self.x.rank)``, which raised TypeError while formatting the
        # intended ValueError message.
        if self.x.rank != 3:
            raise ValueError(
                "Invalid input shape. Expecting Rank 3 input, got {}".format(
                    self.x.rank
                )
            )
        sequence_length, batch_size, input_size = self.x.shape

        def weight_shape_check(wt_ih, wt_hh):
            # Both weights must be rank 2 with a 4-gate stacking along dim 0.
            if wt_ih.rank != 2 or wt_hh.rank != 2:
                raise ValueError(
                    "Expecting Rank 2 input, got weight_ih rank: {}, weight_hh rank: {}".format(
                        wt_ih.rank, wt_hh.rank
                    )
                )
            hidden_size = wt_hh.shape[1]
            if wt_hh.shape[0] // hidden_size != 4 or wt_ih.shape[0] // hidden_size != 4:
                raise ValueError(
                    "Incorrect weight matrix: hidden dim size mismatch. \
                                Provided weight_ih {}, weight_hh {}. Expecting <4*H, H>".format(
                        wt_ih.shape, wt_hh.shape
                    )
                )

        direction = self.direction.val
        valid_directions = {"forward", "reverse", "bidirectional"}
        if direction not in valid_directions:
            raise ValueError(
                "Direction {} not supported. Supported directions: {}".format(
                    direction, valid_directions
                )
            )
        weight_shape_check(self.weight_ih, self.weight_hh)
        if direction == "bidirectional":
            weight_shape_check(self.weight_ih_back, self.weight_hh_back)
        hidden_dim, hidden_size = self.weight_hh.shape
        # 4 gates per direction; bidirectional doubles the factor.
        dim_factor = 8 if direction == "bidirectional" else 4
        out_seq_len = sequence_length if self.output_sequence.val else 1
        num_directions = dim_factor // 4
        output_shape = [out_seq_len, batch_size, num_directions * hidden_size]
        output_h_shape = [batch_size, num_directions * hidden_size]
        output_c_shape = [batch_size, num_directions * hidden_size]
        return (
            types.tensor(self.x.dtype, tuple(output_shape)),
            types.tensor(self.x.dtype, tuple(output_h_shape)),
            types.tensor(self.x.dtype, tuple(output_c_shape)),
        )
@register_op
class rnn(Operation):
    """
    Recurrent neural network (RNN).

    .. math::
       h_t = activation(W_{ih} x_t + b_{ih} + W_{hh} h_(t−1) + b_{hh})

    Where:

    * ``W_{ih}`` is input weight.
    * ``W_{hh}`` is hidden/recurrent weight.
    * ``h_t`` is the hidden state at time ``t``.
    * ``x_t`` is the input at time ``t``.
    * ``h_(t-1)`` is the hidden state of the layer at time ``t-1`` or the initial
      hidden state at time ``0``.

    Parameters
    ----------
    x: <s, b, I, T> (Required)
        * ``s`` is the sequence length, ``b`` is the batch size, and ``I`` is the
          input dimension.
    initial_h: <b, H, T> (Required)
        * ``H`` denotes hidden size.
    weight_ih: const<H, I, T> (Required) - Input-hidden weight matrix
    weight_hh: const<H, H, T> (Required) - Hidden-hidden weight matrix
    bias: const<H, T> (Optional) [Default all 0s]
        * bias for input-hidden and hidden-hidden
    direction: const<str> (Optional) [Default=forward]
        * Either ``forward`` or ``reverse``.
    output_sequence: const<bool> (Optional) [Default=False]
        * Outputs every step if ``True``.
    activation: const<str> (Optional) [Default=tanh]
        * Supported activation functions: ``relu``, ``tanh``, ``sigmoid``,
          ``sigmoid_hard``, ``scaled_tanh``, and ``linear``.

    Returns
    -------
    <s, b, H, T> or <1, b, H, T>
        * If ``output_sequence == True`` (hidden states from every step):
          ``<s, b, H, T>``.
        * Else ``<1, b, H, T>`` (hidden states of the final step).
    <b, H, T>
        * Hidden states of the final step.

    Attributes
    ----------
    T: fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        initial_h=TensorInputType(type_domain="T"),
        weight_ih=TensorInputType(const=True, type_domain="T"),
        weight_hh=TensorInputType(const=True, type_domain="T"),
        bias=TensorInputType(const=True, optional=True, type_domain="T"),
        direction=TensorInputType(const=True, optional=True, type_domain=types.str),
        output_sequence=TensorInputType(const=True, optional=True, type_domain=types.bool),
        activation=TensorInputType(const=True, optional=True, type_domain=types.str),
    )

    type_domains = {
        "T": (types.fp32,),
    }

    def default_inputs(self):
        return DefaultInputs(
            bias=None,
            direction="forward",
            output_sequence=False,
            activation="tanh")

    def type_inference(self):
        # ``rank`` is an int; report it directly.  The previous code used
        # ``len(self.x.rank)``, which raised TypeError while formatting the
        # intended ValueError message.
        if self.x.rank != 3:
            raise ValueError(
                f"Invalid input shape. Expecting Rank 3 input, got {self.x.rank}"
            )
        sequence_length, batch_size, input_size = self.x.shape
        if self.weight_ih.rank != 2 or self.weight_hh.rank != 2:
            raise ValueError(
                f"Invalid weight shape. Expecting Rank 2 input, got weight_ih "
                f"{self.weight_ih.rank}, weight_hh {self.weight_hh.rank}"
            )
        hidden_size, _ = self.weight_ih.shape
        direction = self.direction.val
        valid_directions = {"forward", "reverse"}
        if direction not in valid_directions:
            raise ValueError(
                f"Direction {direction} not supported. Supported directions: {valid_directions}"
            )
        out_seq_len = sequence_length if self.output_sequence.val else 1
        output_shape = [out_seq_len, batch_size, hidden_size]
        output_h_shape = [batch_size, hidden_size]
        return (
            types.tensor(self.x.dtype, tuple(output_shape)),
            types.tensor(self.x.dtype, tuple(output_h_shape)),
        )
| bsd-3-clause | 24647127269ce0557184b16221bf9f98 | 37.952941 | 99 | 0.570422 | 3.644469 | false | false | false | false |
apple/coremltools | coremltools/proto/Imputer_pb2.py | 1 | 9313 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Imputer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pb2
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import DataStructures_pb2 as DataStructures__pb2
try:
FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2
except AttributeError:
FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2
from .DataStructures_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='Imputer.proto',
package='CoreML.Specification',
syntax='proto3',
serialized_pb=_b('\n\rImputer.proto\x12\x14\x43oreML.Specification\x1a\x14\x44\x61taStructures.proto\"\xf3\x03\n\x07Imputer\x12\x1c\n\x12imputedDoubleValue\x18\x01 \x01(\x01H\x00\x12\x1b\n\x11imputedInt64Value\x18\x02 \x01(\x03H\x00\x12\x1c\n\x12imputedStringValue\x18\x03 \x01(\tH\x00\x12@\n\x12imputedDoubleArray\x18\x04 \x01(\x0b\x32\".CoreML.Specification.DoubleVectorH\x00\x12>\n\x11imputedInt64Array\x18\x05 \x01(\x0b\x32!.CoreML.Specification.Int64VectorH\x00\x12J\n\x17imputedStringDictionary\x18\x06 \x01(\x0b\x32\'.CoreML.Specification.StringToDoubleMapH\x00\x12H\n\x16imputedInt64Dictionary\x18\x07 \x01(\x0b\x32&.CoreML.Specification.Int64ToDoubleMapH\x00\x12\x1c\n\x12replaceDoubleValue\x18\x0b \x01(\x01H\x01\x12\x1b\n\x11replaceInt64Value\x18\x0c \x01(\x03H\x01\x12\x1c\n\x12replaceStringValue\x18\r \x01(\tH\x01\x42\x0e\n\x0cImputedValueB\x0e\n\x0cReplaceValueB\x02H\x03P\x00\x62\x06proto3')
,
dependencies=[DataStructures__pb2.DESCRIPTOR,],
public_dependencies=[DataStructures__pb2.DESCRIPTOR,])
_IMPUTER = _descriptor.Descriptor(
name='Imputer',
full_name='CoreML.Specification.Imputer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='imputedDoubleValue', full_name='CoreML.Specification.Imputer.imputedDoubleValue', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imputedInt64Value', full_name='CoreML.Specification.Imputer.imputedInt64Value', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imputedStringValue', full_name='CoreML.Specification.Imputer.imputedStringValue', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imputedDoubleArray', full_name='CoreML.Specification.Imputer.imputedDoubleArray', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imputedInt64Array', full_name='CoreML.Specification.Imputer.imputedInt64Array', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imputedStringDictionary', full_name='CoreML.Specification.Imputer.imputedStringDictionary', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='imputedInt64Dictionary', full_name='CoreML.Specification.Imputer.imputedInt64Dictionary', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replaceDoubleValue', full_name='CoreML.Specification.Imputer.replaceDoubleValue', index=7,
number=11, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replaceInt64Value', full_name='CoreML.Specification.Imputer.replaceInt64Value', index=8,
number=12, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='replaceStringValue', full_name='CoreML.Specification.Imputer.replaceStringValue', index=9,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='ImputedValue', full_name='CoreML.Specification.Imputer.ImputedValue',
index=0, containing_type=None, fields=[]),
_descriptor.OneofDescriptor(
name='ReplaceValue', full_name='CoreML.Specification.Imputer.ReplaceValue',
index=1, containing_type=None, fields=[]),
],
serialized_start=62,
serialized_end=561,
)
_IMPUTER.fields_by_name['imputedDoubleArray'].message_type = DataStructures__pb2._DOUBLEVECTOR
_IMPUTER.fields_by_name['imputedInt64Array'].message_type = DataStructures__pb2._INT64VECTOR
_IMPUTER.fields_by_name['imputedStringDictionary'].message_type = DataStructures__pb2._STRINGTODOUBLEMAP
_IMPUTER.fields_by_name['imputedInt64Dictionary'].message_type = DataStructures__pb2._INT64TODOUBLEMAP
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedDoubleValue'])
_IMPUTER.fields_by_name['imputedDoubleValue'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedInt64Value'])
_IMPUTER.fields_by_name['imputedInt64Value'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedStringValue'])
_IMPUTER.fields_by_name['imputedStringValue'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedDoubleArray'])
_IMPUTER.fields_by_name['imputedDoubleArray'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedInt64Array'])
_IMPUTER.fields_by_name['imputedInt64Array'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedStringDictionary'])
_IMPUTER.fields_by_name['imputedStringDictionary'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ImputedValue'].fields.append(
_IMPUTER.fields_by_name['imputedInt64Dictionary'])
_IMPUTER.fields_by_name['imputedInt64Dictionary'].containing_oneof = _IMPUTER.oneofs_by_name['ImputedValue']
_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append(
_IMPUTER.fields_by_name['replaceDoubleValue'])
_IMPUTER.fields_by_name['replaceDoubleValue'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue']
_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append(
_IMPUTER.fields_by_name['replaceInt64Value'])
_IMPUTER.fields_by_name['replaceInt64Value'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue']
_IMPUTER.oneofs_by_name['ReplaceValue'].fields.append(
_IMPUTER.fields_by_name['replaceStringValue'])
_IMPUTER.fields_by_name['replaceStringValue'].containing_oneof = _IMPUTER.oneofs_by_name['ReplaceValue']
DESCRIPTOR.message_types_by_name['Imputer'] = _IMPUTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Imputer = _reflection.GeneratedProtocolMessageType('Imputer', (_message.Message,), dict(
DESCRIPTOR = _IMPUTER,
__module__ = 'Imputer_pb2'
# @@protoc_insertion_point(class_scope:CoreML.Specification.Imputer)
))
_sym_db.RegisterMessage(Imputer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003'))
# @@protoc_insertion_point(module_scope)
| bsd-3-clause | d417bb8bed4c92875385f3386a599a4d | 49.340541 | 909 | 0.745088 | 3.053443 | false | false | true | false |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Encoding related utilities."""
import re
import six
# Lookup table for utf8
_cescape_utf8_to_str = [chr(i) for i in range(0, 256)]
_cescape_utf8_to_str[9] = r'\t' # optional escape
_cescape_utf8_to_str[10] = r'\n' # optional escape
_cescape_utf8_to_str[13] = r'\r' # optional escape
_cescape_utf8_to_str[39] = r"\'" # optional escape
_cescape_utf8_to_str[34] = r'\"' # necessary escape
_cescape_utf8_to_str[92] = r'\\' # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
[chr(i) for i in range(32, 127)] +
[r'\%03o' % i for i in range(127, 256)])
_cescape_byte_to_str[9] = r'\t' # optional escape
_cescape_byte_to_str[10] = r'\n' # optional escape
_cescape_byte_to_str[13] = r'\r' # optional escape
_cescape_byte_to_str[39] = r"\'" # optional escape
_cescape_byte_to_str[34] = r'\"' # necessary escape
_cescape_byte_to_str[92] = r'\\' # necessary escape
def CEscape(text, as_utf8):
  """Escape a byte string using C-style escape sequences.

  Hex escapes are deliberately avoided: the C++ unescaper accepts hex
  escapes of any length, so a two-digit hex escape followed by another hex
  digit would be mis-parsed.  Octal escapes (always three digits) are safe.

  Args:
    text: A byte string to be escaped.
    as_utf8: If True, printable non-ASCII characters are kept as-is (UTF-8
      output); otherwise every byte outside printable ASCII is escaped.

  Returns:
    The escaped string.
  """
  # On PY3, iterating bytes already yields ints; on PY2 (or for str input)
  # each item is a one-character string that must go through ord().
  to_ord = ord if isinstance(text, six.string_types) else (lambda b: b)
  table = _cescape_utf8_to_str if as_utf8 else _cescape_byte_to_str
  return ''.join(table[to_ord(ch)] for ch in text)
# Matches a one-digit hex escape preceded by a run of backslashes; the
# lookahead rejects escapes that already have two hex digits.
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
# Maps code points 127-255 to octal escapes so CUnescape can round-trip
# high-bit characters through 'unicode_escape' without corruption.
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
                           [r'\%03o' % i for i in range(127, 256)])
def CUnescape(text):
  """Unescape a text string with C-style escape sequences to UTF-8 bytes."""

  def _PadShortHex(match):
    # 'string_escape'/'unicode_escape' require two-digit hex escapes, so a
    # lone "\xF" is padded to "\x0F".  Pad only when the backslash run has
    # odd length, i.e. the escape is not itself escaped.
    backslashes = match.group(1)
    if len(backslashes) & 1:
      return backslashes + 'x0' + match.group(2)
    return match.group(0)

  normalized = _CUNESCAPE_HEX.sub(_PadShortHex, text)
  if str is bytes:  # PY2
    return normalized.decode('string_escape')
  # PY3: octal-escape high-bit characters first so the unicode_escape round
  # trip below leaves them intact, then decode and re-encode to get bytes.
  escaped = ''.join(_cescape_highbit_to_str[ord(ch)] for ch in normalized)
  return (escaped.encode('ascii')
          .decode('unicode_escape')
          .encode('raw_unicode_escape'))
| bsd-3-clause | 3a8d0ce73f789dadb552f3a862011ec8 | 42.149533 | 79 | 0.693524 | 3.450673 | false | false | false | false |
#  Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import numpy as np
import pytest
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.testing_utils import (
apply_pass_and_basic_check, assert_model_is_valid, get_op_types_in_program)
@pytest.mark.parametrize(
    "op_type, pos, val", itertools.product(
        ['add', 'mul', 'floor_div', 'pow', 'real_div', 'sub'],
        ['x', 'y'],
        [0., 1., [0., 0., 0., 0.], [1., 1., 1., 1.]]
    )
)
def test_elementwise_elimination(op_type, pos, val):
    """noop_elimination should drop elementwise binary ops whose constant
    operand makes them identities (x+0, x*1, x/1, x-0, x**1)."""
    # Dividing by a zero constant would be invalid; skip those combinations.
    if 'div' in op_type and np.prod(val) == 0:
        return
    # NOTE(review): this guard is a tautology -- (val != 0 or val != 1) is
    # always True -- so every 'pow' combination is skipped and pow is never
    # actually exercised.  The intent is unclear (possibly `and` was meant);
    # confirm before changing, since enabling the cases may expose pass
    # failures that this guard is papering over.
    if 'pow' in op_type and (val != 0 or val != 1):
        return
    test_op = getattr(mb, op_type)
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        # Place the constant on either side of the binary op.
        if pos == "x":
            r1 = test_op(x=val, y=x)
        else:
            r1 = test_op(x=x, y=val)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    original_program = [op_type, "relu"]
    new_program = original_program
    # Only the identity combinations are eliminated; sub/div/pow are
    # identities only when the constant is on the right (pos == 'y').
    if op_type in {'add'}:
        if val == 0. or val == [0., 0., 0., 0.]:
            new_program = ["relu"]
    elif op_type in {'mul'}:
        if val == 1. or val == [1., 1., 1., 1.]:
            new_program = ["relu"]
    elif op_type in {'real_div'}:
        if pos == 'y' and (val == 1. or val == [1., 1., 1., 1.]):
            new_program = ["relu"]
    elif op_type in {'pow', 'floor_div'}:
        if pos == 'y' and (val == 1. or val == [1., 1., 1., 1.]):
            new_program = ["relu"]
    elif op_type in {'sub'}:
        if pos == 'y' and (val == 0. or val == [0., 0., 0., 0.]):
            new_program = ["relu"]
    assert get_op_types_in_program(prev_prog) == original_program
    assert get_op_types_in_program(prog) == new_program
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_elementwise_broadcast():
    """add with a broadcasting zero constant must NOT be eliminated:
    removing it would change the output shape from (2, 4) back to (4,)."""
    @mb.program(input_specs=[mb.TensorSpec(shape=[4])])
    def prog(x):
        zeros_2x4 = [[0., 0., 0., 0.], [0., 0., 0., 0.]]
        broadcast_sum = mb.add(x=x, y=zeros_2x4)
        return mb.relu(x=broadcast_sum)

    before_prog, _, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    expected_ops = ["add", "relu"]
    assert get_op_types_in_program(before_prog) == expected_ops
    assert get_op_types_in_program(prog) == expected_ops
    assert_model_is_valid(
        prog,
        {"x": [4]},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_elementwise_elimination_fill():
    """
    When fill layer with dynamic shape is fed to elementwise-binary operation,
    even though the tensor can't be materialized at conversion time but no-op
    elimination can still be performed based on fill-value
    """
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, get_new_symbol()))])
    def prog(x):
        shape = mb.shape(x=x)
        y = mb.fill(value=0.0, shape=shape)
        x = mb.add(x=x, y=y)
        return mb.relu(x=x)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    # The add-with-zero-fill is removed; shape/fill become dead code ...
    assert get_op_types_in_program(prev_prog) == ["shape", "fill", "add", "relu"]
    assert get_op_types_in_program(prog) == ["shape", "fill", "relu"]
    # ... and are cleaned up by a subsequent dead-code-elimination pass.
    apply_pass_and_basic_check(prog, "common::dead_code_elimination")
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_reshape_elimination():
    """A reshape to the tensor's existing shape is a no-op and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.reshape(x=x, shape=[1, 8])
        # The second reshape keeps the (1, 8) shape, so it is the no-op; its
        # result is intentionally unused -- r1 feeds the relu below.
        mb.reshape(x=r1, shape=[1, 8])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["reshape", "reshape", "relu"]
    assert get_op_types_in_program(prog) == ["reshape", "relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (1, 8)},
    )
def test_oneway_split_elimination():
    """split with num_splits=1 returns the input unchanged and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.split(x=x, num_splits=1, axis=-1)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["split", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_full_split_elimination():
    """split whose single split_size covers the whole axis is a no-op."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.split(x=x, split_sizes=[4], axis=-1)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["split", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_slicebysize_full_elimination():
    """slice_by_size taking the full extent of every axis is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[2, 4])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_slicebysize_to_end_elimination():
    """size=-1 means "to the end", so begin=[0, 0] with all -1 sizes also
    covers the whole tensor and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.slice_by_size(x=x, begin=[0, 0], size=[-1, -1])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["slice_by_size", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_slicebyindex_full_elimination():
    """slice_by_index covering [0:2, 0:4] of a (2, 4) tensor is a no-op."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.slice_by_index(x=x, begin=[0, 0], end=[2, 4])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_slicebyindex_negative_stride():
    """A full-range slice with a negative stride reverses the data, so it
    must survive noop_elimination even though the shape is unchanged."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        reversed_cols = mb.slice_by_index(
            x=x,
            begin=[0, 0],
            end=[0, 0],
            stride=[1, -1],
            begin_mask=[True, True],
            end_mask=[True, True]
        )
        return mb.relu(x=reversed_cols)

    before_prog, _, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    expected_ops = ["slice_by_index", "relu"]
    assert get_op_types_in_program(before_prog) == expected_ops
    assert get_op_types_in_program(prog) == expected_ops
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
@pytest.mark.parametrize("begin_mask, end_mask",
                         itertools.product(itertools.product([True, False],[True, False]),
                                           itertools.product([True, False],[True, False])))
def test_slicebyindex_mask_elimination(begin_mask, end_mask):
    """slice_by_index is eliminated whenever the combination of begin/end
    values and masks selects the whole tensor."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(4, 4))])
    def prog(x):
        begin = [1, 1]
        end = [1, 1]
        # Wherever a mask is False, set the explicit bound to the full
        # extent so the slice always covers the entire (4, 4) input.
        for i in range(2):
            if not begin_mask[i]:
                begin[i] = 0
            if not end_mask[i]:
                end[i] = 4
        r1 = mb.slice_by_index(x=x, begin=begin, end=end, begin_mask=begin_mask, end_mask=end_mask)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["slice_by_index", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (4, 4)},
        expected_output_shapes={block.outputs[0].name: (4, 4)},
    )
def test_pad_elimination():
    """Padding zero elements on every edge is a no-op and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.pad(x=x, pad=[0, 0, 0, 0])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_keep_pad():
    """Non-zero padding changes the output shape and must be preserved."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.pad(x=x, pad=[4, 4, 2, 2])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["pad", "relu"]
    assert get_op_types_in_program(prog) == ["pad", "relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (10, 8)},
    )
def test_tile_elimination():
    """tile with all-ones reps repeats nothing and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.tile(x=x, reps=[1, 1])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_keep_tile():
    """tile with reps > 1 changes the output shape and must be preserved."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.tile(x=x, reps=[2, 2])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["tile", "relu"]
    assert get_op_types_in_program(prog) == ["tile", "relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (4, 8)},
    )
def test_upsample_nearest_neighbor_elimination():
    """upsample_nearest_neighbor with default scale factors leaves the
    shape unchanged (see expected shapes below) and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
    def prog(x):
        r1 = mb.upsample_nearest_neighbor(x=x)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["upsample_nearest_neighbor", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (3, 2, 4)},
        expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
    )
def test_upsample_bilinear_elimination():
    """upsample_bilinear with default scale factors is a no-op and is
    removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
    def prog(x):
        r1 = mb.upsample_bilinear(x=x)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["upsample_bilinear", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (3, 2, 4)},
        expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
    )
def test_resize_bilinear_elimination():
    """resize_bilinear whose target size equals the input's spatial size
    is a no-op and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
    def prog(x):
        r1 = mb.resize_bilinear(x=x, target_size_height=2, target_size_width=4)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["resize_bilinear", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (3, 2, 4)},
        expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
    )
def test_crop_elimination():
    """Cropping zero rows and zero columns is a no-op and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(3, 2, 4))])
    def prog(x):
        r1 = mb.crop(x=x, crop_height=[0, 0], crop_width=[0, 0])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["crop", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (3, 2, 4)},
        expected_output_shapes={block.outputs[0].name: (3, 2, 4)},
    )
def test_linear_elimination():
    """linear_activation with alpha=1, beta=0 is the identity and is
    removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 4))])
    def prog(x):
        r1 = mb.linear_activation(x=x, alpha=1.0, beta=0.0)
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["linear_activation", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 4)},
    )
def test_transpose_elimination():
    """transpose with the identity permutation is a no-op and is removed."""
    @mb.program(input_specs=[mb.TensorSpec(shape=(2, 3, 4))])
    def prog(x):
        r1 = mb.transpose(x=x, perm=[0, 1, 2])
        return mb.relu(x=r1)
    prev_prog, prev_block, block = apply_pass_and_basic_check(
        prog, "common::noop_elimination"
    )
    assert get_op_types_in_program(prev_prog) == ["transpose", "relu"]
    assert get_op_types_in_program(prog) == ["relu"]
    assert_model_is_valid(
        prog,
        {"x": (2, 3, 4)},
        expected_output_shapes={block.outputs[0].name: (2, 3, 4)},
    )
| bsd-3-clause | 14ce64c69e62f53051c586a22d9b047e | 31.37037 | 100 | 0.569929 | 2.964485 | false | true | false | false |
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from ... import SPECIFICATION_VERSION
from ..._deps import _HAS_SKLEARN
from ...models import MLModel as _MLModel
from ...models._interface_management import \
set_transform_interface_params as _set_transform_interface_params
from ...proto import Model_pb2 as _Model_pb2
from ...proto.Normalizer_pb2 import Normalizer as _proto__normalizer
if _HAS_SKLEARN:
from sklearn.preprocessing import Normalizer
from . import _sklearn_util
sklearn_class = Normalizer
model_type = "transformer"
def convert(model, input_features, output_features):
    """Convert a normalizer model to the protobuf spec.

    Parameters
    ----------
    model: Normalizer
        A Normalizer.

    input_features: str
        Name of the input column.

    output_features: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model

    Raises
    ------
    RuntimeError
        If scikit-learn is not available.
    ValueError
        If the model's norm is not one of "l1", "l2", or "max".
    """
    if not _HAS_SKLEARN:
        raise RuntimeError(
            "scikit-learn not found. scikit-learn conversion API is disabled."
        )

    # Test the scikit-learn model
    _sklearn_util.check_expected_type(model, Normalizer)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "norm"))

    # Set the interface params.
    spec = _Model_pb2.Model()
    spec.specificationVersion = SPECIFICATION_VERSION
    spec = _set_transform_interface_params(spec, input_features, output_features)

    # Set the normalizer parameters
    _normalizer_spec = spec.normalizer
    if model.norm == "l1":
        _normalizer_spec.normType = _proto__normalizer.L1
    elif model.norm == "l2":
        _normalizer_spec.normType = _proto__normalizer.L2
    elif model.norm == "max":
        _normalizer_spec.normType = _proto__normalizer.LMax
    else:
        # Previously an unrecognized norm fell through silently and the
        # converted model used the proto default; fail loudly instead.
        raise ValueError("Unsupported norm type for normalizer: %s" % model.norm)

    return _MLModel(spec)
def update_dimension(model, input_dimension):
    """Return the output dimension of *model* given its input dimension.

    Normalization is applied elementwise per vector, so the dimensionality
    passes through unchanged.
    """
    del model  # unused: the norm type never alters dimensionality
    return input_dimension
def get_input_dimension(model):
    """Return the expected input dimension, or None when it cannot be
    determined from the model alone."""
    del model  # the fitted Normalizer does not record its input width
    return None
| bsd-3-clause | 43b24cec8ca0770ec7c42d77a2d2462c | 27.231707 | 82 | 0.682073 | 3.839138 | false | false | false | false |
#  Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.block import curr_opset_version
from coremltools.converters.mil.mil.input_type import (DefaultInputs,
InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.ops.defs._utils import \
spatial_dimensions_out_shape
from coremltools.converters.mil.mil.ops.defs.iOS15 import _IOS15_TARGET
class Pooling(Operation):
    """
    Pooling Op Superclass

    Declares the inputs shared by the pooling ops (x, kernel_sizes,
    strides, pad_type, pad, ceil_mode) and implements output-shape
    inference from the spatial geometry.
    """
    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        kernel_sizes=TensorInputType(const=True, type_domain=types.int32),
        strides=TensorInputType(const=True, optional=True, type_domain=types.int32),
        pad_type=TensorInputType(const=True, type_domain=types.str),
        pad=TensorInputType(const=True, optional=True, type_domain=types.int32),
        ceil_mode=TensorInputType(const=True, optional=True, type_domain=types.bool),
    )
    type_domains = {
        "T": (types.fp16, types.fp32),
    }
    def default_inputs(self):
        # Input layout is (n, C, *spatial): everything past the first two
        # dims is spatial.
        num_spatial_dims = self.x.rank - 2
        return DefaultInputs(
            strides=[1] * num_spatial_dims,
            pad=[0] * 2 * num_spatial_dims,
            ceil_mode=False,
        )
    def type_inference(self):
        """Compute the output tensor type from kernel/stride/pad geometry."""
        ksize = self.kernel_sizes.val
        x_shape = self.x.shape
        D_in_rank = len(x_shape) - 2
        strides = [1] * D_in_rank if self.strides is None else self.strides.val
        pad_type = "valid" if self.pad_type is None else self.pad_type.val.lower()
        if pad_type not in ["valid", "same", "custom", "same_lower"]:
            raise ValueError("Unrecognized value of pad_type : {}".format(pad_type))
        pad = None if self.pad is None else self.pad.val
        D_in = x_shape[2:]  # spatial dimensions
        if self.ceil_mode.val:
            # ceil_mode mirrors PyTorch semantics: only valid for 1-D/2-D
            # pooling, never with "same" padding, and any explicit padding
            # must be symmetric.
            if D_in_rank > 2:
                raise ValueError('pool: ceil_mode only supported for 1D or 2D pool')
            if pad_type == "same" and self.ceil_mode.val:
                raise ValueError("ceil_mode must be False when pad_type==same")
            if pad is not None:
                for i in range(D_in_rank):
                    if pad[2 * i] != pad[2 * i + 1]:
                        raise ValueError("Padding must be symmetric if ceil_mode is True")
        # The same_lower padding is not supported in iOS15
        if curr_opset_version() == _IOS15_TARGET and self.pad_type.val == "same_lower":
            msg = "iOS15 version of pooling layers do not support pad_type = `same_lower`"
            raise ValueError(msg)
        D_out_shape = spatial_dimensions_out_shape(
            pad_type=pad_type,
            input_shape=D_in,
            kernel_shape=ksize,
            strides=strides,
            custom_pad=pad,
            ceil_mode=self.ceil_mode.val,
        )
        # Batch and channel dimensions pass through unchanged.
        ret_shape = list(x_shape[:2]) + D_out_shape
        return types.tensor(self.x.dtype, tuple(ret_shape))
@register_op
class avg_pool(Pooling):
    """
    Perform average pooling. Supports 1-D, 2-D, and 3-D pool (1, 2, or 3 spatial dimensions).

    Parameters
    ----------
    x: tensor<[n,C_in,\*D_in], T> (Required)
        * ``3 <= rank <= 5``.
        * ``D_in`` are spatial dimensions, ``1 <= len(D_in) <= 3``.
        * ``C_in`` is the number of input channels or depth dimensions.
        * ``n`` is the batch dimension.
    kernel_sizes: const tensor<[K], i32> (Required)
        * The size of the window for each spatial dimension ``D_in`` of the
          input tensor.
        * ``K == len(D_in)``
    strides: const tensor<[S],i32> (Optional, default to all 1s)
        * Stride along each of the spatial dimensions.
        * ``S == len(D_in)``.
    pad_type: const str (Required)
        Must be one of ``valid``, ``same``, ``custom`` or ``same_lower``.

        * ``valid``: No padding. This is equivalent to custom pad with ``pad[i] = 0, for
          all i``.
        * ``same`` : This is equivalent to custom pad with ``pad[2*i] + pad[2*i+1] = kernel_size[i]``.
        * ``custom``: Specify custom padding in the parameter pad. note that ``same``
          padding is equivalent to custom padding with
          ``pad[2*i] + pad[2*i+1] = kernel_size[i]``.
        * ``same_lower``: Similar to ``same`` but the padding
          will place extra rows/cols on the top/left if the padding amount is odd.
    pad: const<[P],i32> (Optional. Default to all 0s)
        * ``pad`` represents the number of elements to pad before and after each
          dimension: ``pad[2*i], pad[2*i+1]`` are the pad size before and after spatial
          dimension ``i``.
        * ``P = 2 * len(D_in)``.
        * ``pad`` should be specified if and only if ``pad_type == custom``
    exclude_padding_from_average: const tensor<[], bool> (Optional, default to False)
        * If ``True``, padded values (0s) are excluded from the denominator count
          when computing the average over the kernel window.
    ceil_mode: const<bool>
        * Same as PyTorch's ``ceil`` mode.
        * ``ceil`` is used instead of floor in calculating the output size.
        * Optional, defaults to ``False``.
        * Only applicable when ``pad_type`` is ``valid`` or ``custom``.
        * When ``ceil_mode`` is True, padding must be symmetric; that is, if specified,
          ``pad[2*i] == pad[2*i+1]`` must hold.

    Returns
    -------
    tensor<[n, C_out,\*D_out], T>
        * Same rank as ``x``.
        * When ``ceil_mode = False``:
            * ``D_out[i] = floor[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i]) /
              strides[i]] +1, for i = 0, .., len(D_in) - 1`` is mathematically the same
              as (when all parameters involved are integers):
            * ``D_out[i] = ceil [(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_size[i] - 1) / stride[i]], for i = 0, .., len(D_in) - 1``.
            * ``*D_out`` is all ones if ``global_pooling`` is ``true``.
        * When ``ceil_mode = True``:
            * ``D_out[i] = ceil[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i]) / strides[i]] +1, for i = 0, .., len(D_in) - 1``
            * If ``(D_out[i] - 1) * strides[i] >= D_in[i] + pad[2*i] and (pad[2*i] + pad[2*i+1] > 0)``
              then ``D_out[i] = D_out[i] - 1``.
            * The first equation is same as:
            * ``D_out[i] = floor[(D_in[i] + pad[2*i] + pad[2*i+1] - kernel_sizes[i] + strides[i] - 1) / strides[i]] +1, for i = 0, .., len(D_in) - 1``

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    l2_pool, max_pool
    """

    # Extend the shared Pooling signature with the avg_pool-only flag.
    input_spec = (
        InputSpec(
            exclude_padding_from_average=TensorInputType(
                const=True, optional=True, type_domain=types.bool
            )
        )
        + Pooling.input_spec
    )

    def default_inputs(self):
        # Inherit the shared pooling defaults and add this op's flag default.
        return super().default_inputs() + DefaultInputs(
            exclude_padding_from_average=False,
        )
@register_op
class l2_pool(Pooling):
    """
    Perform L2 pooling. Only 1-D and 2-D pooling (1 or 2 spatial dimensions)
    are supported.

    Parameters
    ----------
    x: tensor<[n,C_in,*D_in], T> (Required)
        * Only 1-D and 2-D pooling is supported.
        * See ``avg_pool``.
    kernel_sizes: const tensor<[K], i32> (Required)
        * See ``avg_pool``.
    strides: const tensor<[S],i32> (Optional, default to all 1s)
        * See ``avg_pool``.
    pad_type: const str (Required)
        * See ``avg_pool``.
    pad: const<[P],i32> (Optional, default to all 0s)
        * See ``avg_pool``.

    Returns
    -------
    tensor<[n, C_out,*D_out], T>
        * See ``avg_pool``.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    avg_pool, max_pool
    """

    def type_inference(self):
        # Reject 3-D pooling up front, then defer to the shared Pooling logic.
        spatial_rank = self.x.rank - 2
        if spatial_rank > 2:
            msg = "l2_pool only supports rank 1 or 2. Got rank: {}".format(spatial_rank)
            raise ValueError(msg)
        return super().type_inference()
@register_op
class max_pool(Pooling):
    """
    Perform max pooling. Supports 1-D, 2-D, and 3-D pool.

    Parameters
    ----------
    x: tensor<[n,C_in,*D_in], T> (Required)
        * See ``avg_pool``.
    kernel_sizes: const tensor<[K], i32> (Required)
        * See ``avg_pool``.
    strides: const tensor<[S],i32> (Optional, default to all 1s)
        * See ``avg_pool``.
    pad_type: const str (Required)
        * See ``avg_pool``.
    pad: const<[P],i32> (Optional, default to all 0s)
        * See ``avg_pool``.
    ceil_mode: const<bool>
        * See ``avg_pool``.

    Returns
    -------
    tensor<[n, C_out,*D_out], T>
        * See ``avg_pool``.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    avg_pool, l2_pool
    """

    # All behavior (signature, defaults, type inference) comes from Pooling.
    pass
| bsd-3-clause | 53d63e56a004cd7da07bd6654f25b37f | 33.623574 | 154 | 0.550736 | 3.410487 | false | false | false | false |
apple/coremltools | coremltools/test/pipeline/test_model_updatable.py | 1 | 28313 | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import shutil
import tempfile
import unittest
import numpy as _np
import coremltools.models.datatypes as datatypes
from coremltools.models import MLModel
from coremltools.models.neural_network import (AdamParams,
NeuralNetworkBuilder, SgdParams,
quantization_utils)
from coremltools.models.pipeline import PipelineClassifier, PipelineRegressor
from coremltools.models.utils import save_spec
class LayerSelector(quantization_utils.QuantizedLayerSelector):
    """Quantization selector that excludes a single layer, by name, from
    quantization while otherwise following the base selector's policy."""

    def __init__(self, layer_name):
        super(LayerSelector, self).__init__()
        # Name of the one layer that must NOT be quantized.
        self.layer_name = layer_name

    def do_quantize(self, layer, weight_param="bias"):
        # Respect the base class veto first, then apply the name exclusion.
        if not super(LayerSelector, self).do_quantize(layer):
            return False
        return layer.name != self.layer_name
class MLModelUpdatableTest(unittest.TestCase):
@classmethod
def setUpClass(self):
self.model_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(self):
if os.path.exists(self.model_dir):
shutil.rmtree(self.model_dir)
def create_base_builder(self, is_updatable=True):
    """Build a small two-layer (3 -> 3 -> 3) inner-product network.

    Both layers are bias-free; when ``is_updatable`` is True, both are
    marked updatable. Also records input/output feature metadata on self.
    """
    self.input_features = [("input", datatypes.Array(3))]
    self.output_features = [("output", None)]
    self.output_names = ["output"]
    builder = NeuralNetworkBuilder(self.input_features, self.output_features)
    # (layer name, input blob, output blob) for the two stacked layers.
    topology = (
        ("ip1", "input", "hidden"),
        ("ip2", "hidden", "output"),
    )
    for layer_name, src_blob, dst_blob in topology:
        builder.add_inner_product(
            name=layer_name,
            W=_np.random.uniform(-0.5, 0.5, (3, 3)),
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name=src_blob,
            output_name=dst_blob,
        )
    if is_updatable:
        builder.make_updatable(["ip1", "ip2"])
    return builder
def test_updatable_model_creation_ce_sgd(self):
    """Save/reload an updatable model (cross-entropy loss + SGD) and verify the spec."""
    builder = self.create_base_builder()
    builder.add_softmax(
        name="softmax", input_name="output", output_name="softmax_output"
    )
    builder.set_categorical_cross_entropy_loss(
        name="cross_entropy", input="softmax_output"
    )
    builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0))
    builder.set_epochs(20, allowed_set=[10, 20, 30, 40])
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()
    self.assertTrue(spec.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)
    # BUGFIX: protobuf submessage access never returns None, so the previous
    # "... is not None" asserts were vacuous. Assert the oneof field is set.
    self.assertTrue(
        spec.neuralNetwork.updateParams.lossLayers[0].HasField(
            "categoricalCrossEntropyLossLayer"
        )
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.HasField("sgdOptimizer")
    )
    # Optimizer hyper-parameter defaults as configured above.
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue,
            1e-2,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue,
            10,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue,
            0,
            atol=1e-8,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
        )
    )
    # Allowed ranges / sets for the tunable parameters.
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values
        == [10]
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue
        == 1
    )
def test_updatable_model_creation_ce_adam(self):
    """Save/reload an updatable model (cross-entropy loss + Adam) and verify the spec."""
    builder = self.create_base_builder()
    builder.add_softmax(
        name="softmax", input_name="output", output_name="softmax_output"
    )
    builder.set_categorical_cross_entropy_loss(
        name="cross_entropy", input="softmax_output"
    )
    adam_params = AdamParams()
    adam_params.set_batch(value=10, allowed_set=[10, 20])
    builder.set_adam_optimizer(adam_params)
    builder.set_epochs(20)
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()
    self.assertTrue(spec.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)
    # BUGFIX: protobuf submessage access never returns None, so the previous
    # "... is not None" asserts were vacuous. Assert the oneof field is set.
    self.assertTrue(
        spec.neuralNetwork.updateParams.lossLayers[0].HasField(
            "categoricalCrossEntropyLossLayer"
        )
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.HasField("adamOptimizer")
    )
    # Adam hyper-parameter defaults (lr and batch set above, rest AdamParams defaults).
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue,
            1e-2,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue,
            10,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue,
            0.9,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue,
            0.999,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue,
            1e-8,
            atol=1e-8,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
        )
    )
    # Allowed ranges / sets for the tunable parameters.
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values
        == [10, 20]
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue
        == 1
    )
    self.assertTrue(spec.neuralNetwork.updateParams.epochs.set.values == [20])
def test_updatable_model_creation_mse_sgd(self):
    """Save/reload an updatable model (MSE loss + SGD) and verify the spec."""
    builder = self.create_base_builder()
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0))
    builder.set_epochs(20)
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()
    self.assertTrue(spec.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)
    # BUGFIX: this test configures a mean-squared-error loss, but previously
    # asserted (copy-pasted) that categoricalCrossEntropyLossLayer "is not
    # None" — which is vacuously true for any protobuf submessage. Assert
    # instead that the MSE loss layer is the field actually populated.
    self.assertTrue(
        spec.neuralNetwork.updateParams.lossLayers[0].HasField(
            "meanSquaredErrorLossLayer"
        )
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.HasField("sgdOptimizer")
    )
    # Optimizer hyper-parameter defaults as configured above.
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue,
            1e-2,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue,
            10,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue,
            0,
            atol=1e-8,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
        )
    )
    # Allowed ranges / sets for the tunable parameters.
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values
        == [10]
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue
        == 1
    )
def test_updatable_model_creation_mse_adam(self):
    """Save/reload an updatable model (MSE loss + Adam) and verify the spec."""
    builder = self.create_base_builder()
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
    )
    builder.set_epochs(20, allowed_set=[10, 20, 30])
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()
    self.assertTrue(spec.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)
    # BUGFIX: this test configures a mean-squared-error loss, but previously
    # asserted (copy-pasted) that categoricalCrossEntropyLossLayer "is not
    # None" — which is vacuously true for any protobuf submessage. Assert
    # instead that the MSE loss layer is the field actually populated.
    self.assertTrue(
        spec.neuralNetwork.updateParams.lossLayers[0].HasField(
            "meanSquaredErrorLossLayer"
        )
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.HasField("adamOptimizer")
    )
    # Adam hyper-parameter defaults as configured above.
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue,
            1e-2,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue,
            10,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue,
            0.9,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue,
            0.999,
            atol=1e-4,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue,
            1e-8,
            atol=1e-8,
        )
    )
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
        )
    )
    # Allowed ranges / sets for the tunable parameters.
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values
        == [10]
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue
        == 0
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue
        == 1
    )
    self.assertTrue(
        spec.neuralNetwork.updateParams.epochs.set.values == [10, 20, 30]
    )
def test_nn_set_cce_without_softmax_fail(self):
    """Cross-entropy loss must be rejected when no softmax feeds it."""
    builder = self.create_base_builder()
    # No softmax layer exists, so attaching CCE to "output" must raise.
    with self.assertRaises(ValueError):
        builder.set_categorical_cross_entropy_loss(
            name="cross_entropy", input="output"
        )
def test_nn_set_cce_invalid(self):
    """Cross-entropy loss must be attached to the softmax output blob."""
    builder = self.create_base_builder()
    builder.add_softmax(
        name="softmax", input_name="output", output_name="softmax_output"
    )
    # "output" is the softmax *input*, not its output, so this must raise.
    with self.assertRaises(ValueError):
        builder.set_categorical_cross_entropy_loss(
            name="cross_entropy", input="output"
        )
def test_nn_set_softmax_updatable_invalid(self):
    """A softmax layer has no weights and cannot be marked updatable."""
    builder = self.create_base_builder()
    builder.add_softmax(
        name="softmax", input_name="output", output_name="softmax_output"
    )
    with self.assertRaises(ValueError):
        builder.make_updatable(["softmax"])
def test_nn_set_training_input(self):
    """Configuring loss/optimizer must auto-populate the training inputs."""
    builder = self.create_base_builder()
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
    )
    builder.set_epochs(20, allowed_set=[10, 20, 30])
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()
    # Expect the model input plus the auto-generated ground-truth feature.
    expected = (("input", "multiArrayType"), ("output_true", "multiArrayType"))
    for pos, (feature_name, proto_type) in enumerate(expected):
        self.assertEqual(spec.description.trainingInput[pos].name, feature_name)
        self.assertEqual(
            spec.description.trainingInput[pos].type.WhichOneof("Type"),
            proto_type,
        )
def test_nn_builder_with_training_features(self):
    """As test_nn_set_training_input, but with an explicit output feature type."""
    input_features = [("input", datatypes.Array(3))]
    output_features = [("output", datatypes.Array(3))]
    builder = NeuralNetworkBuilder(input_features, output_features)
    topology = (
        ("ip1", "input", "hidden"),
        ("ip2", "hidden", "output"),
    )
    for layer_name, src_blob, dst_blob in topology:
        builder.add_inner_product(
            name=layer_name,
            W=_np.random.uniform(-0.5, 0.5, (3, 3)),
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name=src_blob,
            output_name=dst_blob,
        )
    builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
    )
    builder.set_epochs(20, allowed_set=[10, 20, 30])
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()
    expected = (("input", "multiArrayType"), ("output_true", "multiArrayType"))
    for pos, (feature_name, proto_type) in enumerate(expected):
        self.assertEqual(spec.description.trainingInput[pos].name, feature_name)
        self.assertEqual(
            spec.description.trainingInput[pos].type.WhichOneof("Type"),
            proto_type,
        )
def test_nn_fp16_make_updatable_fail(self):
    """A fully FP16-quantized model must refuse to be marked updatable."""
    base_builder = self.create_base_builder(is_updatable=False)
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    save_spec(base_builder.spec, model_path)
    fp16_model = quantization_utils.quantize_weights(
        MLModel(model_path), 16, "linear"
    )
    fp16_builder = NeuralNetworkBuilder(spec=fp16_model._spec)
    # fails since an FP16 model cannot be marked updatable
    with self.assertRaises(ValueError):
        fp16_builder.make_updatable(["ip1", "ip2"])
def test_nn_partial_fp16_make_updatable_fail(self):
    # NOTE(review): a later method in this class re-defines this exact name,
    # so this definition is shadowed and never collected/run by unittest.
    nn_builder = self.create_base_builder(is_updatable=False)
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(nn_builder.spec, model_path)
    mlmodel = MLModel(model_path)
    # LayerSelector excludes "ip1" from quantization, so "ip2" (and the
    # rest of the model) is quantized to FP16.
    selector = LayerSelector(layer_name='ip1')
    quantized_model = quantization_utils.quantize_weights(mlmodel, 16, "linear", selector=selector)
    q_nn_builder = NeuralNetworkBuilder(spec=quantized_model._spec)
    # fails since model has a layer with FP16 bias
    with self.assertRaises(ValueError):
        q_nn_builder.make_updatable(["ip2"])
def test_nn_partial_fp16_make_updatable_quantized_layer_fail(self):
    """make_updatable must fail on a partially FP16-quantized model."""
    base_builder = self.create_base_builder(is_updatable=False)
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(base_builder.spec, model_path)
    model = MLModel(model_path)
    # Exclude "ip2" from quantization; the remainder becomes FP16.
    selector = LayerSelector(layer_name='ip2')
    quantized = quantization_utils.quantize_weights(
        model, 16, "linear", selector=selector
    )
    q_builder = NeuralNetworkBuilder(spec=quantized._spec)
    # fails since model has a layer with FP16 bias
    with self.assertRaises(ValueError):
        q_builder.make_updatable(["ip2"])
def test_nn_updatable_quantize_fp16_fail(self):
    """Quantizing an already-updatable model to FP16 must fail.

    BUGFIX: this method previously reused the name
    ``test_nn_partial_fp16_make_updatable_fail``, silently shadowing the
    earlier test of that name so it was never run. Renamed to describe
    what it actually checks.
    """
    nn_builder = self.create_base_builder()  # updatable by default
    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(nn_builder.spec, model_path)
    mlmodel = MLModel(model_path)
    # fails since updatable models cannot get quantized to FP16
    with self.assertRaises(Exception):
        quantization_utils.quantize_weights(mlmodel, 16, "linear")
def test_pipeline_regressor_make_updatable(self):
    """make_updatable on a PipelineRegressor: rejected while empty or while
    the last sub-model is not updatable; succeeds otherwise and freezes the
    pipeline against further add_model calls."""
    builder = self.create_base_builder()
    builder.spec.isUpdatable = False
    training_input = [("input", datatypes.Array(3)), ("target", "Double")]
    # fails due to missing sub-models
    p_regressor = PipelineRegressor(
        self.input_features, self.output_names, training_input
    )
    with self.assertRaises(ValueError):
        p_regressor.make_updatable()
    self.assertEqual(p_regressor.spec.isUpdatable, False)
    # fails due to sub-model being not updatable
    p_regressor.add_model(builder.spec)
    with self.assertRaises(ValueError):
        p_regressor.make_updatable()
    self.assertEqual(p_regressor.spec.isUpdatable, False)
    builder.spec.isUpdatable = True
    p_regressor.add_model(builder.spec)
    # Adding an updatable sub-model does not flip the pipeline flag by itself.
    self.assertEqual(p_regressor.spec.isUpdatable, False)
    p_regressor.make_updatable()
    self.assertEqual(p_regressor.spec.isUpdatable, True)
    # Training inputs carry over from the constructor's training_input list.
    self.assertEqual(p_regressor.spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        p_regressor.spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType",
    )
    self.assertEqual(p_regressor.spec.description.trainingInput[1].name, "target")
    self.assertEqual(
        p_regressor.spec.description.trainingInput[1].type.WhichOneof("Type"),
        "doubleType",
    )
    # fails since once updatable does not allow adding new models
    with self.assertRaises(ValueError):
        p_regressor.add_model(builder.spec)
    self.assertEqual(p_regressor.spec.isUpdatable, True)
def test_pipeline_classifier_make_updatable(self):
    """Same contract as test_pipeline_regressor_make_updatable, but for
    PipelineClassifier with a string-typed target feature."""
    builder = self.create_base_builder()
    builder.spec.isUpdatable = False
    training_input = [("input", datatypes.Array(3)), ("target", "String")]
    # fails due to missing sub-models
    p_classifier = PipelineClassifier(
        self.input_features, self.output_names, training_features=training_input
    )
    with self.assertRaises(ValueError):
        p_classifier.make_updatable()
    self.assertEqual(p_classifier.spec.isUpdatable, False)
    # fails due to sub-model being not updatable
    p_classifier.add_model(builder.spec)
    with self.assertRaises(ValueError):
        p_classifier.make_updatable()
    self.assertEqual(p_classifier.spec.isUpdatable, False)
    builder.spec.isUpdatable = True
    p_classifier.add_model(builder.spec)
    # Adding an updatable sub-model does not flip the pipeline flag by itself.
    self.assertEqual(p_classifier.spec.isUpdatable, False)
    p_classifier.make_updatable()
    self.assertEqual(p_classifier.spec.isUpdatable, True)
    self.assertEqual(p_classifier.spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        p_classifier.spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType",
    )
    self.assertEqual(p_classifier.spec.description.trainingInput[1].name, "target")
    self.assertEqual(
        p_classifier.spec.description.trainingInput[1].type.WhichOneof("Type"),
        "stringType",
    )
    # fails since once updatable does not allow adding new models
    with self.assertRaises(ValueError):
        p_classifier.add_model(builder.spec)
    self.assertEqual(p_classifier.spec.isUpdatable, True)
def test_pipeline_classifier_set_training_inputs(self):
    """Like test_pipeline_classifier_make_updatable, but the training inputs
    are provided after construction via set_training_input()."""
    builder = self.create_base_builder()
    builder.spec.isUpdatable = False
    training_input = [("input", datatypes.Array(3)), ("target", "String")]
    # fails due to missing sub-models
    p_classifier = PipelineClassifier(self.input_features, self.output_names)
    p_classifier.set_training_input(training_input)
    with self.assertRaises(ValueError):
        p_classifier.make_updatable()
    self.assertEqual(p_classifier.spec.isUpdatable, False)
    # fails due to sub-model being not updatable
    p_classifier.add_model(builder.spec)
    with self.assertRaises(ValueError):
        p_classifier.make_updatable()
    self.assertEqual(p_classifier.spec.isUpdatable, False)
    builder.spec.isUpdatable = True
    p_classifier.add_model(builder.spec)
    # Adding an updatable sub-model does not flip the pipeline flag by itself.
    self.assertEqual(p_classifier.spec.isUpdatable, False)
    p_classifier.make_updatable()
    self.assertEqual(p_classifier.spec.isUpdatable, True)
    self.assertEqual(p_classifier.spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        p_classifier.spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType",
    )
    self.assertEqual(p_classifier.spec.description.trainingInput[1].name, "target")
    self.assertEqual(
        p_classifier.spec.description.trainingInput[1].type.WhichOneof("Type"),
        "stringType",
    )
    # fails since once updatable does not allow adding new models
    with self.assertRaises(ValueError):
        p_classifier.add_model(builder.spec)
    self.assertEqual(p_classifier.spec.isUpdatable, True)
def test_shuffle_on_by_default(self):
    """Updatable models must default to shuffling their training data."""
    builder = self.create_base_builder()
    # create_base_builder already marked both layers updatable.
    shuffle_default = builder.nn_spec.updateParams.shuffle.defaultValue
    self.assertTrue(
        shuffle_default,
        "Shuffle not turned on by default for updatable models",
    )
| bsd-3-clause | 8d9b5817f1b99c786d89d1a8e624241f | 34.569095 | 104 | 0.606612 | 3.929087 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/insert_get_tuple.py | 1 | 3398 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import copy
from ..parsed_tf_node import ParsedTFNode
def insert_get_tuple(gddict):
    """
    TensorFlow uses input "nodename:i" to denote "get tuple i" from "nodename".
    Here we split it so that:

        node1:i -> node2

    gets transformed into

        node1 -> get_tuple(i) --> node2

    Takes a graph in "dict{str, ParsedTFNode}" form, and returns a new graph.
    We do not do this for control flow nodes (Switch, Enter, Exit, Merge,
    LoopCond, NextIteration). For these nodes, we just convert

        node1:i -> node2

    to

        node1 -> node2
    """
    retdict = {}
    get_tuple_op_var_index = 1
    inserted_ops = {}

    def make_op(input_node, index, new_node_name, gto_make_op_cache):
        # Reuse a previously inserted get_tuple for the same (node, index) pair
        # so every consumer shares one extraction op.
        cache_key = (input_node, index)
        if cache_key in gto_make_op_cache:
            return gto_make_op_cache[cache_key]
        inserted_op = ParsedTFNode()
        inserted_op.name = new_node_name
        inserted_op.op = "get_tuple"
        inserted_op.inputs = [input_node]
        inserted_op.attr["index"] = index
        inserted_ops[new_node_name] = inserted_op
        gto_make_op_cache[cache_key] = inserted_op
        return inserted_op

    # Control-flow ops (and a few others) never get a get_tuple inserted ...
    exclusions = [
        "Switch",
        "Enter",
        "Exit",
        "Merge",
        "LoopCond",
        "NextIteration",
        "TensorArrayV3",
        "Const",
    ]
    # ... while these multi-output ops always do.
    inclusions = ["IdentityN", "Split", "SplitV", "LSTMBlockCell", "TopK", "TopKV2", "Unpack", "BlockLSTM", "BlockLSTMV2", "NonMaxSuppressionV5"]
    gto_make_op_cache = {}
    for name in gddict:
        # (removed a dead `new_node = ParsedTFNode()` that was immediately
        # overwritten by this deepcopy)
        new_node = copy.deepcopy(gddict[name])
        new_inputs = []
        for orig_input in new_node.inputs:
            if ":" in orig_input:
                input_node, input_index = orig_input.split(":")
            else:
                input_node = orig_input
                input_index = 0
            src = gddict[input_node]
            needs_get_tuple = (
                "_output_shapes" in src.attr
                and len(src.attr["_output_shapes"]) > 1
                and src.op not in exclusions
            ) or (src.op in inclusions)
            if needs_get_tuple:
                get_tuple_node_name = "gto_%s" % (get_tuple_op_var_index)
                new_inputs.append(
                    make_op(
                        input_node,
                        int(input_index),
                        get_tuple_node_name,
                        gto_make_op_cache,
                    ).name
                )
                get_tuple_op_var_index += 1
            else:
                new_inputs.append(orig_input)
        new_node.inputs = new_inputs
        retdict[name] = new_node
    retdict.update(inserted_ops)
    # Force fix up the remaining node names by dropping the ":" suffix
    # (inputs of excluded ops kept their "name:i" form above).
    for v in retdict.values():
        for idx in range(len(v.inputs)):
            if ":" in v.inputs[idx]:
                nodename, nodeindex = v.inputs[idx].split(":")
                v.inputs[idx] = nodename
    return retdict
| bsd-3-clause | 2b9d1682f56165ea6793cf8f13437a6a | 29.612613 | 145 | 0.544732 | 3.653763 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/frontend/tensorflow/tf_graph_pass/functionalize_loops.py | 1 | 19047 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools import _logger as logger
from ..basic_graph_ops import (connect_dests, connect_edge, connect_sources,
delete_node, disconnect_edge, replace_dest,
replace_source)
from ..parsed_tf_node import ParsedTFNode
from ..tfssa import SSAFunction
from .visitors import (FindAllReachableNodes, FindImmediateDownstreamNodes,
FindImmediateUpstreamNodes, FindSubgraph)
class FunctionalizeLoops:
    """
    Turns while loops in TensorFlow dataflow graph into the functional form:
    while(cond_function, body_function)
    Usage:
    Given a graph in tfssa (the NetworkEnsemble defined in network.py) form:
    This will functionalize *ONE* loop in the main function.
    f = FunctionalizeLoops()
    ret = f.functionalize_loops(self, tfssa, "main")
    if ret is True, one loop has been functionalized, and the new functions
    added to tfssa. If False, there is no loop to functionalize.
    Generally, repeated calls to this will be necessary to catch all loops.
    Instead, use functionalize_loops.
    """
    def __init__(self):
        # Node-name lists describing one while-loop frame.
        # All of these are populated by _search() and consumed by
        # functionalize_loops().
        self.exits = None            # "Exit" nodes: values leaving the loop frame
        self.merges = None           # "Merge" nodes: join Enter / NextIteration values
        self.enters = None           # non-constant "Enter" nodes (loop-carried vars)
        self.constant_enters = None  # "Enter" nodes flagged is_constant (loop invariants)
        self.switches = None         # "Switch" nodes: route values on the loop condition
        self.subgraph = None         # all nodes between the Enters and the Exits
        self.loopcond = None         # the single "LoopCond" node feeding the Switches
        self.is_constant = None      # bool per entry of self.enters (before the split)
        self.next_iterations = None  # "NextIteration" nodes feeding the Merges
        self.cond = None             # node names belonging to the condition subgraph
        self.body = None             # node names belonging to the body subgraph
    def _search(self, g, node):
        """
        Populate the attribute lists above for the loop frame that the given
        "Enter" node (name or ParsedTFNode) belongs to. ``g`` is the dict-like
        graph mapping node name -> ParsedTFNode.
        """
        if not isinstance(node, ParsedTFNode):
            node = g[node]
        # we look for NextIteration nodes
        assert node.op == "Enter"
        frame_name = node.attr["frame_name"]
        logger.debug("Fixing frame name: %s", frame_name)
        # find all the enter args
        # this is basically the enter frame
        # functionalize_control_flow.cc:FunctionalizeControlFlow (1160-1196)
        self.enters = [
            k for k, v in g.items() if v.attr.get("frame_name", "") == frame_name
        ]
        self.is_constant = [
            bool(g[n].attr.get("is_constant", False)) for n in self.enters
        ]
        # Walk the TF control-flow primitives of the frame:
        # Enter -> Merge -> Switch -> Exit, with NextIteration feeding back
        # into Merge and LoopCond driving the Switches.
        self.merges = (
            FindImmediateDownstreamNodes(lambda x: x.op == "Merge")
            .visit_many(g, self.enters)
            .get_result()
        )
        self.next_iterations = (
            FindImmediateUpstreamNodes(lambda x: x.op == "NextIteration")
            .visit_many(g, self.merges)
            .get_result()
        )
        self.switches = (
            FindImmediateDownstreamNodes(lambda x: x.op == "Switch")
            .visit_many(g, self.merges)
            .get_result()
        )
        self.exits = (
            FindImmediateDownstreamNodes(lambda x: x.op == "Exit")
            .visit_many(g, self.switches)
            .get_result()
        )
        self.loopcond = list(
            set(
                FindImmediateUpstreamNodes(lambda x: x.op == "LoopCond")
                .visit_many(g, self.switches)
                .get_result()
            )
        )
        self.subgraph = FindSubgraph(self.exits).visit_many(g, self.enters).get_result()
        self.cond = FindSubgraph(self.switches).visit_many(g, self.merges).get_result()
        self.body = (
            FindSubgraph([node.name] + self.exits)
            .visit_many(g, self.switches)
            .get_result()
        )
        # drop merges and switches from cond and body
        self.cond = [
            i for i in self.cond if i not in (self.merges + self.switches + self.enters)
        ]
        self.body = (
            [i for i in self.body if i not in ([node.name] + self.switches)]
            + [node.name]
            + self.switches
            + self.merges
            + self.enters
        )
        # ok. we can now rebuild.
    def _fix_graph_invariants(self, g):
        """
        Sanity-check the frame discovered by _search() and split any Enter
        node with multiple consumers into one Enter per consumer, mirroring
        TF's functionalize_control_flow.cc behavior.
        """
        import copy
        # NOTE(review): the results of these check() calls are discarded, so
        # they currently validate nothing — presumably these were meant to be
        # asserts; confirm before relying on them.
        check = lambda x: x is not None and len(x) > 0
        check(self.exits)
        check(self.merges)
        check(self.enters)
        check(self.switches)
        check(self.subgraph)
        check(self.cond)
        check(self.loopcond)
        assert len(self.loopcond) == 1
        # maintain the invariant of a unique Enter node per argument
        # functionalize_control_flow.cc:FunctionalizeLoop (295)
        for i in copy.copy(self.enters):
            node = g[i]
            assert len(node.outputs) > 0
            assert len(node.inputs) == 1
            assert len(node.control_inputs) == 0
            assert len(node.control_outputs) == 0
            if len(node.outputs) == 1:
                continue
            node_output_copy = copy.copy(node.outputs)
            for j in range(1, len(node_output_copy)):
                # make a new enter node for each
                new_enter_node = copy.deepcopy(node)
                new_enter_node.inputs = []
                new_enter_node.outputs = []
                new_enter_node.name = node.name + "/trsplit%d" % (j)
                g[new_enter_node.name] = new_enter_node
                logger.debug("splitting %s", node.name)
                # connect the new node
                enter_output = node_output_copy[j]
                disconnect_edge(g, node.name, enter_output)
                connect_edge(g, new_enter_node.name, enter_output)
                connect_sources(g, node.inputs, new_enter_node.name)
                # insert into graph
                self.enters.append(new_enter_node.name)
    def functionalize_loops(self, tfssa, function_to_functionalize):
        """
        Functionalize the first while-loop found in the named function of
        ``tfssa``. Rewrites the graph in place, registers the new cond/body
        functions on ``tfssa``, and returns True; returns False when the
        function contains no "Enter" node (i.e. nothing to do).
        """
        g = tfssa.functions[function_to_functionalize].graph
        loopni = [a for a in g if g[a].op == "Enter"]
        if len(loopni) == 0:
            return False
        self._search(g, loopni[0])
        # Partition the Enter nodes into loop-carried variables and loop
        # invariants (constants).
        self.constant_enters = [
            self.enters[i] for i in range(len(self.enters)) if self.is_constant[i]
        ]
        self.enters = [
            self.enters[i] for i in range(len(self.enters)) if not self.is_constant[i]
        ]
        self._fix_graph_invariants(g)
        # for each enter node, find the corresponding downstream merge node
        enter_corresponding_merge = [
            FindImmediateDownstreamNodes(lambda x: x.op == "Merge")
            .visit(g, enter)
            .get_result()[0]
            for enter in self.enters
        ]
        merge_corresponding_ni = [
            FindImmediateUpstreamNodes(lambda x: x.op == "NextIteration")
            .visit(g, merge)
            .get_result()[0]
            for merge in enter_corresponding_merge
        ]
        switch_corresponding_merge = []
        for merge in enter_corresponding_merge:
            switch_after_merge = (
                FindImmediateDownstreamNodes(lambda x: x.op == "Switch")
                .visit(g, merge)
                .get_result()
            )
            if len(switch_after_merge) > 0:
                switch_corresponding_merge.append(switch_after_merge[0])
            else:
                # There are some situations there is no switch not for a given
                # merge. While odd... its ok. we construct one
                # In this situation there is no Exit either, but it can be
                # constructed later on
                new_switch_node = ParsedTFNode()
                new_switch_node.op = "Switch"
                new_switch_node.name = tfssa._find_free_name("fake_switch_")
                g[new_switch_node.name] = new_switch_node
                connect_edge(g, merge, new_switch_node.name)
                connect_edge(g, self.loopcond[0], new_switch_node.name)
                switch_corresponding_merge.append(new_switch_node.name)
        exit_corresponding_switch = []
        for switch in switch_corresponding_merge:
            res = (
                FindImmediateDownstreamNodes(lambda x: x.op == "Exit")
                .visit(g, switch)
                .get_result()
            )
            if len(res) > 0:
                exit_corresponding_switch.append(res[0])
            else:
                # Fabricate the missing Exit (see fake_switch_ above).
                new_exit_node = ParsedTFNode()
                new_exit_node.op = "Exit"
                new_exit_node.name = tfssa._find_free_name("fake_exit_")
                g[new_exit_node.name] = new_exit_node
                connect_edge(g, switch, new_exit_node.name)
                exit_corresponding_switch.append(new_exit_node.name)
        # The functional "while" node that replaces the whole frame.
        while_loop = ParsedTFNode()
        while_loop.op = "while"
        while_loop.name = tfssa._find_free_name("while_")
        g[while_loop.name] = while_loop
        # Build the Loop Condition
        # replace all enters with a single make_tuple
        # we replace merge with get_tuple and turn it into a function call
        # terminated with LoopCond
        make_inputs = ParsedTFNode()
        make_inputs.op = "make_tuple"
        make_inputs.name = tfssa._find_free_name("make_input_")
        g[make_inputs.name] = make_inputs
        for enter in self.enters:
            replace_dest(g, g[enter].inputs[0], enter, make_inputs.name)
        # Constant (loop-invariant) inputs are appended after the
        # loop-carried ones; remember where they start.
        constant_base_index = len(make_inputs.inputs)
        for enter in self.constant_enters:
            replace_dest(g, g[enter].inputs[0], enter, make_inputs.name)
        connect_edge(g, make_inputs.name, while_loop.name)
        connect_dests(g, while_loop.name, exit_corresponding_switch)
        # build the cond function
        cond_body = ParsedTFNode()
        cond_body.op = "function_entry"
        cond_body.name = tfssa._find_free_name("cond_function_")
        cond_body.inputs = []
        g[cond_body.name] = cond_body
        for merge_idx in range(len(enter_corresponding_merge)):
            merge = enter_corresponding_merge[merge_idx]
            switch = switch_corresponding_merge[merge_idx]
            enter_node = g[self.enters[merge_idx]]
            merge_node = g[merge]
            if switch is not None:
                switch_node = g[switch]
            else:
                switch_node = None
            # Each Merge becomes a get_tuple that reads argument merge_idx
            # of the cond function.
            merge_node.op = "get_tuple"
            merge_node.attr = {"index": merge_idx}
            # disconnect merge from switch
            # disconnect loopcond from switch
            disconnect_edge(g, enter_node.name, merge_node.name)
            if switch_node is not None:
                disconnect_edge(g, merge_node.name, switch_node.name)
                disconnect_edge(g, self.loopcond[0], switch_node.name)
            for i in merge_node.inputs[:]:
                disconnect_edge(g, i, merge_node.name)
            connect_edge(g, cond_body.name, merge_node.name)
            # delete get_tuple if it does nothing
            if len(merge_node.outputs) == 0:
                delete_node(g, merge)
        g[self.loopcond[0]].op = "return"
        # build the body function
        body = ParsedTFNode()
        body.op = "function_entry"
        body.name = tfssa._find_free_name("body_function_")
        body.inputs = []
        g[body.name] = body
        for switch_idx in range(len(switch_corresponding_merge)):
            switch = switch_corresponding_merge[switch_idx]
            exit = exit_corresponding_switch[switch_idx]
            disconnect_edge(g, switch, exit)
            # replace switch with a get_tuple
            switch_node = g[switch]
            switch_node.op = "get_tuple"
            switch_node.attr = {"index": switch_idx}
            connect_edge(g, body.name, switch_node.name)
            # delete get_tuple if it does nothing
            if len(switch_node.outputs) == 0:
                delete_node(g, switch)
        # replace all next_iteration with a single make_tuple
        # we replace merge with get_tuple and turn it into a function call
        # terminated with LoopCond
        make_outputs = ParsedTFNode()
        make_outputs.op = "make_tuple"
        make_outputs.name = tfssa._find_free_name("make_output_")
        g[make_outputs.name] = make_outputs
        for ni in merge_corresponding_ni:
            connect_edge(g, g[ni].inputs[0], make_outputs.name)
        # connect constant enters to come from function
        # connect constant enters to exit
        for idx, enter in enumerate(self.constant_enters):
            for output in list(g[enter].outputs):
                if output not in self.cond and output not in self.body:
                    # The consumer lives outside the recorded cond/body sets;
                    # grow whichever set can actually reach it.
                    cond_intersection = (
                        FindSubgraph(self.cond).visit(g, output).get_result()
                    )
                    body_intersection = (
                        FindSubgraph(self.body).visit(g, output).get_result()
                    )
                    if len(cond_intersection) > 0:
                        cond_intersection.append(output)
                        self.cond += cond_intersection
                    if len(body_intersection) > 0:
                        body_intersection.append(output)
                        self.body += body_intersection
                get_tuple = ParsedTFNode()
                get_tuple.op = "get_tuple"
                get_tuple.name = tfssa._find_free_name("get_tuple_const_")
                get_tuple.attr = {"index": idx + constant_base_index}
                g[get_tuple.name] = get_tuple
                if output in self.cond:
                    connect_edge(g, cond_body.name, get_tuple.name)
                elif output in self.body:
                    connect_edge(g, body.name, get_tuple.name)
                replace_source(g, enter, output, get_tuple.name)
            # body must accept and return everything
            get_tuple = ParsedTFNode()
            get_tuple.op = "get_tuple"
            get_tuple.name = tfssa._find_free_name("get_tuple_const_")
            get_tuple.attr = {"index": idx + constant_base_index}
            g[get_tuple.name] = get_tuple
            connect_edge(g, body.name, get_tuple.name)
            connect_edge(g, get_tuple.name, make_outputs.name)
        assert len(g[make_outputs.name].inputs) == len(g[make_inputs.name].inputs)
        output_return = ParsedTFNode()
        output_return.op = "return"
        output_return.name = tfssa._find_free_name("body_return_")
        g[output_return.name] = output_return
        connect_edge(g, make_outputs.name, output_return.name)
        while_loop.attr["cond_function"] = cond_body.name
        while_loop.attr["body_function"] = body.name
        # The original control-flow primitives are now redundant.
        for i in self.enters:
            delete_node(g, i)
        for i in self.next_iterations:
            delete_node(g, i)
        for i in self.constant_enters:
            delete_node(g, i)
        # Each Exit becomes a get_tuple reading output i of the while node.
        for i in range(len(exit_corresponding_switch)):
            exit_node = exit_corresponding_switch[i]
            g[exit_node].op = "get_tuple"
            g[exit_node].attr = {"index": i}
        cond_function = (
            FindSubgraph(self.loopcond[0]).visit(g, cond_body.name).get_result()
        )
        cond_function = set(cond_function + [self.loopcond[0], cond_body.name])
        body_function = (
            FindSubgraph(output_return.name).visit(g, body.name).get_result()
        )
        body_function = set(body_function + [body.name, output_return.name])
        # trace input constants associated with the cond_graph
        # and the body_graph. These constants can only have one consumer
        # for now. Any more and we will either need to associate
        # it as an argument, or split the constant.
        cond_constants = (
            FindImmediateUpstreamNodes(lambda x: x.op == "Const")
            .visit_many(g, cond_function)
            .get_result()
        )
        body_constants = (
            FindImmediateUpstreamNodes(lambda x: x.op == "Const")
            .visit_many(g, body_function)
            .get_result()
        )
        # for const_node in cond_constants + body_constants:
        #     assert(len(g[const_node].outputs) == 1)
        cond_function = cond_function.union(set(cond_constants))
        body_function = body_function.union(set(body_constants))
        # Anything reachable from the extracted functions but not part of
        # them is dead after extraction; drop it.
        downstream_cond = (
            FindAllReachableNodes(lambda x: True)
            .visit_many(g, cond_function)
            .get_result()
        )
        downstream_cond = set(downstream_cond) - cond_function
        if len(downstream_cond) > 0:
            logger.debug(
                "Disconnecting unused variables in condition function %s",
                downstream_cond,
            )
            for i in downstream_cond:
                delete_node(g, i)
        downstream_body = (
            FindAllReachableNodes(lambda x: True)
            .visit_many(g, body_function)
            .get_result()
        )
        downstream_body = set(downstream_body) - body_function
        if len(downstream_body) > 0:
            logger.debug(
                "Disconnecting unused variables in body function %s", downstream_body
            )
            for i in downstream_body:
                delete_node(g, i)
        # Carve the cond/body nodes out of the main graph.
        cond_graph = {k: v for k, v in g.items() if k in cond_function}
        body_graph = {k: v for k, v in g.items() if k in body_function}
        g = {
            k: v
            for k, v in g.items()
            if k not in cond_function and k not in body_function
        }
        # localize control dependencies
        # In the main graph, reattach the control dependency to the while op
        for k, v in g.items():
            for idx in range(len(v.control_inputs)):
                if v.control_inputs[idx] not in g:
                    v.control_inputs[idx] = while_loop.name
                    while_loop.control_outputs.append(k)
            for idx in range(len(v.control_outputs)):
                if v.control_outputs[idx] not in g:
                    v.control_outputs[idx] = while_loop.name
                    while_loop.control_inputs.append(k)
        # in the cond and body graphs, drop non-local control dependencies
        # entirely
        for graph in [cond_graph, body_graph]:
            for k, v in graph.items():
                for idx in range(len(v.control_inputs) - 1, -1, -1):
                    if v.control_inputs[idx] not in graph:
                        v.control_inputs.pop(idx)
                for idx in range(len(v.control_outputs) - 1, -1, -1):
                    if v.control_outputs[idx] not in graph:
                        v.control_outputs.pop(idx)
        tfssa.functions[function_to_functionalize] = SSAFunction(g)
        tfssa.add_function(cond_body.name, SSAFunction(cond_graph))
        tfssa.add_function(body.name, SSAFunction(body_graph))
        return True
def functionalize_loops(tfssa):
    """
    Functionalize every while-loop in every function of ``tfssa``.

    Each FunctionalizeLoops pass rewrites at most one loop per function, so
    keep sweeping all functions until a complete sweep changes nothing.
    """
    changed = True
    while changed:
        changed = False
        # Snapshot the keys: functionalizing adds new cond/body functions.
        for fname in list(tfssa.functions.keys()):
            if FunctionalizeLoops().functionalize_loops(tfssa, fname):
                changed = True
| bsd-3-clause | 6c8b70a0f7270e49179132f1f8262725 | 39.61194 | 88 | 0.559091 | 3.892704 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/backend/nn/op_mapping.py | 1 | 129299 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as _np
from tqdm import tqdm as _tqdm
from coremltools import _logger as logger
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry
from coremltools.converters.mil.mil.types.symbolic import (any_symbolic,
is_symbolic,
is_variadic)
from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network.quantization_utils import \
_convert_array_to_nbit_quantized_bytes
from coremltools.proto import NeuralNetwork_pb2
from .mil_to_nn_mapping_registry import (MIL_TO_NN_MAPPING_REGISTRY,
register_mil_to_nn_mapping)
def convert_ops(const_context, builder, ops, outputs):
    """
    Translate a sequence of MIL ops into NeuralNetwork layers.

    const_context: list[set of str]: const name for v1 & v2 (the same)
    builder: neural_network.NeuralNetworkBuilder
    ops: list[Operation], usually from Block.operations.
    outputs: list[Var]. block outputs
    """
    # Open a fresh const scope for this block.
    const_context.append(set())
    registered_custom = SSAOpRegistry.custom_ops
    for operation in _tqdm(ops, desc="Translating MIL ==> NeuralNetwork Ops", unit=" ops"):
        op_type = operation.op_type
        if op_type in registered_custom:
            translate = MIL_TO_NN_MAPPING_REGISTRY["custom_op"]
        else:
            translate = MIL_TO_NN_MAPPING_REGISTRY.get(op_type, None)
            if translate is None:
                msg = ("Op {} is used in the source model. This op is not supported "
                       "by the NeuralNetwork (compatibility with MacOS < 12, iOS < 15) model "
                       "type. To successfully convert this model, convert to the ML Program "
                       "model type (minimum target MacOS 12, iOS 15 and later).\n"
                       "Use coremltools.convert(..., convert_to=\"mlprogram\") to convert to ML Program.\n"
                       "block: {}")
                raise NotImplementedError(msg.format(op_type, operation.enclosing_block))
        # const is globally shared in nn.
        translate(const_context, builder, operation)
    for block_output in outputs:
        # If block return value is a const, we need to add it.
        if block_output.op is None:
            continue  # placeholder
        if block_output.op.op_type == "const":
            add_const(const_context, builder, block_output.name, block_output.val)
    const_context.pop()
def make_input(const_context, builder, variables):
    """
    Ensure that variables, if const, are added to builder.
    variables: list[Var] or Var or str. Inputs for an nn layer.
    Returns:
        list[str] or str: variables' names.
    """
    # Already a name: nothing to materialize.
    if isinstance(variables, str):
        return variables
    # A collection: resolve each element recursively.
    if isinstance(variables, (list, tuple)):
        return [make_input(const_context, builder, each) for each in variables]
    # A single Var: lazily emit its const payload the first time it is used
    # in the current const scope.
    var = variables
    producer = var.op
    if producer is not None and producer.op_type == "const" and var.name not in const_context[-1]:
        add_const(const_context, builder, var.name, var.val)
    return var.name
def _convert_pool(const_context, builder, op, mode, exclude_padding_from_average=True):
    """
    Emit NN pooling layer(s) for a MIL pooling op.

    mode: "average" or "max" (upper-cased for the NN layer_type).
    1D pooling is emulated as 2D pooling by expanding a height-1 axis and
    squeezing it afterwards; 3D pooling maps to add_pooling3d.
    """
    num_spatial_dimensions = len(op.kernel_sizes.val)
    # Explicit per-edge padding only exists for "custom"; otherwise zeros.
    op_pad = op.pad.val if op.pad_type.val == 'custom' \
        else [0] * num_spatial_dimensions * 2
    padding_type = op.pad_type.val.upper()
    same_padding_asymmetry_mode = "BOTTOM_RIGHT_HEAVY"
    if padding_type == "SAME_LOWER":
        if num_spatial_dimensions == 3:
            msg = "For the neuralnetwork backend, padding_mode ``same_lower`` is not supported for 3d pooling."
            raise ValueError(msg)
        # NN expresses same_lower as SAME with top-left-heavy asymmetry.
        padding_type = "SAME"
        same_padding_asymmetry_mode = "TOP_LEFT_HEAVY"
    if num_spatial_dimensions == 1:
        # Insert a height-1 axis so 1D pooling can use the 2D layer.
        builder.add_expand_dims(
            name=op.name + "_expanded",
            input_name=op.x.name,
            output_name=op.name + "_expanded",
            axes=[-2],
        )
        # nn's add_pool function does not support CUSTOM padding,
        # but VALID padding supports user-defined padding amounts.
        # Therefore we map CUSTOM padding to VALID padding.
        padding_type = "VALID" if padding_type == "CUSTOM" else padding_type
        builder.add_pooling(
            name=op.name,
            height=1,
            width=op.kernel_sizes.val[-1],
            stride_height=1,
            stride_width=op.strides.val[-1],
            layer_type=mode.upper(),
            padding_type="INCLUDE_LAST_PIXEL" if op.ceil_mode.val else padding_type,
            input_name=make_input(const_context, builder, op.name + "_expanded"),
            output_name=op.name + "_pool",
            exclude_pad_area=exclude_padding_from_average,
            padding_top=0,
            padding_bottom=0,
            padding_left=op_pad[0],
            padding_right=op_pad[1],
            is_global=False,
            same_padding_asymmetry_mode=same_padding_asymmetry_mode,
        )
        # Remove the fake height axis again.
        builder.add_squeeze(
            name=op.name + "_squeeze",
            input_name=op.name + "_pool",
            output_name=op.outputs[0].name,
            axes=[-2],
        )
    elif num_spatial_dimensions == 2:
        # nn's add_pool function does not support CUSTOM padding,
        # but VALID padding supports user-defined padding amounts.
        # Therefore we map CUSTOM padding to VALID padding.
        padding_type = "VALID" if padding_type == "CUSTOM" else padding_type
        builder.add_pooling(
            name=op.name,
            height=op.kernel_sizes.val[-2],
            width=op.kernel_sizes.val[-1],
            stride_height=op.strides.val[-2],
            stride_width=op.strides.val[-1],
            layer_type=mode.upper(),
            padding_type="INCLUDE_LAST_PIXEL" if op.ceil_mode.val else padding_type,
            input_name=make_input(const_context, builder, op.x),
            output_name=op.outputs[0].name,
            exclude_pad_area=exclude_padding_from_average,
            padding_top=op_pad[0],
            padding_bottom=op_pad[1],
            padding_left=op_pad[2],
            padding_right=op_pad[3],
            is_global=False,
            same_padding_asymmetry_mode=same_padding_asymmetry_mode,
        )
    elif num_spatial_dimensions == 3:
        builder.add_pooling3d(
            name=op.name,
            input_name=make_input(const_context, builder, op.x),
            output_name=op.outputs[0].name,
            pooling_type=mode.upper(),
            kernel_depth=op.kernel_sizes.val[-3],
            kernel_height=op.kernel_sizes.val[-2],
            kernel_width=op.kernel_sizes.val[-1],
            stride_depth=op.strides.val[-3],
            stride_height=op.strides.val[-2],
            stride_width=op.strides.val[-1],
            padding_mode=op.pad_type.val,
            custom_padding_front=op_pad[0],
            custom_padding_back=op_pad[1],
            custom_padding_top=op_pad[2],
            custom_padding_bottom=op_pad[3],
            custom_padding_left=op_pad[4],
            custom_padding_right=op_pad[5],
            average_pooling_count_excludes_padding=exclude_padding_from_average,
        )
    else:
        raise ValueError(
            "Unsupported number of spatial dimensions. Maximum is 3, but got %s"
            % num_spatial_dimensions
        )
def _try_convert_global_pool(const_context, builder, op, mode):
    """
    Optional performance optimization pass that tries to lower spatial
    reduce_mean / reduce_max to global_avg_pool / global_max_pool.

    Return True if the lowering happened, otherwise return False to
    continue as normal reduction op.
    """
    rank = op.x.rank
    # Only 4D/5D tensors with a concrete rank map onto NN global pooling.
    if is_variadic(rank) or rank not in {4, 5}:
        return False
    if op.keep_dims.val is False:
        return False
    # The reduction must be a pure spatial collapse: all leading dims
    # unchanged, and both trailing (spatial) dims reduced to 1.
    if tuple(op.outputs[0].shape[:-2]) != tuple(op.inputs["x"].shape[:-2]):
        return False
    if not all(s == 1 for s in op.outputs[0].shape[-2:]):
        return False
    builder.add_pooling(
        name=op.name,
        height=0,
        width=0,
        stride_height=0,
        stride_width=0,
        layer_type=mode.upper(),
        padding_type="valid".upper(),
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        is_global=True,
    )
    return True
def add_const(const_context, builder, name, val):
    """
    const_context (list of set of str): const names added to v1 builder. Const names are
    identical between v2 and v1
    name (str): name of const. Should be the same for v1 and v2.
    val: np.ndarray
    No return values as `name` is the name of const in v1.
    Comment: we don't need to add scalar const as they are just fields in
    layer proto message in NN.
    If we really need a const scalar, we upcast it to rank-1.
    """
    # Skip if any enclosing scope already materialized this const.
    for const_set in const_context:
        if name in const_set:
            logger.warning("Const {} was already added.".format(name))
            return
    if not isinstance(val, (_np.ndarray, _np.generic)):
        val = _np.array([val])
    # Builtin `float` is used instead of the `np.float` alias, which was
    # deprecated in NumPy 1.20 and removed in 1.24; both denote float64.
    if val.dtype != float:
        # nn proto only supports float32 activation. (e.g., pred in cond op
        # needs to be converted to float)
        val = val.astype(float)
    rank = len(val.shape)
    if rank == 0:
        # NN cannot represent rank-0 consts; upcast to a rank-1 tensor.
        builder.add_load_constant_nd(
            name=name, output_name=name, constant_value=val.reshape([1]), shape=[1]
        )
    else:
        builder.add_load_constant_nd(
            name=name, output_name=name, constant_value=val, shape=val.shape
        )
    const_context[-1].add(name)
    logger.info("added const {} for builder {}".format(name, builder))
# Helper routines for recurrent layers
def _expand_dim(builder, node_name, input_name, axes):
builder.add_expand_dims(
name=node_name, input_name=input_name, output_name=node_name, axes=axes
)
def _squeeze(builder, node_name, input_name, axes):
builder.add_squeeze(
name=node_name, input_name=input_name, output_name=node_name, axes=axes
)
def _split(x, sections, axis=0):
if x is None:
return None
if x.shape[axis] % sections != 0:
raise ValueError(
"Cannot split axis {} into {} sections for input of shape {}".format(
axis, sections, x.shape
)
)
return _np.split(x, sections, axis=axis)
@register_mil_to_nn_mapping
def avg_pool(const_context, builder, op):
    """Lower a MIL avg_pool op through the shared pooling translator."""
    exclude_pad = op.exclude_padding_from_average.val
    _convert_pool(
        const_context=const_context,
        builder=builder,
        op=op,
        mode="average",
        exclude_padding_from_average=exclude_pad,
    )
@register_mil_to_nn_mapping
def band_part(const_context, builder, op):
    """Lower a MIL band_part op to a MatrixBandPart layer."""
    input_name = make_input(const_context, builder, op.x)
    builder.add_matrix_band_part(
        name=op.name,
        input_name=input_name,
        output_name=op.outputs[0].name,
        num_lower=op.lower.val,
        num_upper=op.upper.val,
    )
@register_mil_to_nn_mapping
def batch_norm(const_context, builder, op):
    """
    Lower a MIL batch_norm op (rank 3, 4 or 5 input).

    1D (rank 3) inputs are expanded to 2D, normalized, and squeezed back.
    3D (rank 5) inputs are either decomposed into elementwise ops (when more
    than one dimension is symbolic) or reshaped to 4D around a regular
    batchnorm layer.
    """
    channels = op.x.shape[1]
    # Default to identity scale / zero shift when gamma/beta are absent.
    gamma = _np.array([1.0] * channels) if op.gamma is None else op.gamma.val
    beta = _np.array([0.0] * channels) if op.beta is None else op.beta.val
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name
    is_batchnorm_1d = op.x.rank == 3
    is_batchnorm_2d = op.x.rank == 4
    is_batchnorm_3d = op.x.rank == 5
    if is_batchnorm_1d:
        # Add a width-1 axis so the 2D batchnorm layer can be reused.
        x_name = op.name + "_expanded"
        builder.add_expand_dims(
            name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2],
        )
        out_name += "_batch_norm"
    if is_batchnorm_1d or is_batchnorm_2d:
        # batch norm 1d / 2d
        builder.add_batchnorm(
            name=op.name,
            channels=channels,
            gamma=gamma,
            beta=beta,
            mean=op.mean.val,
            variance=op.variance.val,
            input_name=x_name,
            output_name=out_name,
            compute_mean_var=False,
            instance_normalization=False,
            epsilon=op.epsilon.val,
        )
    elif is_batchnorm_3d:
        # batch norm 3d
        batch_size, channel, height, width, depth = op.x.shape
        assert not is_symbolic(channel), "Channel dimension must be known for batchnorm layer."
        symbolic_num = sum([is_symbolic(x) for x in op.x.shape])
        if symbolic_num > 1:
            # More than one unknown dimension: a static 4D reshape is not
            # expressible, so expand the parameters to rank 5 and compute
            # (x - mean) * (gamma / sqrt(var + eps)) + beta elementwise.
            gamma_expand = _np.expand_dims(gamma, axis=(0, 2, 3, 4))
            beta_expand = _np.expand_dims(beta, axis=(0, 2, 3, 4))
            mean_expand = _np.expand_dims(op.mean.val, axis=(0, 2, 3, 4))
            var_expand = _np.expand_dims(op.variance.val, axis=(0, 2, 3, 4))
            # compute batch norm 3d by decomposing it into elementwise operations
            negative_mean_name = op.name + "_negative_mean"
            add_const(const_context, builder, negative_mean_name, -mean_expand)
            numerator_name = op.name + "_numerator"
            builder.add_add_broadcastable(
                name=numerator_name,
                input_names=[x_name, negative_mean_name],
                output_name=numerator_name,
            )
            var_expand = var_expand + op.epsilon.val
            denominator = _np.sqrt(var_expand)
            gamma_expand = gamma_expand / denominator
            gamma_name = op.name + "_gamma"
            add_const(const_context, builder, gamma_name, gamma_expand)
            mul_name = op.name + "_mul"
            builder.add_multiply_broadcastable(
                name=mul_name,
                input_names=[numerator_name, gamma_name],
                output_name=mul_name,
            )
            beta_name = op.name + "_beta"
            add_const(const_context, builder, beta_name, beta_expand)
            builder.add_add_broadcastable(
                name=out_name,
                input_names=[mul_name, beta_name],
                output_name=out_name,
            )
        else:
            # At most one symbolic dim: fold two spatial dims together so the
            # rank-5 input becomes rank-4, with -1 absorbing the unknown dim.
            is_batch_symbloic = is_symbolic(batch_size)
            is_height_symbolic = is_symbolic(height)
            is_width_symbolic = is_symbolic(width)
            is_depth_symbolic = is_symbolic(depth)
            if is_batch_symbloic:
                shape1 = [-1, channel, height * width, depth]
                shape2 = [-1, channel, height, width, depth]
            elif is_height_symbolic:
                shape1 = [batch_size, channel, -1, width*depth]
                shape2 = [batch_size, channel, -1, width, depth]
            elif is_width_symbolic:
                shape1 = [batch_size, channel, -1, height*depth]
                shape2 = [batch_size, channel, height, -1, depth]
            elif is_depth_symbolic:
                shape1 = [batch_size, channel, height * width, -1]
                shape2 = [batch_size, channel, height, width, -1]
            else:
                shape1 = [batch_size, channel, height*width, depth]
                shape2 = [batch_size, channel, height, width, depth]
            reshape_4d_name = op.name + "_reshape_4d"
            builder.add_reshape_static(
                name=reshape_4d_name,
                input_name=x_name,
                output_name=reshape_4d_name,
                output_shape=shape1,
            )
            batchnorm_name = op.name + "_batchnorm_4d"
            builder.add_batchnorm(
                name=batchnorm_name,
                channels=channels,
                gamma=gamma,
                beta=beta,
                mean=op.mean.val,
                variance=op.variance.val,
                input_name=reshape_4d_name,
                output_name=batchnorm_name,
                compute_mean_var=False,
                instance_normalization=False,
                epsilon=op.epsilon.val,
            )
            # Restore the original rank-5 layout.
            builder.add_reshape_static(
                name=out_name,
                input_name=batchnorm_name,
                output_name=out_name,
                output_shape=shape2,
            )
    # Squeeze added `Width` dimension for 1d case
    if is_batchnorm_1d:
        x_name = op.name + "_squeeze"
        builder.add_squeeze(
            name=x_name,
            input_name=out_name,
            output_name=op.outputs[0].name,
            axes=[-2],
        )
@register_mil_to_nn_mapping
def const(const_context, builder, op):
    """Intentionally a no-op: consts are materialized lazily at first use
    (see make_input / add_const)."""
    pass
def conv_helper(const_context, builder, op):
    """
    Shared lowering for MIL conv / conv_quantized (1D, 2D and 3D).

    1D convolution is emulated as 2D by inserting a height-1 axis on input
    (and on dynamic weights) and squeezing it afterwards. Dynamic (non-const)
    weights are passed as a second layer input; in that case any bias is
    applied with a separate broadcastable add.
    """
    # v2 x: (n, C_in/groups, spatial_dims)
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name
    is_conv1d = op.x.rank == 3
    is_conv2d = op.x.rank == 4
    is_conv3d = op.x.rank == 5
    if not (is_conv1d or is_conv2d or is_conv3d):
        raise ValueError(
            "Input tensor rank '{}' is not one of '{}'.".format(op.x.rank, (3, 4, 5),)
        )
    if is_conv1d:
        x_name = op.name + "_expand_dim"
        out_name += "_expanded"
        builder.add_expand_dims(
            name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2],
        )
    # `x_name` is guaranteed to be (n, C_in/groups, spatial_dims) for 1D and 2D convolution
    # W_v1 wil be np.ndarray (if W is const at compile time) or None
    # (if W is not known at compile time).
    weights = None
    input_names = [x_name]
    if op.weight.val is not None:
        # v2 convolution (conv3d) expects weights to have shape (C_out, C_in/groups, spatial_dims)
        # v1 convolution expects (H, W, C_in/groups, C_out) or (D, H, W, C_in/groups, C_out)
        weights = op.weight.val
        if is_conv1d:
            weights = _np.expand_dims(op.weight.val, -2)
        if is_conv1d or is_conv2d:
            weights = _np.transpose(weights, [2, 3, 1, 0])
    else:
        # op.weight is not const at compile time.
        # When weight is dynamic, v1 convolution expects weight to be
        # (C_out, C_in/groups, H, W)
        # TODO 3D convolution doesn't support dynamic weights:
        if is_conv3d:
            raise ValueError("3D Convolution doesn't support dynamic weights.")
        weights_name = op.weight.name
        if is_conv1d:
            # Mirror the input's height-1 expansion on the dynamic weights.
            weights_name += "_expand_dim"
            builder.add_expand_dims(
                name=weights_name,
                input_name=op.weight.name,
                output_name=weights_name,
                axes=[-2],
            )
        input_names.append(weights_name)
    # padding
    padding_mode = op.pad_type.val
    pad = {}
    if padding_mode == "custom":
        # NN has no "custom" mode; express explicit amounts via VALID padding.
        if is_conv1d:
            padding_mode = "valid"
            pad["padding_top"] = 0
            pad["padding_bottom"] = 0
            pad["padding_left"] = op.pad.val[0]
            pad["padding_right"] = op.pad.val[1]
        elif is_conv2d:
            padding_mode = "valid"
            pad["padding_top"] = op.pad.val[0]
            pad["padding_bottom"] = op.pad.val[1]
            pad["padding_left"] = op.pad.val[2]
            pad["padding_right"] = op.pad.val[3]
        else:
            pad["padding_front"] = op.pad.val[0]
            pad["padding_back"] = op.pad.val[1]
            pad["padding_top"] = op.pad.val[2]
            pad["padding_bottom"] = op.pad.val[3]
            pad["padding_left"] = op.pad.val[4]
            pad["padding_right"] = op.pad.val[5]
    same_padding_asymmetry_mode = "BOTTOM_RIGHT_HEAVY"
    if padding_mode == "same_lower":
        if is_conv3d:
            msg = "For the neuralnetwork backend, padding_mode ``same_lower`` is not supported for conv 3d."
            raise ValueError(msg)
        # NN expresses same_lower as SAME with top-left-heavy asymmetry.
        padding_mode = "same"
        same_padding_asymmetry_mode = "TOP_LEFT_HEAVY"
    has_bias = op.bias is not None
    groups = op.groups.val
    strides = op.strides.val.tolist()
    dilations = op.dilations.val.tolist()
    if is_conv1d:
        # Insert the neutral height entry for the emulated 2D convolution.
        dilations = dilations[:-1] + [1] + dilations[-1:]
        strides = strides[:-1] + [1] + strides[-1:]
    if weights is not None and op.op_type == "conv_quantized":
        nbits = op.nbits.val
        weights = _convert_array_to_nbit_quantized_bytes(weights.flatten(), nbits).tobytes()
        quantization_type = op.quantization_type.val
        quant_bias = op.quant_bias.val
        quant_scale = op.quant_scale.val
    else:
        quantization_type = None
        nbits = None
        quant_bias = None
        quant_scale = None
    if is_conv1d or is_conv2d:
        if weights is None and has_bias:
            # weights are dyanmic.
            # In this case, bias, if present, cannot be part of the conv op
            # it needs to be added separately via an add op
            out_name += "_without_bias"
        if weights is None and groups > 1:
            raise NotImplementedError("Convolution with dynamic weights and groups > 1 is not supported on the "
                                      "neuralnetwork backend. Please use the mlprogram backend "
                                      "(convert_to=\"mlprogram\")")
        builder.add_convolution(
            name=out_name,
            kernel_channels=op.weight.shape[1],
            output_channels=op.weight.shape[0],
            height= 1 if is_conv1d else op.weight.shape[2],
            width= op.weight.shape[2] if is_conv1d else op.weight.shape[3],
            stride_height=strides[0],
            stride_width=strides[1],
            border_mode=padding_mode,
            same_padding_asymmetry_mode=same_padding_asymmetry_mode,
            groups=groups,
            W=weights,
            b=op.bias.val if has_bias and weights is not None else None,
            has_bias=has_bias if weights is not None else False,
            is_deconv=False,
            input_name=input_names,
            output_name=out_name,
            dilation_factors=dilations,
            quantization_type=quantization_type,
            nbits=nbits,
            quant_bias=quant_bias,
            quant_scale=quant_scale,
            **pad # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad`
        )
        # add bias if weights are dynamic
        if weights is None and has_bias:
            Cout = op.weight.shape[0]
            assert op.bias.val.size == Cout, \
                "size of bias for convolution must be same as the number of output channels"
            builder.add_load_constant_nd(
                name=op.name + '_constant_bias', output_name=op.name + "_constant_bias",
                constant_value=op.bias.val.reshape((Cout, 1, 1)), shape=(Cout, 1, 1)
            )
            add_op_output_name = op.name + "_with_bias" if is_conv1d else op.outputs[0].name
            builder.add_add_broadcastable(
                name=add_op_output_name,
                input_names=[out_name, op.name + "_constant_bias"],
                output_name=add_op_output_name,
            )
            if is_conv1d:
                out_name = add_op_output_name
        # Squeeze added `Width` dimension for 1d case
        if is_conv1d:
            # NOTE(review): x_name is reassigned here but never read again —
            # appears to be a leftover; confirm it is dead.
            x_name = op.name + "expand_dim"
            builder.add_squeeze(
                name=op.name,
                input_name=out_name,
                output_name=op.outputs[0].name,
                axes=[-2],
            )
    if is_conv3d:
        builder.add_convolution3d(
            name=op.name,
            input_channels=op.weight.shape[1] * groups,
            output_channels=op.weight.shape[0],
            depth=op.weight.shape[2],
            height=op.weight.shape[3],
            width=op.weight.shape[4],
            W=op.weight.val,
            b=op.bias.val if has_bias else None,
            has_bias=has_bias,
            groups=groups,
            stride_depth=strides[0],
            stride_height=strides[1],
            stride_width=strides[2],
            dilation_depth=dilations[0],
            dilation_height=dilations[1],
            dilation_width=dilations[2],
            padding_mode=padding_mode,
            is_deconv=False,
            output_shape=None,
            input_name=input_names,
            output_name=out_name,
            **pad # Python 2.7.16 will fail with a syntax error if a comma is included after `**pad`
        )
@register_mil_to_nn_mapping
def conv(const_context, builder, op):
    """Map the MIL ``conv`` op to an NN convolution layer via conv_helper."""
    conv_helper(const_context, builder, op)


# NOTE(review): registered with the called form `@register_mil_to_nn_mapping()`
# while `conv` above uses the bare form — presumably the decorator accepts
# both; confirm before normalizing.
@register_mil_to_nn_mapping()
def conv_quantized(const_context, builder, op):
    """Map the MIL ``conv_quantized`` op; shares conv_helper with ``conv``."""
    conv_helper(const_context, builder, op)
@register_mil_to_nn_mapping
def cumsum(const_context, builder, op):
    """Map the MIL ``cumsum`` op onto the NN cumsum layer."""
    builder.add_cumsum(
        name=op.name,
        input_names=make_input(const_context, builder, [op.x]),
        output_name=op.outputs[0].name,
        axis=op.axis.val,
        reverse=op.reverse.val,
        exclusive=op.exclusive.val,
    )
def _add_elementwise_unary(
    const_context, builder, op, mode, output_name=None, **kwargs
):
    """Emit a one-input elementwise NN layer for MIL op ``op``.

    ``mode`` is either one of the classic unary-layer modes (routed through
    ``builder.add_unary``) or the suffix of a dedicated ``builder.add_<mode>``
    method.  Extra ``kwargs`` (e.g. ``alpha``, ``epsilon``) are forwarded to
    the builder call.  ``output_name`` overrides the default output blob name
    (used e.g. by ``cast`` to produce intermediate blobs).
    """
    output_name = output_name if output_name else op.outputs[0].name
    name = output_name if output_name else op.name
    if mode in ["sqrt", "rsqrt", "inverse", "power", "exp", "log", "abs", "threshold"]:
        builder.add_unary(
            name=name,
            input_name=make_input(const_context, builder, op.x),
            output_name=output_name,
            mode=mode,
            **kwargs
        )
    else:
        add_func = getattr(builder, "add_" + mode, None)
        if add_func is None:
            # Fail loudly with a clear message instead of logging and then
            # calling None (which previously raised an opaque TypeError);
            # matches the error handling in _add_elementwise_binary.
            raise ValueError(
                "Elementwise unary method {} not found in builder.".format(mode)
            )
        add_func(
            name=name,
            input_name=make_input(const_context, builder, op.x),
            output_name=output_name,
            **kwargs
        )
def _add_elementwise_binary(
    const_context, builder, op, mode, output_name=None, **kwargs
):
    """Emit a two-input elementwise NN layer for MIL op ``op``.

    Dispatch order:
      1. scalar-constant fast paths — fold a finite rank-0 constant operand
         into the layer's ``alpha`` parameter,
      2. equal-shape add/multiply/max/min via the classic elementwise layer,
      3. a rank<5 broadcast pattern match for add/multiply/subtract,
      4. generic ``*_broadcastable`` (or dedicated) layers as the fallback.
    """
    output_name = output_name if output_name else op.outputs[0].name
    name = output_name if output_name else op.name
    if mode in ["add", "multiply"]:
        params = {"name": name, "output_name": output_name, "mode": mode.upper()}
        # Commutative ops: either operand may be folded into alpha.
        if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val):
            params["input_names"] = make_input(const_context, builder, [op.y])
            # fp16 scalars are promoted to fp32 before serialization.
            val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32)
            params["alpha"] = np_val_to_py_type(val)
            builder.add_elementwise(**params)
            return
        elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val):
            params["input_names"] = make_input(const_context, builder, [op.x])
            val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32)
            params["alpha"] = np_val_to_py_type(val)
            builder.add_elementwise(**params)
            return
    elif mode in ["equal", "not_equal"]:
        # Symmetric comparisons: scalar constant on either side becomes alpha.
        add_func = getattr(builder, "add_" + mode, None)
        params = {"name": name, "output_name": output_name}
        if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val):
            params["input_names"] = make_input(const_context, builder, [op.y])
            val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32)
            params["alpha"] = np_val_to_py_type(val)
            add_func(**params)
            return
        elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val):
            params["input_names"] = make_input(const_context, builder, [op.x])
            val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32)
            params["alpha"] = np_val_to_py_type(val)
            add_func(**params)
            return
    elif mode in ["greater_than", "greater_equal", "less_than", "less_equal"]:
        params = {"name": name, "output_name": output_name}
        if op.x.val is not None and op.x.rank == 0 and _np.isfinite(op.x.val):
            # Scalar constant is on the LEFT: (c < y) == (y > c), so the
            # comparison direction is flipped when choosing the layer.
            params["input_names"] = make_input(const_context, builder, [op.y])
            val = op.x.val if not isinstance(op.x.val, _np.float16) else op.x.val.astype(_np.float32)
            params["alpha"] = np_val_to_py_type(val)
            if "less" in mode:
                params["use_greater_than_equal"] = mode.endswith("_equal")
                builder.add_greater_than(**params)
            elif "greater" in mode:
                params["use_less_than_equal"] = mode.endswith("_equal")
                builder.add_less_than(**params)
            return
        elif op.y.val is not None and op.y.rank == 0 and _np.isfinite(op.y.val):
            # Scalar constant on the RIGHT: direction is preserved.
            params["input_names"] = make_input(const_context, builder, [op.x])
            val = op.y.val if not isinstance(op.y.val, _np.float16) else op.y.val.astype(_np.float32)
            params["alpha"] = np_val_to_py_type(val)
            if "greater" in mode:
                params["use_greater_than_equal"] = mode.endswith("_equal")
                builder.add_greater_than(**params)
            elif "less" in mode:
                params["use_less_than_equal"] = mode.endswith("_equal")
                builder.add_less_than(**params)
            return
    # Generic path: materialize any constant operands as const layers first.
    if op.x.val is not None:
        add_const(const_context, builder, op.x.name, op.x.val)
    if op.y.val is not None:
        if mode == "pow":
            # pow with a constant exponent lowers to the unary POWER layer.
            _add_elementwise_unary(
                const_context,
                builder,
                op,
                "power",
                output_name=output_name,
                alpha=op.y.val,
            )
            return
        add_const(const_context, builder, op.y.name, op.y.val)
    # Same-shape add/multiply/max/min: classic elementwise layer directly.
    if mode in {"add", "multiply", "max", "min"} and op.x.shape == op.y.shape:
        builder.add_elementwise(
            name=name,
            input_names=make_input(const_context, builder, [op.x, op.y]),
            output_name=output_name,
            mode=mode.upper(),
        )
        return
    # the broadcast feature in the elementwise layer is hardcoded to 4D or less
    # for the 5d tensor, we need to use broadcastable layers instead.
    if mode in {"add", "multiply", "subtract"} and op.x.rank < 5 and op.y.rank < 5:
        # Pad both shapes to rank 5 and search for a broadcast pattern the
        # classic elementwise layer supports (one side constant-1 everywhere
        # outside one of the recognized axis groups).
        shape_x = _np.array([1] * (5 - op.x.rank) + list(op.x.shape))
        shape_y = _np.array([1] * (5 - op.y.rank) + list(op.y.shape))
        internal_x = internal_y = None
        if all(shape_x == 1):
            internal_y = op.x
            internal_x = op.y
        elif all(shape_y == 1):
            internal_x = op.x
            internal_y = op.y
        for indices in ([1], [2], [3, 4], [2, 3, 4], [1, 2, 3, 4]):
            if indices == [1, 2, 3, 4] and mode == "multiply":
                # INTERNAL_MUL_XYKN not implemented
                continue
            if all(shape_x[indices] == shape_y[indices]):
                if all([True if i in indices else s == 1 for i, s in enumerate(shape_x)]):
                    internal_y = op.x
                    internal_x = op.y
                    break
                if all([True if i in indices else s == 1 for i, s in enumerate(shape_y)]):
                    internal_x = op.x
                    internal_y = op.y
                    break
        if internal_x is not None:
            if mode in {"add", "multiply"}:
                builder.add_elementwise(
                    name=name,
                    input_names=make_input(const_context, builder, [internal_x, internal_y]),
                    output_name=output_name,
                    mode=mode.upper(),
                )
            elif mode == "subtract":
                # Lower x - y as x + (-1 * y): negate y through a LINEAR
                # activation, then add.
                builder.add_activation(
                    name="_neg_y_" + name,
                    input_name=make_input(const_context, builder, op.y),
                    output_name="_neg_y_" + output_name,
                    non_linearity="LINEAR",
                    params=[-1, 0])
                if op.x == internal_y:
                    internal_x = "_neg_y_" + output_name
                else:
                    internal_y = "_neg_y_" + output_name
                builder.add_elementwise(
                    name=name,
                    input_names=make_input(const_context, builder, [internal_x, internal_y]),
                    output_name=output_name,
                    mode="ADD",
                )
            return
    # Fallback: broadcastable layers (and dedicated comparison layers).
    if mode in {"add", "multiply", "max", "min"}:
        add_func = getattr(builder, "add_" + mode + "_broadcastable", None)
        if add_func is None:
            msg = "Element-wise binary method {} not found in builder."
            raise ValueError(msg.format(mode))
        add_func(
            name=name,
            input_names=make_input(const_context, builder, [op.x, op.y]),
            output_name=output_name,
            **kwargs
        )
    else:
        if mode in ["divide", "floor_div", "mod", "pow", "subtract"]:
            add_func = getattr(builder, "add_" + mode + "_broadcastable", None)
        elif mode == "less_equal":
            add_func = builder.add_less_than
            kwargs["use_less_than_equal"] = True
        elif mode == "greater_equal":
            add_func = builder.add_greater_than
            kwargs["use_greater_than_equal"] = True
        else:
            add_func = getattr(builder, "add_" + mode, None)
        if add_func is None:
            msg = "Element-wise binary method {} not found in builder."
            raise ValueError(msg.format(mode))
        add_func(
            name=name,
            input_names=make_input(const_context, builder, [op.x, op.y]),
            output_name=output_name,
            **kwargs
        )
def _add_logical(const_context, builder, op, mode):
    """Emit an NN logical layer (AND/OR/XOR/NOT) for MIL op ``op``.

    NOT is unary and consumes only ``op.x``; all other modes are binary.
    """
    operands = [op.x] if mode == "NOT" else [op.x, op.y]
    input_names = [make_input(const_context, builder, v) for v in operands]
    builder.add_logical(
        name=op.name, input_names=input_names, output_name=op.outputs[0].name, mode=mode
    )
@register_mil_to_nn_mapping
def abs(const_context, builder, op):
    """MIL ``abs`` -> NN unary ABS layer."""
    _add_elementwise_unary(const_context, builder, op, mode="abs")


@register_mil_to_nn_mapping
def acos(const_context, builder, op):
    """MIL ``acos`` -> NN acos layer."""
    _add_elementwise_unary(const_context, builder, op, mode="acos")


@register_mil_to_nn_mapping
def add(const_context, builder, op):
    """MIL ``add`` -> NN elementwise/broadcastable add."""
    _add_elementwise_binary(const_context, builder, op, mode="add")


@register_mil_to_nn_mapping
def asin(const_context, builder, op):
    """MIL ``asin`` -> NN asin layer."""
    _add_elementwise_unary(const_context, builder, op, mode="asin")


@register_mil_to_nn_mapping
def atan(const_context, builder, op):
    """MIL ``atan`` -> NN atan layer."""
    _add_elementwise_unary(const_context, builder, op, mode="atan")


@register_mil_to_nn_mapping
def atanh(const_context, builder, op):
    """MIL ``atanh`` -> NN atanh layer."""
    _add_elementwise_unary(const_context, builder, op, mode="atanh")
def cast(const_context, builder, op):
if op.dtype.val in ["int32", "int64"]:
_add_elementwise_unary(
const_context, builder, op, "floor", output_name=op.name + "_floor"
)
_add_elementwise_unary(
const_context, builder, op, "ceil", output_name=op.name + "_ceil"
)
builder.add_greater_than(
name=op.name + "_cond",
input_names=[make_input(const_context, builder, op.x)],
output_name=op.name + "_cond",
alpha=0.0,
)
builder.add_where_broadcastable(
name=op.name,
input_names=[op.name + i for i in ["_cond", "_floor", "_ceil"]],
output_name=op.outputs[0].name,
)
elif op.dtype.val in ["fp16", "fp32", "fp64"]:
builder.add_activation(
name=op.name,
non_linearity="LINEAR",
input_name=make_input(const_context, builder, op.x),
output_name=op.outputs[0].name,
params=[1.0, 0.0],
)
elif op.dtype.val == "bool":
builder.add_not_equal(
name=op.name,
input_names=op.x.name,
output_name=op.outputs[0].name,
alpha=0.0,
)
else:
raise NotImplementedError(
"Parameter dtype of the cast operation can be one of the {}. "
"Provided {}".format(["int32", "int64", "fp16", "fp32", "fp64"], op.dtype.val)
)
@register_mil_to_nn_mapping
def ceil(const_context, builder, op):
    """MIL ``ceil`` -> NN ceil layer."""
    _add_elementwise_unary(const_context, builder, op, mode="ceil")


@register_mil_to_nn_mapping
def clip(const_context, builder, op):
    """MIL ``clip`` -> NN clip layer; alpha/beta give the min/max bounds."""
    _add_elementwise_unary(
        const_context,
        builder,
        op,
        mode="clip",
        min_value=op.alpha.val,
        max_value=op.beta.val,
    )


@register_mil_to_nn_mapping
def cos(const_context, builder, op):
    """MIL ``cos`` -> NN cos layer."""
    _add_elementwise_unary(const_context, builder, op, mode="cos")


@register_mil_to_nn_mapping
def cosh(const_context, builder, op):
    """MIL ``cosh`` -> NN cosh layer."""
    _add_elementwise_unary(const_context, builder, op, mode="cosh")
@register_mil_to_nn_mapping
def einsum(const_context, builder, op):
    """
    MIL einsum is either
    - (B,C,H,W1) * (B,W1,H,W2) = (B,C,H,W2)   (rank 4)
    or
    - (C,H,W1) * (W1,H,W2) = (C,H,W2)         (rank 3)
    Transpose both operands so the matrices to be multiplied sit on the last
    two axes, run a batched matmul, then transpose the result back.  The
    permutation is an involution, so the same ``perm`` undoes itself.
    """
    rank = op.values[0].rank
    perm = [0, 2, 1, 3] if rank == 4 else [1, 0, 2]
    input_names = make_input(const_context, builder, op.values)
    transposed = [op.name + "_transpose_1", op.name + "_transpose_2"]
    for suffix, src, dst in zip(("_transpose_x", "_transpose_y"), input_names, transposed):
        builder.add_transpose(
            name=op.name + suffix,
            axes=perm,
            input_name=src,
            output_name=dst,
        )
    builder.add_batched_mat_mul(
        name=op.name + "_batch_matmul",
        input_names=transposed,
        output_name=op.outputs[0].name + "_pre_transpose",
    )
    builder.add_transpose(
        name=op.name,
        axes=perm,
        input_name=op.outputs[0].name + "_pre_transpose",
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def equal(const_context, builder, op):
    """MIL ``equal`` -> NN equal layer."""
    _add_elementwise_binary(const_context, builder, op, mode="equal")


@register_mil_to_nn_mapping
def exp(const_context, builder, op):
    """MIL ``exp`` -> NN unary EXP layer."""
    _add_elementwise_unary(const_context, builder, op, mode="exp")


@register_mil_to_nn_mapping
def exp2(const_context, builder, op):
    """MIL ``exp2`` -> NN exp2 layer."""
    _add_elementwise_unary(const_context, builder, op, mode="exp2")


@register_mil_to_nn_mapping
def floor(const_context, builder, op):
    """MIL ``floor`` -> NN floor layer."""
    _add_elementwise_unary(const_context, builder, op, mode="floor")


@register_mil_to_nn_mapping
def floor_div(const_context, builder, op):
    """MIL ``floor_div`` -> NN floor_div broadcastable layer."""
    _add_elementwise_binary(const_context, builder, op, mode="floor_div")


@register_mil_to_nn_mapping
def greater(const_context, builder, op):
    """MIL ``greater`` -> NN greater_than layer."""
    _add_elementwise_binary(const_context, builder, op, mode="greater_than")


@register_mil_to_nn_mapping
def greater_equal(const_context, builder, op):
    """MIL ``greater_equal`` -> NN greater_than layer with equality flag."""
    _add_elementwise_binary(const_context, builder, op, mode="greater_equal")


@register_mil_to_nn_mapping
def inverse(const_context, builder, op):
    """MIL ``inverse`` -> NN unary INVERSE layer (with epsilon)."""
    _add_elementwise_unary(const_context, builder, op, mode="inverse", epsilon=op.epsilon.val)


@register_mil_to_nn_mapping
def less(const_context, builder, op):
    """MIL ``less`` -> NN less_than layer."""
    _add_elementwise_binary(const_context, builder, op, mode="less_than")


@register_mil_to_nn_mapping
def less_equal(const_context, builder, op):
    """MIL ``less_equal`` -> NN less_than layer with equality flag."""
    _add_elementwise_binary(const_context, builder, op, mode="less_equal")


@register_mil_to_nn_mapping
def log(const_context, builder, op):
    """MIL ``log`` -> NN unary LOG layer (with epsilon)."""
    _add_elementwise_unary(const_context, builder, op, mode="log", epsilon=op.epsilon.val)


@register_mil_to_nn_mapping
def logical_and(const_context, builder, op):
    """MIL ``logical_and`` -> NN logical layer, AND mode."""
    _add_logical(const_context, builder, op, mode="AND")


@register_mil_to_nn_mapping
def logical_not(const_context, builder, op):
    """MIL ``logical_not`` -> NN logical layer, NOT mode (unary)."""
    _add_logical(const_context, builder, op, mode="NOT")


@register_mil_to_nn_mapping
def logical_or(const_context, builder, op):
    """MIL ``logical_or`` -> NN logical layer, OR mode."""
    _add_logical(const_context, builder, op, mode="OR")


@register_mil_to_nn_mapping
def logical_xor(const_context, builder, op):
    """MIL ``logical_xor`` -> NN logical layer, XOR mode."""
    _add_logical(const_context, builder, op, mode="XOR")


@register_mil_to_nn_mapping
def maximum(const_context, builder, op):
    """MIL ``maximum`` -> NN elementwise/broadcastable max."""
    _add_elementwise_binary(const_context, builder, op, mode="max")


@register_mil_to_nn_mapping
def minimum(const_context, builder, op):
    """MIL ``minimum`` -> NN elementwise/broadcastable min."""
    _add_elementwise_binary(const_context, builder, op, mode="min")


@register_mil_to_nn_mapping
def mod(const_context, builder, op):
    """MIL ``mod`` -> NN mod broadcastable layer."""
    _add_elementwise_binary(const_context, builder, op, mode="mod")


@register_mil_to_nn_mapping
def mul(const_context, builder, op):
    """MIL ``mul`` -> NN elementwise/broadcastable multiply."""
    _add_elementwise_binary(const_context, builder, op, mode="multiply")


@register_mil_to_nn_mapping
def not_equal(const_context, builder, op):
    """MIL ``not_equal`` -> NN not_equal layer."""
    _add_elementwise_binary(const_context, builder, op, mode="not_equal")


@register_mil_to_nn_mapping
def pow(const_context, builder, op):
    """MIL ``pow`` -> NN pow broadcastable layer (or unary POWER for const exponent)."""
    _add_elementwise_binary(const_context, builder, op, mode="pow")


@register_mil_to_nn_mapping
def real_div(const_context, builder, op):
    """MIL ``real_div`` -> NN divide broadcastable layer."""
    _add_elementwise_binary(const_context, builder, op, mode="divide")


@register_mil_to_nn_mapping
def round(const_context, builder, op):
    """MIL ``round`` -> NN round layer."""
    _add_elementwise_unary(const_context, builder, op, mode="round")


@register_mil_to_nn_mapping
def rsqrt(const_context, builder, op):
    """MIL ``rsqrt`` -> NN unary RSQRT layer (with epsilon)."""
    _add_elementwise_unary(const_context, builder, op, mode="rsqrt", epsilon=op.epsilon.val)


@register_mil_to_nn_mapping
def sign(const_context, builder, op):
    """MIL ``sign`` -> NN sign layer."""
    _add_elementwise_unary(const_context, builder, op, mode="sign")


@register_mil_to_nn_mapping
def sin(const_context, builder, op):
    """MIL ``sin`` -> NN sin layer."""
    _add_elementwise_unary(const_context, builder, op, mode="sin")


@register_mil_to_nn_mapping
def sinh(const_context, builder, op):
    """MIL ``sinh`` -> NN sinh layer."""
    _add_elementwise_unary(const_context, builder, op, mode="sinh")
def slice_by_index(const_context, builder, op):
rank = op.x.rank
stride = [1] * rank if op.stride is None else op.stride.val
begin_mask = [False] * rank if op.begin_mask is None else op.begin_mask.val
end_mask = [False] * rank if op.end_mask is None else op.end_mask.val
squeeze_mask = [False] * rank if op.squeeze_mask is None else op.squeeze_mask.val
if op.begin.val is not None and op.end.val is not None:
# If only one dimension is sliced, we should use the slice layer instead of static_slice or dynamic_slice
# In general, slice has a better performance.
begin = op.begin.val
end = op.end.val
slice_dim = []
for i in range(rank):
if (not begin_mask[i] and begin[i] != 0) or \
(not end_mask[i] and end[i] != op.x.shape[i]):
slice_dim.append(i)
if len(slice_dim) == 1 and not squeeze_mask[slice_dim[0]]:
dim = slice_dim[0] - rank
if dim in [-3, -2, -1]:
# get the axis, only channel, width, and depth dimension are supported
axis = None
if dim == -1:
axis = "width"
elif dim == -2:
axis = "height"
elif dim == -3:
axis = "channel"
start_index = 0 if begin_mask[dim] else begin[dim]
end_index = op.x.shape[dim] if end_mask[dim] else end[dim]
shape = op.x.shape
if not is_symbolic(shape[dim]):
if start_index < 0:
start_index += shape[dim]
if not is_symbolic(end_index) and start_index >= 0 and stride[dim] >= 1:
builder.add_slice(
name=op.name,
input_name=make_input(const_context, builder, op.x),
output_name=op.outputs[0].name,
axis=axis,
start_index=start_index,
end_index=end_index,
stride=stride[dim],
)
return
# use add_slice_static
builder.add_slice_static(
name=op.name,
input_name=make_input(const_context, builder, op.x),
output_name=op.outputs[0].name,
begin_ids=op.begin.val,
end_ids=op.end.val,
strides=np_val_to_py_type(stride),
begin_masks=np_val_to_py_type(begin_mask),
end_masks=np_val_to_py_type(end_mask),
squeeze_masks=np_val_to_py_type(squeeze_mask),
)
else:
builder.add_slice_dynamic(
name=op.name,
input_names=make_input(const_context, builder, [op.x, op.begin, op.end]),
output_name=op.outputs[0].name,
strides=np_val_to_py_type(stride),
begin_masks=np_val_to_py_type(begin_mask),
end_masks=np_val_to_py_type(end_mask),
squeeze_masks=np_val_to_py_type(squeeze_mask),
)
@register_mil_to_nn_mapping
def slice_by_size(const_context, builder, op):
    """
    If the inputs satisfy
    1. op.x has static input shape for those dimension whose size is not -1
    2. op.begin and op.size are both known during compile time
    we use add_slice_static directly
    Otherwise, build a block of ops achieving slice_by_size with dynamic input x and size.
    """
    # The static case
    if op.begin.val is not None and op.size.val is not None:
        begin = op.begin.val
        size = op.size.val
        rank = op.x.rank
        end = []
        # size == -1 means "to the end of that axis".
        for i in range(rank):
            if size[i] == -1:
                end.append(op.x.shape[i])
            else:
                end.append(begin[i] + size[i])
        if not any_symbolic(end):
            builder.add_slice_static(
                name=op.name,
                input_name=make_input(const_context, builder, op.x),
                output_name=op.outputs[0].name,
                begin_ids=begin,
                end_ids=end,
                strides=[1] * rank,
                begin_masks=[False] * rank,
                end_masks=[False] * rank,
                squeeze_masks=[False] * rank,
            )
            return
    # The dynamic case: compute end = where(size == -1, shape(x), begin + size)
    # with elementwise layers, then run a dynamic slice.
    # get the end_index of input x
    # for instance, x with shape [2,3,4] results in [2,3,4]
    end_index_name = op.name + "_end_index"
    builder.add_get_shape(
        name=end_index_name,
        input_name=make_input(const_context, builder, op.x),
        output_name=end_index_name,
    )
    # get the mask where size = -1
    # for instance, size = [-1,1,2] results in [1,0,0]
    const_name = op.name + "_const_name"
    add_const(const_context, builder, const_name, _np.array([-1] * op.x.rank))
    is_end_mask_name = op.name + "_is_end_mask"
    builder.add_equal(
        name=is_end_mask_name,
        input_names=make_input(const_context, builder, [const_name, op.size]),
        output_name=is_end_mask_name,
    )
    # get the mask where size != -1
    # for instance, size = [-1,1,2] results in [0,1,1]
    is_not_end_mask_name = op.name + "_is_not_end_mask"
    builder.add_not_equal(
        name=is_not_end_mask_name,
        input_names=make_input(const_context, builder, [const_name, op.size]),
        output_name=is_not_end_mask_name,
    )
    # get the end index for dimensions i where size[i] = -1
    # for size[i] != -1, just make it 0
    # for instance, x with shape [2,3,4] and size = [-1,1,2]
    # results in [2,0,0]
    end_index_with_mask_name = op.name + "_end_index_with_mask"
    builder.add_elementwise(
        name=end_index_with_mask_name,
        input_names=[end_index_name, is_end_mask_name],
        output_name=end_index_with_mask_name,
        mode="MULTIPLY",
    )
    # get the end index for dimension i where size[i] != -1
    # for size[i] = -1, just make it 0
    # for instance, x with shape [2,3,4], size = [-1,1,2],
    # begin = [0,1,1] results in [0,2,3]
    end_ids = op.name + "_end_ids"
    builder.add_elementwise(
        name=end_ids,
        input_names=make_input(const_context, builder, [op.begin, op.size]),
        output_name=end_ids,
        mode="ADD",
    )
    end_index_without_mask_name = op.name + "_end_index_without_mask"
    builder.add_elementwise(
        name=end_index_without_mask_name,
        input_names=make_input(const_context, builder, [is_not_end_mask_name, end_ids]),
        output_name=end_index_without_mask_name,
        mode="MULTIPLY",
    )
    # add two end index array together to get the final index
    final_end_index_name = op.name + "_final_index"
    builder.add_elementwise(
        name=final_end_index_name,
        input_names=make_input(
            const_context,
            builder,
            [end_index_with_mask_name, end_index_without_mask_name],
        ),
        output_name=final_end_index_name,
        mode="ADD",
    )
    input_names = make_input(
        const_context, builder, [op.x, op.begin, final_end_index_name]
    )
    builder.add_slice_dynamic(
        name=op.name, input_names=input_names, output_name=op.outputs[0].name
    )
@register_mil_to_nn_mapping
def sqrt(const_context, builder, op):
    """MIL ``sqrt`` -> NN unary SQRT layer."""
    _add_elementwise_unary(const_context, builder, op, mode="sqrt")


@register_mil_to_nn_mapping
def square(const_context, builder, op):
    """MIL ``square`` -> NN unary POWER layer with exponent 2."""
    _add_elementwise_unary(const_context, builder, op, mode="power", alpha=2.0)


@register_mil_to_nn_mapping
def sub(const_context, builder, op):
    """MIL ``sub`` -> NN elementwise/broadcastable subtract."""
    _add_elementwise_binary(const_context, builder, op, mode="subtract")


@register_mil_to_nn_mapping
def tan(const_context, builder, op):
    """MIL ``tan`` -> NN tan layer."""
    _add_elementwise_unary(const_context, builder, op, mode="tan")


@register_mil_to_nn_mapping
def threshold(const_context, builder, op):
    """MIL ``threshold`` -> NN unary THRESHOLD layer with alpha cutoff."""
    _add_elementwise_unary(const_context, builder, op, mode="threshold", alpha=op.alpha.val)
@register_mil_to_nn_mapping
def depth_to_space(const_context, builder, op):
    """MIL ``depth_to_space`` -> NN reorganize-data layer (DEPTH_TO_SPACE)."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_reorganize_data(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        mode="DEPTH_TO_SPACE",
        block_size=op.block_size.val,
    )
@register_mil_to_nn_mapping
def expand_dims(const_context, builder, op):
    """MIL ``expand_dims`` -> NN expand_dims layer inserting size-1 axes."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_expand_dims(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        axes=op.axes.val,
    )
@register_mil_to_nn_mapping
def fill(const_context, builder, op):
    """MIL ``fill`` -> NN fill layer; static when the shape is known at compile time."""
    if op.shape.val is not None:
        builder.add_fill_static(
            name=op.name,
            output_name=op.outputs[0].name,
            output_shape=op.shape.val,
            value=op.value.val,
        )
    else:
        # Shape is only known at runtime: feed it as an input tensor.
        builder.add_fill_dynamic(
            name=op.name,
            input_name=make_input(const_context, builder, op.shape),
            output_name=op.outputs[0].name,
            value=op.value.val,
        )
@register_mil_to_nn_mapping
def random_bernoulli(const_context, builder, op):
    """MIL ``random_bernoulli`` -> NN random-bernoulli layer (static/dynamic shape)."""
    if op.shape.val is not None:
        builder.add_random_bernoulli_static(
            name=op.name,
            output_name=op.outputs[0].name,
            output_shape=op.shape.val,
            prob=op.prob.val,
            seed=op.seed.val,
        )
    else:
        builder.add_random_bernoulli_dynamic(
            name=op.name,
            input_names=make_input(const_context, builder, [op.shape]),
            output_name=op.outputs[0].name,
            prob=op.prob.val,
            seed=op.seed.val,
        )
@register_mil_to_nn_mapping
def random_categorical(const_context, builder, op):
    """MIL ``random_categorical`` -> NN categorical-distribution layer."""
    use_logits = op.mode.val == "logits"
    builder.add_categorical_distribution(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        num_samples=op.size.val,
        is_logits=use_logits,
        seed=op.seed.val,
    )
@register_mil_to_nn_mapping
def random_normal(const_context, builder, op):
    """MIL ``random_normal`` -> NN random-normal layer (static/dynamic shape)."""
    if op.shape.val is not None:
        builder.add_random_normal_static(
            name=op.name,
            output_name=op.outputs[0].name,
            output_shape=op.shape.val,
            mean=op.mean.val,
            stddev=op.stddev.val,
            seed=op.seed.val,
        )
    else:
        builder.add_random_normal_dynamic(
            name=op.name,
            input_names=make_input(const_context, builder, [op.shape]),
            output_name=op.outputs[0].name,
            mean=op.mean.val,
            stddev=op.stddev.val,
            seed=op.seed.val,
        )
@register_mil_to_nn_mapping
def random_uniform(const_context, builder, op):
    """MIL ``random_uniform`` -> NN random-uniform layer (static/dynamic shape)."""
    if op.shape.val is not None:
        builder.add_random_uniform_static(
            name=op.name,
            output_name=op.outputs[0].name,
            output_shape=op.shape.val,
            minval=op.low.val,
            maxval=op.high.val,
            seed=op.seed.val,
        )
    else:
        builder.add_random_uniform_dynamic(
            name=op.name,
            input_names=make_input(const_context, builder, [op.shape]),
            output_name=op.outputs[0].name,
            minval=op.low.val,
            maxval=op.high.val,
            seed=op.seed.val,
        )
@register_mil_to_nn_mapping
def gru(const_context, builder, op):
    """Map the MIL ``gru`` op to the NN GRU layer.

    The NN layer works on rank-5 blobs, so the input, initial state, and both
    outputs are wrapped in expand_dims / squeeze pairs.  Only forward and
    reverse directions are supported.
    """
    # Register inputs (materializes consts if needed).
    make_input(const_context, builder, [op.x, op.initial_h])
    # Input shape: [b, s, I]
    input_name = op.x.name
    # Shape: [b, H]
    initial_h = op.initial_h.name

    weight_ih = op.weight_ih.val
    weight_hh = op.weight_hh.val
    b = op.bias.val if op.bias is not None else None
    direction = op.direction.val
    output_sequence = op.output_sequence.val

    # Add expand dims for input, in
    _expand_dim(builder, input_name + "_expanded", input_name, [3, 4])
    input_name += "_expanded"

    if direction not in {"forward", "reverse"}:
        raise ValueError(
            "Unknown direction {} for GRU layer. Supported are forward, reverse".format(
                direction
            )
        )

    # Expand initial_h
    _expand_dim(builder, initial_h + "_expanded", initial_h, [0, 3, 4])
    initial_h += "_expanded"

    def roz_to_zro(x):
        # Reorder the stacked gate blocks from [R, O, Z] (as unpacked below)
        # to the [Z, R, O] order the NN builder expects; passes None through.
        if x is None:
            return None
        r, o, z = _split(x, sections=3, axis=0)
        return [z, r, o]

    # w_x: [H*I, H*I, H*I]
    # w_h: [H*H, H*H, H*H]
    # where, format is [Z, R, O]
    # Z: Update gate, R: Reset gate, O: Output gate
    w_x = roz_to_zro(weight_ih)
    w_h = roz_to_zro(weight_hh)
    # bias format: [3*H]
    b = roz_to_zro(b)

    input_size = w_x[0].shape[1]
    hidden_size = w_x[0].shape[0]

    # 2 outputs
    # Y : [s/1, b, h, 1, 1]
    # Y_h: [ 1, b, h, 1, 1]
    output_names = [_output.name + "_5d" for _output in op.outputs]
    builder.add_gru(
        name=op.name,
        W_h=w_h,
        W_x=w_x,
        b=b,
        hidden_size=hidden_size,
        input_size=input_size,
        input_names=[input_name, initial_h],
        output_names=output_names,
        inner_activation=op.recurrent_activation.val,
        activation=op.activation.val,
        output_all=output_sequence,
        reverse_input=(direction == "reverse"),
    )

    # Squeeze Output
    # to output shape of [Seq Len or 1, Batch Size, Hidden Size]
    _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4])
    # Squeeze Output H and Output C
    # to output shape of [Batch Size, Hidden Size]
    _squeeze(builder, op.outputs[1].name, output_names[1], axes=[0, 3, 4])
@register_mil_to_nn_mapping
def squeeze(const_context, builder, op):
    """MIL ``squeeze`` -> NN squeeze layer; omitted axes squeeze every size-1 dim."""
    axes = None if op.axes is None else op.axes.val
    builder.add_squeeze(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        axes=axes,
        squeeze_all=axes is None,
    )
@register_mil_to_nn_mapping
def topk(const_context, builder, op):
    """MIL ``topk`` -> NN topk layer; ascending order selects bottom-k instead."""
    out_names = [out.name for out in op.outputs]
    builder.add_topk(
        name=op.name,
        input_names=make_input(const_context, builder, [op.x]),
        output_names=out_names,
        k=op.k.val,
        axis=op.axis.val,
        use_bottom_k=op.ascending.val,
    )
@register_mil_to_nn_mapping
def l2_pool(const_context, builder, op):
    """Map MIL ``l2_pool`` to a pooling layer in L2 mode via _convert_pool."""
    _convert_pool(const_context=const_context, builder=builder, op=op, mode="l2")
@register_mil_to_nn_mapping
def linear(const_context, builder, op):
    """Map MIL ``linear`` to an NN layer.

    Inputs of known rank 1-3 use the inner-product layer; higher or unknown
    ranks fall back to a batched matmul with a constant (transposed) weight.
    """
    out_channels, in_channels = op.weight.shape
    has_bias = op.bias is not None and op.bias.val is not None
    if op.x.rank is not None and 0 < op.x.rank <= 3:
        builder.add_inner_product(
            name=op.name,
            W=op.weight.val,
            b=op.bias.val if has_bias else None,
            input_channels=in_channels,
            output_channels=out_channels,
            has_bias=has_bias,
            input_name=make_input(const_context, builder, op.x),
            output_name=op.outputs[0].name,
        )
    else:
        builder.add_batched_mat_mul(
            name=op.name,
            input_names=make_input(const_context, builder, [op.x]),
            output_name=op.outputs[0].name,
            W=op.weight.val.T,
            # Guard against a missing bias: previously op.bias.val was
            # dereferenced unconditionally here and crashed when op.bias is
            # None, unlike the inner-product branch above.
            bias=op.bias.val if has_bias else None,
            weight_matrix_rows=in_channels,
            weight_matrix_columns=out_channels,
        )
@register_mil_to_nn_mapping
def matmul(const_context, builder, op):
    """Map MIL ``matmul`` to an NN batched-matmul layer.

    When ``y`` is a rank-2 compile-time constant consumed only by this op, it
    is baked into the layer as the weight matrix ``W``; otherwise both
    operands are fed as runtime inputs.
    """
    weight = None
    rows, columns = 0, 0
    if (
        op.y.val is not None
        and op.y.rank == 2
        and len(op.y.child_ops) == 1
        and len(op.y.consuming_blocks) == 0
    ):
        # y is a constant used nowhere else: fold it into W (pre-transposed
        # if transpose_y is requested).
        weight = op.y.val
        if op.transpose_y.val:
            weight = weight.transpose((1, 0))
        rows, columns = weight.shape
        input_names = make_input(const_context, builder, [op.x])

        if op.transpose_x.val:
            # Transpose x's last two axes with an explicit transpose layer.
            # NOTE(review): transpose_a=op.transpose_x.val is still passed to
            # add_batched_mat_mul below — verify the layer ignores the flag
            # when W is constant, otherwise x would be transposed twice.
            perm = [i for i in range(op.x.rank)]
            perm[-1], perm[-2] = perm[-2], perm[-1]
            name = op.name + "_x_transpose"
            builder.add_transpose(
                name=name, axes=perm, input_name=input_names[0], output_name=name
            )
            input_names = [name]
    else:
        input_names = make_input(const_context, builder, [op.x, op.y])

    builder.add_batched_mat_mul(
        name=op.name,
        input_names=input_names,
        output_name=op.outputs[0].name,
        transpose_a=op.transpose_x.val,
        transpose_b=op.transpose_y.val,
        W=weight,
        weight_matrix_rows=rows,
        weight_matrix_columns=columns,
    )
@register_mil_to_nn_mapping
def max_pool(const_context, builder, op):
    """Map MIL ``max_pool`` to a pooling layer in MAX mode via _convert_pool."""
    _convert_pool(const_context=const_context, builder=builder, op=op, mode="max")
@register_mil_to_nn_mapping
def non_zero(const_context, builder, op):
    """MIL ``non_zero`` -> NN where_nonzero layer (indices of nonzero entries)."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_where_nonzero(
        name=op.name, input_name=x_name, output_name=op.outputs[0].name
    )
@register_mil_to_nn_mapping
def lstm(const_context, builder, op):
    """Map the MIL ``lstm`` op onto the NN unilstm / bidirlstm layers.

    The NN LSTM layers work on rank-5 blobs, so the input, both initial
    states, and every output are wrapped in expand_dims / squeeze pairs.
    Weights and biases arrive stacked along axis 0 and are split into
    per-gate blocks in [input, forget, output, cell] order.
    """
    # Register inputs (materializes consts if needed).
    make_input(const_context, builder, [op.x, op.initial_h, op.initial_c])
    # Input shape [b, s, I]
    input_name = op.x.name
    # Shape: [b, DIRECTION*H]
    initial_h = op.initial_h.name
    initial_c = op.initial_c.name

    wt_ih = op.weight_ih.val
    wt_hh = op.weight_hh.val
    b = op.bias.val if op.bias is not None else None
    direction = op.direction.val
    output_sequence = op.output_sequence.val
    peephole = op.peephole.val if op.peephole is not None else None
    # High enough clip value to be ineffective!
    clip = 500.0 if op.clip is None else op.clip.val

    # Add expand dims for input, in
    _expand_dim(builder, input_name + "_expanded", input_name, [3, 4])
    input_name += "_expanded"

    if direction in {"forward", "reverse"}:
        # Expand initial_h and initial_c,
        # from shape (B, H) to shape (1, Batch, H, 1, 1)
        _expand_dim(builder, initial_h + "_expanded", initial_h, [0, 3, 4])
        initial_h += "_expanded"
        # initial_h may have the same name as initial_c (e.g., same Var).
        # Append a different string to avoid conflict
        _expand_dim(builder, initial_c + "_expanded2", initial_c, [0, 3, 4])
        initial_c += "_expanded2"

        # w_x: [H*I, H*I, H*I, H*I]
        # w_h: [H*H, H*H, H*H, H*H]
        # where format is, [input gate, forget gate, output gate, cell gate]
        w_x = _split(wt_ih, sections=4)
        w_h = _split(wt_hh, sections=4)
        # bias format: [4*H]
        b = _split(b, sections=4)  # ifoz layout
        # peephole format: [3*H]
        # where format is, [input gate, forget gate, output gate]
        peephole = _split(peephole, sections=3)

        input_size = w_x[0].shape[1]
        hidden_size = w_h[0].shape[1]

        # 3 outputs
        # Y : [s/1, b, h, 1, 1]
        # Y_h: [ 1, b, h, 1, 1]
        # Y_c: [ 1, b, h, 1, 1]
        output_names = [_output.name + "_5d" for _output in op.outputs]
        builder.add_unilstm(
            name=op.name,
            W_h=w_h,
            W_x=w_x,
            b=b,
            hidden_size=hidden_size,
            input_size=input_size,
            input_names=[input_name, initial_h, initial_c],
            output_names=output_names,
            inner_activation=op.recurrent_activation.val,
            cell_state_update_activation=op.cell_activation.val,
            output_activation=op.activation.val,
            peep=peephole,
            output_all=output_sequence,
            cell_clip_threshold=clip,
            reverse_input=(direction == "reverse"),
        )

        # Squeeze Output
        # to output shape of [Seq Len or 1, Batch Size, Hidden Size]
        _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4])
        # Squeeze Output H and Output C
        # to output shape of [Batch Size, Hidden Size]
        _squeeze(builder, op.outputs[1].name, output_names[1], axes=[0, 3, 4])
        _squeeze(builder, op.outputs[2].name, output_names[2], axes=[0, 3, 4])

    elif direction == "bidirectional":
        # Expand initial_h and initial_c
        # Issue #810: layer-count suffix keeps the expanded blob names unique
        # when several LSTMs share the same initial-state Vars.
        num_layer = len(builder.layers)
        initial_h_expand = initial_h + "_expanded" + "_" + str(num_layer)
        # from shape (B, 2*H) to shape (1, Batch, 2*H, 1, 1)
        if not (initial_h_expand in set(builder.layers)):
            _expand_dim(builder, initial_h_expand, initial_h, [0, 3, 4])
        initial_h = initial_h_expand
        # initial_h may have the same name as initial_c (e.g., same Var)
        initial_c_expand = initial_c + "_expanded2" + "_" + str(num_layer)
        if not (initial_c_expand in set(builder.layers)):
            _expand_dim(builder, initial_c_expand, initial_c, [0, 3, 4])
        initial_c = initial_c_expand

        initial_h_f = initial_h + "_forward"
        initial_h_r = initial_h + "_reverse"
        initial_c_f = initial_c + "_forward"
        initial_c_r = initial_c + "_reverse"

        # split input_h and input_c into two parts
        builder.add_split_nd(
            name=op.name + "_split_h",
            input_name=initial_h,
            output_names=[initial_h_f, initial_h_r],
            axis=2,
        )
        builder.add_split_nd(
            name=op.name + "_split_c",
            input_name=initial_c,
            output_names=[initial_c_f, initial_c_r],
            axis=2,
        )

        wt_ih_back = op.weight_ih_back.val
        wt_hh_back = op.weight_hh_back.val
        # Get weights here
        # weight format: [I+H, 2*4*H] -> [I+H, 4*H (forward):4*H (backward)]
        hidden_size = wt_hh.shape[1]
        input_size = wt_ih.shape[1]

        # f_w_x and r_w_x: [H*I, H*I, H*I, H*I]
        # f_w_h and r_w_h: [H*H, H*H, H*H, H*H]
        # where format is, [input gate, forget gate, output gate, cell gate]
        w_x = _split(wt_ih, sections=4)
        w_h = _split(wt_hh, sections=4)
        r_w_x = _split(wt_ih_back, sections=4)
        r_w_h = _split(wt_hh_back, sections=4)

        # f_b and r_b format: [4*H]
        b_back = op.bias_back.val if op.bias_back is not None else None
        f_b, r_b = None, None
        if b is not None:
            f_b = _split(b, sections=4)
        if b_back is not None:
            r_b = _split(b_back, sections=4)

        # peephole format: [2*3*H] -> [3*H (forward) : 3*H (backward)]
        peephole_back = op.peephole_back.val if op.peephole_back is not None else None
        f_peephole, r_peephole = None, None
        if peephole is not None:
            f_peephole = _split(peephole, sections=3)
        if peephole_back is not None:
            r_peephole = _split(peephole_back, sections=3)

        # Intermediate 5-D blob names.  NOTE(review): "_5d_foward" below is a
        # typo ("forward"); harmless since the list entry is referenced only
        # through output_names[1], and renaming it would change a serialized
        # blob name.
        output_names = [
            op.outputs[0].name + "_5d",  # Output Y [s/1, b, 2*h, 1, 1]
            op.outputs[1].name + "_5d_foward",  # Output Y_h [ 1, b, h, 1, 1]
            op.outputs[2].name
            + "_5d_forward",  # Output Y_c [ 1, b, h, 1, 1]
            op.outputs[1].name
            + "_5d_reverse",  # Output Y_h_reverse [ 1, b, h, 1, 1]
            op.outputs[2].name + "_5d_reverse",
        ]  # Output Y_c_reverse [ 1, b, h, 1, 1]
        builder.add_bidirlstm(
            name=op.name,
            W_h=w_h,
            W_x=w_x,
            b=f_b,
            W_h_back=r_w_h,
            W_x_back=r_w_x,
            b_back=r_b,
            hidden_size=hidden_size,
            input_size=input_size,
            input_names=[
                input_name,
                initial_h_f,
                initial_c_f,
                initial_h_r,
                initial_c_r,
            ],
            output_names=output_names,
            inner_activation=op.recurrent_activation.val,
            cell_state_update_activation=op.cell_activation.val,
            output_activation=op.activation.val,
            peep=f_peephole,
            peep_back=r_peephole,
            output_all=output_sequence,
            cell_clip_threshold=clip,
        )

        # Squeeze Output
        # to output shape of [Seq Len or 1, Batch Size, 2*Hidden Size]
        _squeeze(builder, op.outputs[0].name, output_names[0], axes=[3, 4])

        # Output H is of format
        # 1, Batch_Size, Hidden_Size, 1, 1
        # Concat to make it
        # 1, Batch_Size, 2*Hidden_Size, 1, 1
        builder.add_elementwise(
            name=op.outputs[1].name + "_5d",
            input_names=[output_names[1], output_names[3]],
            output_name=op.outputs[1].name + "_5d",
            mode="CONCAT",
        )
        # Output C is of format
        # 1, Batch_Size, Hidden_Size, 1, 1
        builder.add_elementwise(
            name=op.outputs[2].name + "_5d",
            input_names=[output_names[2], output_names[4]],
            output_name=op.outputs[2].name + "_5d",
            mode="CONCAT",
        )

        # Squeeze Output H and Output C
        # to output shape of [Batch Size, 2*Hidden Size]
        _squeeze(
            builder, op.outputs[1].name, op.outputs[1].name + "_5d", axes=[0, 3, 4]
        )
        _squeeze(
            builder, op.outputs[2].name, op.outputs[2].name + "_5d", axes=[0, 3, 4]
        )
    else:
        raise ValueError(
            "Unknown direction {} for LSTM layer. Supported are forward, reverse or bidirectional".format(
                direction
            )
        )
@register_mil_to_nn_mapping
def reshape(const_context, builder, op):
    # Lower MIL `reshape` onto one of three NN reshape layers:
    #   - dynamic reshape when the target shape is only known at runtime,
    #   - rank-preserving reshape when the static shape contains -1 and keeps rank,
    #   - static reshape otherwise.
    if op.shape.val is None:
        # Target shape is a runtime tensor: feed it as a second input.
        builder.add_reshape_dynamic(
            name=op.name,
            input_names=make_input(const_context, builder, [op.x, op.shape]),
            output_name=op.outputs[0].name,
        )
    elif -1 in op.shape.val and len(op.shape.val) == op.x.rank:
        # Support 0 in shape.
        builder.add_rank_preserving_reshape(
            name=op.name,
            input_name=make_input(const_context, builder, op.x),
            output_name=op.outputs[0].name,
            output_shape=op.shape.val,
        )
    else:
        if 0 in op.shape.val:
            # Does not support 0 in shape
            msg = "Use 0 in shape only if len(shape) == x.rank. Report bug."
            raise ValueError(msg)
        # An empty target shape is emitted as the 1-element shape (1,).
        # NOTE(review): `0 in op.shape.shape` looks like a guard for an empty
        # shape tensor — confirm against the MIL reshape op definition.
        output_shape = (1,) if len(op.shape.val) == 0 or 0 in op.shape.shape else op.shape.val
        builder.add_reshape_static(
            name=op.name,
            input_name=make_input(const_context, builder, op.x),
            output_name=op.outputs[0].name,
            output_shape=output_shape,
        )
@register_mil_to_nn_mapping
def reduce_argmax(const_context, builder, op):
    """Lower MIL reduce_argmax to the NN argmax layer."""
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name
    builder.add_argmax(
        name=op.name,
        input_name=x_name,
        output_name=out_name,
        axis=op.axis.val,
        keepdims=op.keep_dims.val,
    )
@register_mil_to_nn_mapping
def reduce_argmin(const_context, builder, op):
    """Lower MIL reduce_argmin to the NN argmin layer."""
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name
    builder.add_argmin(
        name=op.name,
        input_name=x_name,
        output_name=out_name,
        axis=op.axis.val,
        keepdims=op.keep_dims.val,
    )
def _reduce_axes(const_context, builder, builder_op, op):
    """Shared emitter for NN reduce layers.

    Forwards the op's axes/keepdims to ``builder_op``; a missing axes
    attribute means "reduce over all axes" (reduce_all=True).
    """
    if op.axes is not None:
        axes = op.axes.val
    else:
        axes = None
    builder_op(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        axes=axes,
        keepdims=op.keep_dims.val,
        reduce_all=axes is None,
    )
@register_mil_to_nn_mapping
def reduce_l1_norm(const_context, builder, op):
    """Lower MIL reduce_l1_norm to the NN reduce_l1 layer."""
    add_layer = builder.add_reduce_l1
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_l2_norm(const_context, builder, op):
    """Lower MIL reduce_l2_norm to the NN reduce_l2 layer."""
    add_layer = builder.add_reduce_l2
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_log_sum(const_context, builder, op):
    """Lower MIL reduce_log_sum to the NN reduce_logsum layer."""
    add_layer = builder.add_reduce_logsum
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_log_sum_exp(const_context, builder, op):
    """Lower MIL reduce_log_sum_exp to the NN reduce_logsumexp layer."""
    add_layer = builder.add_reduce_logsumexp
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_max(const_context, builder, op):
    """Lower reduce_max; a reduction over spatial dims may become global max pooling."""
    if _try_convert_global_pool(const_context, builder, op, mode="max"):
        return
    _reduce_axes(const_context, builder, builder.add_reduce_max, op)
@register_mil_to_nn_mapping
def reduce_mean(const_context, builder, op):
    """Lower reduce_mean; a reduction over spatial dims may become global average pooling."""
    if _try_convert_global_pool(const_context, builder, op, mode="average"):
        return
    _reduce_axes(const_context, builder, builder.add_reduce_mean, op)
@register_mil_to_nn_mapping
def reduce_min(const_context, builder, op):
    """Lower MIL reduce_min to the NN reduce_min layer."""
    add_layer = builder.add_reduce_min
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_prod(const_context, builder, op):
    """Lower MIL reduce_prod to the NN reduce_prod layer."""
    add_layer = builder.add_reduce_prod
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_sum(const_context, builder, op):
    """Lower MIL reduce_sum to the NN reduce_sum layer."""
    add_layer = builder.add_reduce_sum
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reduce_sum_square(const_context, builder, op):
    """Lower MIL reduce_sum_square to the NN reduce_sumsquare layer."""
    add_layer = builder.add_reduce_sumsquare
    _reduce_axes(const_context, builder, add_layer, op)
@register_mil_to_nn_mapping
def reverse(const_context, builder, op):
    """Lower MIL reverse: build a per-dimension boolean mask for the NN reverse layer."""
    rank = op.x.rank
    if op.axes is None:
        # No axes given: reverse every dimension.
        flags = [True] * rank
    else:
        flags = [False] * rank
        # Negative axes index from the end, which plain list indexing handles.
        for axis in op.axes.val:
            flags[axis] = True
    builder.add_reverse(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        reverse_dim=flags,
    )
@register_mil_to_nn_mapping
def reverse_sequence(const_context, builder, op):
    """Lower MIL reverse_sequence to the NN reverse_sequence layer."""
    in_names = make_input(const_context, builder, [op.x, op.lengths])
    builder.add_reverse_sequence(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
        batch_axis=op.batch_axis.val,
        seq_axis=op.seq_axis.val,
    )
@register_mil_to_nn_mapping
def rnn(const_context, builder, op):
    # Lower a MIL simple RNN to the NN simple_rnn layer.
    # The NN layer works on 5-D blobs, so inputs are expanded to 5-D and the
    # outputs squeezed back afterwards.
    input_name = make_input(const_context, builder, op.x)  # [b, s, I]
    initial_h = make_input(const_context, builder, op.initial_h)  # [b, H]
    w_ih = op.weight_ih.val
    w_hh = op.weight_hh.val
    b = op.bias.val if op.bias is not None else None
    direction = op.direction.val
    output_sequence = op.output_sequence.val
    activation = op.activation.val
    # Add expand dims for input, in
    _expand_dim(builder, input_name + "_expanded", input_name, [3, 4])
    input_name += "_expanded"
    if direction not in {"forward", "reverse"}:
        raise ValueError(
            "Unknown direction {} for RNN layer. Supported are forward and reverse".format(
                direction
            )
        )
    # Expand initial_h and initial_c
    _expand_dim(builder, initial_h + "_expanded", initial_h, [2, 3, 4])
    initial_h += "_expanded"
    # w_x: (H, I)
    # w_h: (H, H)
    hidden_size = w_hh.shape[0]
    input_size = w_ih.shape[-1]
    # 3 outputs
    # Y : [s/1, b, h, 1, 1]
    # Y_h: [ 1, b, h, 1, 1]
    # The RNN layer writes to temporary "_5d" names which are squeezed below.
    output_names = [_output.name + "_5d" for _output in op.outputs]
    builder.add_simple_rnn(
        name=op.name,
        W_h=w_hh,
        W_x=w_ih,
        b=b,
        hidden_size=hidden_size,
        input_size=input_size,
        input_names=[input_name, initial_h],
        output_names=output_names,
        activation=activation,
        output_all=output_sequence,
        reverse_input=(direction == "reverse"),
    )
    # Squeeze Output
    # to output shape of [Seq Len or 1, Batch Size, Hidden Size]
    _squeeze(builder, op.outputs[0].name, output_names[0], [3, 4])
    # Squeeze Output H and Output C
    # to output shape of [Batch Size, Hidden Size]
    _squeeze(builder, op.outputs[1].name, output_names[1], [0, 3, 4])
@register_mil_to_nn_mapping
def select(const_context, builder, op):
    """Lower MIL select (cond ? a : b) to the NN broadcastable where layer."""
    in_names = make_input(const_context, builder, [op.cond, op.a, op.b])
    builder.add_where_broadcastable(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def space_to_depth(const_context, builder, op):
    """Lower MIL space_to_depth to the NN reorganize_data layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_reorganize_data(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        block_size=op.block_size.val,
        mode="SPACE_TO_DEPTH",
    )
@register_mil_to_nn_mapping
def batch_to_space(const_context, builder, op):
    # Lower MIL batch_to_space as: transpose(NCHW axes 0<->1) -> DEPTH_TO_SPACE
    # -> crop -> transpose back. Only square block shapes > 1 are supported by
    # this backend.
    block_size = op.block_shape.val
    if block_size[0] != block_size[1]:
        raise ValueError("batch_to_space non-equal block shape is not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.")
    block_size = block_size[0]
    if block_size == 1:
        raise ValueError("batch_to_space block shape == 1 not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.")
    # Swap batch and channel so the batch blocks sit on the channel axis,
    # where DEPTH_TO_SPACE can redistribute them spatially.
    transpose_1_name = op.name + "_transpose_1"
    builder.add_transpose(
        name=transpose_1_name,
        input_name=make_input(const_context, builder, op.x),
        axes=[1, 0, 2, 3],
        output_name=transpose_1_name,
    )
    depth_to_space_name = op.name + "_depth_to_space"
    builder.add_reorganize_data(
        name=depth_to_space_name,
        input_name=transpose_1_name,
        output_name=depth_to_space_name,
        mode="DEPTH_TO_SPACE",
        block_size=block_size,
    )
    # Remove the padding described by op.crops: rows are [H_crop, W_crop],
    # columns are [begin, end].
    crop_name = op.name + "_crop"
    crops = op.crops.val
    builder.add_crop(
        name=crop_name,
        input_names=[depth_to_space_name],
        output_name=crop_name,
        offset=0,
        top=crops[0][0],
        bottom=crops[0][1],
        left=crops[1][0],
        right=crops[1][1],
    )
    # Undo the initial batch/channel swap.
    transpose_2_name = op.name + "_transpose_2"
    builder.add_transpose(
        name=transpose_2_name,
        input_name=crop_name,
        axes=[1, 0, 2, 3],
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def space_to_batch(const_context, builder, op):
    # Lower MIL space_to_batch as: pad -> transpose(batch<->channel) ->
    # SPACE_TO_DEPTH -> transpose back. Only square block shapes > 1 are
    # supported by this backend.
    block_size = op.block_shape.val
    if block_size[0] != block_size[1]:
        raise ValueError("space_to_batch non-equal block shape is not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.")
    block_size = block_size[0]
    if block_size == 1:
        raise ValueError("space_to_batch block shape == 1 not supported in 'neuralnetwork' backend! Please change the convert_to to 'mlprogram'.")
    # op.paddings flattens to [top, bottom, left, right].
    pad = op.paddings.val.flatten()
    left, right = pad[2], pad[3]
    top, bottom = pad[0], pad[1]
    pad_name = op.name + "_pad"
    builder.add_padding(
        name=pad_name,
        left=left,
        right=right,
        top=top,
        bottom=bottom,
        input_name=make_input(const_context, builder, op.x),
        output_name=pad_name,
        padding_type="constant",
        value=0.,
    )
    # Swap batch and channel so SPACE_TO_DEPTH moves spatial blocks onto the
    # (former batch) axis.
    transpose_1_name = op.name + "_transpose_1"
    builder.add_transpose(
        name=transpose_1_name,
        input_name=pad_name,
        axes=[1, 0, 2, 3],
        output_name=transpose_1_name,
    )
    space_to_depth_name = op.name + "_space_to_depth"
    builder.add_reorganize_data(
        name=space_to_depth_name,
        input_name=transpose_1_name,
        output_name=space_to_depth_name,
        mode="SPACE_TO_DEPTH",
        block_size=block_size,
    )
    # Undo the initial batch/channel swap.
    transpose_2_name = op.name + "_transpose_2"
    builder.add_transpose(
        name=transpose_2_name,
        input_name=space_to_depth_name,
        axes=[1, 0, 2, 3],
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def transpose(const_context, builder, op):
    """Lower MIL transpose to the NN transpose layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_transpose(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        axes=op.perm.val,
    )
@register_mil_to_nn_mapping
def gather(const_context, builder, op):
    """Lower MIL gather.

    A gather over a constant 2-D table along axis 0 (or -2) whose table feeds
    no other op is rewritten as an embeddingND lookup; everything else maps to
    the generic NN gather layer.
    """
    W = op.x.val
    is_embedding = (
        W is not None
        and len(W.shape) == 2
        and (op.axis.val == 0 or op.axis.val == -2)
        # the constant feeding into the gather doesn't go to any other op
        and len(op.x.child_ops) == 1
    )
    if is_embedding:
        # %3 = gather(%1, %2, axis=0)   # %1 constant of shape (vocab, emb)
        # becomes:
        #   %2_e = expand_dims(%2, axis=-1)
        #   %3   = embeddingND(%2_e, weight=%1)
        expand_name = op.name + "_expand_dims"
        builder.add_expand_dims(
            name=expand_name,
            input_name=make_input(const_context, builder, op.indices),
            output_name=expand_name,
            axes=[-1],
        )
        builder.add_embedding_nd(
            name=op.name,
            input_name=expand_name,
            output_name=op.outputs[0].name,
            vocab_size=W.shape[0],
            embedding_size=W.shape[1],
            W=_np.transpose(W),
        )
    else:
        builder.add_gather(
            name=op.name,
            input_names=make_input(const_context, builder, [op.x, op.indices]),
            output_name=op.outputs[0].name,
            axis=op.axis.val,
        )
@register_mil_to_nn_mapping
def scatter(const_context, builder, op):
    """Lower MIL scatter to the NN scatter layer."""
    in_names = make_input(const_context, builder, [op.data, op.indices, op.updates])
    builder.add_scatter(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
        axis=op.axis.val,
        mode=op.mode.val.upper(),
    )
@register_mil_to_nn_mapping
def gather_along_axis(const_context, builder, op):
    """Lower MIL gather_along_axis to the NN gather_along_axis layer."""
    in_names = make_input(const_context, builder, [op.x, op.indices])
    builder.add_gather_along_axis(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
        axis=op.axis.val,
    )
@register_mil_to_nn_mapping
def scatter_along_axis(const_context, builder, op):
    """Lower MIL scatter_along_axis to the NN scatter_along_axis layer."""
    in_names = make_input(const_context, builder, [op.data, op.indices, op.updates])
    builder.add_scatter_along_axis(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
        axis=op.axis.val,
        mode=op.mode.val.upper(),
    )
@register_mil_to_nn_mapping
def gather_nd(const_context, builder, op):
    """Lower MIL gather_nd to the NN gather_nd layer."""
    in_names = make_input(const_context, builder, [op.x, op.indices])
    builder.add_gather_nd(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def scatter_nd(const_context, builder, op):
    """Lower MIL scatter_nd to the NN scatter_nd layer."""
    in_names = make_input(const_context, builder, [op.data, op.indices, op.updates])
    builder.add_scatter_nd(
        name=op.name,
        input_names=in_names,
        output_name=op.outputs[0].name,
        mode=op.mode.val.upper(),
    )
@register_mil_to_nn_mapping
def silu(const_context, builder, op):
    """Lower silu (y = x * sigmoid(x)) as a SIGMOID activation followed by a multiply."""
    x_name = make_input(const_context, builder, op.x)
    sigmoid_name = op.name + "__silu_sigmoid__"
    builder.add_activation(
        name=sigmoid_name,
        non_linearity="SIGMOID",
        input_name=x_name,
        output_name=sigmoid_name,
    )
    builder.add_elementwise(
        name=op.name,
        input_names=[x_name, sigmoid_name],
        output_name=op.outputs[0].name,
        mode='MULTIPLY',
    )
@register_mil_to_nn_mapping
def tile(const_context, builder, op):
    # Lower MIL tile to the NN tile layer. When the repetition counts are
    # dynamic (reps.val is None) the reps tensor is appended as a second input.
    inputs = [make_input(const_context, builder, op.x)]
    if op.reps.val is None:
        inputs.append(op.reps.name)
    builder.add_tile(
        name=op.name,
        reps=op.reps.val,
        # NOTE(review): a list is passed to `input_name` — presumably add_tile
        # accepts a list of input names here; confirm against the builder API.
        input_name=inputs,
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def tanh(const_context, builder, op):
    """Lower MIL tanh to a TANH activation layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="TANH",
    )
@register_mil_to_nn_mapping
def scaled_tanh(const_context, builder, op):
    """Lower MIL scaled_tanh to a SCALED_TANH activation with alpha/beta params."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="SCALED_TANH",
        params=[op.alpha.val, op.beta.val],
    )
@register_mil_to_nn_mapping
def sigmoid(const_context, builder, op):
    """Lower MIL sigmoid to a SIGMOID activation layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="SIGMOID",
    )
@register_mil_to_nn_mapping
def sigmoid_hard(const_context, builder, op):
    """Lower MIL sigmoid_hard to a SIGMOID_HARD activation with alpha/beta params."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="SIGMOID_HARD",
        params=[op.alpha.val, op.beta.val],
    )
@register_mil_to_nn_mapping
def erf(const_context, builder, op):
    """Lower MIL erf to the NN erf layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_erf(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def thresholded_relu(const_context, builder, op):
    """Lower MIL thresholded_relu to a THRESHOLDEDRELU activation (alpha = threshold)."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="THRESHOLDEDRELU",
        params=op.alpha.val,
    )
@register_mil_to_nn_mapping
def elu(const_context, builder, op):
    """Lower MIL elu to an ELU activation with its alpha param."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="ELU",
        params=op.alpha.val,
    )
@register_mil_to_nn_mapping
def leaky_relu(const_context, builder, op):
    """Lower MIL leaky_relu to a LEAKYRELU activation with its alpha param."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="LEAKYRELU",
        params=[op.alpha.val],
    )
@register_mil_to_nn_mapping
def gelu(const_context, builder, op):
    """Lower MIL gelu to the NN gelu layer, forwarding the approximation mode."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_gelu(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        mode=op.mode.val,
    )
@register_mil_to_nn_mapping
def softplus(const_context, builder, op):
    """Lower MIL softplus to a SOFTPLUS activation layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="SOFTPLUS",
    )
@register_mil_to_nn_mapping
def softmax(const_context, builder, op):
    """Lower MIL softmax.

    When the reduction axis is third-from-last (the NN channel axis) the
    classic softmax layer applies; any other axis needs softmax_nd.
    """
    rank = op.x.rank
    axis = op.axis.val
    is_channel_axis = axis == -3 or (axis > 0 and axis == rank - 3)
    if is_channel_axis:
        builder.add_softmax(
            name=op.name,
            input_name=op.x.name,
            output_name=op.outputs[0].name,
        )
    else:
        builder.add_softmax_nd(
            name=op.name,
            input_name=op.x.name,
            output_name=op.outputs[0].name,
            axis=axis,
        )
@register_mil_to_nn_mapping
def softplus_parametric(const_context, builder, op):
    """Lower MIL softplus_parametric to a PARAMETRICSOFTPLUS activation."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        input_shape=op.x.shape,
        input_rank=op.x.rank,
        output_name=op.outputs[0].name,
        non_linearity="PARAMETRICSOFTPLUS",
        params=[op.alpha.val, op.beta.val],
    )
@register_mil_to_nn_mapping
def softsign(const_context, builder, op):
    """Lower MIL softsign to a SOFTSIGN activation layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="SOFTSIGN",
    )
@register_mil_to_nn_mapping
def linear_activation(const_context, builder, op):
    """Lower MIL linear_activation to a LINEAR activation with alpha/beta params."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="LINEAR",
        params=[op.alpha.val, op.beta.val],
    )
@register_mil_to_nn_mapping
def relu(const_context, builder, op):
    """Lower MIL relu to a RELU activation layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        non_linearity="RELU",
    )
@register_mil_to_nn_mapping
def clamped_relu(const_context, builder, op):
    """Lower MIL clamped_relu to the NN clamped_relu layer, forwarding alpha/beta."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_clamped_relu(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        alpha=op.alpha.val,
        beta=op.beta.val,
    )
@register_mil_to_nn_mapping
def relu6(const_context, builder, op):
    """Lower relu6 as min(relu(x), 6): RELU, negate, threshold at -6, negate back."""
    relu_name = op.name + "__relu6_relu__"
    neg_name = op.name + "__relu6_neg__"
    thresh_name = op.name + "__relu6_threshold6__"
    builder.add_activation(
        name=relu_name,
        input_name=make_input(const_context, builder, op.x),
        output_name=relu_name,
        non_linearity="RELU",
    )
    # Negate so clipping at an upper bound becomes a lower-bound threshold.
    builder.add_activation(
        name=neg_name,
        input_name=relu_name,
        output_name=neg_name,
        non_linearity="LINEAR",
        params=[-1, 0],
    )
    builder.add_unary(
        name=thresh_name,
        input_name=neg_name,
        output_name=thresh_name,
        mode="threshold",
        alpha=-6,
    )
    # Negate back to recover min(relu(x), 6).
    builder.add_activation(
        name=op.name,
        input_name=thresh_name,
        output_name=op.outputs[0].name,
        non_linearity="LINEAR",
        params=[-1, 0],
    )
@register_mil_to_nn_mapping
def prelu(const_context, builder, op):
    """Lower MIL prelu to a PRELU activation with per-channel alpha."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_activation(
        name=op.name,
        input_name=x_name,
        input_shape=op.x.shape,
        input_rank=op.x.rank,
        output_name=op.outputs[0].name,
        non_linearity="PRELU",
        params=op.alpha.val,
    )
@register_mil_to_nn_mapping
def pad(const_context, builder, op):
    # Lower MIL pad. Static pads that only touch the last two dims use the
    # classic NN padding layer (supports reflect/replicate); everything else
    # falls back to constant_pad (constant mode only).
    if len(op.pad.shape) != 1:
        raise ValueError("Pad should be a 1D tensor.")
    pad = op.pad.val
    mode = op.mode.val
    constant_val = op.constant_val.val
    # Translate MIL mode names to their NN padding_type equivalents.
    nn_mode_mapping = {"reflect": "reflection", "replicate": "replication"}
    mode = nn_mode_mapping.get(mode, mode)
    if pad is not None:
        # Left-extend the pad list with zeros so it covers every dim of x
        # ([begin, end] per dimension).
        missing_dims = op.x.rank - len(pad) // 2
        pad = [0, 0] * missing_dims + list(pad)
    if pad is not None and op.x.rank > 1 and all(i == 0 for i in pad[:-4]):
        # Only the last two dims are padded: the 2-D padding layer applies.
        # pad[-4:] is [top, bottom, left, right].
        pad = pad[-4:]
        left, right = pad[2], pad[3]
        top, bottom = pad[0], pad[1]
        builder.add_padding(
            name=op.name,
            left=left,
            right=right,
            top=top,
            bottom=bottom,
            input_name=make_input(const_context, builder, op.x),
            output_name=op.outputs[0].name,
            padding_type=mode,
            value=constant_val,
        )
    elif mode == "constant":
        if pad is None:
            # Dynamic pad amounts: pass the pad tensor as a second input.
            builder.add_constant_pad(
                name=op.name,
                input_names=make_input(const_context, builder, [op.x, op.pad]),
                output_name=op.outputs[0].name,
                value=constant_val
            )
        else:
            builder.add_constant_pad(
                name=op.name,
                input_names=make_input(const_context, builder, [op.x]),
                output_name=op.outputs[0].name,
                value=constant_val,
                pad_amounts=pad,
            )
    else:
        # reflect/replicate pads beyond the last two dims (or with dynamic
        # amounts) are not expressible in this backend.
        raise ValueError("Unsupported mode for Pad layer! {}".format(mode))
@register_mil_to_nn_mapping
def instance_norm(const_context, builder, op):
    # Lower MIL instance_norm to an NN batchnorm layer running in
    # instance-normalization mode. Rank-3 inputs are expanded with a singleton
    # height dim first, then squeezed back.
    channels = op.x.shape[1]
    # Missing gamma/beta default to identity scale and zero shift.
    gamma = _np.array([1.0] * channels) if op.gamma is None else op.gamma.val
    beta = _np.array([0.0] * channels) if op.beta is None else op.beta.val
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name
    if op.x.rank == 3:
        x_name = op.name + "_expanded"
        # NOTE(review): the expand layer reads op.x.name directly, discarding
        # the make_input result above — confirm this is intended for const
        # inputs.
        builder.add_expand_dims(
            name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2],
        )
        out_name += "_instance_norm"
    builder.add_batchnorm(
        name=op.name,
        channels=channels,
        gamma=gamma,
        beta=beta,
        input_name=x_name,
        output_name=out_name,
        compute_mean_var=True,
        instance_normalization=True,
        epsilon=op.epsilon.val,
    )
    # Squeeze added `Height` dimension for 1d case
    if op.x.rank == 3:
        x_name = op.name + "_squeeze"
        builder.add_squeeze(
            name=x_name,
            input_name=out_name,
            output_name=op.outputs[0].name,
            axes=[-2],
        )
@register_mil_to_nn_mapping
def l2_norm(const_context, builder, op):
    """Lower MIL l2_norm to the NN l2_normalize layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_l2_normalize(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        epsilon=op.epsilon.val,
    )
@register_mil_to_nn_mapping
def layer_norm(const_context, builder, op):
    """Lower MIL layer_norm.

    Fast path: a rank-2/3 input normalized over its (static) last axis is
    expressed as reshape -> MVN -> scale -> reshape. Otherwise the
    normalization is assembled from primitive reduce/arithmetic layers.

    Raises NotImplementedError when the primitive path needs a dynamic
    normalized shape.
    """
    rank = op.x.rank
    input_shape = [-1 if is_symbolic(dim) else dim for dim in list(op.x.shape)]
    axes = list(range(op.x.rank)) if op.axes.val is None else op.axes.val
    # Bug fix: normalize negative axes from the resolved `axes` list. The
    # previous code iterated op.axes.val directly, which crashed when
    # op.axes.val was None and made the fallback above ineffective.
    axes = [axis + rank if axis < 0 else axis for axis in axes]
    epsilon = op.epsilon.val
    # if input shape = (X1, X2) or (X0, X1, X2), axes = [-1], X1 and X2 are known
    # then the following operations are performed
    # - reshape to (X1, 1, X2) / (X0, X1, 1, X2)
    # - apply MVN layer, which normalizes across last 2 dims
    # - apply scale layer
    # - reshape back to (X1, X2) / (X0, X1, X2)
    # Otherwise, we express the layer_norm as primitive operations
    if rank in [2, 3] and len(axes) == 1 and axes[0] == rank - 1 and input_shape.count(-1) < 2 \
        and input_shape[-1] != -1 and input_shape[-2] != -1:
        reshaped_shape = input_shape[:]
        # Insert a singleton dimension in the 'height' position
        reshaped_shape.insert(-1, 1)
        # Scale layer can't take parameters of size [W], but can take [1, H, W], and H=1 in this case
        gamma = _np.ones((1, 1, reshaped_shape[-1])) if op.gamma is None else _np.expand_dims(op.gamma.val, axis=(0, 1))
        beta = _np.zeros((1, 1, reshaped_shape[-1])) if op.beta is None else _np.expand_dims(op.beta.val, axis=(0, 1))
        builder.add_reshape_static(
            name=op.name + "_reshape",
            input_name=make_input(const_context, builder, op.x),
            output_name=op.name + "_reshape",
            output_shape=reshaped_shape,
        )
        builder.add_mvn(
            name=op.name + "_mvn",
            input_name=op.name + "_reshape",
            output_name=op.name + "_mvn",
            across_channels=False,
            normalize_variance=True,
            epsilon=epsilon,
        )
        builder.add_scale(
            name=op.name + "_scale",
            input_name=op.name + "_mvn",
            output_name=op.name + "_scale",
            W=gamma,
            b=beta,
            has_bias=True,
            shape_scale=_np.shape(gamma),
            shape_bias=_np.shape(beta),
        )
        builder.add_reshape_static(
            name=op.name,
            input_name=op.name + "_scale",
            output_name=op.outputs[0].name,
            output_shape=input_shape,
        )
    else:  # We don't meet the conditions for an MVN layer, so we use primitives
        # mean over the normalized axes
        mean_name = op.name + "_mean"
        builder.add_reduce_mean(
            name=mean_name,
            input_name=make_input(const_context, builder, op.x),
            output_name=mean_name,
            axes=axes,
            keepdims=True,
            reduce_all=False,
        )
        # centered input: x - mean
        sub_mean_name = op.name + "_sub_mean"
        builder.add_subtract_broadcastable(
            name=sub_mean_name,
            input_names=[op.x.name, mean_name],
            output_name=sub_mean_name,
        )
        # (x - mean)^2
        square_name = op.name + '_square'
        builder.add_unary(
            name=square_name,
            input_name=sub_mean_name,
            output_name=square_name,
            mode="power",
            alpha=2.0,
        )
        square_sum_name = op.name + '_square_sum'
        builder.add_reduce_sum(
            name=square_sum_name,
            input_name=square_name,
            output_name=square_sum_name,
            axes=axes,
            keepdims=True,
            reduce_all=False,
        )
        # Divide the summed squares by the element count of the normalized
        # axes to get the (biased) variance.
        normalized_shape = [op.x.shape[i] if i in axes else 1 for i in range(rank)]
        if not any_symbolic(normalized_shape):
            div_prod_name = op.name + '_div_constant'
            add_const(const_context, builder, div_prod_name, _np.prod(normalized_shape))
        else:
            raise NotImplementedError("dynamic shape input not supported for layer_norm")
        div_square_sum_name = op.name + '_div_square_sum'
        builder.add_divide_broadcastable(
            name=div_square_sum_name,
            input_names=[square_sum_name, div_prod_name],
            output_name=div_square_sum_name
        )
        # sqrt(variance + epsilon)
        epsilon_const_name = op.name + '_epsilon'
        add_const(const_context, builder, epsilon_const_name, epsilon)
        add_epsilon_name = op.name + '_add_epsilon'
        builder.add_elementwise(
            name=add_epsilon_name,
            input_names=[div_square_sum_name, epsilon_const_name],
            output_name=add_epsilon_name,
            mode="ADD",
        )
        sqrt_name = op.name + '_sqrt'
        builder.add_unary(
            name=sqrt_name,
            input_name=add_epsilon_name,
            output_name=sqrt_name,
            mode="sqrt",
        )
        # (x - mean) / std
        div_name = op.name + '_divide'
        builder.add_divide_broadcastable(
            name=div_name,
            input_names=[sub_mean_name, sqrt_name],
            output_name=div_name
        )
        # Affine transform: gamma * normalized + beta (identity when absent).
        gamma = _np.ones(normalized_shape) if op.gamma is None else _np.reshape(op.gamma.val, normalized_shape)
        beta = _np.zeros(normalized_shape) if op.beta is None else _np.reshape(op.beta.val, normalized_shape)
        gamma_name = op.name + '_gamma'
        beta_name = op.name + '_beta'
        add_const(const_context, builder, gamma_name, gamma)
        add_const(const_context, builder, beta_name, beta)
        mul_name = op.name + '_mul'
        builder.add_multiply_broadcastable(
            name=mul_name,
            input_names=[div_name, gamma_name],
            output_name=mul_name,
        )
        builder.add_add_broadcastable(
            name=op.name,
            input_names=[mul_name, beta_name],
            output_name=op.outputs[0].name,
        )
@register_mil_to_nn_mapping
def local_response_norm(const_context, builder, op):
    """Lower MIL local_response_norm to the NN LRN layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_lrn(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        alpha=op.alpha.val,
        beta=op.beta.val,
        local_size=op.size.val,
        k=op.k.val,
    )
@register_mil_to_nn_mapping
def conv_transpose(const_context, builder, op):
    # Lower MIL conv_transpose to an NN (de)convolution layer.
    # 1-D inputs are expanded to 2-D (singleton height) and squeezed back;
    # 3-D inputs use the dedicated convolution3d layer.
    x_name = make_input(const_context, builder, op.x)
    out_name = op.outputs[0].name
    # Special handling for 1d conv transpose
    is_conv_transpose_1d = op.x.rank == 3
    is_conv_transpose_2d = op.x.rank == 4
    is_conv_transpose_3d = op.x.rank == 5
    if is_conv_transpose_1d:
        x_name = op.name + "_expand_dim"
        out_name = op.name + "_expanded"
        builder.add_expand_dims(
            name=x_name, input_name=op.x.name, output_name=x_name, axes=[-2]
        )
    # Input names to be used
    input_names = [x_name]
    # Kernel shape: [C_in, C_out, D, H, W]
    weight = op.weight.val
    kernel_channels = weight.shape[0]
    output_channels = weight.shape[1] * op.groups.val
    if is_conv_transpose_1d:
        # Give the 1-D kernel a singleton height dim to match the 2-D layer.
        weight = _np.expand_dims(weight, -2)
    # pyMIL Deconvolution format: [C_in, C_out / groups, spatial_dims]
    # NN DeConvolution3D expects weights to have shape (C_out / groups, C_in, spatial_dims)
    # NN DeConvolution2D/1D expects (spatial_dims, C_in, C_out/groups)
    if is_conv_transpose_3d:
        weight = _np.transpose(weight, [1, 0, 2, 3, 4])
    else:
        weight = _np.transpose(weight, [2, 3, 0, 1])
    strides = op.strides.val.tolist()
    dilations = op.dilations.val.tolist()
    output_spatial_dims = list(op.outputs[0].shape[2:])
    if is_conv_transpose_1d:
        # Insert the singleton height dim into the per-dim attributes too.
        dilations = dilations[:-1] + [1] + dilations[-1:]
        strides = strides[:-1] + [1] + strides[-1:]
        # Must be at least 2D
        output_spatial_dims = output_spatial_dims[:-1] + [1] + output_spatial_dims[-1:]
    if any_symbolic(output_spatial_dims):
        # Dynamic output size: let the layer infer it at runtime.
        output_spatial_dims = None
    # padding
    padding_mode = op.pad_type.val
    pad = {}
    if padding_mode == "custom":
        # Explicit per-side pads are passed as keyword args with "valid" mode
        # (except the 3-D layer, which keeps padding_mode == "custom").
        if is_conv_transpose_1d:
            padding_mode = "valid"
            pad["padding_top"] = 0
            pad["padding_bottom"] = 0
            pad["padding_left"] = op.pad.val[0]  # Left
            pad["padding_right"] = op.pad.val[1]  # Right
        elif is_conv_transpose_2d:
            padding_mode = "valid"
            pad["padding_top"] = op.pad.val[0]  # Top
            pad["padding_bottom"] = op.pad.val[1]  # Bottom
            pad["padding_left"] = op.pad.val[2]  # Left
            pad["padding_right"] = op.pad.val[3]  # Right
        else:
            pad["padding_front"] = op.pad.val[0]  # Front
            pad["padding_back"] = op.pad.val[1]  # Back
            pad["padding_top"] = op.pad.val[2]  # Top
            pad["padding_bottom"] = op.pad.val[3]  # Bottom
            pad["padding_left"] = op.pad.val[4]  # Left
            pad["padding_right"] = op.pad.val[5]  # Right
    groups = op.groups.val
    has_bias = op.bias is not None
    if is_conv_transpose_3d:
        builder.add_convolution3d(
            name=op.name,
            input_channels=kernel_channels,
            output_channels=output_channels,
            depth=weight.shape[-3],
            height=weight.shape[-2],
            width=weight.shape[-1],
            W=weight,
            b=op.bias.val if has_bias else None,
            has_bias=has_bias,
            groups=groups,
            stride_depth=strides[0],
            stride_height=strides[1],
            stride_width=strides[2],
            dilation_depth=dilations[0],
            dilation_height=dilations[1],
            dilation_width=dilations[2],
            padding_mode=padding_mode,
            is_deconv=True,
            output_shape=output_spatial_dims,
            input_name=input_names,
            output_name=out_name,
            **pad
        )
    else:
        # NOTE(review): the layer name here is out_name (not op.name), which
        # differs from the 3-D branch — presumably intentional; confirm.
        builder.add_convolution(
            name=out_name,
            kernel_channels=kernel_channels,
            output_channels=output_channels,
            height=weight.shape[0],
            width=weight.shape[1],
            stride_height=strides[0],
            stride_width=strides[1],
            border_mode=padding_mode,
            groups=groups,
            W=weight,
            b=op.bias.val if has_bias else None,
            has_bias=has_bias,
            is_deconv=True,
            output_shape=output_spatial_dims,
            input_name=input_names,
            output_name=out_name,
            dilation_factors=dilations,
            **pad
        )
    # Squeeze added `Height` dimension for 1d case
    if is_conv_transpose_1d:
        builder.add_squeeze(
            name=op.name,
            input_name=out_name,
            output_name=op.outputs[0].name,
            axes=[-2],
        )
@register_mil_to_nn_mapping
def range_1d(const_context, builder, op):
    """Lower MIL range_1d to add_range_dynamic.

    `end` is always a runtime input; `start`/`step` are runtime inputs only
    when their values are not known at conversion time.
    """
    start_known = op.start.val is not None
    step_known = op.step.val is not None
    if start_known and step_known:
        inputs = [op.end]
    elif step_known:
        # only start is dynamic
        inputs = [op.end, op.start]
    else:
        # step is dynamic (start may or may not be)
        inputs = [op.end, op.start, op.step]
    builder.add_range_dynamic(
        name=op.name,
        output_name=op.outputs[0].name,
        input_names=make_input(const_context, builder, inputs),
        start=op.start.val if start_known else 0,
        step=op.step.val if step_known else 1,
    )
@register_mil_to_nn_mapping
def one_hot(const_context, builder, op):
    """Lower MIL one_hot; a dynamic vector size becomes a second runtime input."""
    inputs = [op.indices]
    if op.one_hot_vector_size.val is None:
        inputs.append(op.one_hot_vector_size)
    builder.add_one_hot(
        name=op.name,
        input_names=make_input(const_context, builder, inputs),
        output_name=op.outputs[0].name,
        one_hot_vector_size=op.one_hot_vector_size.val,
        axis=op.axis.val,
        on_value=op.on_value.val,
        off_value=op.off_value.val,
    )
@register_mil_to_nn_mapping
def non_maximum_suppression(const_context, builder, op):
    """Lower MIL non_maximum_suppression to the NN NMS layer (4 outputs)."""
    in_names = make_input(const_context, builder, [op.boxes, op.scores])
    out_names = [output.name for output in op.outputs[:4]]
    builder.add_nms(
        name=op.name,
        input_names=in_names,
        output_names=out_names,
        iou_threshold=op.iou_threshold.val,
        score_threshold=op.score_threshold.val,
        max_boxes=op.max_boxes.val,
        per_class_suppression=op.per_class_suppression.val,
    )
@register_mil_to_nn_mapping
def flatten2d(const_context, builder, op):
    """Lower MIL flatten2d to the NN flatten_to_2d layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_flatten_to_2d(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
        axis=op.axis.val,
    )
@register_mil_to_nn_mapping
def shape(const_context, builder, op):
    """Lower MIL shape to the NN get_shape layer."""
    x_name = make_input(const_context, builder, op.x)
    builder.add_get_shape(
        name=op.name,
        input_name=x_name,
        output_name=op.outputs[0].name,
    )
def add_upsample_nn(const_context, builder, op, scale_factor_h, scale_factor_w):
    """Emit a nearest-neighbor upsample layer for ``op``.

    The NN upsample layer only accepts positive integer scale factors, so each
    factor must be within 1e-4 of an integer >= 1 (absorbing float round-off)
    and is cast to int; otherwise NotImplementedError is raised.
    """
    def _validated_int_scale(scale, param_name):
        # One check shared by both factors; raises on non-integral values.
        if _np.abs(_np.round(scale) - scale) < 1e-4 and scale >= 1 - 1e-4:
            return int(scale)
        # Bug fix: the messages were plain strings, so the {placeholder}
        # never interpolated; they are f-strings now.
        raise NotImplementedError(
            f"Unsupported float type '{param_name}' ({scale}) for neuralnetwork."
        )

    scale_factor_h = _validated_int_scale(scale_factor_h, "scale_factor_height")
    scale_factor_w = _validated_int_scale(scale_factor_w, "scale_factor_width")
    builder.add_upsample(
        name=op.name,
        scaling_factor_h=scale_factor_h,
        scaling_factor_w=scale_factor_w,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        mode="NN",
    )
@register_mil_to_nn_mapping
def resize_nearest_neighbor(const_context, builder, op):
    """Lower resize_nearest_neighbor by converting target sizes to upsample factors."""
    h_out = op.target_size_height.val
    w_out = op.target_size_width.val
    x_shape = op.x.shape
    h_in, w_in = x_shape[-2], x_shape[-1]
    # Non-divisible targets get a tiny epsilon before dividing — presumably to
    # guard float rounding in add_upsample_nn's integer check; confirm.
    scale_h = h_out / h_in if h_out % h_in == 0 else (h_out + 1e-4) / h_in
    scale_w = w_out / w_in if w_out % w_in == 0 else (w_out + 1e-4) / w_in
    add_upsample_nn(const_context, builder, op, scale_h, scale_w)
@register_mil_to_nn_mapping
def upsample_nearest_neighbor(const_context, builder, op):
    """Lower upsample_nearest_neighbor, forwarding the explicit scale factors."""
    scale_h = op.scale_factor_height.val
    scale_w = op.scale_factor_width.val
    add_upsample_nn(const_context, builder, op, scale_h, scale_w)
@register_mil_to_nn_mapping
def upsample_bilinear(const_context, builder, op):
    """Lower upsample_bilinear to a BILINEAR upsample layer."""
    if op.align_corners.val:
        linear_mode = "ALIGN_CORNERS_TRUE"
    else:
        linear_mode = "ALIGN_CORNERS_FALSE"
    builder.add_upsample(
        name=op.name,
        scaling_factor_h=op.scale_factor_height.val,
        scaling_factor_w=op.scale_factor_width.val,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        mode="BILINEAR",
        linear_upsample_mode=linear_mode,
    )
@register_mil_to_nn_mapping
def resize_bilinear(const_context, builder, op):
    """Lower resize_bilinear, mapping MIL sampling modes to NN grid-sampling modes.

    Raises NotImplementedError for sampling modes this backend cannot express.
    """
    grid_sampling_mode_map = {
        "STRICT_ALIGN_CORNERS": "STRICT_ALIGN_ENDPOINTS_MODE",
        "ALIGN_CORNERS": "ALIGN_ENDPOINTS_MODE",
        "DEFAULT": "UPSAMPLE_MODE",
        "OFFSET_CORNERS": "ROI_ALIGN_MODE"
    }
    sampling_mode = op.sampling_mode.val
    if sampling_mode not in grid_sampling_mode_map:
        # Bug fix: the message was a plain string, so the {placeholder} never
        # interpolated; it is an f-string now.
        raise NotImplementedError(
            f"Unsupported 'sampling_mode' ('{sampling_mode}') in neuralnetwork backend"
        )
    builder.add_resize_bilinear(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        target_height=op.target_size_height.val,
        target_width=op.target_size_width.val,
        mode=grid_sampling_mode_map[sampling_mode],
    )
@register_mil_to_nn_mapping
def cond(const_context, builder, op):
    # Lower MIL cond to an NN branch layer: each MIL block is converted into
    # its own nested NeuralNetworkBuilder (if/else branch spec), and each
    # block output is copied onto the corresponding cond op output name.
    true_block = op.blocks[0]
    false_block = op.blocks[1]
    branch_layer = builder.add_branch(
        name=op.name, input_name=make_input(const_context, builder, op.pred),
    )
    true_builder = neural_network.NeuralNetworkBuilder(
        nn_spec=branch_layer.branch.ifBranch,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )
    convert_ops(const_context, true_builder, true_block.operations, true_block.outputs)
    # Copy block output to cond op output.
    for block_out, op_out in zip(true_block.outputs, op.outputs):
        true_builder.add_copy(
            name=block_out.name + "_ret_copy",
            # No need to make_input for block_out which is guaranteed
            # to be a node
            input_name=block_out.name,
            output_name=op_out.name,
        )
    false_builder = neural_network.NeuralNetworkBuilder(
        nn_spec=branch_layer.branch.elseBranch,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )
    convert_ops(
        const_context, false_builder, false_block.operations, false_block.outputs
    )
    # Same copy-out for the else branch.
    for block_out, op_out in zip(false_block.outputs, op.outputs):
        false_builder.add_copy(
            name=block_out.name + "_ret_copy",
            input_name=block_out.name,
            output_name=op_out.name,
        )
@register_mil_to_nn_mapping
def while_loop(const_context, builder, op):
    # Lower a MIL ``while_loop`` to an NN loop layer with separate condition
    # and body sub-networks.  Loop variables are threaded through by copy
    # layers: inputs are copied into the block's input names before the loop,
    # and body outputs are copied back onto the block input names each
    # iteration.
    cond_block = op.blocks[0]
    body_block = op.blocks[1]

    # Assume that all loop vars aren't loop invariant (invariant loop vars
    # should've been optimized away in graph passes).
    for v_in, vx_in in zip(op.loop_vars, cond_block.inputs):
        assert v_in.name != vx_in.name, "Loop invariant detected in {}".format(op)
        builder.add_copy(
            name=vx_in.name + "_input_copy",
            input_name=make_input(const_context, builder, v_in),
            output_name=vx_in.name,
        )
    loop_layer = builder.add_loop(
        name=op.name,
        # max_iterations=0 to use condition network.
        max_iterations=0,
    )

    # Construct while_loop condition
    cond_builder = neural_network.NeuralNetworkBuilder(
        nn_spec=loop_layer.loop.conditionNetwork,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )
    cond_builder.rank_dict = {k.name: builder.rank_dict[k.name] for k in cond_block.inputs}
    convert_ops(
        const_context,
        cond_builder,
        cond_block.operations,
        cond_block.outputs,
    )

    loop_layer.loop.conditionVar = cond_block.outputs[0].name

    # while_loop body produces loop_vars
    body_builder = neural_network.NeuralNetworkBuilder(
        nn_spec=loop_layer.loop.bodyNetwork,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )
    body_builder.rank_dict = {k.name: builder.rank_dict[k.name] for k in body_block.inputs}
    convert_ops(
        const_context,
        body_builder,
        body_block.operations,
        body_block.outputs,
    )

    # Also assume all outputs are different from loop inputs (i.e., no loop
    # invariant.)
    for vx_in, vx_out in zip(body_block.inputs, body_block.outputs):
        if vx_in.name == vx_out.name:
            msg = "Loop invariant var {} detected in block {}"
            logger.warning(msg.format(vx_in.name, body_block.name))
            continue
        body_builder.add_copy(
            name=vx_in.name + "_ret_copy",
            # NOTE(review): make_input is called with the outer ``builder``
            # here rather than ``body_builder`` -- confirm consts referenced
            # by vx_out are registered in the correct (body) network.
            input_name=make_input(const_context, builder, vx_out),
            output_name=vx_in.name,
        )
@register_mil_to_nn_mapping
def identity(const_context, builder, op):
    # Identity has no NN equivalent; realize it as a copy layer.
    builder.add_copy(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def concat(const_context, builder, op):
    # Lower ``concat``.  Zero-size operands are dropped; a single remaining
    # operand degenerates to a copy.  Channel-axis concats on rank>=4 tensors
    # use the (older) elementwise CONCAT layer, everything else uses concat_nd.
    # filter out input tensor with 0 size
    values = []
    for v in op.values:
        if len(v.shape) > 0 and v.shape[op.axis.val] == 0:
            continue
        values.append(v)

    if len(values) == 0:
        raise NotImplementedError('0 size tensor unsupported.')

    if len(values) >= 2:
        rank = values[0].rank
        if op.interleave.val:
            builder.add_concat_nd(
                name=op.name,
                input_names=make_input(const_context, builder, values),
                output_name=op.outputs[0].name,
                axis=op.axis.val,
                interleave=True)
        # axis == -3 (or the equivalent positive index) is the channel axis
        # for a rank-4 NCHW tensor.
        elif rank >= 4 and (op.axis.val == -3 or op.axis.val > 0 and op.axis.val == rank - 3):
            builder.add_elementwise(
                name=op.name,
                input_names=make_input(const_context, builder, values),
                output_name=op.outputs[0].name,
                mode="CONCAT",
            )
        else:
            builder.add_concat_nd(
                name=op.name,
                input_names=make_input(const_context, builder, values),
                output_name=op.outputs[0].name,
                axis=op.axis.val)
    else:
        builder.add_copy(
            name=op.name,
            input_name=make_input(const_context, builder, values[0]),
            output_name=op.outputs[0].name)
@register_mil_to_nn_mapping
def stack(const_context, builder, op):
    # ``stack`` maps 1:1 onto the NN stack layer.
    builder.add_stack(
        name=op.name,
        input_names=make_input(const_context, builder, op.values),
        output_name=op.outputs[0].name,
        axis=op.axis.val,
    )
@register_mil_to_nn_mapping
def split(const_context, builder, op):
    # Lower ``split``.  Zero-size pieces are removed from both the size list
    # and the output-name list.  Equal pieces use num_splits; unequal pieces
    # pass explicit split_sizes.
    split = op.sizes
    split = [size for size in split if size != 0]
    has_equal_splits = all([size == split[0] for size in split])
    num_splits = len(split)

    # Keep only the outputs corresponding to non-empty splits.
    output_names = [op.outputs[i].name for i in range(len(op.sizes)) if op.sizes[i] != 0]

    if has_equal_splits:
        builder.add_split_nd(
            name=op.name,
            input_name=make_input(const_context, builder, op.x),
            output_names=output_names,
            axis=op.axis.val,
            num_splits=num_splits)
    else:
        builder.add_split_nd(
            name=op.name,
            input_name=make_input(const_context, builder, op.x),
            output_names=output_names,
            axis=op.axis.val,
            split_sizes=list(split))
@register_mil_to_nn_mapping
def argsort(const_context, builder, op):
    # Normalize a negative axis to its positive equivalent for the NN layer.
    axis = op.x.rank + op.axis.val if op.axis.val < 0 else op.axis.val
    builder.add_argsort(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        axis=axis,
        # MIL expresses direction as 'ascending'; the NN layer as 'descending'.
        descending=(not op.ascending.val),
    )
@register_mil_to_nn_mapping
def pixel_shuffle(const_context, builder, op):
    # Pixel shuffle is a reorganize-data layer in PIXEL_SHUFFLE mode.
    builder.add_reorganize_data(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        mode="PIXEL_SHUFFLE",
        block_size=op.upscale_factor.val,
    )
@register_mil_to_nn_mapping
def sliding_windows(const_context, builder, op):
    # ``sliding_windows`` maps 1:1 onto the NN sliding-windows layer.
    builder.add_sliding_windows(
        name=op.name,
        input_name=make_input(const_context, builder, op.x),
        output_name=op.outputs[0].name,
        axis=op.axis.val,
        window_size=op.size.val,
        step=op.stride.val,
    )
@register_mil_to_nn_mapping
def crop(const_context, builder, op):
    # Lower ``crop`` to the NN crop layer; crop amounts are per-edge pixels.
    builder.add_crop(
        name=op.name,
        # NOTE(review): op.x.name is used directly instead of make_input() --
        # confirm x can never be a const that needs registering.
        input_names=[op.x.name],
        output_name=op.outputs[0].name,
        offset=0,
        left=op.crop_width.val[0],
        right=op.crop_width.val[1],
        top=op.crop_height.val[0],
        bottom=op.crop_height.val[1],
    )
@register_mil_to_nn_mapping
def crop_resize(const_context, builder, op):
    # Lower ``crop_resize``.  The sampling-mode string is mapped onto the
    # NN-spec enum; the input is expanded with a leading batch axis because
    # the NN crop_resize layer expects a rank-5-style input.
    grid_sampling_mode_map = {
        "STRICT_ALIGN_CORNERS": "STRICT_ALIGN_ENDPOINTS_MODE",
        "ALIGN_CORNERS": "ALIGN_ENDPOINTS_MODE",
        "DEFAULT": "UPSAMPLE_MODE",
        "OFFSET_CORNERS": "ROI_ALIGN_MODE",
    }
    if op.sampling_mode.val not in grid_sampling_mode_map:
        raise NotImplementedError(
            "Unsupported 'sampling_mode' ('{}') in neuralnetwork backend".format(
                op.sampling_mode.val
            )
        )

    mode = grid_sampling_mode_map[op.sampling_mode.val]

    input_expanded = op.name + "_x_expand"
    builder.add_expand_dims(
        name=input_expanded,
        input_name=make_input(const_context, builder, op.x),
        output_name=input_expanded,
        axes=[0],
    )
    builder.add_crop_resize(
        name=op.name,
        input_names=make_input(const_context, builder, [input_expanded, op.roi]),
        output_name=op.outputs[0].name,
        target_height=op.target_height.val,
        target_width=op.target_width.val,
        mode=mode,
        normalized_roi=op.normalized_coordinates.val,
        box_indices_mode=op.box_coordinate_mode.val,
        spatial_scale=op.spatial_scale.val,
    )
@register_mil_to_nn_mapping
def custom_op(const_context, builder, op):
    # Lower a user-defined custom op.  The op's ``bindings`` dict supplies the
    # CoreML custom-layer class name, the input ordering, scalar parameters,
    # weight tensors and a free-form description.
    class_name = op.bindings.get("class_name", op.name)
    input_order = op.bindings.get("input_order", [])
    parameters = op.bindings.get("parameters", [])
    weights = op.bindings.get("weights", [])
    description = op.bindings.get("description", "")

    if len(input_order) == 0:
        raise ValueError("Inputs not provided for Custom Layer: {}".format(op.name))

    # Get input names
    inputs = [op.inputs[_name] for _name in input_order]

    # Get output names
    output_names = [_output.name for _output in op.outputs]

    # Load custom params
    params = NeuralNetwork_pb2.CustomLayerParams()
    params.className = class_name
    params.description = description

    # Load parameters: each scalar parameter is stored in the proto field
    # matching its MIL dtype.
    for _param in parameters:
        param = op.inputs[_param]
        param_val = param.val
        if types.is_bool(param.dtype):
            params.parameters[_param].boolValue = param_val
        elif types.is_int(param.dtype):
            params.parameters[_param].intValue = param_val
        elif types.is_float(param.dtype):
            params.parameters[_param].doubleValue = param_val
        elif types.is_str(param.dtype):
            params.parameters[_param].stringValue = param_val
        else:
            raise ValueError(
                "Unknown parameter type for custom layer- "
                "Op: {}, Parameter: {}, Type: {}".format(op.name, _param, param.dtype)
            )

    # Load weights
    for _weight in weights:
        wt = params.weights.add()
        wt.floatValue.extend(map(float, _weight))

    # Add a custom layer
    builder.add_custom(
        name=op.name,
        input_names=make_input(const_context, builder, inputs),
        output_names=output_names,
        custom_proto_spec=params,
    )
@register_mil_to_nn_mapping
def make_list(const_context, builder, op):
    # Lower ``make_list`` (a TensorArray-style list) as a zero-filled tensor
    # of shape [length] + elem_shape.  A static length becomes a const; a
    # dynamic length becomes a fill_dynamic fed by a shape built at runtime.
    # Set an initial size
    size = op.init_length.val

    # set the dynamic dimensions to 1 for initialization
    # Ex: op.elem_shape = [i0, 128] will result in [1, 128]
    elem_shape = [1 if isinstance(dim_var.val, str) else
                  dim_var.val for dim_var in op.elem_shape]

    if size is not None:
        array_size = size if size > 0 else 1
        array_shape = [array_size] + elem_shape
        add_const(
            const_context,
            builder,
            op.outputs[0].name,
            val=_np.zeros(array_shape, dtype="float"),
        )
    else:
        if len(elem_shape) > 0:
            node_es_name = op.name + "_element_shape"
            add_const(
                const_context,
                builder,
                node_es_name,
                val=_np.array(elem_shape, dtype="float"),
            )

            # Concatenate list length of the input (should be a constant
            # vector of size 1) with element shape
            node_arr_shape_name = op.name + "_arr_shape"
            builder.add_concat_nd(
                name=node_arr_shape_name,
                input_names=[op.init_length.name, node_es_name],
                output_name=node_arr_shape_name,
                axis=0,
            )
        else:
            raise ValueError("elem_shape should have length > 0.")

        builder.add_fill_dynamic(
            name=op.name, input_name=node_arr_shape_name, output_name=op.outputs[0].name
        )
def _realloc_list(const_context, builder, ls_var, index_var, value_var, mode):
    """Grow/re-initialize the tensor backing a MIL list before a write.

    Emits the NN layers (shape probes, branches, fills, concats) that at
    runtime (1) re-initialize the list when its element shape turns out to
    differ from the placeholder shape it was created with, and (2) extend the
    list with zero-filled rows when ``index_var`` is past the current end.
    """
    # we do two things in this helper function
    # (1)
    # check if we need to re-initialize the tensorarray:
    # it happens when the elem_shape is runtime determined and the runtime shape is not equal to
    # the default shape. Ex: elem_shape is = [i0, 10] (initialized with [1, 10]) and at the runtime we get [2, 10].
    # (2)
    # If index_var >= len(ls_var), reallocate the array and copy over existing
    # contents
    # index_var: str or Var
    # ls_var: Var

    # check if elem_shape is runtime-determined
    elem_shape = tuple(value_var.shape)
    has_dynamic_shape = any([is_symbolic(i) for i in elem_shape])

    # get the fill shape of the tensor array
    # [length, elem_dim1, elem_dim2, ...]
    full_shape_name = ls_var.name + "_full_shape"
    builder.add_get_shape(
        name=full_shape_name,
        input_name=ls_var.name,  # no need to make_input
        output_name=full_shape_name,
    )

    # slice shape [length, elem_dim1, elem_dim2, ...] to get current length
    curr_len_name = ls_var.name + "_length"
    builder.add_slice_static(
        name=curr_len_name,
        input_name=full_shape_name,
        output_name=curr_len_name,
        begin_ids=[0],
        end_ids=[1],
        begin_masks=[False],
        end_masks=[False],
        strides=[1],
    )

    value_elem_shape_name = ls_var.name + '_value_elem_shape'
    if has_dynamic_shape:
        # get elem_shape from value if it is runtime-determined
        # this is similar to what the backfill_make_list_elem_type tf graph pass does.
        # if mode == "list_write", elem_shape equal to value.shape,
        # if mode == "list_scatter", elem_shape equal to value.shape[1:]
        if mode == "list_write":
            builder.add_get_shape(
                name=value_elem_shape_name,
                input_name=make_input(const_context, builder, value_var),
                output_name=value_elem_shape_name,
            )
        elif mode == "list_scatter":
            raw_value_elem_shape_name = ls_var.name + '_raw_value_elem_shape'
            builder.add_get_shape(
                name=raw_value_elem_shape_name,
                input_name=make_input(const_context, builder, value_var),
                output_name=raw_value_elem_shape_name,
            )
            # drop the leading (scatter-batch) dimension of value.shape
            builder.add_slice_static(
                name=value_elem_shape_name,
                input_name=raw_value_elem_shape_name,
                output_name=value_elem_shape_name,
                begin_ids=[1],
                end_ids=[-1],
                begin_masks=[False],
                end_masks=[True],
                strides=[1],
            )
    else:
        add_const(const_context, builder, value_elem_shape_name, _np.array(elem_shape))

    # if elem_shape is runtime-determined, check if we need to re-initialize the array
    if has_dynamic_shape:
        # slice shape [length, elem_dim1, elem_dim2, ...] to get list elem_shape
        curr_elem_shape_name = ls_var.name + "_ls_elem_shape"
        builder.add_slice_static(
            name=curr_elem_shape_name,
            input_name=full_shape_name,
            output_name=curr_elem_shape_name,
            begin_ids=[1],
            end_ids=[-1],
            begin_masks=[False],
            end_masks=[True],
            strides=[1],
        )

        # test if the runtime elem_shape from the list and value are equal
        not_equal_name = ls_var.name + '_elem_shape_not_equal'
        builder.add_not_equal(
            name=not_equal_name,
            input_names=[curr_elem_shape_name, value_elem_shape_name],
            output_name=not_equal_name,
        )

        # reduce the per-dimension "not equal" flags into a single scalar
        reduce_any_name = ls_var.name + '_reduce_any'
        builder.add_reduce_sum(
            name=reduce_any_name,
            input_name=not_equal_name,
            output_name=reduce_any_name,
            axes=[0],
            keepdims=False,
            reduce_all=True,
        )

        # if the two elem_shape are different, then re initialize the list with elem_shape from the value
        re_initialize_condition_name = ls_var.name + "_condition_re_initialize"
        layer = builder.add_branch(name=re_initialize_condition_name, input_name=reduce_any_name)
        true_builder = neural_network.NeuralNetworkBuilder(
            nn_spec=layer.branch.ifBranch,
            disable_rank5_shape_mapping=True,
            use_float_arraytype=True,
        )

        re_initialize_shape_name = ls_var.name + "_re_initialize_shape"
        true_builder.add_concat_nd(
            name=re_initialize_shape_name,
            input_names=[curr_len_name, value_elem_shape_name],
            output_name=re_initialize_shape_name,
            axis=0,
        )

        re_initialize_name = ls_var.name + "_re_initialize"
        true_builder.add_fill_dynamic(
            name=re_initialize_name,
            input_name=re_initialize_shape_name,
            output_name=re_initialize_name,
            value=0.0,
        )

        true_builder.add_copy(
            name=ls_var.name + "_re_initialize_assign",
            input_name=re_initialize_name,
            output_name=ls_var.name
        )

    # after re-initialize the list, we now check if we need to reallocate the list
    # check if the index > curr_length
    is_growing_name = ls_var.name + "_is_growing"
    builder.add_greater_than(
        name=is_growing_name,
        input_names=make_input(const_context, builder, [index_var, curr_len_name]),
        output_name=is_growing_name,
        use_greater_than_equal=True,
    )

    condition_name = ls_var.name + "_condition"
    layer = builder.add_branch(name=condition_name, input_name=is_growing_name)

    true_builder = neural_network.NeuralNetworkBuilder(
        nn_spec=layer.branch.ifBranch,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )

    # alloc_length_name0 = index - list_length
    alloc_length_name0 = ls_var.name + "_extra_length0"
    true_builder.add_subtract_broadcastable(
        name=alloc_length_name0,
        input_names=make_input(const_context, builder, [index_var, curr_len_name]),
        output_name=alloc_length_name0,
    )

    # alloc_length_name1 = index - list_length + 1
    alloc_length_name1 = ls_var.name + "_extra_length1"
    true_builder.add_elementwise(
        name=alloc_length_name1,
        input_names=[alloc_length_name0],
        mode="ADD",
        output_name=alloc_length_name1,
        alpha=1,
    )

    # alloc_shape_name = [alloc_length] + elem_shape
    alloc_shape_name = ls_var.name + "_alloc_shape"
    true_builder.add_concat_nd(
        name=alloc_shape_name,
        input_names=[alloc_length_name1, value_elem_shape_name],
        output_name=alloc_shape_name,
        axis=0,
    )

    # new_alloc_name is np.zeros([alloc_length] + elem_shape)
    new_alloc_name = ls_var.name + "_alloc"
    true_builder.add_fill_dynamic(
        name=new_alloc_name,
        input_name=alloc_shape_name,
        output_name=new_alloc_name,
        value=0.0,
    )

    # new_list_name is np.concat([old_list, new_alloc])
    new_list_name = ls_var.name + "_new"
    true_builder.add_concat_nd(
        name=new_list_name,
        input_names=[ls_var.name, new_alloc_name],
        output_name=new_list_name,
        axis=0,
    )

    # Copy new_list_name to ls_var.name
    true_builder.add_copy(
        name=ls_var.name + "_assign", input_name=new_list_name, output_name=ls_var.name
    )
@register_mil_to_nn_mapping
def list_write(const_context, builder, op):
    # Grow/re-init the list if needed, then scatter the value at op.index.
    _realloc_list(const_context, builder, op.ls, op.index, op.value, "list_write")

    # expanded_value_name is [1, op.value]
    expanded_value_name = op.ls.name + '_' + op.value.name + "_expanded"
    builder.add_expand_dims(
        name=expanded_value_name,
        input_name=make_input(const_context, builder, op.value),
        output_name=expanded_value_name,
        axes=[0],
    )

    builder.add_scatter(
        name=op.name,
        input_names=make_input(
            const_context, builder, [op.ls, op.index, expanded_value_name]
        ),
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def list_gather(const_context, builder, op):
    # Gather on axis 0 reads multiple list entries at once.
    builder.add_gather(
        name=op.name,
        input_names=make_input(const_context, builder, [op.ls, op.indices]),
        output_name=op.outputs[0].name,
        axis=0,
    )
@register_mil_to_nn_mapping
def list_scatter(const_context, builder, op):
    # The list may need to grow up to the largest scatter index, so compute
    # max(indices) first and hand it to the realloc helper.
    max_idx_name = op.indices.name + "_max"
    builder.add_reduce_max(
        name=max_idx_name,
        axes=[0],
        keepdims=False,
        input_name=make_input(const_context, builder, op.indices),
        output_name=max_idx_name,
    )
    _realloc_list(const_context, builder, op.ls, max_idx_name, op.value, "list_scatter")
    builder.add_scatter(
        name=op.name,
        input_names=make_input(const_context, builder, [op.ls, op.indices, op.value]),
        output_name=op.outputs[0].name,
    )
@register_mil_to_nn_mapping
def list_read(const_context, builder, op):
    # Read one list entry: gather produces [1] + elem_shape, then the leading
    # axis is squeezed away.
    # gathered_name has shape [1] + elem_shape
    gathered_name = op.name + "_gathered"
    builder.add_gather(
        # NOTE(review): the gather layer is named op.name while the squeeze is
        # named squeezed_name; the layer names look swapped relative to their
        # outputs -- confirm this is intentional.
        name=op.name,
        input_names=make_input(const_context, builder, [op.ls, op.index]),
        output_name=gathered_name,
        axis=0,
    )

    # squeezed_name has shape elem_shape
    squeezed_name = op.name + "_squeezed"
    builder.add_squeeze(
        name=squeezed_name,
        input_name=gathered_name,
        output_name=op.outputs[0].name,
        axes=[0],
    )
@register_mil_to_nn_mapping
def list_length(const_context, builder, op):
    # The list is stored as a tensor of shape [length] + elem_shape, so the
    # length is the first element of its runtime shape.
    # list_shape_name == [list_length] + elem_shape
    list_shape_name = op.ls.name + "_shape"
    builder.add_get_shape(
        name=list_shape_name,
        input_name=make_input(const_context, builder, op.ls),
        output_name=list_shape_name,
    )

    # slice to get list_length
    builder.add_slice_static(
        name=op.name,
        input_name=list_shape_name,
        output_name=op.outputs[0].name,
        begin_ids=[0],
        end_ids=[1],
        begin_masks=[False],
        end_masks=[False],
        strides=[1],
    )
@register_mil_to_nn_mapping
def _const_symbolic(const_context, builder, op):
    # Symbolic constants carry no runtime data, so no NN layer is emitted.
    # do nothing
    pass
| bsd-3-clause | d30da0dd0161f0b145b46909c7d95cc0 | 32.785994 | 154 | 0.579533 | 3.350844 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/passes/elementwise_batchnorm_fusion.py | 1 | 4234 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.helper import block_context_manager
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
def _match_pattern(op):
    """Return the ``add`` op that is the sole consumer of ``op`` when ``op``
    is a ``mul``; return None otherwise.

    Ops whose output is also a block output are skipped so the fusion never
    removes a graph output.
    """
    if op.outputs[0] in op.enclosing_block.outputs:
        return None

    if op.op_type == "mul":
        # find add
        child_ops = op.outputs[0].child_ops
        if len(child_ops) == 1:
            add_op_candidate = list(child_ops)[0]
            if add_op_candidate.op_type == "add":
                return add_op_candidate
    return None
def _find_const_input_val(op):
if op.x.val is not None:
return op.x.val
if op.y.val is not None:
return op.y.val
return None
def _check_shape(arr):
"""
return True if shape is of form
(1,C,1,1) or (C,1,1)
"""
rank = len(arr.shape)
if not (rank == 3 or rank == 4):
return False
C = arr.shape[-3]
if not (arr.shape == (1, C, 1, 1) or arr.shape == (C, 1, 1)):
return False
return True
def _try_to_transform(mul_op, add_op, block):
    """Replace the matched ``mul`` + ``add`` pair with a ``batch_norm`` op.

    Returns True on success; returns False (leaving the graph untouched) when
    any precondition fails: non-rank-4 input, non-const scale/offset, wrong
    broadcast shape, or a single channel.
    """
    non_const_input_mul = mul_op.x if mul_op.x.val is None else mul_op.y
    if non_const_input_mul.rank != 4:
        return False

    gamma = _find_const_input_val(mul_op)
    beta = _find_const_input_val(add_op)
    if gamma is None or beta is None:
        return False

    if not (isinstance(gamma, np.ndarray) and isinstance(beta, np.ndarray)):
        return False

    # check that gamma and beta have shape (1,C,1,1) or (C,1,1)
    # that is they are doing vector addition on the axis=-3, which is what the
    # batchnorm layer does (batchnorm layer only works on rank 4 input tensors)
    if not (_check_shape(gamma) and _check_shape(beta)):
        return False

    C = gamma.shape[-3]
    if C == 1:
        return False

    out_name = add_op.outputs[0].name
    # Emit an identity batch_norm (zero mean, unit variance) carrying the
    # mul/add constants as gamma/beta.
    x = mb.batch_norm(
        x=non_const_input_mul,
        mean=np.zeros((C,), np.float32),
        variance=np.ones((C,), np.float32),
        gamma=np.squeeze(gamma),
        beta=np.squeeze(beta),
        name=out_name,
        before_op=mul_op,
    )

    add_op.enclosing_block.replace_uses_of_var_after_op(
        anchor_op=add_op, old_var=add_op.outputs[0], new_var=x
    )
    # Remove all the ops at once
    block.remove_ops([mul_op, add_op])
    return True
@block_context_manager
def _fuse_elementwise_to_batchnorm_block(block):
    """Run one fusion sweep over ``block`` (recursing into child blocks);
    return True as soon as a single fusion is applied, False otherwise."""
    fusion_status = False
    for op in list(block.operations):
        for b in op.blocks:
            block_changed = True
            while block_changed:
                block_changed = _fuse_elementwise_to_batchnorm_block(b)
        if len(op.blocks) > 0:
            # This op can't be mul
            continue

        add_op = _match_pattern(op)
        if add_op is not None:
            fusion_status = _try_to_transform(op, add_op, block)
            # has to break as the downstream iterator is affected.
            if fusion_status:
                return fusion_status
    return fusion_status
@register_pass(namespace="common")
class fuse_elementwise_to_batchnorm(AbstractGraphPass):
    """
    Fold mul + add into a batch norm,
    if the const feeding into the mul/add is of shape (1,C,1,1) or (C,1,1)
    and input to mul is of rank 4.

    Given:
             [Const]   [Const]
                |         |
                V         V
    [...] --> [Mul] --> [Add] --> [...]

    That is,

        %2 = op1(%1)
        %3 = mul(%2, constant)
        %4 = add(%3, constant)
        %5 = op2(%4)
        ...

    Result:

    [...] --> [BatchNorm] --> [...]

    That is,
        %2 = op1(%1)
        %4 = batchnorm(%2)
        %5 = op2(%4)
        ...
    """

    def apply(self, prog):
        # Sweep each function to a fixed point: each sweep applies at most
        # one fusion, so repeat until no change is reported.
        for f in prog.functions.values():
            block_changed = True
            while block_changed:
                block_changed = _fuse_elementwise_to_batchnorm_block(f)
apple/coremltools | coremltools/converters/mil/frontend/tensorflow/parsed_tf_node.py | 1 | 3234 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import types
from .tfssa import ParsedNode
class ParsedTFNode(ParsedNode):
    """
    A parsed TensorFlow Node.

    name: The name of the node (str)
    op: The operation represented by the node (str)
    datatype: The type of the node. (type)
    value: The value of the node if available
    inputs: The list of nodes which are inputs to this node (list[str])
    control_inputs: The list of nodes which have to be executed before this node (list[str])
    attr: The attributes of the node
    outputs: The list of nodes which consume the result of this node (list[str])
    control_outputs: The list of nodes which have to be executed after this node (list[str])
    """

    def __init__(self, tfnode=None):
        super(ParsedTFNode, self).__init__()
        self.original_node = tfnode

        if tfnode is not None:
            from .parse import parse_attr

            self.name = tfnode.name
            # PlaceholderWithDefault is treated as a plain Placeholder.
            if tfnode.op == "PlaceholderWithDefault":
                self.op = "Placeholder"
            else:
                self.op = tfnode.op
            # TF encodes control dependencies as inputs with a leading '^'.
            self.inputs = [x for x in tfnode.input if not x.startswith("^")]
            self.control_inputs = [x[1:] for x in tfnode.input if x.startswith("^")]
            self.attr = {k: parse_attr(v) for k, v in tfnode.attr.items()}

    def parse_from_attr(self):
        """Infer and set ``self.datatype`` from the parsed attributes,
        preferring an explicit value, then _output_shapes, shape, and dtype."""
        if "value" in self.attr:
            self.datatype = self.attr["value"].__class__
        elif "_output_shapes" in self.attr:
            output_shapes = self.attr["_output_shapes"]
            if output_shapes[0] is not None and len(output_shapes[0]) > 0:
                if "dtype" in self.attr:
                    rettype = types.tensor(self.attr["dtype"], tuple(output_shapes[0]))
                elif "T" in self.attr:
                    rettype = types.tensor(self.attr["T"], tuple(output_shapes[0]))
                elif "Tparams" in self.attr:
                    rettype = types.tensor(
                        self.attr["Tparams"], tuple(output_shapes[0])
                    )
                else:
                    # Bug fix: previously '%' bound tighter than '+', so the
                    # format args were applied to str(self.attr) (raising
                    # TypeError) instead of the message. Format explicitly.
                    raise NotImplementedError(
                        "Op-(%s) %s not implemented\nWith attribute: %s"
                        % (self.op, self.name, self.attr)
                    )
                self.datatype = rettype
            elif "dtype" in self.attr:
                self.datatype = self.attr["dtype"]
        elif "shape" in self.attr:
            shape = self.attr["shape"]
            assert "dtype" in self.attr
            if len(shape) == 0:
                # Scalar: the dtype alone describes the node.
                self.datatype = self.attr["dtype"]
            else:
                self.datatype = types.tensor(self.attr["dtype"], shape)
        elif "dtype" in self.attr:
            self.datatype = self.attr["dtype"]

    def _copy_impl(self, dest):
        # Copy base-class fields, then carry over the raw TF node reference.
        dest = super(ParsedTFNode, self)._copy_impl(dest)
        dest.original_node = self.original_node
        return dest

    def __copy__(self):
        return self._copy_impl(ParsedTFNode())
| bsd-3-clause | d311f98970486e06155d5d3c0632e110 | 39.425 | 92 | 0.567718 | 3.968098 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/frontend/torch/internal_graph.py | 1 | 12404 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from collections import OrderedDict
from itertools import islice
def _make_ssa_name(name):
"""
Converts a symbol name (string) into an SSA name, by prepending '%'.
Only used for pretty printing the graph.
"""
if name is None:
return "None"
return "%" + name
def _ssa_name_list(names):
    """Render every symbol name in *names* as an SSA name. Only used for
    pretty printing the graph."""
    return list(map(_make_ssa_name, names))
def _find_new_name(old_name, node_names):
"""
Disambiguate a node's name from a list of existing node names by adding
successively larger integers.
"""
count = 0
new_name = old_name + "." + str(count) if count != 0 else old_name
while new_name in node_names:
count += 1
new_name = old_name + "." + str(count)
return new_name
def _replace_in_list(ls, old_val, new_val):
"""Helper function to replace a value in a list."""
try:
idx = ls.index(old_val)
except ValueError:
pass
else:
ls[idx] = new_val
class InternalTorchIRBlock:
    """
    coremltools internal representation of a torch IR block.
    """

    def __init__(self, raw_block=None, parent=None, nodes=None, inputs=None, outputs=None):
        """
        Arguments:
            raw_block: The torch._C.Block to convert, or None.
            parent: The InternalTorchIRNode this block belongs to.
            nodes: If @raw_block is None, the list of InternalTorchIRNodes in the block
            inputs: If @raw_block is None, the list of input symbols.
            outputs: If @raw_block is None, the list of output symbols.
        """
        self.nodes = []
        node_names = set()
        self.inputs = []
        self.outputs = []
        self.parent = parent

        if raw_block:
            # Add nodes
            for raw_node in raw_block.nodes():
                new_node = InternalTorchIRNode(raw_node, parent=self)
                # A node without outputs is named after its kind, which may
                # collide with an earlier node of the same kind; disambiguate.
                if new_node.name == new_node.kind:
                    new_node.name = _find_new_name(new_node.name, node_names)
                self.nodes.append(new_node)
                node_names.add(new_node.name)

            # Add inputs
            for inp in raw_block.inputs():
                self.inputs.append(inp.debugName())

            # Add outputs
            for outp in raw_block.outputs():
                self.outputs.append(outp.debugName())
        else:
            self.nodes = nodes
            self.inputs = inputs
            self.outputs = outputs

    def __str__(self, indent=2):
        indent_str = " " * indent
        graph_str = "{}block({}):\n".format(
            indent_str, ", ".join(_ssa_name_list(self.inputs))
        )
        graph_str += "{}\n".format(indent_str).join(
            [x.__str__(indent=indent + 2) for x in self.nodes]
        )
        graph_str += "\n{}return ({})".format(
            indent_str, ", ".join(_ssa_name_list(self.outputs))
        )
        return graph_str

    def __repr__(self):
        return str(self)

    def replace_name(self, old_name, new_name):
        """Replaces all instances of @old_name with @new_name in @self."""
        # Replace graph inputs/outputs
        _replace_in_list(self.inputs, old_name, new_name)
        _replace_in_list(self.outputs, old_name, new_name)

        for node in self.nodes:
            node.replace_name(old_name, new_name)
class InternalTorchIRNode:
    """
    coremltools internal representation of a torch IR node.
    Can construct itself from a provided torchIR node or manually constructed with
    args for testing.

    See InternalTorchIRGraph for the motivation behind this structure.
    """

    def __init__(
        self, node=None, parent=None, attr=None, inputs=None, outputs=None, kind=None, blocks=None,
    ):
        """
        Arguments:
            node: The torch._C.Node to convert, or None.
            parent: The InternalTorchIRGraph/Block this node belongs to.
            attr: If @node is not specified, the dict of named attributes.
            inputs: If @node is not specified, the list of input symbols.
            outputs: If @node is not specified, the list of output symbols.
            kind: If @node is not specified, the kind (op) of the node.
            blocks: If @node is not specified, the list of InternalTorchIRBlock.
        """
        self.parent = parent
        if node is not None:
            self.inputs = [_input.debugName() for _input in node.inputs()]
            self.outputs = [output.debugName() for output in node.outputs()]
            # e.g. "aten::relu" -> "relu"
            self.kind = node.kind().split("::")[-1].lower()
            self.blocks = [InternalTorchIRBlock(raw_block=b, parent=self) for b in node.blocks()]
            self.attr = {
                name: getattr(node, node.kindOf(name))(name)
                for name in node.attributeNames()
            }
            if "value" not in self.attr:
                self.attr["value"] = None
            # If the output is boolean, explicitly cast it so type inference
            # will work correctly.
            if len(self.outputs) == 1 and next(node.outputs()).type().str() == "bool":
                self.attr["value"] = bool(self.attr["value"])
        else:
            self.inputs = inputs
            self.outputs = outputs
            self.kind = kind
            self.blocks = blocks if blocks is not None else []
            self.attr = attr if attr is not None else {"value": None}

        # On rare occassions, a node has no outputs. In that case, the node's
        # name will be its kind. However, this no longer guarantees the node's
        # name is unique. It will be up to the graph constructing the node to
        # make sure names are unique.
        self.name = self.outputs[0] if len(self.outputs) > 0 else self.kind

    def __str__(self, indent=2):
        node_str = " " * indent + "{} = {}".format(
            ", ".join(_ssa_name_list(self.outputs)), self.kind
        )
        node_str += "[{}]".format(
            ", ".join(
                ["{}={}".format(n, v) for n, v in self.attr.items() if v is not None]
            )
        )
        node_str += "({})".format(", ".join(_ssa_name_list(self.inputs)))
        for b in self.blocks:
            node_str += "\n" + b.__str__(indent=indent + 2)
        return node_str

    def __repr__(self):
        return str(self)

    def replace_name(self, old_name, new_name):
        """Replaces all instances of @old_name with @new_name in @self."""
        _replace_in_list(self.inputs, old_name, new_name)
        _replace_in_list(self.outputs, old_name, new_name)

        if self.name == old_name:
            self.name = new_name
        for block in self.blocks:
            block.replace_name(old_name, new_name)
class InternalTorchIRGraph:
    """
    CoreML internal representation of a torch IR graph. A torch._C.Graph
    object is not an ideal structure to use in converting to CoreML. Conversion
    to an InternalTorchIRGraph is inserted between the original graph and the
    final CoreML model to address several issues:
        1. A torch._C.graph is hard to work with. For example, its .inputs()
           and .outputs() functions return iterators, so the only way to
           determine the number of inputs/outputs is by counting to the end.
           There are other examples of why the torch structure is hard to work
           with, and this structure alleviates those issues.
        2. torch._C.graph is an internal API and so we can't count on its
           stability. By inserting a layer in between, we can handle any changes
           to torch._C.graph here and isolate the ops code that processes the
           graph.
        3. torch._C.graph does not expose a Python constructor. This makes
           it impossible to write unit tests that isolate specific ops since
           they have to come from actually converting a PyTorch graph. With an
           internal structure, we can directly build the test cases we need for
           unit testing.
    """

    def __init__(
        self, raw_graph=None, params_dict=None, input_values=None, cut_at_symbols=None,
        nodes=None, params=None, inputs=None, outputs=None,
    ):
        """
        Arguments:
            raw_graph: The torch._C.Graph to convert, or None.
            params_dict: A dictionary mapping graph parameter names to tensors.
                Must be given if @raw_graph is not None.
            input_values: A list of inputs to the graph. Must be given if
                @raw_graph is not None.
            cut_at_symbols: The list of desired outputs from the graph. Symbols
                must be present in the graph. For debugging use only. Can only
                be given if @raw_graph is not None.
            nodes: If @raw_graph is None, the list of InternalTorchIRNodes in
                the graph.
            params: If @raw_graph is None, the dict mapping parameter names to
                their numpy value.
            inputs: If @raw_graph is None, the OrderedDict mapping input names
                to their example values.
            outputs: list[str], If @raw_graph is None, the list of outputs from the graph.
        """
        self.nodes = []
        node_names = set()
        self.params = {}
        self.inputs = OrderedDict()
        self.outputs = []

        if raw_graph is not None:
            # Add nodes. Nodes whose name collided with their kind string get
            # a fresh unique name so graph symbols stay unambiguous.
            for raw_node in raw_graph.nodes():
                new_node = InternalTorchIRNode(raw_node, parent=self)
                if new_node.name == new_node.kind:
                    new_node.name = _find_new_name(new_node.name, node_names)
                self.nodes.append(new_node)
                node_names.add(new_node.name)

            # Add params, materialized as numpy arrays on the CPU.
            for name, param in params_dict.items():
                value = param.detach().cpu().numpy()
                self.params[name] = value

            # Add inputs
            # The first element of the raw_graph.inputs() is the 'self' of the module, which is not used.
            graph_inputs = list(raw_graph.inputs())[1:]
            # Only as many graph inputs as the user provided example values for
            # are kept; trailing graph inputs are dropped.
            for index, _input in enumerate(islice(graph_inputs, len(input_values))):
                name = _input.debugName()
                value = input_values[index]
                self.inputs[name] = value

            # Add outputs, cutting if @cut_at_symbols is set
            output_names = cut_at_symbols
            if output_names is None:
                output_names = [x.debugName() for x in raw_graph.outputs()]
            for output in output_names:
                self.outputs.append(output)
        else:
            # Directly constructed graph (e.g. for unit tests): trust the
            # caller-supplied structures as-is.
            self.nodes = nodes
            self.params = params
            self.inputs = inputs
            self.outputs = outputs

    def __str__(self):
        """Render the graph in a torch-IR-like textual form."""
        graph_str = "graph(\n"
        graph_str += self._format_inputs(self.inputs, unpack=True)
        graph_str += self._format_inputs(self.params)
        graph_str += "):\n"
        graph_str += "\n".join([str(x) for x in self.nodes]) + "\n"
        graph_str += "return ({})".format(", ".join(_ssa_name_list(self.outputs)))
        return graph_str

    def _format_inputs(self, inputs, unpack=False):
        """Format a name->value mapping as one 'name : Tensor(...)' line each.

        If @unpack is True, the value's shape is read from value.shape.shape
        (i.e. a wrapped shape object) rather than value.shape directly.
        """
        def tensor_str(x):
            return "Tensor{}".format(
                tuple(list(x.shape.shape if unpack else x.shape) + [str(x.dtype)])
            )
        inp_str = ""
        for k, v in inputs.items():
            if isinstance(v, (tuple, list)):
                # Tuple/list inputs print each element's tensor description.
                shape_str = "({})".format(", ".join([tensor_str(x) for x in v]))
            else:
                shape_str = tensor_str(v)
            inp_str += " {} : {},\n".format(_make_ssa_name(k), shape_str)
        return inp_str

    def __repr__(self):
        return str(self)

    def replace_name(self, old_name, new_name):
        """Replaces all instances of @old_name with @new_name in @self."""
        # Replace graph inputs/outputs
        _replace_in_list(self.inputs, old_name, new_name)
        _replace_in_list(self.outputs, old_name, new_name)

        # Then propagate the rename through every node in the graph.
        for node in self.nodes:
            node.replace_name(old_name, new_name)
| bsd-3-clause | e9e04c8d7696fb21f52b66a891b527fc | 37.7625 | 105 | 0.573363 | 3.971822 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/defs/iOS16/constexpr_ops.py | 1 | 14241 | # Copyright (c) 2022, Apple Inc. All rights reserved.
import numpy as np
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.input_type import (InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.operation import Operation
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.ops.defs.iOS16 import _IOS16_TARGET
@register_op(opset_version=_IOS16_TARGET)
class constexpr_affine_dequantize(Operation):
    """
    A compile-time operation that returns a constant output value upon dequantizing its constant inputs.

    This operation is used to represent constant 8-bit quantized data with affine/linear quantization.
    The quantized data is stored in the parameter ``quantized_data``.
    The other parameters -- ``scale``, ``zero_point``, and ``axis`` -- describe how
    unquantized values can be extracted from it, using the equation for affine/linear quantization:
    ::
        unquantized_data = scale * (quantized_data - zero_point)

    Although all of the parameters of this op are constants, this op is not constant folded
    to a single const op at the time of model serialization. The unquantized output will
    be decompressed later, based on the implementation detail (either at model load time or runtime).

    Parameters
    ----------
    quantized_data: const tensor<SrcT, [1..]> (Required)

    zero_point: const tensor<SrcT, [0..1]> (Required)
        * ``zero_point`` can be either a scalar or a vector.
        * ``zero_point`` follows similar broadcasting rules and size constraints as ``scale``.

    scale: const tensor<DstT, [0..1]> (Required)
        * ``scale`` can be either a scalar or a vector. If ``scale`` is a vector,
          for implementation it is broadcast to the following shape:
            * The rank of ``scale`` becomes the same as the rank of ``quantized_data``.
            * The constraint: ``size(scale-vector) == quantized_data.shape[axis]``.
            * For ``i == axis``, ``scale.shape[i] == quantized_data.shape[i]``.
            * For ``i != axis``, ``scale.shape == 1``.
          For example, assume ``quantized_data.shape = (2, 3, 4, 5)`` and ``axis = 1``.
          If ``scale`` is a vector, then ``scale.size`` needs to be equal to
          ``quantized_data.shape[axis] i.e = 3``, which would be broadcast to ``(1, 3, 1, 1)``.

    axis: const tensor<int32, []> (Required)

    Returns
    -------
    const tensor<DstT, [1..]>

    Attributes
    ----------
    SrcT: uint8, int8
    DstT: fp16, fp32
    """

    input_spec = InputSpec(
        quantized_data=TensorInputType(const=True, type_domain="SrcT"),
        zero_point=TensorInputType(const=True, type_domain="ZeroPointT"),
        scale=TensorInputType(const=True, type_domain="DstT"),
        axis=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "DstT": (types.fp16, types.fp32),
        "SrcT": (types.uint8, types.int8),
        # zero_point has its own domain, but type_inference enforces that it
        # matches quantized_data's dtype.
        "ZeroPointT": (types.uint8, types.int8),
    }

    def type_inference(self):
        # Validate the shape/dtype constraints documented in the class
        # docstring, then return a tensor typed like `scale` with
        # `quantized_data`'s shape.
        def assert_is_scalar_or_vector(param, name):
            if param.rank not in (0, 1):
                raise ValueError(
                    "Parameter {} needs to be either a scalar or vector".format(name)
                )

        def assert_vector_size_same_as_axial_dimension(param, axis_dim_size, name):
            if param.rank == 1 and param.shape[0] != axis_dim_size:
                raise ValueError(
                    "Parameter {}, if vector, needs to have same size as the dimension size along the parameter quantized_data".format(
                        name
                    )
                )

        if self.zero_point.dtype != self.quantized_data.dtype:
            raise ValueError(
                "Parameters quantized_data and zero_point needs to be of the same dtype"
            )

        rank = self.quantized_data.rank
        if self.axis.val < -rank or self.axis.val >= rank:
            raise ValueError(
                "Parameter axis needs to be in the range -quantized_data.rank <= axis < quantized_data.rank"
            )

        assert_is_scalar_or_vector(self.scale, "scale")
        assert_is_scalar_or_vector(self.zero_point, "zero_point")

        assert_vector_size_same_as_axial_dimension(
            self.scale, self.quantized_data.shape[self.axis.val], "scale"
        )
        assert_vector_size_same_as_axial_dimension(
            self.zero_point, self.quantized_data.shape[self.axis.val], "zero_point"
        )

        dtype = self.scale.dtype
        shape = self.quantized_data.shape
        return types.tensor(dtype, shape)

    def value_inference(self):
        return self.decompress(
            self.quantized_data.val,
            self.zero_point.val,
            self.scale.val,
            self.axis.val
        )

    @staticmethod
    def decompress(quantized_data, zero_point, scale, axis):
        # Normalize a negative axis to its positive equivalent.
        axis = axis if axis >= 0 else axis + len(quantized_data.shape)

        def rank_promoted_to_same_as_quantized_data(param):
            # Reshape a scalar/vector param so it broadcasts against
            # quantized_data: scalars become all-ones shape; vectors keep
            # their length on `axis` and get size-1 dims elsewhere.
            if len(param.shape) == 0:
                return np.reshape(param, np.ones(len(quantized_data.shape), np.int32))
            else:
                axes = [i for i in range(len(quantized_data.shape)) if i != axis]
                return np.expand_dims(param, axis=tuple(axes))

        sc = rank_promoted_to_same_as_quantized_data(scale)
        zp = rank_promoted_to_same_as_quantized_data(zero_point)
        # Compute in fp32 to avoid int8/uint8 overflow, then cast back to the
        # scale's dtype (fp16 or fp32).
        val = sc * (quantized_data.astype(np.float32) - zp.astype(np.float32))
        return val.astype(scale.dtype)
@register_op(opset_version=_IOS16_TARGET)
class constexpr_cast(Operation):
    """
    A compile-time operation that returns a constant output value upon casting its constant input.
    ::
        Expression: output = constexpr_cast(source_val, output_dtype="fp32")

    Parameters
    ----------
    source_val: const tensor<SrcT, [...]> (Required)

    output_dtype: const tensor<string, []> (Required)

    Returns
    -------
    const tensor<DstT, [...]>

    Attributes
    ----------
    SrcT: fp16
    DstT: fp32
    """

    input_spec = InputSpec(
        source_val=TensorInputType(const=True, type_domain=types.fp16),
        output_dtype=TensorInputType(const=True, type_domain=types.str),
    )

    def type_inference(self):
        """Output keeps the source shape; only fp16 -> fp32 is supported."""
        target_dtype = types.string_to_builtin(self.output_dtype.val)
        if target_dtype != types.fp32:
            raise NotImplementedError("Only output_dtype = fp32 is supported")
        return types.tensor(target_dtype, self.source_val.shape)

    def value_inference(self):
        """Materialize the cast at compile time."""
        return np.float32(self.source_val.val)
@register_op(opset_version=_IOS16_TARGET)
class constexpr_lut_to_dense(Operation):
    """
    A compile-time operation that returns a constant output value upon decompressing
    a look-up table (LUT) to a dense tensor.

    This operation is used to store constant weights in a LUT format (also known as
    `palettized` weights). A LUT is a mapping from index to values.
    Weights are quantized and stored as indices (or keys) into the LUT.
    Before computation, these keys are mapped to corresponding values in the LUT.

    Parameters
    ----------
    indices: const tensor<uint8, [M]> (Required)

    lut: const tensor<T, [NUM_PALETTES]> (Required)

    shape: const tensor<uint32, [K]> (Required)

    Notes
    -----
    * Any data is packed and read in a row-major order.
    * ``NUM_PALETTES`` can be one of ``{2, 4, 16, 64 or 256}``.
    * ``n_bits = log2(NUM_PALETTES)`` can thus be one of ``{1, 2, 4, 6, 8}``.
    * Indices are packed in bytes of size ``M``, where ``M = ceil(n_bits * product(shape) / 8)``.

    The bit fields are packed one byte at a time, starting with the least significant bit (LSB) and
    moving upward to the most significant bit (MSB). It follows, naturally, that if an index is split
    across two bytes, the LSBs of that index is filled over the MSBs of current byte, and the remaining
    bits of the same index are filled in the LSBs of the next byte.

    For example:
    ::
        if n_bits = 2, shape = (5,) => M = 2 bytes

                    MSB             LSB
                     |               |
        indices =  | 01   10   11   00 | xx   xx   xx   11 |  <== packed elements
                   | i3 | i2 | i1 | i0 | -- | -- | -- | i4 |  <== tagged element ids
                   |      byte 0       |      byte 1       |  <== tagged bytes

    Returns
    -------
    const tensor<T, [...]>

    Attributes
    ----------
    T: uint8, int8, fp16, fp32
    """

    input_spec = InputSpec(
        indices=TensorInputType(const=True, type_domain=types.uint8),
        lut=TensorInputType(const=True, type_domain="T"),
        shape=TensorInputType(const=True, type_domain=types.uint32),
    )

    type_domains = {
        "T": (types.int8, types.uint8, types.fp16, types.fp32)
    }

    def type_inference(self):
        # Validate the packing constraints documented above; the output takes
        # the LUT's dtype and the requested dense shape.
        def assert_is_vector(param, name):
            if param.rank != 1:
                raise ValueError("Parameter {} needs to have rank == 1".format(name))

        assert_is_vector(self.indices, "indices")
        assert_is_vector(self.lut, "lut")

        if self.lut.shape[0] not in (2, 4, 16, 64, 256):
            raise ValueError(
                "Parameter lut should be a vector of size from one of {2, 4, 16, 64, 256}"
            )

        nbits = int(np.log2(self.lut.shape[0]))
        output_size = np.prod(self.shape.val)
        if self.indices.shape[0] != np.ceil(nbits * (output_size / 8.0)):
            raise AssertionError(
                "Constraint violated, M = ceil(n_bits * product(shape) / 8) where M = indices.size"
            )

        dtype = self.lut.dtype
        shape = self.shape.val
        return types.tensor(dtype, shape)

    def value_inference(self):
        return self.decompress(
            self.lut.val,
            self.indices.val,
            self.shape.val,
        )

    @staticmethod
    def decompress(lut, indices, shape):
        """Unpack the little-endian bit stream in `indices` into per-element
        LUT keys and gather the dense tensor of LUT values."""
        bitarray = np.unpackbits(indices, bitorder="little")
        nbits = np.log2(lut.size).astype(np.int32)

        # Pad the bit stream up to the next multiple of nbits so it can be
        # reshaped into one row per index. The pad length is the complement
        # (nbits - size % nbits), not (size % nbits): for nbits == 6 and a
        # 32-bit stream, padding by 32 % 6 == 2 gives 34 bits, which is still
        # not divisible by 6 and previously tripped the assertion below.
        pad_size = (nbits - bitarray.size % nbits) % nbits
        if pad_size != 0:
            bitarray = np.concatenate(
                [bitarray, np.zeros(pad_size, dtype=bitarray.dtype)]
            )
        assert bitarray.size % nbits == 0

        # Keep only the first `size` indices; trailing pad rows are dropped.
        size = np.prod(shape)
        bitarray = bitarray.reshape(-1, nbits)[:size, :]

        indices = np.packbits(bitarray, bitorder="little", axis=-1).reshape(-1)
        flatten_val = lut[indices]
        return flatten_val.reshape(shape)
@register_op(opset_version=_IOS16_TARGET)
class constexpr_sparse_to_dense(Operation):
    """
    A compile-time operation that returns a constant output value upon de-sparsification of its constant inputs.

    This operation represents unstructured sparsity and uses bit mask binary representation.
    If a bit is set, then the corresponding element in the output tensor is non-zero and the
    value is read from the ``nonzero_data`` attribute. Likewise, if the bit is not set,
    then the corresponding element in the output tensor is zero.

    Parameters
    ----------
    nonzero_data: const tensor<T, [D]> (Required)

    mask: const tensor<uint8, [M]> (Required)

    shape: const tensor<uint32, [K]> (Required)

    Notes
    -----
    * Any data is packed and read in a row-major order.
    * ``mask`` contains ``M`` bytes, where ``M = ceil( product(shape) / 8)``. That is, each bit
      field corresponds to one element in the output tensor.
    * ``D ==`` the total number of set bits in ``mask``.

    The bit fields are packed one byte at a time, starting with the least significant bit and
    moving up to the most significant bit.

    For example:
    ::
        shape = (5,) => M = 1 bytes

               MSB          LSB
                |            |
        mask = |x  x  x  0  1  1  0  0 |   <== packed elements
               |--|--|--|i4|i3|i2|i1|i0|   <== tagged element ids
               |      byte 0           |   <== tagged bytes

    Returns
    -------
    const tensor<T, [...]>

    Attributes
    ----------
    T: uint8, int8, fp16, fp32
    """

    input_spec = InputSpec(
        nonzero_data=TensorInputType(const=True, type_domain="T"),
        mask=TensorInputType(const=True, type_domain=types.uint8),
        shape=TensorInputType(const=True, type_domain=types.uint32),
    )

    type_domains = {
        "T": (types.int8, types.uint8, types.fp16, types.fp32)
    }

    def type_inference(self):
        # Validate the mask/data consistency rules from the docstring; the
        # output takes nonzero_data's dtype and the requested dense shape.
        def assert_is_vector(param, name):
            if param.rank != 1:
                raise ValueError("Parameter {} needs to have rank == 1".format(name))

        assert_is_vector(self.nonzero_data, "nonzero_data")
        assert_is_vector(self.mask, "mask")

        # Popcount over the mask bytes must equal the number of stored values.
        if sum(bin(x).count("1") for x in self.mask.val) != self.nonzero_data.shape[0]:
            raise AssertionError(
                "Number of set bits in mask needs to be equal to number of elements in parameter nonzero_data"
            )

        output_size = np.prod(self.shape.val)
        if self.mask.shape[0] != np.ceil(output_size / 8.0):
            raise AssertionError(
                "Constraint Violated: M = ceil( product(shape) / 8) where M = mask.size"
            )

        # Any bits beyond product(shape) in the final mask byte are padding
        # and must be zero.
        bitarray = np.unpackbits(self.mask.val, bitorder="little")
        if any(bitarray[i] != 0 for i in range(output_size, len(bitarray))):
            raise AssertionError("Padded bits in mask should be unset or equals to zero")

        dtype = self.nonzero_data.dtype
        shape = self.shape.val
        return types.tensor(dtype, shape)

    def value_inference(self):
        return self.decompress(self.nonzero_data.val, self.mask.val, self.shape.val)

    @staticmethod
    def decompress(nonzero_data, mask, shape):
        # Start from an all-zeros flat buffer and scatter nonzero_data into
        # the positions whose mask bit (little-endian per byte) is set.
        flattend_val = np.zeros(shape, dtype=nonzero_data.dtype).flatten()
        flattend_val[
            np.where(np.unpackbits(mask, bitorder="little") != 0)
        ] = nonzero_data
        return flattend_val.reshape(shape)
| bsd-3-clause | 8b517b61ffe5052ac9bfb3441f935fa6 | 36.182768 | 135 | 0.599888 | 3.736814 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/types/get_type_info.py | 1 | 2123 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from .type_spec import FunctionType, Type
from .type_void import void
def get_python_method_type(py_function):
    """Build a FunctionType descriptor for a python function or bound method.

    Reads the optional ``type_annotations`` dict (argument name -> type) and
    ``return_type`` attribute attached to the function (e.g. by decorators),
    resolves each through ``get_type_info``, and pairs them with the
    function's positional argument names. ``self`` is the only argument
    allowed to go unannotated; a missing return annotation defaults to void.

    Raises:
        TypeError: if the object exposes no inspectable code object, or if
            any argument other than ``self`` lacks a type annotation.
    """
    function_inputs = []
    function_output = get_type_info(void)
    annotations = {}
    if hasattr(py_function, "type_annotations"):
        annotations = {
            k: get_type_info(v) for k, v in py_function.type_annotations.items()
        }
    if hasattr(py_function, "return_type"):
        function_output = get_type_info(py_function.return_type)
    try:
        # Bound methods wrap the underlying function in __func__; plain
        # functions expose __code__ directly.
        func = py_function.__func__ if hasattr(py_function, "__func__") else py_function
        argcount = func.__code__.co_argcount
        argnames = func.__code__.co_varnames[:argcount]
    except AttributeError:
        # Only a missing __code__ (non-introspectable callable) should map to
        # this TypeError; a bare `except` here used to swallow every
        # exception, including KeyboardInterrupt and unrelated bugs.
        raise TypeError(
            "Unable to derive type information from method %s. "
            "You might have a misspecified type. Ex: use compyler.int and not int"
            % py_function
        )
    for arg in argnames:
        if arg in annotations:
            function_inputs.append(annotations[arg])
        elif arg != "self":
            raise TypeError(
                "Function "
                + str(py_function)
                + " insufficient annotations. "
                + arg
                + " needs a type"
            )
    typeinfo = FunctionType(function_inputs, function_output, py_function)
    return typeinfo
def get_type_info(t):
    """Resolve *t* to its internal type descriptor.

    Resolution order matters: objects exposing ``__type_info__`` win, then
    raw python classes, then any other callable (treated as a function type).
    Anything else is rejected.
    """
    if hasattr(t, "__type_info__"):
        info = t.__type_info__()
        assert info.python_class is not None
        return info
    if isinstance(t, type):
        return Type(t.__name__, python_class=t)
    if hasattr(t, "__call__"):
        return get_python_method_type(t)
    raise TypeError("Unsupported type %s" % t)
| bsd-3-clause | d02b882658d3fd385c5b4535c0cdb2da | 34.983051 | 83 | 0.594442 | 3.953445 | false | false | false | false |
apple/coremltools | deps/protobuf/python/google/protobuf/internal/message_factory_test.py | 35 | 7973 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.message_factory."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
class MessageFactoryTest(unittest.TestCase):
    """Tests for dynamically generating message classes via message_factory."""

    def setUp(self):
        # Serialized FileDescriptorProtos for the two factory test .proto
        # files; re-parsed here so tests can feed them to fresh pools.
        self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
            factory_test1_pb2.DESCRIPTOR.serialized_pb)
        self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
            factory_test2_pb2.DESCRIPTOR.serialized_pb)

    def _ExerciseDynamicClass(self, cls):
        """Populate an instance of the dynamic class `cls`, then round-trip it
        through serialization against the statically generated class and
        assert byte- and value-equality."""
        msg = cls()
        msg.mandatory = 42
        msg.nested_factory_2_enum = 0
        msg.nested_factory_2_message.value = 'nested message value'
        msg.factory_1_message.factory_1_enum = 1
        msg.factory_1_message.nested_factory_1_enum = 0
        msg.factory_1_message.nested_factory_1_message.value = (
            'nested message value')
        msg.factory_1_message.scalar_value = 22
        msg.factory_1_message.list_value.extend([u'one', u'two', u'three'])
        msg.factory_1_message.list_value.append(u'four')
        msg.factory_1_enum = 1
        msg.nested_factory_1_enum = 0
        msg.nested_factory_1_message.value = 'nested message value'
        msg.circular_message.mandatory = 1
        msg.circular_message.circular_message.mandatory = 2
        msg.circular_message.scalar_value = 'one deep'
        msg.scalar_value = 'zero deep'
        msg.list_value.extend([u'four', u'three', u'two'])
        msg.list_value.append(u'one')
        msg.grouped.add()
        msg.grouped[0].part_1 = 'hello'
        msg.grouped[0].part_2 = 'world'
        msg.grouped.add(part_1='testing', part_2='123')
        msg.loop.loop.mandatory = 2
        msg.loop.loop.loop.loop.mandatory = 4
        serialized = msg.SerializeToString()
        # Round-trip through the statically generated class: dynamic and
        # static classes must produce identical wire bytes and equal messages.
        converted = factory_test2_pb2.Factory2Message.FromString(serialized)
        reserialized = converted.SerializeToString()
        self.assertEqual(serialized, reserialized)
        result = cls.FromString(reserialized)
        self.assertEqual(msg, result)

    def testGetPrototype(self):
        """GetPrototype returns a dynamic class distinct from the generated
        one, and caches it (same descriptor -> same class object)."""
        db = descriptor_database.DescriptorDatabase()
        pool = descriptor_pool.DescriptorPool(db)
        db.Add(self.factory_test1_fd)
        db.Add(self.factory_test2_fd)
        factory = message_factory.MessageFactory()
        cls = factory.GetPrototype(pool.FindMessageTypeByName(
            'google.protobuf.python.internal.Factory2Message'))
        self.assertFalse(cls is factory_test2_pb2.Factory2Message)
        self._ExerciseDynamicClass(cls)
        cls2 = factory.GetPrototype(pool.FindMessageTypeByName(
            'google.protobuf.python.internal.Factory2Message'))
        self.assertTrue(cls is cls2)

    def testGetMessages(self):
        """GetMessages yields the expected classes and registers extensions."""
        # performed twice because multiple calls with the same input must be allowed
        for _ in range(2):
            messages = message_factory.GetMessages([self.factory_test1_fd,
                                                    self.factory_test2_fd])
            self.assertTrue(
                set(['google.protobuf.python.internal.Factory2Message',
                     'google.protobuf.python.internal.Factory1Message'],
                    ).issubset(set(messages.keys())))
            self._ExerciseDynamicClass(
                messages['google.protobuf.python.internal.Factory2Message'])
            factory_msg1 = messages['google.protobuf.python.internal.Factory1Message']
            self.assertTrue(set(
                ['google.protobuf.python.internal.Factory2Message.one_more_field',
                 'google.protobuf.python.internal.another_field'],).issubset(set(
                     ext.full_name
                     for ext in factory_msg1.DESCRIPTOR.file.pool.FindAllExtensions(
                         factory_msg1.DESCRIPTOR))))
            msg1 = messages['google.protobuf.python.internal.Factory1Message']()
            ext1 = msg1.Extensions._FindExtensionByName(
                'google.protobuf.python.internal.Factory2Message.one_more_field')
            ext2 = msg1.Extensions._FindExtensionByName(
                'google.protobuf.python.internal.another_field')
            msg1.Extensions[ext1] = 'test1'
            msg1.Extensions[ext2] = 'test2'
            self.assertEqual('test1', msg1.Extensions[ext1])
            self.assertEqual('test2', msg1.Extensions[ext2])

    def testDuplicateExtensionNumber(self):
        """Two extensions of the same message claiming the same field number
        must be rejected when building the message classes."""
        pool = descriptor_pool.DescriptorPool()
        factory = message_factory.MessageFactory(pool=pool)

        # Add Container message.
        f = descriptor_pb2.FileDescriptorProto()
        f.name = 'google/protobuf/internal/container.proto'
        f.package = 'google.protobuf.python.internal'
        msg = f.message_type.add()
        msg.name = 'Container'
        rng = msg.extension_range.add()
        rng.start = 1
        rng.end = 10
        pool.Add(f)
        msgs = factory.GetMessages([f.name])
        self.assertIn('google.protobuf.python.internal.Container', msgs)

        # Extend container.
        f = descriptor_pb2.FileDescriptorProto()
        f.name = 'google/protobuf/internal/extension.proto'
        f.package = 'google.protobuf.python.internal'
        f.dependency.append('google/protobuf/internal/container.proto')
        msg = f.message_type.add()
        msg.name = 'Extension'
        ext = msg.extension.add()
        ext.name = 'extension_field'
        ext.number = 2
        ext.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
        ext.type_name = 'Extension'
        ext.extendee = 'Container'
        pool.Add(f)
        msgs = factory.GetMessages([f.name])
        self.assertIn('google.protobuf.python.internal.Extension', msgs)

        # Add Duplicate extending the same field number.
        f = descriptor_pb2.FileDescriptorProto()
        f.name = 'google/protobuf/internal/duplicate.proto'
        f.package = 'google.protobuf.python.internal'
        f.dependency.append('google/protobuf/internal/container.proto')
        msg = f.message_type.add()
        msg.name = 'Duplicate'
        ext = msg.extension.add()
        ext.name = 'extension_field'
        ext.number = 2
        ext.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
        ext.type_name = 'Duplicate'
        ext.extendee = 'Container'
        pool.Add(f)

        # The exact exception type differs between the pure-python and C++
        # implementations, so accept either.
        with self.assertRaises(Exception) as cm:
            factory.GetMessages([f.name])

        self.assertIsInstance(cm.exception, (AssertionError, ValueError))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause | fc4021ca939232b38b990024755f86ee | 40.963158 | 80 | 0.716794 | 3.83871 | false | true | false | false |
apple/coremltools | deps/protobuf/examples/add_person.py | 39 | 1657 | #! /usr/bin/env python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# This function fills in a Person message based on user input.
def PromptForAddress(person):
    """Fill in a Person message from interactive user input.

    NOTE: this example is written for Python 2 (raw_input, print statement).
    Prompts for id, name, an optional email, and any number of phone numbers;
    an unrecognized phone type leaves the field at its proto default.
    """
    person.id = int(raw_input("Enter person ID number: "))
    person.name = raw_input("Enter name: ")

    email = raw_input("Enter email address (blank for none): ")
    if email != "":
        person.email = email

    # Keep prompting until the user enters an empty phone number.
    while True:
        number = raw_input("Enter a phone number (or leave blank to finish): ")
        if number == "":
            break

        phone_number = person.phones.add()
        phone_number.number = number

        type = raw_input("Is this a mobile, home, or work phone? ")
        if type == "mobile":
            phone_number.type = addressbook_pb2.Person.MOBILE
        elif type == "home":
            phone_number.type = addressbook_pb2.Person.HOME
        elif type == "work":
            phone_number.type = addressbook_pb2.Person.WORK
        else:
            print "Unknown phone type; leaving as default value."
# Main procedure: Reads the entire address book from a file,
# adds one person based on user input, then writes it back out to the same
# file.
# Main procedure (Python 2): reads the entire address book from the file
# named on the command line, adds one person from user input, then writes
# the updated book back to the same file.
if len(sys.argv) != 2:
    print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
    sys.exit(-1)

address_book = addressbook_pb2.AddressBook()

# Read the existing address book.
try:
    with open(sys.argv[1], "rb") as f:
        address_book.ParseFromString(f.read())
except IOError:
    # A missing file is not an error: start with an empty address book.
    print sys.argv[1] + ": File not found. Creating a new file."

# Add an address.
PromptForAddress(address_book.people.add())

# Write the new address book back to disk.
with open(sys.argv[1], "wb") as f:
    f.write(address_book.SerializeToString())
| bsd-3-clause | 9f9ffc22a0228503a85c2b3f7c9e5266 | 28.589286 | 76 | 0.678938 | 3.430642 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/tests/test_elementwise_unary.py | 1 | 24212 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import numpy as np
import pytest
import scipy
from coremltools.converters.mil import testing_reqs
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import (Function, get_new_symbol,
types)
from coremltools.converters.mil.mil.types.symbolic import \
is_compatible_symbolic_vector
from coremltools.converters.mil.testing_utils import ssa_fn
from .testing_utils import run_compare_builder
# Backends and compute units shared by every test in this module; both come
# from the centralized testing requirements so CI can restrict them.
backends = testing_reqs.backends
compute_units = testing_reqs.compute_units
class TestElementwiseUnary:
# All ops in this test share the same backends
@pytest.mark.parametrize(
"compute_unit, backend, mode",
itertools.product(
compute_units,
backends,
[
"abs",
"acos",
"asin",
"atan",
"atanh",
"cast",
"clip",
"cos",
"cosh",
"erf",
"exp",
"exp2",
"floor",
"inverse",
"log",
"round",
"rsqrt",
"sign",
"sin",
"sinh",
"sqrt",
"square",
"tan",
"tanh",
"threshold",
],
),
)
def test_builder_to_backend_smoke(self, compute_unit, backend, mode):
if mode == "abs":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
build = lambda x: mb.abs(x=x)
elif mode == "acos":
val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
expected_outputs = np.array(
[
[3.14159265, 2.0943951, 1.57079633],
[1.15927948, 1.04719755, 0.64350111],
],
dtype=np.float32,
)
build = lambda x: mb.acos(x=x)
elif mode == "asin":
val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
expected_outputs = np.array(
[[-1.57079633, -0.52359878, 0.0], [0.41151685, 0.52359878, 0.92729522]],
dtype=np.float32,
)
build = lambda x: mb.asin(x=x)
elif mode == "atan":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[
[-0.78539816, 1.10714872, -1.24904577],
[1.32581766, -1.37340077, 1.40564765],
],
dtype=np.float32,
)
build = lambda x: mb.atan(x=x)
elif mode == "atanh":
val = np.array([[-0.8, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
expected_outputs = np.array(
[[-1.09861229, -0.54930614, 0.0], [0.42364893, 0.54930614, 1.09861229]],
dtype=np.float32,
)
build = lambda x: mb.atanh(x=x)
elif mode == "cast":
val = np.array([[-1.2, 2, -3.6], [4.5, -5, 6.7]], dtype=np.float32)
expected_outputs = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.int32)
build = lambda x: mb.cast(x=x, dtype="int32")
elif mode == "ceil":
val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32)
build = lambda x: mb.ceil(x=x)
elif mode == "clip":
val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
expected_outputs = np.array([[0, 2, 0], [4.5, 0, 5]], dtype=np.float32)
build = lambda x: mb.clip(x=x, alpha=0.0, beta=5.0)
elif mode == "cos":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[
[0.54030231, -0.41614684, -0.9899925],
[-0.65364362, 0.28366219, 0.96017029],
],
dtype=np.float32,
)
build = lambda x: mb.cos(x=x)
elif mode == "cosh":
val = np.array([[-1, -2, -3], [1, 2, 3]], dtype=np.float32)
expected_outputs = np.array(
[
[1.54308063, 3.76219569, 10.067662],
[1.54308063, 3.76219569, 10.067662],
],
dtype=np.float32,
)
build = lambda x: mb.cosh(x=x)
elif mode == "erf":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[
[-0.8427007929497148, 0.9953222650189527, -0.9999779095030014],
[0.9999999845827421, -0.9999999999984626, 1.0],
],
dtype=np.float32,
)
build = lambda x: mb.erf(x=x)
elif mode == "exp":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[
[0.36787944, 7.3890561, 0.04978707],
[54.5981500, 0.0067379, 403.428793],
],
dtype=np.float32,
)
build = lambda x: mb.exp(x=x)
elif mode == "exp2":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[0.5, 4.0, 0.125], [16, 0.03125, 64]], dtype=np.float32
)
build = lambda x: mb.exp2(x=x)
elif mode == "floor":
val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
expected_outputs = np.array([[-2, 2, -4], [4, -5, 6]], dtype=np.float32)
build = lambda x: mb.floor(x=x)
elif mode == "inverse":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[-1.0, 0.5, -0.33333334], [0.25, -0.2, 0.16666667]], dtype=np.float32
)
build = lambda x: mb.inverse(x=x)
elif mode == "log":
val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[0.0, 0.69314718, 1.09861229], [1.38629436, 1.60943791, 1.79175947]],
dtype=np.float32,
)
build = lambda x: mb.log(x=x)
elif mode == "round":
val = np.array([[-1.2, 2, -3.4], [4.6, -5, 6.7]], dtype=np.float32)
expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32)
build = lambda x: mb.round(x=x)
elif mode == "rsqrt":
val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[1.0, 0.70710678, 0.57735027], [0.5, 0.4472136, 0.40824829]],
dtype=np.float32,
)
build = lambda x: mb.rsqrt(x=x)
elif mode == "sign":
val = np.array([[-1, 2, 0], [0, -5, 6]], dtype=np.float32)
expected_outputs = np.array([[-1, 1, 0], [0, -1, 1]], dtype=np.float32)
build = lambda x: mb.sign(x=x)
elif mode == "sin":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[
[-0.84147098, 0.90929743, -0.14112001],
[-0.7568025, 0.95892427, -0.2794155],
],
dtype=np.float32,
)
build = lambda x: mb.sin(x=x)
elif mode == "sinh":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[-1.1752, 3.62686, -10.017874], [27.289917, -74.20321, 201.71315]],
dtype=np.float32,
)
build = lambda x: mb.sinh(x=x)
elif mode == "sqrt":
val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[1.0, 1.41421356, 1.73205081], [2.0, 2.23606798, 2.44948974]],
dtype=np.float32,
)
build = lambda x: mb.sqrt(x=x)
elif mode == "square":
val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[1.0, 4.0, 9.0], [16.0, 25.0, 36.]],
dtype=np.float32,
)
build = lambda x: mb.square(x=x)
elif mode == "tan":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[[-1.5574, -2.185, 0.1425], [1.15782, 3.3805, -0.291]], dtype=np.float32
)
build = lambda x: mb.tan(x=x)
elif mode == "tanh":
val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
expected_outputs = np.array(
[
[-0.7615942, 0.9640276, -0.9950548],
[0.9993293, -0.9999092, 0.9999877],
],
dtype=np.float32,
)
build = lambda x: mb.tanh(x=x)
elif mode == "threshold":
val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
expected_outputs = np.array(
[[1.0, 2, 1.0], [4.5, 1.0, 6.7]], dtype=np.float32
)
build = lambda x: mb.threshold(x=x, alpha=1.0)
input_placeholders = {"x": mb.placeholder(shape=val.shape)}
input_values = {"x": val}
expected_output_types = (
(2, 3, types.int32) if mode == "cast" else (2, 3, types.fp32)
)
run_compare_builder(
build,
input_placeholders,
input_values,
expected_output_types,
expected_outputs,
compute_unit=compute_unit,
backend=backend,
)
@ssa_fn
def test_builder_abs_eval(self):
    """abs eval: elementwise absolute value is constant-folded at build time."""
    data = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    result = mb.abs(x=data)
    reference = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    np.testing.assert_allclose(reference, result.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_acos_eval(self):
    """acos eval: inputs restricted to [-1, 1], the domain of arccos."""
    val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
    v = mb.acos(x=val)
    # Reference values are np.arccos of each element.
    expected_outputs = np.array(
        [[3.14159265, 2.0943951, 1.57079633], [1.15927948, 1.04719755, 0.64350111]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_asin_eval(self):
    """asin eval: inputs restricted to [-1, 1], the domain of arcsin."""
    val = np.array([[-1, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
    v = mb.asin(x=val)
    # Reference values are np.arcsin of each element.
    expected_outputs = np.array(
        [[-1.57079633, -0.52359878, 0.0], [0.41151685, 0.52359878, 0.92729522]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_atan_eval(self):
    """atan eval: elementwise arctangent is constant-folded at build time."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.atan(x=val)
    # Reference values are np.arctan of each element.
    expected_outputs = np.array(
        [
            [-0.78539816, 1.10714872, -1.24904577],
            [1.32581766, -1.37340077, 1.40564765],
        ],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_atanh_eval(self):
    """atanh eval: inputs restricted to (-1, 1), the open domain of arctanh."""
    val = np.array([[-0.8, -0.5, 0], [0.4, 0.5, 0.8]], dtype=np.float32)
    v = mb.atanh(x=val)
    # Reference values are np.arctanh of each element.
    expected_outputs = np.array(
        [[-1.09861229, -0.54930614, 0.0], [0.42364893, 0.54930614, 1.09861229]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_cast_eval(self):
    """cast eval: float32 -> int32 truncates the fractional part toward zero."""
    source = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
    truncated = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.int32)
    casted = mb.cast(x=source, dtype="int32")
    np.testing.assert_allclose(truncated, casted.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_ceil_eval(self):
    """ceil eval: round each element up to the nearest integer."""
    val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
    v = mb.ceil(x=val)
    expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32)
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_clip_eval(self):
    """clip eval: clamp each element into [alpha, beta] = [0, 5]."""
    val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
    v = mb.clip(x=val, alpha=0.0, beta=5.0)
    expected_outputs = np.array([[0, 2, 0], [4.5, 0, 5]], dtype=np.float32)
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_cos_eval(self):
    """cos eval: elementwise cosine is constant-folded at build time."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.cos(x=val)
    # Reference values are np.cos of each element.
    expected_outputs = np.array(
        [
            [0.54030231, -0.41614684, -0.9899925],
            [-0.65364362, 0.28366219, 0.96017029],
        ],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_cosh_eval(self):
    """cosh eval: cosh is even, so the two rows (negated inputs) match."""
    val = np.array([[-1, -2, -3], [1, 2, 3]], dtype=np.float32)
    v = mb.cosh(x=val)
    expected_outputs = np.array(
        [[1.54308063, 3.76219569, 10.067662], [1.54308063, 3.76219569, 10.067662]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_erf_eval(self):
    """erf eval: compare the folded value against scipy.special.erf."""
    data = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    result = mb.erf(x=data)
    np.testing.assert_allclose(scipy.special.erf(data), result.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_exp_eval(self):
    """exp eval: elementwise e**x is constant-folded at build time."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.exp(x=val)
    expected_outputs = np.array(
        [[0.36787944, 7.3890561, 0.04978707], [54.5981500, 0.0067379, 403.428793]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_exp2_eval(self):
    """exp2 eval: elementwise 2**x is constant-folded at build time."""
    data = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    result = mb.exp2(x=data)
    reference = np.array([[0.5, 4.0, 0.125], [16, 0.03125, 64]], dtype=np.float32)
    np.testing.assert_allclose(reference, result.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_floor_eval(self):
    """floor eval: round each element down to the nearest integer."""
    val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
    v = mb.floor(x=val)
    expected_outputs = np.array([[-2, 2, -4], [4, -5, 6]], dtype=np.float32)
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_inverse_eval(self):
    """inverse eval: elementwise reciprocal 1/x (inputs avoid zero)."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.inverse(x=val)
    expected_outputs = np.array(
        [[-1.0, 0.5, -0.33333334], [0.25, -0.2, 0.16666667]], dtype=np.float32
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_log_eval(self):
    """log eval: natural log of strictly positive inputs."""
    val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    v = mb.log(x=val)
    expected_outputs = np.array(
        [[0.0, 0.69314718, 1.09861229], [1.38629436, 1.60943791, 1.79175947]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_round_eval(self):
    """round eval: round each element to the nearest integer (4.6 -> 5, 6.7 -> 7)."""
    val = np.array([[-1.2, 2, -3.4], [4.6, -5, 6.7]], dtype=np.float32)
    v = mb.round(x=val)
    expected_outputs = np.array([[-1, 2, -3], [5, -5, 7]], dtype=np.float32)
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_rsqrt_eval(self):
    """rsqrt eval: elementwise 1/sqrt(x) of strictly positive inputs."""
    val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    v = mb.rsqrt(x=val)
    expected_outputs = np.array(
        [[1.0, 0.70710678, 0.57735027], [0.5, 0.4472136, 0.40824829]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_sign_eval(self):
    """sign eval: -1/0/+1 per element; input includes zeros to cover that branch."""
    data = np.array([[-1, 2, 0], [0, -5, 6]], dtype=np.float32)
    result = mb.sign(x=data)
    reference = np.array([[-1, 1, 0], [0, -1, 1]], dtype=np.float32)
    np.testing.assert_allclose(reference, result.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_sin_eval(self):
    """sin eval: elementwise sine is constant-folded at build time."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.sin(x=val)
    # Reference values are np.sin of each element.
    expected_outputs = np.array(
        [
            [-0.84147098, 0.90929743, -0.14112001],
            [-0.7568025, 0.95892427, -0.2794155],
        ],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_sinh_eval(self):
    """sinh eval: elementwise hyperbolic sine."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.sinh(x=val)
    expected_outputs = np.array(
        [[-1.1752, 3.62686, -10.017874], [27.289917, -74.20321, 201.71315]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_sqrt_eval(self):
    """sqrt eval: elementwise square root of non-negative inputs."""
    val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    v = mb.sqrt(x=val)
    expected_outputs = np.array(
        [[1.0, 1.41421356, 1.73205081], [2.0, 2.23606798, 2.44948974]],
        dtype=np.float32,
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_tan_eval(self):
    """tan eval: elementwise tangent is constant-folded at build time."""
    val = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    v = mb.tan(x=val)
    expected_outputs = np.array(
        [[-1.5574, -2.185, 0.1425], [1.15782, 3.3805, -0.291]], dtype=np.float32
    )
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_tanh_eval(self):
    """tanh eval: compare the folded value against np.tanh."""
    data = np.array([[-1, 2, -3], [4, -5, 6]], dtype=np.float32)
    result = mb.tanh(x=data)
    np.testing.assert_allclose(np.tanh(data), result.val, atol=1e-04, rtol=1e-05)
@ssa_fn
def test_builder_threshold_eval(self):
    """threshold eval: elementwise max(x, alpha) with alpha=1.0."""
    val = np.array([[-1.2, 2, -3.4], [4.5, -5, 6.7]], dtype=np.float32)
    v = mb.threshold(x=val, alpha=1.0)
    expected_outputs = np.array([[1.0, 2, 1.0], [4.5, 1.0, 6.7]], dtype=np.float32)
    np.testing.assert_allclose(expected_outputs, v.val, atol=1e-04, rtol=1e-05)
def test_cast_with_symbolic_value(self):
    """Casting a symbolic shape keeps the symbolic value through cast."""
    input_shape = [get_new_symbol(), 1]
    input_placeholders = {
        "x": mb.placeholder(shape=input_shape),
    }

    def build(x):
        # shape of x is symbolic in dim 0; cast should preserve that symbol.
        shape = mb.shape(x=x)
        return mb.cast(x=shape, dtype="int32")

    with Function(input_placeholders) as ssa_func:
        output_vars = build(**ssa_func.inputs)
        # sym_val should still carry a symbol in position 0 and the literal 1.
        assert is_compatible_symbolic_vector(output_vars.sym_val, [get_new_symbol(), 1])
@pytest.mark.parametrize(
    "compute_unit, backend, epsilon",
    itertools.product(
        compute_units,
        backends,
        [1e-3, 1e-1, 1.0],
    ),
)
def test_builder_to_backend_stress_inverse(
    self, compute_unit, backend, epsilon
):
    """inverse with epsilon: compares against the numpy reference 1/(x + eps)."""
    x = np.array([[1, -2, 3], [4, -5, 6]], dtype=np.float32)
    numpy_pred = 1 / (x + epsilon)

    input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)}
    input_value_dict = {"x": x}

    def build(x):
        return mb.inverse(x=x, epsilon=epsilon)

    expected_output_type = x.shape + (types.fp32,)
    run_compare_builder(
        build,
        input_placeholder_dict,
        input_value_dict,
        expected_output_type,
        numpy_pred,
        compute_unit=compute_unit,
        backend=backend,
    )
@pytest.mark.parametrize(
    "compute_unit, backend, epsilon",
    itertools.product(
        compute_units,
        backends,
        [1e-3, 1e-1, 1.0],
    ),
)
def test_builder_to_backend_stress_rsqrt(
    self, compute_unit, backend, epsilon
):
    """rsqrt with epsilon: compares against 1/sqrt(x + eps)."""
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    numpy_pred = 1.0 / np.sqrt(x + epsilon)

    input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)}
    input_value_dict = {"x": x}

    def build(x):
        return mb.rsqrt(x=x, epsilon=epsilon)

    expected_output_type = x.shape + (types.fp32,)
    run_compare_builder(
        build,
        input_placeholder_dict,
        input_value_dict,
        expected_output_type,
        numpy_pred,
        compute_unit=compute_unit,
        backend=backend,
    )
@pytest.mark.parametrize(
    "compute_unit, backend, epsilon",
    itertools.product(
        compute_units,
        backends,
        [1e-3, 1e-1, 1.0],
    ),
)
def test_builder_to_backend_stress_log(
    self, compute_unit, backend, epsilon
):
    """log with epsilon: compares against np.log(x + eps)."""
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    numpy_pred = np.log(x + epsilon)

    input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)}
    input_value_dict = {"x": x}

    def build(x):
        return mb.log(x=x, epsilon=epsilon)

    expected_output_type = x.shape + (types.fp32,)
    run_compare_builder(
        build,
        input_placeholder_dict,
        input_value_dict,
        expected_output_type,
        numpy_pred,
        compute_unit=compute_unit,
        backend=backend,
    )
@pytest.mark.parametrize(
    "compute_unit, backend, src_dst",
    itertools.product(
        compute_units,
        backends,
        [("fp16", "fp32"), ("fp32", "fp16")],
    ),
)
def test_builder_to_backend_stress_cast(
    self, compute_unit, backend, src_dst
):
    """Round-trip cast pipeline: cast -> square -> cast -> sqrt -> cast fp32."""
    src_dtype, dst_dtype = src_dst
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    # NOTE(review): the reference is x at fp16 precision regardless of cast
    # direction — presumably valid because x >= 0 and fp16 appears somewhere
    # in both pipelines; confirm this is intentional.
    numpy_pred = x.astype(dtype=np.float16)

    input_placeholder_dict = {"x": mb.placeholder(shape=x.shape)}
    input_value_dict = {"x": x}

    def build(x):
        x = mb.cast(x=x, dtype=src_dtype)
        x = mb.square(x=x)
        x = mb.cast(x=x, dtype=dst_dtype)
        x = mb.sqrt(x=x)
        x = mb.cast(x=x, dtype="fp32")
        return x

    expected_output_type = x.shape + (types.fp32,)
    run_compare_builder(
        build,
        input_placeholder_dict,
        input_value_dict,
        expected_output_type,
        numpy_pred,
        compute_unit=compute_unit,
        backend=backend,
    )
def test_erf_value_inference(self):
    """erf value inference on a const input matches scipy.special.erf."""
    INPUT_SIZE=(2, 3, 4)
    rs = np.random.RandomState(1234)
    x = rs.random(INPUT_SIZE)

    @mb.program(input_specs=[])
    def prog():
        return mb.erf(x=x)

    # The program should contain exactly the const for x and the erf op.
    ops = list(prog.functions.values())[0].operations
    assert len(ops) == 2
    assert ops[0].op_type == 'const'
    erf_op = ops[1]
    assert erf_op.op_type == 'erf'
    np.testing.assert_allclose(erf_op.value_inference(), scipy.special.erf(x), atol=1e-04, rtol=1e-05)
| bsd-3-clause | f8cd5be6ae47ae4c6ecc16ab1898e7b5 | 34.19186 | 106 | 0.49397 | 3.031047 | false | true | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/tests/test_normalization.py | 1 | 25998 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import itertools
import platform
import numpy as np
import pytest
import coremltools as ct
from coremltools._deps import (_HAS_TF_2, _HAS_TORCH, MSG_TF2_NOT_FOUND,
MSG_TORCH_NOT_FOUND)
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Function, get_new_symbol, types
from coremltools.converters.mil.testing_reqs import backends, compute_units
from coremltools.converters.mil.testing_utils import random_gen
from .testing_utils import UNK_SYM, run_compare_builder
if _HAS_TORCH:
import torch
if _HAS_TF_2:
import tensorflow as tf
class TestNormalizationBatchNorm:
    """Tests for the MIL ``batch_norm`` op."""

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke(self, compute_unit, backend):
        """batch_norm on a (1, 3, 2, 2) input, with and without gamma/beta."""
        x_val = np.array(
            [
                [
                    [[-16.0, 13.0], [11.0, -16.0]],
                    [[13.0, -15.0], [13.0, 9.0]],
                    [[-9.0, -4.0], [-6.0, 3.0]],
                ]
            ],
            dtype=np.float32,
        )
        # Per-channel statistics and affine parameters (3 channels).
        mean_val = np.array([9.0, 6.0, 3.0], dtype=np.float32)
        variance_val = np.array([6.0, 1.0, 7.0], dtype=np.float32)
        gamma_val = np.array([1.0, 1.0, 1.0], dtype=np.float32)
        beta_val = np.array([1.0, 3.0, 0.0], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return [
                mb.batch_norm(x=x, mean=mean_val, variance=variance_val),
                mb.batch_norm(
                    x=x,
                    mean=mean_val,
                    variance=variance_val,
                    gamma=gamma_val,
                    beta=beta_val,
                    epsilon=1e-4,
                ),
            ]

        expected_output_types = [
            (1, 3, 2, 2, types.fp32),
            (1, 3, 2, 2, types.fp32),
        ]
        expected_outputs = [
            np.array(
                [
                    [
                        [[-10.206199, 1.6329918], [0.8164959, -10.206199]],
                        [[6.999965, -20.999895], [6.999965, 2.9999852]],
                        [[-4.53557, -2.6457493], [-3.4016776, 0.0]],
                    ]
                ],
                dtype=np.float32,
            ),
            np.array(
                [
                    [
                        [[-9.206122, 2.6329796], [1.8164899, -9.206122]],
                        [[9.99965, -17.998951], [9.99965, 5.9998503]],
                        [[-4.535541, -2.6457324], [-3.4016557, 0.0]],
                    ]
                ],
                dtype=np.float32,
            ),
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )
class TestNormalizationInstanceNorm:
    """Tests for the MIL ``instance_norm`` op."""

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke(self, compute_unit, backend):
        """instance_norm on a (2, 3, 2, 2) input without affine parameters."""
        x_val = np.array(
            [
                [
                    [[-16.0, 13.0], [11.0, 16.0]],
                    [[13.0, 15.0], [13.0, 9.0]],
                    [[-9.0, 4.0], [-6.0, 3.0]],
                ],
                [
                    [[-5.0, 1.0], [12.0, 3.0]],
                    [[0.0, 9.0], [2.0, -8.0]],
                    [[2.0, 5.0], [10.0, 0.0]],
                ]
            ],
            dtype=np.float32,
        )
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return mb.instance_norm(x=x, epsilon=1e-2)

        expected_output_types = [(2, 3, 2, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [
                        [[-1.71524656, 0.54576027], [0.38982874, 0.77965748]],
                        [[0.22917463, 1.14587319], [0.22917463, -1.60422242]],
                        [[-1.2470212, 1.06887531], [-0.71258354, 0.89072943]],
                    ],
                    [
                        [[-1.27070526, -0.28693344], [1.51664821, 0.04099049]],
                        [[-0.12380638, 1.36187018], [0.20634397, -1.44440776]],
                        [[-0.59714057, 0.19904686], [1.5260259, -1.12793219]],
                    ]
                ],
                dtype=np.float32,
            )
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke_with_gamma_and_beta(self, compute_unit, backend):
        """instance_norm with per-channel gamma/beta scale and shift."""
        x_val = np.array(
            [
                [
                    [[-16.0, 13.0], [11.0, 16.0]],
                    [[13.0, 15.0], [13.0, 9.0]],
                    [[-9.0, 4.0], [-6.0, 3.0]],
                ],
                [
                    [[-5.0, 1.0], [12.0, 3.0]],
                    [[0.0, 9.0], [2.0, -8.0]],
                    [[2.0, 5.0], [10.0, 0.0]],
                ]
            ],
            dtype=np.float32,
        )
        gamma_val = np.array([-9.0, 3.2, 1.3], dtype=np.float32)
        beta_val = np.array([-0.8, 3.4, 1.2], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return mb.instance_norm(x=x, gamma=gamma_val, beta=beta_val, epsilon=1e-2)

        expected_output_types = [(2, 3, 2, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [
                        [[14.63721807, -5.71184211], [-4.30845865, -7.8169173]],
                        [[4.1333588, 7.06679399], [4.1333588, -1.73351158]],
                        [[-0.42112757, 2.58953791], [0.27364139, 2.35794826]],
                    ],
                    [
                        [[10.6363473, 1.782401], [-14.44983388, -1.16891443]],
                        [[3.00381959, 7.75798456], [4.06030069, -1.22210484]],
                        [[0.42371726, 1.45876091], [3.18383368, -0.26631185]],
                    ]
                ],
                dtype=np.float32,
            )
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
    @pytest.mark.parametrize(
        "rank, compute_unit, backend, epsilon",
        itertools.product(
            [3, 4],
            compute_units,
            backends,
            [1e-3, 1e-5, 1e-10]
        ),
    )
    def test_builder_to_backend_stress(self, rank, compute_unit, backend, epsilon):
        """Random-input stress: compare against torch.nn.InstanceNorm1d/2d."""
        shape = np.random.randint(low=2, high=6, size=rank)
        x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return mb.instance_norm(x=x, epsilon=epsilon)

        # Rank 4 -> InstanceNorm2d, rank 3 -> InstanceNorm1d; channel dim is axis 1.
        layer = torch.nn.InstanceNorm2d if rank == 4 else torch.nn.InstanceNorm1d
        torch_op = layer(num_features=shape[1], eps=epsilon)
        expected_outputs = [torch_op(torch.as_tensor(x_val)).numpy()]
        expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
            atol=1e-3,
            rtol=1e-4,
            also_compare_shapes=True
        )
class TestNormalizationL2Norm:
    """Tests for the MIL ``l2_norm`` op."""

    @staticmethod
    def _compute_l2_norm(val, eps):
        """Numpy reference: normalize over the last 3 dims, per leading batch."""
        shape = val.shape
        rank = len(shape)
        batch_dims = rank - 3
        if batch_dims == 0:
            # No batch dims: a single global sum of squares.
            square_sum = np.sum(val**2)
            output = val/np.power(square_sum + eps, 0.5)
        else:
            # Flatten batch dims and normalize each row independently.
            batch_dim_prod = np.prod(shape[:batch_dims])
            reshape_val = np.reshape(val, (batch_dim_prod, -1))
            square_sum = np.sum(reshape_val * reshape_val, axis=1, keepdims=True) + eps
            output = reshape_val/np.power(square_sum, 0.5)
            output = np.reshape(output, shape)
        return output

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke(self, compute_unit, backend):
        """l2_norm on a small rank-3 input with hand-checked expected values."""
        x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return [mb.l2_norm(x=x, epsilon=1e-10)]

        expected_output_types = [(1, 3, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [
                        [0.08304548, -0.58131838],
                        [0.41522741, -0.4982729],
                        [-0.24913645, -0.41522741],
                    ]
                ],
                dtype=np.float32,
            )
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize(
        "compute_unit, backend, rank, epsilon",
        itertools.product(
            compute_units,
            backends,
            [3, 4, 5],
            [1e-4, 5.7]
        )
    )
    def test_builder_to_backend_stress(self, compute_unit, backend, rank, epsilon):
        """Random-input stress against the numpy reference for ranks 3-5."""
        shape = np.random.randint(low=2, high=6, size=rank)
        x_val = random_gen(shape=shape, rand_min=-1.0, rand_max=1.0)
        input_placeholders = {"x": mb.placeholder(shape=shape)}
        input_values = {"x": x_val}

        def build(x):
            return [mb.l2_norm(x=x, epsilon=epsilon)]

        output = TestNormalizationL2Norm._compute_l2_norm(x_val, epsilon)
        expected_output_types = [list(output.shape) + [types.fp32]]
        expected_outputs = [
            output
        ]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize("rank, epsilon",
        itertools.product(
            [3, 4, 5],
            [1e-4, 11.2],
        ),
    )
    def test_builder_eval_stress(self, rank, epsilon):
        """Build-time value inference matches the numpy reference."""
        shape = np.random.randint(low=2, high=6, size=rank)
        x_val = random_gen(shape=shape, rand_min=-1, rand_max=1)
        with Function({}):
            res = mb.l2_norm(x=x_val, epsilon=epsilon)
            ref = TestNormalizationL2Norm._compute_l2_norm(x_val, epsilon)
            np.testing.assert_allclose(ref, res.val, atol=1e-6, rtol=1e-5)
class TestNormalizationLayerNorm:
    """Tests for the MIL ``layer_norm`` op."""

    @staticmethod
    def _keras_layer_norm(x, axes, epsilon):
        """Reference implementation via tf.keras.layers.LayerNormalization."""
        layer = tf.keras.layers.LayerNormalization(axis=axes, epsilon=epsilon)
        data = tf.constant(x, dtype=tf.float32)
        output = layer(data)
        return output.numpy()

    @staticmethod
    def _np_layer_norm(x, axes, gamma=None, beta=None, epsilon=1e-5):
        """Numpy reference: normalize over ``axes``, then scale/shift."""
        rank = len(x.shape)
        # Canonicalize negative axes.
        axes = [axis + rank if axis < 0 else axis for axis in axes]
        normalized_shape = [x.shape[i] if i in axes else 1 for i in range(rank)]
        gamma = np.ones(shape=normalized_shape) if gamma is None else np.reshape(gamma, normalized_shape)
        beta = np.zeros(shape=normalized_shape) if beta is None else np.reshape(beta, normalized_shape)
        num = x - np.mean(x, axis=tuple(axes), keepdims=True)
        dem = np.sqrt(
            np.sum(np.square(num), axis=tuple(axes), keepdims=True)
            / np.prod(normalized_shape)
            + epsilon
        )
        return num / dem * gamma + beta

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke(self, compute_unit, backend):
        """layer_norm over last axis, last two axes, and with gamma/beta."""
        x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}
        gamma_val = np.array([1.0, 1.0], dtype=np.float32)
        beta_val = np.array([1.0, 0.0], dtype=np.float32)

        def build(x):
            return [
                # V2->V1 lowering (op_mappings.py): if branch
                mb.layer_norm(x=x, axes=[2], epsilon=1e-4),
                # V2->V1 lowering (op_mappings.py): else branch
                mb.layer_norm(x=x, axes=[-2, -1], epsilon=1e-4),
                # V2->V1 lowering (op_mappings.py): if branch with scale
                mb.layer_norm(x=x, axes=[2], epsilon=1e-4, gamma=gamma_val, beta=beta_val),
            ]

        expected_output_types = [(1, 3, 2, types.fp32), (1, 3, 2, types.fp32), (1, 3, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [
                        [0.9999969, -0.9999969 ],
                        [0.99999833, -0.99999833],
                        [0.99995005, -0.99995005],
                    ]
                ],
                dtype=np.float32,
            ),
            np.array(
                [
                    [
                        [0.82687193, -1.06312108],
                        [1.77186835, -0.82687193],
                        [-0.11812456, -0.59062278],
                    ]
                ],
                dtype=np.float32,
            ),
            np.array(
                [
                    [
                        [1.9999969, -0.9999969 ],
                        [1.99999833, -0.99999833],
                        [1.99995005, -0.99995005],
                    ]
                ],
                dtype=np.float32,
            ),
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke_rank_2(self, compute_unit, backend):
        """layer_norm on a rank-2 input, with and without gamma/beta."""
        x_val = np.array([[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]], dtype=np.float32)
        gamma_val = np.array([1.0, 1.0], dtype=np.float32)
        beta_val = np.array([1.0, 0.0], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return [
                # V2->V1 lowering (op_mappings.py): if branch
                mb.layer_norm(x=x, axes=[1], epsilon=1e-4),
                mb.layer_norm(x=x, axes=[1], epsilon=1e-4, gamma=gamma_val, beta=beta_val)
            ]

        expected_output_types = [(3, 2, types.fp32), (3, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [ 0.9999969, -0.9999969 ],
                    [ 0.99999833, -0.99999833],
                    [ 0.99995005, -0.99995005],
                ],
                dtype=np.float32,
            ),
            np.array(
                [
                    [ 1.9999969, -0.9999969 ],
                    [ 1.99999833, -0.99999833],
                    [ 1.99995005, -0.99995005],
                ],
                dtype=np.float32,
            ),
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke_with_dynamic_shape(self, compute_unit, backend):
        """layer_norm with symbolic leading dims; output dims stay symbolic."""
        x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32)
        shape = (get_new_symbol(), get_new_symbol(), 2)
        input_placeholders = {"x": mb.placeholder(shape=shape)}
        input_values = {"x": x_val}

        def build(x):
            return [
                mb.layer_norm(x=x, axes=[2], epsilon=1e-4),
            ]

        expected_output_types = [(UNK_SYM, UNK_SYM, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [
                        [ 0.9999969, -0.9999969 ],
                        [ 0.99999833, -0.99999833],
                        [ 0.99995005, -0.99995005],
                    ]
                ],
                dtype=np.float32,
            ),
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize(
        "compute_unit, backend, rank_and_axes, epsilon, provides_gamma_beta",
        itertools.product(
            compute_units,
            backends,
            [
                [3, [0, 2]],
                [3, [-2]],
                [4, [0, 1, 3]],
                [5, [0, 4]],
                [5, [-5, -4, -3, -2, -1]]
            ],
            [0.0001, 0.01],
            [True, False]
        ),
    )
    def test_builder_to_backend_stress_numpy(self, compute_unit, backend, rank_and_axes, epsilon, provides_gamma_beta):
        """Random-input stress against the numpy reference, optional gamma/beta."""
        if backend == ("mlprogram", "fp16") and compute_unit != ct.ComputeUnit.CPU_ONLY:
            pytest.xfail("rdar://80662357 ([GPU failures] LayerNorm FP16 tests failing on GPU with numerical errors)")

        if backend[0] == "neuralnetwork" and compute_unit != ct.ComputeUnit.CPU_ONLY and platform.machine() == "arm64":
            pytest.xfail("rdar://98015195 ([M1 native tests] Some MIL unittests are failing on M1 native)")

        rank, axes = rank_and_axes
        shape = np.random.randint(low=2, high=6, size=rank)
        x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        gamma, beta = None, None

        if provides_gamma_beta:
            # gamma/beta must match the shape of the normalized axes.
            positive_axes = [axis+rank if axis < 0 else axis for axis in axes]
            normalized_shape = [shape[i] for i in range(rank) if i in positive_axes]
            gamma = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100)
            beta = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100)

        def build(x):
            return [
                mb.layer_norm(x=x, axes=axes, epsilon=epsilon, gamma=gamma, beta=beta)
            ]

        output = TestNormalizationLayerNorm._np_layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma, beta=beta)
        expected_output_types = [tuple(output.shape) + (types.fp32,)]
        expected_outputs = [
            output
        ]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
            atol=1e-3,
            rtol=1e-4,
        )

    @pytest.mark.skipif(not _HAS_TF_2, reason=MSG_TF2_NOT_FOUND)
    @pytest.mark.parametrize(
        "compute_unit, backend, rank_and_axes, epsilon",
        itertools.product(
            compute_units,
            backends,
            [
                [3, [0, 2]],
                [3, [-2]],
                [4, [0, 1, 3]],
                [5, [0, 4]],
                [5, [-5, -4, -3, -2, -1]]
            ],
            [0.0001, 0.01]
        ),
    )
    def test_builder_to_backend_stress_keras(self, compute_unit, backend, rank_and_axes, epsilon):
        """Random-input stress against tf.keras LayerNormalization."""
        rank, axes = rank_and_axes
        shape = np.random.randint(low=2, high=6, size=rank)
        x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return [
                mb.layer_norm(x=x, axes=axes, epsilon=epsilon)
            ]

        output = TestNormalizationLayerNorm._keras_layer_norm(x=x_val, axes=axes, epsilon=epsilon)
        expected_output_types = [tuple(output.shape) + (types.fp32,)]
        expected_outputs = [
            output
        ]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.parametrize("rank_and_axes, epsilon",
        itertools.product(
            [
                [3, [0, 2]],
                [3, [-2, -1]],
                [4, [0, 1, 2, 3]],
                [5, [0, 2, -1]],
                [5, [-5, -4, -3, -2, -1]]
            ],
            [0.0001, 0.01],
        ),
    )
    def test_builder_eval_stress(self, rank_and_axes, epsilon):
        """Build-time value inference matches the numpy reference."""
        rank, axes = rank_and_axes
        shape = np.random.randint(low=2, high=6, size=rank)
        x_val = random_gen(shape=shape, rand_min=-100.0, rand_max=100.0)
        positive_axes = [axis+rank if axis < 0 else axis for axis in axes]
        normalized_shape = [shape[i] for i in range(rank) if i in positive_axes]
        gamma_val = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100)
        beta_val = random_gen(shape=normalized_shape, rand_min=-100, rand_max=100)
        with Function({}):
            res = mb.layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma_val, beta=beta_val)
            ref = TestNormalizationLayerNorm._np_layer_norm(x=x_val, axes=axes, epsilon=epsilon, gamma=gamma_val, beta=beta_val)
            np.testing.assert_allclose(ref, res.val, atol=1e-04, rtol=1e-05)
class TestNormalizationLocalResponseNorm:
    """Tests for the MIL ``local_response_norm`` op."""

    @pytest.mark.parametrize(
        "compute_unit, backend", itertools.product(compute_units, backends,)
    )
    def test_builder_to_backend_smoke(self, compute_unit, backend):
        """LRN with default and explicit alpha/beta/k parameters."""
        x_val = np.array([[[1.0, -7.0], [5.0, -6.0], [-3.0, -5.0]]], dtype=np.float32)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return [
                mb.local_response_norm(x=x, size=2),
                mb.local_response_norm(x=x, size=3, alpha=0.0001, beta=0.75, k=1.0),
            ]

        expected_output_types = [(1, 3, 2, types.fp32), (1, 3, 2, types.fp32)]
        expected_outputs = [
            np.array(
                [
                    [
                        [0.99996257, -6.98716545],
                        [4.99531746, -5.99191284],
                        [-2.99898791, -4.99531746],
                    ]
                ],
                dtype=np.float32,
            ),
            np.array(
                [
                    [
                        [0.99997497, -6.99143696],
                        [4.99687672, -5.99460602],
                        [-2.99932504, -4.99687672],
                    ]
                ],
                dtype=np.float32,
            ),
        ]
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
        )

    @pytest.mark.skipif(not _HAS_TORCH, reason=MSG_TORCH_NOT_FOUND)
    @pytest.mark.parametrize(
        "compute_unit, backend, rank, size, alpha, beta, k",
        itertools.product(
            compute_units,
            backends,
            [rank for rank in range(3, 6)],
            [2, 3, 5],
            [0.0001, 0.01],
            [0.75, 1.0],
            [1.0, 2.0],
        ),
    )
    def test_builder_to_backend_stress(
        self, compute_unit, backend, rank, size, alpha, beta, k
    ):
        """Random-input stress: compare against torch.nn.LocalResponseNorm."""
        shape = np.random.randint(low=2, high=5, size=rank)
        x_val = random_gen(shape=shape)
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}
        input_values = {"x": x_val}

        def build(x):
            return mb.local_response_norm(x=x, size=size, alpha=alpha, beta=beta, k=k)

        torch_lrn = torch.nn.LocalResponseNorm(size=size, alpha=alpha, beta=beta, k=k)
        expected_outputs = [torch_lrn(torch.as_tensor(x_val)).numpy()]
        expected_output_types = [o.shape[:] + (types.fp32,) for o in expected_outputs]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            compute_unit=compute_unit,
            backend=backend,
            atol=1e-2,
            rtol=1e-3,
        )
| bsd-3-clause | 41d480d8e17e234a1b9ec396832bd720 | 33.617843 | 128 | 0.482729 | 3.534738 | false | true | false | false |
apple/coremltools | coremltools/converters/mil/mil/passes/conv_batchnorm_fusion.py | 1 | 6606 | # Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.helper import block_context_manager
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
def _try_to_transform(conv_op, bn_op, block):
    """Fold ``bn_op`` into ``conv_op`` by rewriting weight/bias in place.

    Returns True when the fusion happened (conv and batch_norm replaced by a
    single new conv carrying the adjusted parameters), False otherwise.
    """
    # get parameters from batch_norm layer
    gamma = bn_op.gamma.val
    beta = bn_op.beta.val
    mean = bn_op.mean.val
    variance = bn_op.variance.val
    epsilon = bn_op.epsilon.val

    # get weight, bias and groups from conv layer
    # Fusion requires a compile-time constant weight.
    if conv_op.weight.val is None:
        return False
    conv_weight = conv_op.weight.val
    conv_bias = conv_op.bias
    groups = conv_op.groups.val

    # get type of the conv layer
    is_deconv = conv_op.op_type == 'conv_transpose'

    # The deconv weight transpose axes is determined by the dimension of convolution.
    # Conv1d should be [1, 0, 2], Conv2d should be [1, 0, 2, 3], Conv3d should be [1, 0, 2, 3, 4]
    if not 3 <= len(conv_weight.shape) <= 5:
        raise AssertionError(f"Only supports Conv1/2/3d, which means weight's dimension should between 3 and 5, "
                             f"but got weight with {len(conv_weight.shape)} dimensions. ")
    deconv_weight_transpose_axes = [1, 0] + [axis for axis in range(2, len(conv_weight.shape))]

    # D_in denotes the spatial dimensions for conv kernel weight
    # for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
    # for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
    if is_deconv:
        Cout = conv_weight.shape[1] * groups
        Cin = conv_weight.shape[0]
    else:
        Cout = conv_weight.shape[0]
        Cin = conv_weight.shape[1] * groups

    # get the type of the conv weight
    conv_weight_type = conv_weight.dtype

    # create bias for conv if not exist
    if conv_bias is None:
        conv_bias = np.zeros(Cout)
    else:
        conv_bias = conv_bias.val
    conv_bias = conv_bias.astype(conv_weight_type)

    # get the original shape of weight and bias
    origin_weight_shape = conv_weight.shape
    origin_bias_shape = conv_bias.shape

    # update the weight for conv layer
    new_conv_weight = []
    new_conv_bias = []

    if is_deconv:
        # Bring the weight into [Cout, Cin // groups, *D_in] layout so the
        # per-output-channel loop below works for both conv and conv_transpose.
        conv_weight = np.transpose(conv_weight, deconv_weight_transpose_axes)
        conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))

    for i in range(Cout):
        # get batch norm parameters for each channel
        _gamma = gamma[i]
        _beta = beta[i]
        _mean = mean[i]
        _variance = variance[i]
        _scale = _gamma / np.sqrt(_variance + epsilon)

        # get conv weight and bias for each channel
        _conv_weight = conv_weight[i]
        _conv_bias = conv_bias[i]

        # update the conv weight and bias
        # y = scale * (conv(x) + b - mean) + beta  ==  conv'(x) + b'
        _conv_weight = _conv_weight * _scale
        _conv_bias = _scale * (_conv_bias - _mean) + _beta
        new_conv_weight.append(_conv_weight)
        new_conv_bias.append(_conv_bias)

    new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
    new_conv_bias = np.array(new_conv_bias).astype(conv_weight_type)

    if is_deconv:
        # Restore the original conv_transpose weight layout.
        new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
        new_conv_weight = np.transpose(new_conv_weight, deconv_weight_transpose_axes)

    # make sure the updated weight and bias have the same shape as the original ones
    if new_conv_weight.shape != origin_weight_shape:
        raise AssertionError("conv weight should have the same shape before and after the fuse_conv_batchnorm pass. ")
    if new_conv_bias.shape != origin_bias_shape:
        raise AssertionError("conv bias should have the same shape before and after the fuse_conv_batchnorm pass. ")

    # create a new conv op with the new bias value, copying rest of the attributes
    # The new op takes over the batch_norm output's name so downstream
    # references stay valid.
    out_name = bn_op.outputs[0].name
    conv_kargs = {"weight": new_conv_weight, "bias": new_conv_bias, "name": out_name, "before_op": conv_op}

    for k, v in conv_op.inputs.items():
        if k in ["weight", "bias"]:
            continue
        conv_kargs[k] = v

    if is_deconv:
        x = mb.conv_transpose(**conv_kargs)
    else:
        x = mb.conv(**conv_kargs)

    if bn_op.enclosing_block.try_replace_uses_of_var_after_op(
        anchor_op=bn_op,
        old_var=bn_op.outputs[0],
        new_var=x,
    ):
        bn_op.enclosing_block.remove_ops([conv_op, bn_op])
        return True
    return False
@block_context_manager
def _fuse_conv_batchnorm_block(block):
    """Scan ``block`` for conv/conv_transpose -> batch_norm chains and fuse one.

    Returns ``True`` as soon as a single fusion succeeds (the rewrite
    invalidates the iterator over the block's operations, so the caller must
    re-run until a fixed point), ``False`` when nothing could be fused.
    """

    def _find_fusable_bn(op):
        # Only conv / conv_transpose ops can anchor the pattern.
        if op.op_type not in ("conv", "conv_transpose"):
            return None
        # Abort if the conv output is also a block output: fusing would
        # remove a var the enclosing block still exposes.
        if op.outputs[0] in op.enclosing_block.outputs:
            return None
        # The conv output must feed exactly one op, and it must be batch_norm.
        consumers = op.outputs[0].child_ops
        if len(consumers) != 1:
            return None
        candidate = list(consumers)[0]
        return candidate if candidate.op_type == "batch_norm" else None

    for op in list(block.operations):
        # Recurse into any nested blocks (e.g. cond/while bodies) first.
        for child_block in op.blocks:
            changed = True
            while changed:
                changed = _fuse_conv_batchnorm_block(child_block)
        if len(op.blocks) > 0:
            # An op that owns nested blocks cannot be conv or conv_transpose.
            continue
        bn_op = _find_fusable_bn(op)
        if bn_op is None:
            continue
        if _try_to_transform(op, bn_op, block):
            # Stop immediately: the downstream iterator is now stale.
            return True
    return False
@register_pass(namespace="common")
class fuse_conv_batchnorm(AbstractGraphPass):
    """
    Fold a ``batch_norm`` layer into a preceding ``conv`` or ``conv_transpose``
    by rescaling the convolution weight and bias, removing the batch_norm op.

    Given:
        %2 = conv(%1)
        ...
        %3 = batch_norm(%2)
        ...
    Result:
        %3 = conv(%1)
        ...
    """

    def apply(self, prog):
        # Re-run the block-level fusion on every function until it reaches a
        # fixed point (each successful fusion restarts the traversal).
        for func in prog.functions.values():
            changed = True
            while changed:
                changed = _fuse_conv_batchnorm_block(func)
| bsd-3-clause | e06ae0ad3516aa0452b788a8d6f768c7 | 35.7 | 118 | 0.622313 | 3.495238 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/var.py | 1 | 10712 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import builtin_to_string
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
class Var:
    """
    Var represents the outputs of an Operation. Most Vars are derived from an
    Operation (including const), and all Vars must have `sym_type`.

    Example Usage:

    from coremltools.converters.mil.mil import (
        Builder as mb,
        Function,
        types
    )

    func_inputs = {"a": mb.placeholder(shape=(1,2)),
                   "b": mb.placeholder(shape=(1,2)) }
    with Function(func_inputs) as ssa_func:
        a, b = ssa_func.inputs["a"], ssa_func.inputs["b"]
        res = mb.add(x=a, y=b) # res is Var
        assert types.is_tensor(res.sym_type)
        assert res.rank == 2
        assert res.dtype == types.float # since a, b are by default float

        # value is not available at compile time in this case. If
        # materializable, res.val would be a numpy / primitive value
        assert res.val is None

    Comment: Except InternalVar and Vars created in while_loop and by
    placeholder, all Var should only be constructed by Operation to represent
    outputs.

    Comment: Var hides the details of sym_type vs sym_val vs materialized
    value, which was represented by 2 objects prior to refactoring.


    # Properties:

    name: (str)
        name in MIL proto NamedValueType. Name is assigned by the parent
        Operation.

    sym_type [_sym_type]: (builtin type class)
        All Var must have a (possibly symbolic) type, usually derived from
        type inference of upstream ops or from default values in _Input.

    sym_val [_sym_val]: (builtin type instance)
        Possibly symbolic value.

    val [_sym_val]: (np.ndarray or python primitive scalar)
        Numpy (scalar / tensor) value. `val` is not None iff `sym_val` is
        not None and does not contain symbols. Read-only.

    op [_op]: (Operation)
        The Operation this Var is derived from. May not be None except
        for InternalVar. Read-only.

    op_output_idx: (int)
        Idx of the output from Operation corresponding to _Input. May be
        None.

    child_ops [_child_ops]: list[Operation]
        Ops that take this Var as an input.

    nonreplaceable_vars_upstream: set[Var]
        Set that consists of nonreplaceable vars upstream
    """

    # __slots__ keeps per-instance memory low; Vars are created for every op
    # output in the program.
    __slots__ = [
        "name",
        "_sym_type",
        "_sym_val",
        "_op",
        "op_output_idx",
        "_child_ops",
        "consuming_blocks",
        "_nonreplaceable_vars_upstream",
    ]

    def __init__(self, name, sym_type, sym_val=None, op=None, op_output_idx=None):
        """
        sym_type (builtin type)
        sym_val (builtin value)
        op (Operation)
        op_output_idx (int)
        """
        self.name = name
        self._sym_type = sym_type
        self._sym_val = sym_val
        self._op = op
        self.op_output_idx = op_output_idx
        # An op can appear twice if it consumes a var twice (e.g.,
        # add(%1, %1), while_loop(loop_vars=(%1, %1)).
        self._child_ops = list()
        # A variable may not be consumed by any op (i.e. len(self._child_ops)
        # == 0) but is still used as block output. A var can be output of
        # multiple blocks (e.g., both current block and nested blocks)
        self.consuming_blocks = list()
        # replaceability
        self._nonreplaceable_vars_upstream = set()
        self._set_nonreplaceable_vars_upstream()

    @property
    def nonreplaceable_vars_upstream(self):
        return self._nonreplaceable_vars_upstream

    @nonreplaceable_vars_upstream.setter
    def nonreplaceable_vars_upstream(self, val):
        assert isinstance(val, set)
        self._nonreplaceable_vars_upstream = val

    @staticmethod
    def _is_nonreplaceable_var(var):
        # A var is non-replaceable iff it is produced by a constexpr_* op.
        op = var.op
        if op is None:
            return False
        return op.op_type.startswith("constexpr_")

    def _set_nonreplaceable_vars_upstream(self):
        """
        A utility function to set the value of the "nonreplaceable_vars_upstream" property.
        If the var is an output of the constexpr op, then "nonreplaceable_vars_upstream" is a single element set, containing this var.
        Otherwise, its a union of the "nonreplaceable_vars_upstream" sets of all the input vars of its parent op.
        """
        op = self.op
        if op is None:
            return
        if Var._is_nonreplaceable_var(self):
            self.nonreplaceable_vars_upstream = set([self])
        else:
            flattened_inputs = op.get_flattened_inputs()
            inputs_nonreplaceable_vars_upstream = [p.nonreplaceable_vars_upstream for p in flattened_inputs]
            if len(inputs_nonreplaceable_vars_upstream) > 0:
                self.nonreplaceable_vars_upstream = set.union(*inputs_nonreplaceable_vars_upstream)

    def _reset_nonreplaceable_vars_upstream(self):
        # Clear the tracked set; used when the var's producer is rewired.
        self.nonreplaceable_vars_upstream = set()

    def can_be_replaced_by_var(self, new_var):
        """
        A var can be replaced by a new var only if the new var's nonreplaceable_vars_upstream is the super set of the old one
        """
        return self.nonreplaceable_vars_upstream.issubset(new_var.nonreplaceable_vars_upstream)

    @property
    def sym_type(self):
        return self._sym_type

    @property
    def shape(self):
        # Scalars (non-tensor types) report an empty shape.
        if types.is_tensor(self._sym_type):
            return self._sym_type.get_shape()
        return tuple()

    @property
    def rank(self):
        return len(self.shape)

    @property
    def dtype(self):
        # For scalars the symbolic type itself is the primitive type.
        if types.is_tensor(self._sym_type):
            return self._sym_type.get_primitive()
        return self._sym_type

    @property
    def sym_val(self):
        if self._sym_val is None:
            return None
        return self._sym_val.val

    @property
    def val(self):
        # Materialized value: only available when sym_val exists and is free
        # of symbols.
        if self._sym_val is None or any_symbolic(self._sym_val.val):
            return None
        return self._sym_val.val

    @property
    def op(self):
        return self._op

    @property
    def child_ops(self):
        return self._child_ops

    def add_child_op(self, new_op):
        # Record an op consuming this var (duplicates allowed by design).
        self._child_ops.append(new_op)

    def remove_child_op(self, target_op, no_check=False):
        """Remove `target_op` from this var's consumers.

        Raises ValueError if `target_op` is not a consumer, unless
        `no_check` is True (then it is a no-op).
        """
        if target_op not in self._child_ops:
            if no_check:
                return  # no-op
            msg = "Op {} does not takes Var {} as input"
            raise ValueError(msg.format(target_op.name, self.name))
        self._child_ops.remove(target_op)

    def shape_str(self):
        """Render the shape and dtype, annotated with "*" when a materialized
        value is available and "^" when only a symbolic value is available."""
        annotation = ""
        if self.val is not None:
            annotation = "*"
        elif self.sym_val is not None:
            annotation = "^"
        shape_str = str(self.shape)[:-1]  # trim the ")"
        if self.rank > 1:
            shape_str += ", "
        if types.builtin_to_string(self.dtype) is None:
            shape_str += ")" + annotation
        else:
            shape_str += types.builtin_to_string(self.dtype) + ")" + annotation
        return shape_str

    def type_str(self):
        """Return "(Tensor)", "(List)" or "(Scalar)" for this var's type."""
        is_tensor = types.is_tensor(self.sym_type)
        is_list = types.is_list(self.sym_type)
        if is_tensor:
            type_string = "(Tensor)"
        elif is_list:
            type_string = "(List)"
        else:
            type_string = "(Scalar)"
        return type_string

    def set_name(self, name):
        self.name = name

    def is_tensor_or_scalar_of(self, dtype: str):
        # `dtype` is the string form of a builtin type, e.g. "fp32".
        return (types.is_tensor(self.sym_type) or types.is_scalar(self.sym_type)) and builtin_to_string(self.dtype) == dtype

    def __str__(self):
        return "%" + self.name + ": " + self.shape_str() + self.type_str()
class ListVar(Var):
    """A Var whose symbolic type is a (possibly growable) list of tensors."""

    __slots__ = ["_elem_type", "init_length", "dynamic_length"]

    def __init__(
        self, name, elem_type=None, init_length=None, dynamic_length=True, sym_val=None, **kwargs
    ):
        """
        elem_type (builtin.tensor)

        init_length (int): initial length

        dynamic_length (bool): True to allow list to grow. False uses
        init_length as the fixed size (init_length is runtime length).

        sym_val: value of the list, if available
        """
        list_type = types.list(elem_type, init_length, dynamic_length)
        super().__init__(name=name, sym_type=list_type, sym_val=sym_val, **kwargs)
        self._elem_type = elem_type
        self.init_length = init_length
        self.dynamic_length = dynamic_length

    # Tensor-only accessors are deliberately disabled for lists.
    @property
    def shape(self):
        raise ValueError("shape not applicable to ListVar '{}'.".format(self.name))

    @property
    def rank(self):
        raise ValueError("rank not applicable to ListVar '{}'".format(self.name))

    @property
    def dtype(self):
        raise ValueError("dtype not applicable to ListVar '{}'".format(self.name))

    @property
    def elem_type(self):
        return self._elem_type

    @property
    def elem_shape(self):
        # Unknown element type -> no shape; scalar element type -> empty tuple.
        if self._elem_type == types.unknown:
            return None
        elif types.is_tensor(self._elem_type):
            return self._elem_type.get_shape()
        return ()

    def shape_str(self):
        # "?" marks a list whose length may change at runtime.
        length = str(self.init_length) if not self.dynamic_length else "?"
        if self._elem_type == types.unknown:
            return "List[{}, unknown]".format(length)
        if self._elem_type == types.str:
            return "List[{}, str]".format(length)
        if self._elem_type == types.int64:
            return "List[{}, int]".format(length)
        # Tensor element type: render "(d0, ..., dtype)".
        dims = self._elem_type.get_shape()
        primitive = self._elem_type.get_primitive()
        rendered = str(dims)[:-1]  # trim the ")"
        if len(dims) > 1:
            rendered += ", "
        rendered += types.builtin_to_string(primitive) + ")"
        return "List[{}, {}]".format(length, rendered)
class InternalVar(Var):
    """
    A Var used only inside the builder: its name carries a '__' prefix and it
    never appears in SSA. Its `sym_val` is ALWAYS wrapped as
    ``builtin.unknown``.

    InternalVar can carry diverse auxiliary values, e.g. the enum value
    ``DataType.FLOAT32``.
    """

    def __init__(self, val, name=None):
        # Both the symbolic type and the wrapped value are `types.unknown`;
        # InternalVar never takes part in type inference.
        super().__init__(name=name, sym_type=types.unknown, sym_val=types.unknown(val))
| bsd-3-clause | 3def84b0d3db17dc16d0370cdb12764d | 31.96 | 134 | 0.595314 | 3.814815 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/passes/test_merge_consecutive_paddings.py | 1 | 6426 | # Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import pytest
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.testing_utils import (
apply_pass_and_basic_check, assert_model_is_valid, get_op_types_in_program)
np.random.seed(1984)
class TestMergeConsecutivePaddings:
    """Unit tests for the ``common::merge_consecutive_paddings`` graph pass.

    "success" tests expect two (or three) adjacent pad ops to collapse into
    one; "failure" tests expect the pass to leave both pad ops in place.
    """

    def test_success_reflect(self):
        # Two reflect pads touching disjoint axes merge into a single pad.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode='reflect')
            pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode='reflect')
            return pad2

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad"]

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)},
        )

    @pytest.mark.parametrize("swap_axes", [False, True])
    def test_success_different_rank1(self, swap_axes):
        # Pads of different rank (rank-1 pad vs rank-2 pad) on disjoint axes
        # still merge, regardless of which order they appear in.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            if swap_axes:
                pad1 = mb.pad(x=x1, pad=[1, 1], mode='reflect')
                pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode='reflect')
            else:
                pad1 = mb.pad(x=x1, pad=[1, 1, 0, 0], mode='reflect')
                pad2 = mb.pad(x=pad1, pad=[1, 1], mode='reflect')
            return pad2

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad"]

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)},
        )

    def test_success_constant(self):
        # Constant-mode pads merge when they share the same constant value.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode='constant', constant_val=3.0)
            pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode='constant', constant_val=3.0)
            return pad2

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad"]

        # The fused pad must keep the shared constant value.
        pad_ops = [op for op in prog["main"].operations if op.op_type == "pad"]
        assert pad_ops[0].inputs["constant_val"].val == 3.0

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)},
        )

    def test_success_3_layers(self):
        # Three compatible pads in a row collapse into a single pad.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode='constant', constant_val=3.0)
            pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode='constant', constant_val=3.0)
            pad3 = mb.pad(x=pad2, pad=[1, 1, 0, 0], mode='constant', constant_val=3.0)
            return pad3

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad"]

        pad_ops = [op for op in prog["main"].operations if op.op_type == "pad"]
        assert pad_ops[0].inputs["constant_val"].val == 3.0

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 10, 10)},
        )

    def test_failure_different_mode(self):
        # Mixing reflect and constant modes must block the merge.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode='reflect')
            pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode='constant')
            return pad2

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad", "pad"]

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)},
        )

    def test_failure_different_constants(self):
        # Constant-mode pads with different constant values must not merge.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            pad1 = mb.pad(x=x1, pad=[0, 0, 1, 1], mode='constant', constant_val=1.0)
            pad2 = mb.pad(x=pad1, pad=[1, 1, 0, 0], mode='constant', constant_val=2.0)
            return pad2

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad", "pad"]

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 8, 10)},
        )

    def test_failure_repeat_on_same_axis(self):
        # Two pads that both touch the same (last) axis must not merge.
        @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 6, 8))])
        def prog(x1):
            pad1 = mb.pad(x=x1, pad=[1, 1], mode='reflect')
            pad2 = mb.pad(x=pad1, pad=[1, 1], mode='reflect')
            return pad2

        prev_prog, _, block = apply_pass_and_basic_check(
            prog, "common::merge_consecutive_paddings"
        )
        assert get_op_types_in_program(prev_prog) == ["pad", "pad"]
        assert get_op_types_in_program(prog) == ["pad", "pad"]

        inputs = {"x1": (1, 2, 6, 8)}
        assert_model_is_valid(
            prog,
            inputs,
            expected_output_shapes={block.outputs[0].name: (1, 2, 6, 12)},
        )
| bsd-3-clause | 9d5b8fdb28504bc6caa550b27e2e096f | 35.101124 | 86 | 0.532213 | 3.139228 | false | true | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/defs/iOS15/scatter_gather.py | 1 | 16508 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Operation, types
from coremltools.converters.mil.mil.input_type import (DefaultInputs,
InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.operation import (SYMBOL, VALUE,
precondition)
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.ops.defs._utils import compute_gather
from coremltools.converters.mil.mil.types.symbolic import (
is_compatible_symbolic_vector)
@register_op
class gather(Operation):
    """
    Gather slices from input ``x`` along dimension ``axis`` according to ``indices``,
    similar to `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.

    * If ``indices`` is scalar (0-D):

        .. math::
           output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
        .. math::
           x[p_0, ..., p_{axis-1}, ~~~~~~~~~ indices, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]

      Where ``rank(x)`` is the rank of ``x``. The ``output`` has rank ``rank(x) - 1``.

    * If ``indices`` is 1-D tensor:

        .. math::
           output[p_0, ..., p_{axis-1}, ~~~~~~~~~~~~~ i, ~~~~~~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}] =
        .. math::
           x[p_0, ..., p_{axis-1}, ~~~~~~~~ indices[i], ~~~~~~~~ p_{axis+1}, ..., p_{rank(*D)-1}]

      The output has rank ``rank(x)``.

    * In general:

        .. math::
           output[p_0, ..., p_{axis-1}, ~~~~~~~~ i_0, ..., i_{M-1}, ~~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}] =
        .. math::
           x[p_0, ..., p_{axis-1}, ~~~~~~~ indices[i_0, ..., i_{M-1}], ~~~~~~~ p_{axis+1}, ..., p_{rank(x)-1}]

      Where ``M = rank(indices)``.

    Parameters
    ----------
    x: tensor<\*D, T> (Required)
    indices: tensor<\*N, i32> (Required)
        * Indices values may be negative. More precisely, ``-D[axis]<= v < D[axis]`` for ``v`` in ``indices``.
    axis: const i32 (Optional. Default=``0``)
        * Negative axis is supported.

    Returns
    -------
    tensor<\*K, T>
        * Where ``K = D[:axis] + N + D[axis+1:]``.

    Attributes
    ----------
    T: fp16, fp32, i32

    References
    ----------
    See `tf.gather <https://www.tensorflow.org/api_docs/python/tf/gather>`_.
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        indices=TensorInputType(type_domain=types.int32),
        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
        )

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        # x may be symbolic, but indices must be concrete for const folding.
        # (Cleaned up: previously an unused local `x` was bound and the
        # attribute chain was re-evaluated in the compute_gather call.)
        indices = self.indices.val
        if indices is None:
            return None
        return compute_gather(
            params=self.x.sym_val,
            indices=indices,
            axis=self.axis.val,
            batch_dims=0,
        )

    def type_inference(self):
        """Output dtype matches x; output shape is D[:axis] + N + D[axis+1:]."""
        out_type = self.x.dtype

        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        output_rank = self.x.rank - 1 + self.indices.rank
        if output_rank == 0:
            # Scalar x with scalar indices gathers down to a scalar.
            return out_type

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.x.rank
        out_shape = self.x.shape[:axis] + self.indices.shape + self.x.shape[axis + 1 :]
        return types.tensor(out_type, out_shape)
@register_op
class scatter(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices`` at dimension ``axis``
    by operation ``mode``.

    Example: ``mode == update``.

    * For ``i`` in ``[0, len(indices)]``:

        .. math::
           output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =
        .. math::
           updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]

    * For ``j != i``:

        .. math::
           output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
        .. math::
           data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Example: ``mode == add``.

    * For ``i`` in ``[0, len(indices)]``:

        .. math::
           output[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D] =
        .. math::
           updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
        .. math::
           x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]

    * For ``j != i``:

        .. math::
           output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
        .. math::
           data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Parameters
    ----------
    data: tensor<\*D, T> (Required)
    indices: tensor<[C], i32> (Required)
        * 1-D tensor.
    updates: tensor<\*K, T> (Required)
        * ``K = data.shape[:axis] + [len(indices)] + data.shape[axis+1:]``.
    axis: const i32 (Optional)
        * Default to ``0``.
    mode: const string (Optional)
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.
        * Default value is ``add`` (see ``default_inputs`` below).

    Returns
    -------
    tensor<\*D, T>
        * With the same type and shape as input ``x``.

    Attributes
    ----------
    T: fp16, fp32, i32

    For example:
        data = [[1, 2, 3], [4, 5, 6]]
        indices = [1, 0]
        updates = [[5, 6, 7], [8, 9, 10]]
        axis = 0
        mode = "add"

    produces:
        [[9, 11, 13], [9, 11, 13]]

    (With ``mode = "update"`` the same inputs would instead produce
    ``[[8, 9, 10], [5, 6, 7]]``.)
    """

    input_spec = InputSpec(
        data=TensorInputType(type_domain="T"),
        indices=TensorInputType(type_domain=types.int32),
        updates=TensorInputType(type_domain="T"),
        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def default_inputs(self):
        # Note: the default mode is "add", not "update".
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    def type_inference(self):
        """Output keeps data's type; validates axis range and updates shape."""
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.data.rank
        # updates must have shape data.shape with dim `axis` replaced by the
        # indices shape.
        expected_updates_shape = (
            self.data.shape[:axis] + self.indices.shape + self.data.shape[axis + 1 :]
        )

        err = "Updates shape {} is incorrect. It should be {}.".format(self.updates.shape, expected_updates_shape)
        assert is_compatible_symbolic_vector(
            self.updates.shape, tuple(expected_updates_shape)
        ), err

        return self.data.sym_type
@register_op
class gather_along_axis(Operation):
    """
    Take the values along ``axis`` at locations ``indices``.

    .. math::
       idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
    .. math::
       output[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] = = x[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D]

    Parameters
    ----------
    x: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
        * ``rank(indices) == rank(x)``.
    axis: const i32 (Optional):
        * Default to ``0``.

    Returns
    -------
    tensor<\*D, T>:
        * Output tensor has the same shape as ``indices``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        indices=TensorInputType(type_domain=types.int32),
        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
        )

    @precondition(allow=VALUE)
    def value_inference(self):
        # Const folding maps directly onto numpy's take_along_axis.
        x = self.x.val
        indices = self.indices.val
        axis = self.axis.val
        return np.take_along_axis(x, indices, axis)

    def type_inference(self):
        """Output dtype matches x; output shape equals the indices shape."""
        if self.x.rank != self.indices.rank:
            raise ValueError(
                "Rank mismatch between input and indices. \
                Input rank: {}, indices rank: {}".format(
                    self.x.rank, self.indices.rank
                )
            )

        if self.axis.val < -self.x.rank or self.axis.val >= self.x.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.x.rank

        # Every dimension except `axis` must agree between x and indices.
        for i in range(self.x.rank):
            if i != axis:
                assert self.x.shape[i] == self.indices.shape[i]

        return types.tensor(self.x.dtype, self.indices.shape)
@register_op
class scatter_along_axis(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices`` along ``axis`` dimension
    using ``mode`` operation.

    Example: ``mode == update``.

    * For ``i`` in ``[0, len(indices)]``:

        .. math::
           idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
        .. math::
           output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
        .. math::
           updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]

    * For ``j! = i``:

        .. math::
           output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
        .. math::
           data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Example: ``mode == add``.

    * For ``i`` in ``[0, len(indices)]``:

        .. math::
           idx = indices[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D]
        .. math::
           output[p_0, ..., p_{axis-1}, idx, p_{axis+1}, ..., p_D] =
        .. math::
           updates[p_0, ..., p_{axis-1}, i, p_{axis+1}, ..., p_D] +
        .. math::
           x[p_0, ..., p_{axis-1}, indice[i], p_{axis+1}, ..., p_D]

    * For ``j! = i``:

        .. math::
           output[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D] =
        .. math::
           data[p_0, ..., p_{axis-1}, j, p_{axis+1}, ..., p_D]

    Parameters
    ----------
    data: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
        * ``rank(indices) == rank(data)``.
    updates: tensor<\*K, T> (Required)
        * Must be the same shape as ``indices``.
    axis: const i32 (Optional)
        * Default to ``0``.
    mode: const string (Optional)
        * Default to ``add``.
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.

    Returns
    -------
    tensor<\*D, T>
        * With the same type and shape as input ``x``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        data=TensorInputType(type_domain="T"),
        indices=TensorInputType(type_domain=types.int32),
        updates=TensorInputType(type_domain="T"),
        axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def default_inputs(self):
        return DefaultInputs(
            axis=0,
            mode="add",
        )

    @precondition(allow=VALUE)
    def value_inference(self):
        # NOTE(review): np.put_along_axis implements "update" semantics only,
        # yet the op's default mode is "add". Value inference here ignores
        # `mode` entirely — confirm whether const folding should be limited
        # to mode == "update".
        data = np.copy(self.data.val)
        indices = self.indices.val
        updates = self.updates.val
        axis = self.axis.val
        np_output = data
        np.put_along_axis(np_output, indices, updates, axis=axis)
        return np_output

    def type_inference(self):
        """Output keeps data's type; indices/updates must match data's shape
        on every dimension except `axis`."""
        if self.axis.val < -self.data.rank or self.axis.val >= self.data.rank:
            raise IndexError(
                "Axis value {} is out of bounds for {} node {}".format(
                    self.axis.val, self.op_type, self.name
                )
            )

        axis = self.axis.val
        axis = axis if axis >= 0 else axis + self.data.rank

        assert is_compatible_symbolic_vector(
            self.indices.shape, self.updates.shape
        )
        assert self.data.rank == self.indices.rank
        for i in range(self.data.rank):
            if i != axis:
                assert self.data.shape[i] == self.indices.shape[i]

        return self.data.sym_type
@register_op
class gather_nd(Operation):
    """
    Gather slices from ``x`` according to ``indices``, similar to
    `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.

    ``indices`` is a K-dim tensor whose last axis indexes into ``x``: each
    entry ``indices[i_0, ..., i_{K-2}]`` selects a slice of ``x``:

    .. math::
       output[i_0, ..., i_{K-2}]= x[indices[i_0, ..., i_{K-2}]]

    Where ``K = rank(indices)`` and the selected slice has rank
    ``rank(x) - indices.shape[-1]``.

    Parameters
    ----------
    x: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)

    Returns
    -------
    tensor<\*V, T>
        * ``V = K[:-1] + D[K[-1]:]``, where ``D = x.shape`` and ``K = indices.shape``.

    Attributes
    ----------
    T: fp16, fp32, i32

    References
    ----------
    See `tf.gather_nd <https://www.tensorflow.org/api_docs/python/tf/gather_nd>`_.
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        indices=TensorInputType(type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def type_inference(self):
        # The last indices dim addresses at most all of x's dims.
        index_depth = self.indices.shape[-1]
        assert index_depth <= self.x.rank
        result_shape = self.indices.shape[:-1] + self.x.shape[index_depth:]
        return types.tensor(self.x.dtype, result_shape)
@register_op
class scatter_nd(Operation):
    """
    Scatter ``updates`` to ``data`` at locations ``indices``.

    ``indices`` is a K-dim tensor whose last axis indexes into ``data``: each
    entry ``indices[i_0, ..., i_{K-2}]`` selects a slice of ``data`` with rank
    ``rank(data) - indices.shape[-1]``, where ``K = rank(indices)``.

    * Example: ``mode == update``: The ``output`` is set to ``data`` initially, and
      the op updates ``output`` as follows:

    .. math::
       output[indices[i_0, ..., i_{K-2}]]= updates[indices[i_0, ..., i_{K-2}]]

    * Example: ``mode == add``. The update rule is:

    .. math::
       output[indices[i_0, ..., i_{K-2}]] += updates[indices[i_0, ..., i_{K-2}]]

    Parameters
    ----------
    data: tensor<\*D, T> (Required)
    indices: tensor<\*K, i32> (Required)
    updates: tensor<\*K, T> (Required)
        * Must be the shape as ``K[:-1]+data.shape[K[-1]:]``.
    mode: const string (Optional)
        * Default to ``add``.
        * Can be the following modes: ``update``, ``add``, ``sub``, ``mul``,
          ``div``, ``max``, ``min``.

    Returns
    -------
    tensor<\*D, T>
        * A tensor with the same shape and type as ``data``.

    Attributes
    ----------
    T: fp16, fp32, i32
    """

    input_spec = InputSpec(
        data=TensorInputType(type_domain="T"),
        indices=TensorInputType(type_domain=types.int32),
        updates=TensorInputType(type_domain="T"),
        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def default_inputs(self):
        return DefaultInputs(
            mode="add",
        )

    def type_inference(self):
        # updates must look like indices (minus its last dim) followed by the
        # trailing dims of data not addressed by the index depth.
        index_depth = self.indices.shape[-1]
        assert index_depth <= self.data.rank
        expected_updates_shape = (
            self.indices.shape[:-1] + self.data.shape[index_depth:]
        )
        assert is_compatible_symbolic_vector(
            self.updates.shape, tuple(expected_updates_shape)
        )
        return self.data.sym_type
| bsd-3-clause | 9040f64e5f54ccff4e2c2713b08c82d7 | 29.069217 | 140 | 0.510116 | 3.386951 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/_deployment_compatibility.py | 1 | 6175 | # Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from enum import IntEnum
from coremltools import (_SPECIFICATION_VERSION_IOS_13,
_SPECIFICATION_VERSION_IOS_14,
_SPECIFICATION_VERSION_IOS_15,
_SPECIFICATION_VERSION_IOS_16,
_SPECIFICATION_VERSION_IOS_17)
class AvailableTarget(IntEnum):
    """Deployment targets, each an alias for a specification version.

    Member definition order matters: the iOS members come first, so they are
    the canonical names and every macOS/watchOS/tvOS member with the same
    value becomes an IntEnum alias of the corresponding iOS member.
    """

    # iOS versions (canonical members)
    iOS13 = _SPECIFICATION_VERSION_IOS_13
    iOS14 = _SPECIFICATION_VERSION_IOS_14
    iOS15 = _SPECIFICATION_VERSION_IOS_15
    iOS16 = _SPECIFICATION_VERSION_IOS_16
    iOS17 = _SPECIFICATION_VERSION_IOS_17

    # macOS versions (aliases of iOS versions)
    macOS15 = _SPECIFICATION_VERSION_IOS_13
    macOS16 = _SPECIFICATION_VERSION_IOS_14
    macOS10_15 = _SPECIFICATION_VERSION_IOS_13
    macOS10_16 = _SPECIFICATION_VERSION_IOS_14
    macOS11 = _SPECIFICATION_VERSION_IOS_14
    macOS12 = _SPECIFICATION_VERSION_IOS_15
    macOS13 = _SPECIFICATION_VERSION_IOS_16
    macOS14 = _SPECIFICATION_VERSION_IOS_17

    # watchOS versions (aliases of iOS versions)
    watchOS6 = _SPECIFICATION_VERSION_IOS_13
    watchOS7 = _SPECIFICATION_VERSION_IOS_14
    watchOS8 = _SPECIFICATION_VERSION_IOS_15
    watchOS9 = _SPECIFICATION_VERSION_IOS_16
    watchOS10 = _SPECIFICATION_VERSION_IOS_17

    # tvOS versions (aliases of iOS versions)
    tvOS13 = _SPECIFICATION_VERSION_IOS_13
    tvOS14 = _SPECIFICATION_VERSION_IOS_14
    tvOS15 = _SPECIFICATION_VERSION_IOS_15
    tvOS16 = _SPECIFICATION_VERSION_IOS_16
    tvOS17 = _SPECIFICATION_VERSION_IOS_17

    def __str__(self):
        # Render e.g. "coremltools.target.iOS14" instead of
        # "AvailableTarget.iOS14".
        return super().__str__().replace(type(self).__name__, "coremltools.target")
_get_features_associated_with = {}
def register_with(name):
def decorator(func):
if name not in _get_features_associated_with:
_get_features_associated_with[name] = func
else:
raise ValueError("Function is already registered with {}".format(name))
return func
return decorator
@register_with(AvailableTarget.iOS14)
def iOS14Features(spec):
    """Return a list of human-readable iOS14-only features used by ``spec``.

    Parameters
    ----------
    spec:
        A Model protobuf specification of one of the neural-network flavors.

    Returns
    -------
    list of str describing each iOS14-only feature found (deduplicated).

    Raises
    ------
    ValueError
        If the spec is not a neuralNetwork / neuralNetworkClassifier /
        neuralNetworkRegressor model.
    """
    features_list = []

    model_type = spec.WhichOneof("Type")
    if model_type == "neuralNetwork":
        nn_spec = spec.neuralNetwork
    elif model_type == "neuralNetworkClassifier":
        # Bug fix: previously compared with `in` (a substring test), which
        # also raised TypeError when WhichOneof returned None.
        nn_spec = spec.neuralNetworkClassifier
    elif model_type == "neuralNetworkRegressor":
        nn_spec = spec.neuralNetworkRegressor
    else:
        raise ValueError("Invalid neural network specification for the model")

    # Non-zero default values for optional inputs were introduced in iOS14.
    for model_input in spec.description.input:
        value = 0
        if model_input.type.isOptional:
            value = max(value, model_input.type.multiArrayType.floatDefaultValue)
            value = max(value, model_input.type.multiArrayType.doubleDefaultValue)
            value = max(value, model_input.type.multiArrayType.intDefaultValue)
        if value != 0:
            msg = "Support of non-zero default optional values for inputs."
            features_list.append(msg)
            break

    # Layer types or layer modifications introduced in iOS14.
    new_layers = [
        "oneHot",
        "cumSum",
        "clampedReLU",
        "argSort",
        "pooling3d",
        "convolution3d",
        "globalPooling3d",
    ]
    for layer in nn_spec.layers:
        layer_type = layer.WhichOneof("layer")
        msg = ""
        if layer_type in new_layers:
            msg = "{} {}".format(layer_type.capitalize(), "operation")
        if layer_type == "tile" and len(layer.input) == 2:
            msg = "Dynamic Tile operation"
        if layer_type == "upsample" and layer.upsample.linearUpsampleMode in [1, 2]:
            msg = "Upsample operation with Align Corners mode"
        if layer_type == "reorganizeData" and layer.reorganizeData.mode == 2:
            msg = "Pixel Shuffle operation"
        if layer_type == "sliceDynamic" and layer.sliceDynamic.squeezeMasks:
            msg = "Squeeze mask for dynamic slice operation"
        if layer_type == "sliceStatic" and layer.sliceStatic.squeezeMasks:
            # Bug fix: read squeezeMasks from sliceStatic, not sliceDynamic
            # (the old code always saw the default False submessage).
            msg = "Squeeze mask for static slice operation"
        if layer_type == "concatND" and layer.concatND.interleave:
            msg = "Concat layer with interleave operation"
        if msg != "" and (msg not in features_list):
            features_list.append(msg)

    return features_list
def check_deployment_compatibility(spec, representation, deployment_target):
    """Raise ValueError if *spec* uses features newer than *deployment_target*.

    Walks every registered feature detector for targets strictly newer than
    the requested one; if any reports features, an explanatory error is
    raised. As a fallback, an error is also raised when the spec's version
    exceeds the target even though no specific feature could be identified.
    """
    if not isinstance(deployment_target, AvailableTarget):
        raise TypeError(
            "Argument for deployment_target must be an enumeration from Enum class AvailableTarget"
        )

    for candidate in AvailableTarget:
        if candidate <= deployment_target:
            continue
        detector = _get_features_associated_with.get(candidate)
        if detector is None:
            continue
        missing_features = detector(spec)
        if not missing_features:
            continue
        msg = (
            "Provided minimum deployment target requires model to be of version {} but converted model "
            "uses following features which are available from version {} onwards. Please use a higher "
            "minimum deployment target to convert. \n ".format(
                deployment_target.value, candidate.value
            )
        )
        for i, feature in enumerate(missing_features):
            msg += " {}. {}\n".format(i + 1, feature)
        raise ValueError(msg)

    # Default exception throwing if not able to find the reason behind spec version bump
    if spec.specificationVersion > deployment_target.value:
        raise ValueError(
            "Provided deployment target requires model to be of version {} but converted model has version {} "
            "suitable for later releases".format(
                deployment_target.value, spec.specificationVersion,
            )
        )
| bsd-3-clause | a0efc027016829e71f5570cfd862b5fd | 35.323529 | 112 | 0.637247 | 3.971061 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/backend/nn/load.py | 1 | 13848 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import coremltools as ct
from coremltools.converters._profile_utils import _profile
from coremltools.converters.mil.backend.backend_helper import \
_get_probability_var_for_classifier
from coremltools.converters.mil.input_types import (ColorLayout,
EnumeratedShapes,
ImageType, RangeDim, Shape)
from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types.symbolic import (any_symbolic,
any_variadic,
is_symbolic)
from coremltools.models import MLModel
from coremltools.models import neural_network as neural_network
from coremltools.models.datatypes import Array
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.neural_network.flexible_shape_utils import (
add_enumerated_image_sizes, add_multiarray_ndshape_enumeration,
set_multiarray_ndshape_range)
from ..backend_helper import (_get_colorspace_enum,
_validate_image_input_output_shapes)
from .op_mapping import convert_ops
from .passes.nn_passes import nn_backend_passes
def _convert_to_image_input(proto, inputs, skip_model_load=False):
    """Mark every ImageType entry of *inputs* as an image input on *proto*.

    Bias values are routed according to the color layout (grayscale gets a
    single bias; RGB/BGR unpack a 3-tuple in the appropriate channel order).
    Returns the updated protobuf spec.
    """
    model = MLModel(proto, skip_model_load=skip_model_load)
    for in_type in inputs:
        if not isinstance(in_type, ImageType):
            continue
        layout = in_type.color_layout
        if layout in (ColorLayout.GRAYSCALE, ColorLayout.GRAYSCALE_FLOAT16):
            gray_bias = in_type.bias
            red_bias = green_bias = blue_bias = 0.0
        elif layout == ColorLayout.RGB:
            gray_bias = 0.0
            red_bias, green_bias, blue_bias = in_type.bias
        elif layout == ColorLayout.BGR:
            gray_bias = 0.0
            blue_bias, green_bias, red_bias = in_type.bias
        model = neural_network.utils.make_image_input(
            model,
            in_type.name,
            is_bgr=layout == ColorLayout.BGR,
            image_format="NCHW" if in_type.channel_first else "NHWC",
            red_bias=red_bias,
            green_bias=green_bias,
            blue_bias=blue_bias,
            gray_bias=gray_bias,
            scale=in_type.scale,
        )
    return model.get_spec()
def _convert_to_classifier(proto, classifier_config, skip_model_load=False):
    """Attach classifier metadata from *classifier_config* to *proto*.

    Wraps the spec in an MLModel, applies the class labels and predicted
    feature/probability output names, and returns the updated spec.
    """
    model = MLModel(proto, skip_model_load=skip_model_load)
    classifier_model = neural_network.utils.make_nn_classifier(
        model,
        classifier_config.class_labels,
        classifier_config.predicted_feature_name,
        classifier_config.predicted_probabilities_output,
    )
    return classifier_model.get_spec()
def _set_user_inputs(proto, inputs):
    """Apply user-provided flexible input shapes to *proto*.

    For each input type:
      * EnumeratedShapes -> enumerated image sizes (images) or enumerated
        ndarray shapes (multiarrays);
      * Shape containing RangeDim / symbolic dims -> size ranges.
    Fully static positive shapes are left untouched.
    """
    for input_type in inputs:
        shape = input_type.shape
        if isinstance(shape, EnumeratedShapes):
            if isinstance(input_type, ImageType):
                # Find the default H/W already present in the proto so the
                # default size is not duplicated in the enumerated list.
                default_height, default_width = 0, 0
                for inp in proto.description.input:
                    if inp.name == input_type.name:
                        default_height = inp.type.imageType.height
                        default_width = inp.type.imageType.width
                        break
                image_sizes = []
                # NCHW: H, W are the last two dims; otherwise (NHWC,
                # presumably) H, W are dims -3 and -2.
                if input_type.channel_first:
                    for s in shape.shapes:
                        if s.shape[-2] == default_height and s.shape[-1] == default_width:
                            continue
                        image_sizes.append(
                            flexible_shape_utils.NeuralNetworkImageSize(
                                height=s.shape[-2], width=s.shape[-1]
                            )
                        )
                else:
                    for s in shape.shapes:
                        if s.shape[-3] == default_height and s.shape[-2] == default_width:
                            continue
                        image_sizes.append(
                            flexible_shape_utils.NeuralNetworkImageSize(
                                height=s.shape[-3], width=s.shape[-2]
                            )
                        )
                add_enumerated_image_sizes(
                    proto, input_type.name, sizes=image_sizes
                )
            else:
                add_multiarray_ndshape_enumeration(
                    proto, input_type.name, [tuple(s.shape) for s in shape.shapes]
                )
        elif isinstance(shape, Shape):
            shape = shape.shape  # This is shape in Shape
            # Entirely static, positive shape: nothing flexible to record.
            if all(
                [
                    not isinstance(s, RangeDim) and not is_symbolic(s) and s > 0
                    for s in shape
                ]
            ):
                continue
            if isinstance(input_type, ImageType):
                img_range = flexible_shape_utils.NeuralNetworkImageSizeRange()
                if input_type.channel_first:
                    H = shape[-2]
                    W = shape[-1]
                else:
                    H = shape[-3]
                    W = shape[-2]
                # RangeDim carries explicit bounds; a bare symbol means
                # unbounded (upper bound encoded as -1); otherwise fixed.
                if isinstance(H, RangeDim):
                    img_range.add_height_range((H.lower_bound, H.upper_bound))
                elif is_symbolic(H):
                    img_range.add_height_range((1, -1))
                else:
                    img_range.add_height_range((H, H))
                if isinstance(W, RangeDim):
                    img_range.add_width_range((W.lower_bound, W.upper_bound))
                elif is_symbolic(W):
                    img_range.add_width_range((1, -1))
                else:
                    img_range.add_width_range((W, W))
                flexible_shape_utils.update_image_size_range(
                    proto, input_type.name, img_range
                )
            else:
                # Per-dimension lower/upper bounds for a multiarray input.
                lb = []
                ub = []
                for s in shape:
                    if isinstance(s, RangeDim):
                        lb.append(s.lower_bound)
                        ub.append(s.upper_bound)
                    elif is_symbolic(s):
                        lb.append(1)
                        ub.append(-1)
                    else:
                        lb.append(s)
                        ub.append(s)
                set_multiarray_ndshape_range(
                    proto, input_type.name, lower_bounds=lb, upper_bounds=ub
                )
def _set_symbolic_inputs(proto, symbolic_inputs):
    """Record shape ranges for inputs whose shapes were inferred symbolic.

    Each symbolic dimension becomes the range [1, unbounded) (upper bound
    encoded as -1); concrete dimensions stay fixed.
    """
    for input_name, sym_shape in symbolic_inputs.items():
        lower = [1 if is_symbolic(dim) else dim for dim in sym_shape]
        upper = [-1 if is_symbolic(dim) else dim for dim in sym_shape]
        set_multiarray_ndshape_range(
            proto, input_name, lower_bounds=lower, upper_bounds=upper
        )
def _set_optional_inputs(proto, input_types):
    """Mark inputs with a user default value as optional in *proto*.

    Non-image inputs carrying ``default_value`` are flagged optional and the
    scalar fill value is stored; a non-zero fill or a shape mismatch forces
    the spec version up to iOS14, which introduced these capabilities.
    """
    # Set default values for optional input_types
    default_map = {}
    for input_type in input_types:
        if isinstance(input_type, ImageType):
            continue
        if input_type.default_value is not None:
            default_map[input_type.name] = input_type.default_value

    for idx, input in enumerate(proto.description.input):
        name = proto.description.input[idx].name
        if name in default_map:
            default_value = default_map[name]
            proto.description.input[idx].type.isOptional = True
            array_t = proto.description.input[idx].type.multiArrayType
            # Only a single scalar fill value is representable; assumes the
            # default tensor is a constant fill — TODO confirm upstream
            # validation guarantees this.
            default_fill_val = default_value.flatten()[0]
            array_t.floatDefaultValue = default_fill_val
            if default_fill_val != 0 or list(default_value.shape) != \
                array_t.shape:
                # promote spec version to 5 and set the default value
                proto.specificationVersion = max(proto.specificationVersion,
                                                 ct._SPECIFICATION_VERSION_IOS_14)
                # array_t.shape is not empty.
                array_t.ClearField('shape')
                array_t.shape.extend(list(default_value.shape))
@_profile
def load(prog, **kwargs):
    """Lower a MIL ``Program`` (single ``main`` function) to a NeuralNetwork
    protobuf spec.

    Recognized kwargs:
      * ``skip_model_load``: forwarded to intermediate ``MLModel`` creation.
      * ``classifier_config``: optional config to emit a classifier model.

    Returns the finished protobuf spec.
    Raises ValueError if the program does not consist of exactly one ``main``.
    """
    if "main" not in prog.functions:
        msg = "main function not found in program {}"
        raise ValueError(msg.format(prog))
    if len(prog.functions) != 1:
        msg = (
            "Program must have exactly one `main` function to "
            "convert to NN. Program: {}"
        )
        raise ValueError(msg.format(prog))

    nn_backend_passes(prog)
    input_types = prog.main_input_types
    output_types = prog.main_output_types

    # Collect builder inputs; symbolic dims get placeholder sizes that are
    # replaced with ranges at the end via _set_symbolic_inputs.
    v1_inputs = []
    symbolic_inputs = {}
    for name, var in prog.functions["main"].inputs.items():
        if types.is_tensor(var.sym_type):
            sym_shape = var.sym_type.get_shape()
            if any_variadic(sym_shape):
                raise NotImplementedError("Variadic rank is not supported")
            if any_symbolic(sym_shape):
                user_specified = False
                for input_type in input_types:
                    if name == input_type.name:
                        sym_shape = input_type.shape.default
                        user_specified = True
                        break
                # Use dummy static shape, and will set it later.
                shape = [1 if is_symbolic(d) else d for d in sym_shape]
                if not user_specified:
                    symbolic_inputs[name] = sym_shape
            else:
                shape = sym_shape
            v1_inputs.append((name, Array(*shape)))
        elif types.is_scalar(var.sym_type):
            v1_inputs.append((name, Array(1)))
        else:
            raise NotImplementedError()

    v1_outputs = []
    for var in prog.functions["main"].outputs:
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            # Disregard the output types
            v1_outputs.append((var.name, None))
        else:
            raise NotImplementedError()

    # create neural network builder
    builder = neural_network.NeuralNetworkBuilder(
        v1_inputs,
        v1_outputs,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )

    # const in V2 are added lazily to V1 by each op whenever needed.
    # `const_context` stores the const names we've added so far and avoid
    # adding a const more than once.
    # const_context: list[set of str] (const name for v1 & v2
    # (the same)). Note that in NN in outer layer is visible from the inner
    # layer, so the const_context is simply a stack of set.
    const_context = []

    # Iterate through ops and add to builder
    convert_ops(
        const_context,
        builder,
        prog.functions["main"].operations,
        prog.functions["main"].outputs,
    )

    proto = builder.spec
    # image input
    has_image_input = any([isinstance(s, ImageType) for s in input_types])
    if has_image_input:
        proto = _convert_to_image_input(proto, input_types,
                                        skip_model_load=kwargs.get("skip_model_load", False))

    # image output
    if output_types is not None:
        assert len(output_types) == len(prog.functions["main"].outputs), \
            "number of mil program outputs do not match the number of outputs provided by the user"
        for i, output_proto_desc in enumerate(proto.description.output):
            output_var = prog.functions["main"].outputs[i]
            if isinstance(output_types[i], ImageType):
                # Bug fix: validate against output_var (the i-th output). The
                # previous code read the stale loop variable `var` left over
                # from the v1_outputs loop, i.e. always the LAST output.
                if not types.is_tensor(output_var.sym_type):
                    raise ValueError("Image output, '{}', is a scalar, but it should be a tensor of rank 4".format(
                        output_var.name))
                shape = output_var.sym_type.get_shape()
                if any_variadic(shape):
                    raise ValueError("Variable rank model outputs, that are ImageTypes, are not supported")
                if any([is_symbolic(d) for d in shape]):
                    raise NotImplementedError("Image output '{}' has symbolic dimensions in its shape".
                                              format(output_var.name))
                _validate_image_input_output_shapes(output_types[i].color_layout, shape, output_var.name, is_input=False)
                clr_space = _get_colorspace_enum(output_types[i].color_layout)
                output_proto_desc.type.imageType.colorSpace = clr_space
                output_proto_desc.type.imageType.width = shape[-1]
                output_proto_desc.type.imageType.height = shape[-2]

    # classifier flag
    classifier_config = kwargs.get("classifier_config", None)
    if classifier_config is not None:
        # verify that classifier_config.predicted_probabilities_output if its exists.
        # And if its empty/None, fill it with the last non const op's output
        # this is done in "_get_probability_var_for_classifier()"
        probability_var = _get_probability_var_for_classifier(prog, classifier_config)
        if classifier_config.predicted_probabilities_output != probability_var.name:
            classifier_config.predicted_probabilities_output = probability_var.name
        # add classifier related fields to the proto spec
        proto = _convert_to_classifier(proto, classifier_config,
                                       skip_model_load=kwargs.get("skip_model_load", False))

    _set_user_inputs(proto, input_types)
    _set_symbolic_inputs(proto, symbolic_inputs)
    _set_optional_inputs(proto, input_types)

    return proto
| bsd-3-clause | ea10b4b3eec40bdb12c6677839f48bc1 | 43.242812 | 115 | 0.558131 | 4.220664 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/passes/prelu_fusion.py | 1 | 7821 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import \
fuse_all_blocks
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.helper import \
_check_var_scalar_value
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
def _prelu_pattern(x):
    """Build the decomposed-prelu template ``y = a * relu(-x) + relu(x)``.

    The op names ("neg", "relu1", "alpha_mul", "relu2", "out_op") are
    load-bearing: the generic pattern matcher exposes matched ops by these
    names (e.g. ``pattern.alpha_mul``), so they must not change.
    """
    # MIL operation takes named inputs (instead of positional inputs).
    # Here `name` argument is MANDATORY.
    neg = mb.mul(x=x, y=-1., name="neg")
    relu1 = mb.relu(x=neg, name="relu1")
    # use any constant here to match, rank and shape will be verified in "is_var_constraint_satisifed" method
    mul = mb.mul(x=relu1, y=np.random.rand(2, 2, 2, 2), name="alpha_mul")
    relu2 = mb.relu(x=x, name="relu2")
    out = mb.add(x=relu2, y=mul, name="out_op")
    return out
class Pattern1:
    """Matcher/rewriter for the channel-first decomposed prelu:
    ``y = a * relu(-1 * x) + relu(x)`` with rank-4 (NCHW) input and alpha of
    shape (1, C, 1, 1) or (C, 1, 1).
    """

    @staticmethod
    def is_var_constraint_satisifed(pattern):
        # input must be rank 4
        if pattern.root_var.rank != 4:
            return False

        # output must be rank 4
        if pattern.out_op.outputs[0].rank != 4:
            return False

        # the "neg" multiply must be by exactly -1 (on either operand)
        if not (_check_var_scalar_value(pattern.neg.y, -1) or _check_var_scalar_value(pattern.neg.x, -1)):
            return False

        # alpha must be a compile-time constant on either side of alpha_mul
        if pattern.alpha_mul.x.val is not None:
            alpha = pattern.alpha_mul.x.val
        elif pattern.alpha_mul.y.val is not None:
            alpha = pattern.alpha_mul.y.val
        else:
            return False

        # alpha must be of shape (1, C, 1, 1) or (C, 1, 1)
        if len(alpha.shape) not in (3, 4):
            return False
        # i.e. every dimension except the channel one is 1
        if alpha.size != alpha.shape[-3]:
            return False

        return True

    @staticmethod
    def transform_pattern(pattern):
        # remove all the ops, and replace with a prelu op
        out_var = pattern.out_op.outputs[0]
        if pattern.alpha_mul.x.val is not None:
            alpha = pattern.alpha_mul.x.val
        else:
            alpha = pattern.alpha_mul.y.val

        # prelu applies alpha to the negative side, absorbing the pattern's -1
        alpha_vector = -1 * alpha.flatten()
        x = mb.prelu(x=pattern.root_var, alpha=alpha_vector, name=out_var.name, before_op=pattern.out_op)
        pattern.out_op.enclosing_block.replace_uses_of_var_after_op(
            anchor_op=pattern.out_op, old_var=out_var, new_var=x
        )

        # Remove all the ops at once
        pattern.block.remove_ops(pattern.op_list())

    @staticmethod
    def get_prelu_pattern():
        """
        y = a * relu(-1 * x) + relu(x)

        when x is rank 4, and "a" is of shape (1, C, 1, 1) or (C, 1, 1),
        this is equivalent to prelu with alpha = -a.flatten(),
        """
        @mb.program(input_specs=[mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(),
                                                       get_new_symbol(), get_new_symbol()])), ])
        def prelu_pattern(x):
            return _prelu_pattern(x)

        return prelu_pattern
class Pattern2:
    """Matcher/rewriter for the channels-last decomposed prelu: a transpose
    to NHWC followed by ``y = a * relu(-1 * x) + relu(x)`` with alpha of
    shape (C,), (1, C), (1, 1, C), or (1, 1, 1, C).
    """

    @staticmethod
    def is_var_constraint_satisifed(pattern):
        # only the NCHW -> NHWC permutation is handled
        perm = pattern.transpose.perm.val
        if not np.array_equal(perm, np.array([0,2,3,1])):
            return False

        # output must be rank 4
        if pattern.out_op.outputs[0].rank != 4:
            return False

        # the "neg" multiply must be by exactly -1 (on either operand)
        if not (_check_var_scalar_value(pattern.neg.y, -1) or _check_var_scalar_value(pattern.neg.x, -1)):
            return False

        # alpha must be a compile-time constant on either side of alpha_mul
        if pattern.alpha_mul.x.val is not None:
            alpha = pattern.alpha_mul.x.val
        elif pattern.alpha_mul.y.val is not None:
            alpha = pattern.alpha_mul.y.val
        else:
            return False

        # alpha must be of shape (C,) or (1,C) or (1,1,C) or (1,1,1,C)
        if alpha.size != alpha.shape[-1]:
            return False

        return True

    @staticmethod
    def transform_pattern(pattern):
        # remove all the ops, and replace with a prelu op + transpose op
        perm = pattern.transpose.perm.val
        out_var = pattern.out_op.outputs[0]
        if pattern.alpha_mul.x.val is not None:
            alpha = pattern.alpha_mul.x.val
        else:
            alpha = pattern.alpha_mul.y.val

        # prelu applies alpha to the negative side, absorbing the pattern's -1;
        # the prelu runs in NCHW, then the original transpose is re-applied.
        alpha_vector = -1 * alpha.flatten()
        x = mb.prelu(x=pattern.root_var, alpha=alpha_vector, before_op=pattern.out_op)
        x = mb.transpose(x=x, perm=perm, name=out_var.name, before_op=pattern.out_op)
        pattern.out_op.enclosing_block.replace_uses_of_var_after_op(
            anchor_op=pattern.out_op, old_var=out_var, new_var=x
        )

        # Remove all the ops at once
        pattern.block.remove_ops(pattern.op_list())

    @staticmethod
    def get_prelu_pattern():
        """
        x1 = transpose(perm=(0,2,3,1))(x)
        y = a * relu(-1 * x1) + relu(x1)

        when x is rank 4, and "a" is of shape (C,) or (1, C) or (1,1,C) or (1,1,1,C),
        this is equivalent to prelu with alpha = -a.flatten(), followed by a transpose
        with perm (0,2,3,1)
        """
        @mb.program(input_specs=[mb.TensorSpec(shape=([get_new_symbol(), get_new_symbol(),
                                                       get_new_symbol(), get_new_symbol()])), ])
        def prelu_pattern(x):
            # perm value can be anything, it will be checked in "is_var_constraint_satisifed" method
            x = mb.transpose(x=x, perm=[0,1,2,3], name="transpose")
            return _prelu_pattern(x)

        return prelu_pattern
@register_pass(namespace="common")
class fuse_prelu(AbstractGraphPass):
    """
    Detect the following patterns that can be mapped to a prelu op.
    Essentially prelu op can be broken down into the following ops: y = a * relu(-1 * x) + relu(x)

    Pattern 1:
                          | ------------> relu --------------------|
                          |                                        V
     x (BCHW) ------|                                             add -----> y (BCHW)
                          |                                        ^
                          --------> mul -------> relu -----> mul---|
                                     ^                        ^
                                     |                        |
                                Const(val=-1)            Const(name=a, shape=(C,1,1) or (1,C,1,1))

    This will be mapped to:
        x (BCHW) ------> prelu(alpha=a, shape=(C,)) ---------> y (BCHW)

    Pattern 2:
                                     | ------------> relu --------------------|
                                     |                                        V
     x (BCHW) -->transpose(BHWC)---->|                                       add -----> y (BHWC)
                                     |                                        ^
                                     --------> mul -------> relu -----> mul---|
                                                ^                        ^
                                                |                        |
                                          Const(val=-1)     Const(shape=(C,) or (1,C) or (1,1,C) or (1,1,1,C))

    This will be mapped to:
        x (BCHW) ------> prelu ---------> transpose ------> y (BHWC)
    """

    def apply(self, prog):
        # Try both pattern arrangements across every block of the program;
        # each candidate match is validated by the pattern's var constraints
        # before its subgraph is rewritten into a single prelu.
        for pattern in (Pattern1, Pattern2):
            fuse_all_blocks(ops_arrangement=pattern.get_prelu_pattern(),
                            var_constraints=pattern.is_var_constraint_satisifed,
                            transform_pattern=pattern.transform_pattern,
                            prog=prog)
| bsd-3-clause | cc961b91d7bb404d8aa29e7e2fd1c705 | 38.105 | 109 | 0.514129 | 3.661517 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/defs/iOS15/tensor_transformation.py | 1 | 35187 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import sympy as sm
from coremltools import _logger as logger
from coremltools.converters.mil.mil import (Operation, get_new_symbol,
get_new_variadic_symbol,
precondition, types)
from coremltools.converters.mil.mil.input_type import (DefaultInputs,
InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.operation import SYMBOL, VALUE
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.ops.defs._utils import \
solve_slice_by_index_shape
from coremltools.converters.mil.mil.types.symbolic import (any_symbolic,
any_variadic,
is_symbolic,
isscalar)
@register_op
class depth_to_space(Operation):
    """
    Rearrange elements in a tensor from depth (channel) into spatial dimensions.

    Parameters
    ----------
    x: tensor<[n, C, H, W], T> (Required)
        * Input tensor of rank ``4``.
    block_size: const i32 (Required)
        * The size of the spatial block. Must be greater than ``1`` and divisible by
          channel dimension ``C``.

    Returns
    -------
    tensor<[n, C / block_size^2, H x block_size, W x block_size], T>
        * Where ``b`` is the block size.

    Attributes
    ----------
    T: fp16, fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        block_size=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def type_inference(self):
        x_type = self.x.dtype
        n, c, h, w = self.x.shape
        bs = self.block_size.val
        # Channels shrink by block_size^2; H and W each grow by block_size.
        ret_shape = (n, c // (bs * bs), h * bs, w * bs)
        return types.tensor(x_type, ret_shape)
@register_op
class expand_dims(Operation):
    """
    Insert a single-dimension in a 1-D or higher tensor at each axis in axes.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Scalar or tensor.
    axes: const tensor<[K], i32> Required
        * ``K`` is the number of dimensions expanded.
        * Insert single dimension at dimension index at each axes.
        * Negative value to index from the end. ``-d-1 <= axis <= d``
          where ``d`` is the rank of ``x``.

    Returns
    -------
    tensor<\*(rank(x)+K), T>
        * Same type as the input ``x`` with rank ``rank(x)+K``.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        axes=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def _normalized_axes(self):
        # Validate each axis against the *output* rank and return the axes
        # sorted and made non-negative. Shared by type and value inference
        # (the two previously duplicated this logic verbatim).
        axes = self.axes.val
        out_rank = self.x.rank + len(axes)
        for axis in axes:
            if axis <= -out_rank - 1 or axis >= out_rank:
                msg = 'Axis value {} is out of bounds for {} node "{}" of shape {}'
                raise IndexError(
                    msg.format(axis, self.op_type, self.name, self.x.shape)
                )
        return sorted([out_rank + axis if axis < 0 else axis for axis in axes])

    def type_inference(self):
        ret_shape = list(self.x.shape)
        # Insert in ascending order so later indices account for earlier inserts.
        for axis in self._normalized_axes():
            ret_shape.insert(axis, 1)
        return types.tensor(self.x.dtype, tuple(ret_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        ret_shape = list(self.x.shape)
        for axis in self._normalized_axes():
            ret_shape.insert(axis, 1)
        return np.reshape(self.x.val, ret_shape)
def reshape_with_symbol(v, shape):
    """Reshape *v* to *shape*.

    A single symbolic value is wrapped in an ndarray first; a concrete array
    is reshaped after coercing every dimension to int (dims may arrive as
    numpy scalars or sympy integers).
    """
    if is_symbolic(v):
        return np.array(v).reshape(shape)
    return v.reshape([int(dim) for dim in shape])
@register_op
class reshape(Operation):
    """
    Return a tensor that has the same values as ``x`` with shape ``shape``.
    ``shape`` must have the same volume (number of elements) as ``x``.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * A n-D tensor or a scalar.
        * If ``x`` is fixed rank (and possibly contains symbolic dimension),
          shape may contain elements that are not positive integers (see below).
        * If ``x`` is variadic rank, shape can only contain positive integers.
    shape: tensor<[K], i32> (Required)
        A 1-D tensor, with elements from the following:
            * Positive integers.
            * Symbols: All but one symbol in shape must be present in ``x.shape``.
              The new symbol that is not present in ``x.shape`` represent a dimension
              such that the total size remains constant. Symbol is illegal
              if ``x`` is variadic rank.
            * ``-1``: ``-1`` introduces a new symbol (see Symbols). Therefore, ``-1`` is
              allowed if all symbols in the shape appear in ``x.shape``. ``-1`` is illegal
              if ``x`` is variadic rank.
            * ``0``: If ``K == rank(x)`` then ``0`` means inheriting from the corresponding
              dimension in ``x.shape``. ``0`` is illegal if ``x`` is variadic rank.

    Returns
    -------
    tensor<\*?, T>
        * Tensor with shape determined by the input shape.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        shape=TensorInputType(type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def type_inference(self):
        if any_symbolic(self.shape.shape):
            # We can't infer any shape if shape has variable length.
            return types.tensor(self.x.dtype, (get_new_variadic_symbol(),))

        # shape has fixed length here.
        if self.shape.sym_val is None:
            shape = tuple([get_new_symbol() for _ in range(self.shape.shape[0])])
            return types.tensor(self.x.dtype, shape)
        t, _ = self._get_type_val()
        return t

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        _, val = self._get_type_val()
        return val

    def _get_type_val(self):
        """Compute the (output type, optional folded value) pair."""
        x_type = self.x.dtype
        x_shape = self.x.shape
        x_vol = np.prod(x_shape)
        # shape is const, and thus sym_val is not None
        sym_shape = self.shape.sym_val
        sym_shape = [get_new_symbol() if d == -1 else d for d in sym_shape]

        try:
            ret_shape = reshape.enforce_volumetric_constraint(x_vol, sym_shape)
        except Exception:
            # Best-effort: fall back to the raw (possibly symbolic) shape.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            ret_shape = sym_shape

        ret_val = None
        if self.x.val is not None and all(isscalar(a) and not is_symbolic(a) for a in ret_shape):
            ret_val = reshape_with_symbol(self.x.val, ret_shape)
        return types.tensor(x_type, tuple(ret_shape)), ret_val

    @staticmethod
    def enforce_volumetric_constraint(left_volume, inshape):
        """Resolve symbolic / -1 entries of *inshape* so its volume equals
        *left_volume*, solving symbol equations with sympy when needed."""
        left_symbols = set()
        if is_symbolic(left_volume):
            left_symbols = left_volume.free_symbols
        # Generally, we want to solve for right in terms of left. But this
        # is kinda annoying actually.
        shape = list(inshape)

        # Handling when reshape is given 0 instead of actual input
        # input tensor shape: [4, 3, 2], reshape:[0, -1], output tensor shape: [4, 6]
        if shape.count(-1) > 1:
            raise ValueError(
                "Reshape op supports only one dimension to be -1. Given {}".format(
                    shape.count(-1)
                )
            )

        infer_dim_index = shape.index(-1) if -1 in shape else None
        right_volume = 1
        for i in shape:
            if i != -1:
                right_volume = right_volume * i

        # Bug fix: compare against None explicitly. The inferred dimension may
        # legitimately sit at index 0, where the old `if infer_dim_index:`
        # was falsy and silently skipped the inference.
        if infer_dim_index is not None:
            shape[infer_dim_index] = left_volume // right_volume

        if not is_symbolic(right_volume):
            return shape

        constraints = [left_volume - right_volume]
        solve_for = [s for s in shape if is_symbolic(s)]

        for rightsym in solve_for:
            sol = sm.solve(constraints, [rightsym], dict=True)
            if not isinstance(sol, list):
                sol = [sol]
            # look for an acceptable solution
            for s in sol:
                if 0 in s.values():
                    continue
                for i in range(len(shape)):
                    if shape[i] in s:
                        v = s[shape[i]]
                        if len(v.free_symbols - left_symbols) > 0:
                            continue
                        try:
                            shape[i] = int(v)
                        except Exception:
                            # Keep the symbolic expression when it is not an
                            # integer constant.
                            shape[i] = v

        return shape
@register_op
class reverse(Operation):
    """
    Reverse the order of the input tensor ``x`` along specified ``axes`` (dimensions).

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Input tensor.

    axes: const<D, i32> (Optional)
        * Dimension(s) to reverse. Each axis must be in the range ``[-rank(x), rank(x))``.
        * Defaults to None (reverse on all dimensions).

    Returns
    -------
    tensor<\*?, T>
        * Same type and shape as the input tensor.

    Attributes
    ----------
    T: fp16, fp32, i32, bool

    References
    ----------
    See `tf.reverse <https://www.tensorflow.org/api_docs/python/tf/reverse>`_
    and `TORCH <https://pytorch.org/docs/stable/torch.html#torch.flip>`_.
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        axes=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def default_inputs(self):
        return DefaultInputs(
            axes=None,
        )

    def type_inference(self):
        # Reversing never changes shape or dtype.
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        res = self.x.val
        # axes omitted -> reverse along every dimension.
        axes = self.axes.val if self.axes is not None else range(self.x.rank)
        for axis in axes:
            res = np.flip(res, axis=axis)
        return res
@register_op
class reverse_sequence(Operation):
    """
    Reverse variable length slices for specified axes / dimensions of the input
    tensor. This op first slices input tensor along the ``batch_axis`` dimension, then
    partially reverses the elements along the ``seq_axis`` for the first ``lengths[i]``
    elements.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Input tensor.
    lengths: tensor<L, i32> (Required)
        * 1-dimensional tensor of length ``x.shape[batch_axis]`` specifying the length
          of the sequence to reverse.
        * Values must be in range ``[0, x.shape[seq_axis]]``.
    seq_axis: const<i32> (Optional)
        * The dimension to reverse.
        * Defaults to ``0``.
    batch_axis: const<i32> (Optional)
        * Dimension for slicing.
        * Defaults to ``0``.

    Returns
    -------
    tensor<\*?, T>
        * Same type and shape as the input tensor.

    Attributes
    ----------
    T: fp16, fp32, i32, bool

    References
    ----------
    `tf.reverse_sequence <https://www.tensorflow.org/api_docs/python/tf/reverse_sequence>`_
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        lengths=TensorInputType(type_domain=types.int32),
        seq_axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
        batch_axis=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def default_inputs(self):
        return DefaultInputs(
            seq_axis=0,
            batch_axis=0)

    def type_inference(self):
        # Partial reversal never changes shape or dtype.
        return self.x.sym_type

    @precondition(allow=VALUE)
    def value_inference(self):
        # Constant folding for this op has not been implemented.
        raise NotImplementedError("TODO")
@register_op
class slice_by_index(Operation):
    """
    Method for numpy style indexing and slicing.
    With a tensor ``x``, this method achieves the following:
    ``result = x[begin[0]: end[0]: stride[0], begin[1]: end[1]: stride[1], ...]``
    Note: This method does not support pure indexing. You would need to do a
    squeeze if indexing is intended.

    Parameters
    ----------
    x: tensor<*?, T> (Required)
        * Input tensor
    begin: tensor<[rank(x)], i32> (Required)
        * Starting index for the dimension of slicing.
    end: tensor<[rank(x)], i32> (Required)
        * Ending index for the dimension of slicing.
    stride: tensor<[rank(x)], i32> (Optional)
        * Default is all ``1``.
        * Stride for the dimension of slicing.
    begin_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``begin_mask[i]==True``, neglect ``begin[i]``, and set ``begin[i]`` to ``0``.
    end_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``end_mask[i]==True``, neglect ``end[i]``, and set ``end[i]`` to ``x.shape[i]``.
    squeeze_mask: tensor<[rank(x)], bool> (Optional)
        * Default to all ``False``.
        * If ``squeeze_mask[i]==true``, neglect ``end[i]``, and do the pure index at ``begin[i]``.

    Returns
    -------
    tensor<\*?, T>
        - Scalar or tensor.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        begin=TensorInputType(type_domain=types.int32),
        end=TensorInputType(type_domain=types.int32),
        stride=TensorInputType(const=True, optional=True, type_domain=types.int32),
        begin_mask=TensorInputType(const=True, optional=True, type_domain=types.bool),
        end_mask=TensorInputType(const=True, optional=True, type_domain=types.bool),
        squeeze_mask=TensorInputType(const=True, optional=True, type_domain=types.bool),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def default_inputs(self):
        return DefaultInputs(
            stride=None,
            begin_mask=None,
            end_mask=None,
            squeeze_mask=None,
        )

    def type_inference(self):
        # Materialize optional inputs, falling back to their documented defaults.
        begin = self.begin.val
        end = self.end.val
        x_rank = self.x.rank
        stride = self.stride.val if self.stride is not None else [1] * x_rank
        begin_mask = (
            self.begin_mask.val if self.begin_mask is not None else [False] * x_rank
        )
        end_mask = self.end_mask.val if self.end_mask is not None else [False] * x_rank
        squeeze_mask = (
            self.squeeze_mask.val if self.squeeze_mask is not None else [False] * x_rank
        )

        # The slicing-shape arithmetic is shared with other passes via a helper.
        x_shape = self.x.shape
        ret_shape = solve_slice_by_index_shape(x_shape, begin, end, stride, begin_mask, end_mask, squeeze_mask)

        if len(ret_shape) == 0:
            # Every dimension was squeezed out: the result is a scalar.
            return self.x.dtype
        else:
            return types.tensor(self.x.dtype, tuple(ret_shape))

    def value_inference(self):
        # Can only fold when the data and the slicing bounds are all known.
        if self.x.sym_val is None or self.begin.val is None or self.end.val is None:
            return None
        begin = [int(i) for i in list(self.begin.val[:])]
        end = [int(i) for i in list(self.end.val[:])]
        # Bug fix: copy `stride` before use. It is mutated below for squeezed
        # axes, and the original code reused `self.stride.val` directly, so the
        # const input's cached value was corrupted in place.
        stride = [1] * self.x.rank if self.stride is None else list(self.stride.val)
        begin_mask = (
            [False] * self.x.rank if self.begin_mask is None else self.begin_mask.val
        )
        end_mask = [False] * self.x.rank if self.end_mask is None else self.end_mask.val
        squeeze_mask = (
            [False] * self.x.rank
            if self.squeeze_mask is None
            else self.squeeze_mask.val
        )

        slices = []
        # A set mask entry means "use the natural slice boundary", i.e. None.
        for idx, mask in enumerate(begin_mask):
            if mask:
                begin[idx] = None
        for idx, mask in enumerate(end_mask):
            if mask:
                end[idx] = None
        squeeze_axes = []
        for idx, mask in enumerate(squeeze_mask):
            if mask:
                end[idx] = None
                stride[
                    idx
                ] = 2147483647  # We slice out only 1 element by setting stride to INF
                squeeze_axes.append(idx)
        for idx in range(self.x.rank):
            slices.append(slice(begin[idx], end[idx], stride[idx]))

        slices = tuple(slices)
        res = self.x.sym_val[slices]

        # remove squeezed axes
        if len(squeeze_axes) > 0:
            if len(squeeze_axes) == len(res.shape):
                # All axes squeezed: unwrap to a scalar of the right dtype.
                if len(res) == 0:
                    logger.warning("%s seems to be a 0 sized tensor", self.name)
                    return np.array([])
                res = res.tolist()[0]
                if is_symbolic(res):
                    return res
                elif self.x.dtype == types.int32 or self.x.dtype == types.int64:
                    res = np.int32(res)
                elif self.x.dtype == types.float or self.x.dtype == types.double:
                    res = np.float32(res)
                else:
                    raise ValueError(
                        "Unable to convert type {}".format(self.x.sym_val.dtype)
                    )
            else:
                res = np.squeeze(res, axis=tuple(squeeze_axes))
        return res
@register_op
class slice_by_size(Operation):
    """
    Slice input tensor starting from the given ``begin`` index and by
    the amount specified by the ``size`` input, for each dimension.

    Parameters
    ----------
    x: tensor<*?, T> (Required)
        * Input tensor.
    begin: tensor<[rank(x)], i32> Required
        * The begin index for slice.
    size: tensor<[rank(x)], i32> Required
        * The size that is to be sliced. If ``size`` is ``-1``,
          all the remaining elements starting with "begin" are sliced.

    Returns
    -------
    tensor<\*?, T>
        * Scalar or tensor.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        begin=TensorInputType(type_domain=types.int32),
        size=TensorInputType(type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def type_inference(self):
        if self.begin.rank != 1:
            raise ValueError(
                "begin should be 1-D tensor, got {}-D tensor instead".format(
                    self.begin.rank
                )
            )
        if self.size.rank != 1:
            raise ValueError(
                "size should be 1-D tensor, got {}-D tensor instead".format(
                    self.size.rank
                )
            )
        # Bug fix: the original formatted these messages with
        # len(self.begin.shape[0]) / len(self.x.rank) -- both are ints, so
        # raising the error itself crashed with a TypeError.
        if self.x.rank != self.begin.shape[0]:
            raise ValueError(
                "Length of begin {} doesn't equal to input rank {}.".format(
                    self.begin.shape[0], self.x.rank
                )
            )
        if self.x.rank != self.size.shape[0]:
            raise ValueError(
                "Length of size {} doesn't equal to input rank {}.".format(
                    self.size.shape[0], self.x.rank
                )
            )

        x_shape = self.x.shape
        ret_shape = []
        if self.size.sym_val is None:
            # Sizes completely unknown: every output dim is a fresh symbol.
            ret_shape = [get_new_symbol() for _ in range(self.x.rank)]
            return types.tensor(self.x.dtype, tuple(ret_shape))

        for idx, s in enumerate(self.size.sym_val):
            if is_symbolic(s):
                ret_shape.append(s)
            elif s != -1:
                ret_shape.append(s)
            elif self.begin.sym_val is not None:
                # size == -1: take everything from begin to the end of the dim.
                ret_shape.append(x_shape[idx] - self.begin.sym_val[idx])
            else:
                ret_shape.append(get_new_symbol())

        return types.tensor(self.x.dtype, tuple(ret_shape))

    @precondition(allow=VALUE | SYMBOL)
    def value_inference(self):
        # Folding requires fully concrete begin/size and a known input value.
        if any_symbolic(self.begin.sym_val):
            return None
        if any_symbolic(self.size.sym_val):
            return None
        if self.x.val is None:
            return None
        slices = []
        for i in range(self.x.rank):
            begin_val = self.begin.val[i]
            if begin_val < 0:
                if is_symbolic(self.x.shape[i]):
                    return None
                # Normalize a negative begin index against the dim size.
                begin_val += self.x.shape[i]
            if self.size.val[i] > 0:
                slices.append(slice(begin_val, begin_val + self.size.val[i]))
            else:
                # Non-positive size is treated as "slice to the end" (-1 semantics).
                slices.append(slice(begin_val, None, None))
        return self.x.val[tuple(slices)]
@register_op
class space_to_depth(Operation):
    """
    Fold spatial blocks of the input into the depth (channel) dimension.

    Parameters
    ----------
    x: tensor<[n, C, H, W], T> (Required)
        * Input tensor of rank ``4``.
    block_size: const<i32> (Required)
        * Spatial block size. Must be greater than ``1`` and must divide both
          spatial dimensions ``H`` and ``W``.

    Returns
    -------
    tensor<[n, C x block_size^2, H / block_size, W / block_size], T>
        * Channels grow by the square of the block size; H and W shrink by it.

    Attributes
    ----------
    T: fp16, fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        block_size=TensorInputType(const=True, type_domain=types.int32)
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def type_inference(self):
        # Each block x block spatial patch is packed into the channel axis.
        batch, channels, height, width = self.x.shape
        block = self.block_size.val
        out_shape = (
            batch,
            channels * (block * block),
            height // block,
            width // block,
        )
        return types.tensor(self.x.dtype, out_shape)
@register_op
class space_to_batch(Operation):
    """
    Rearrange elements in a tensor from spatial into batch dimension.

    Parameters
    ----------
    x: tensor<[n, C, H, W], T> (Required)
        * Input tensor must have rank 4.
        * The first and the second dimension are batch, channel, respectively
        * The remaining dimensions (H, W) are treated as "spatial dimensions"
    block_shape: const tensor<[2], i32> (Required)
        * The length of the block_shape must be `2`
        * It defines the shapes of the block in which the spatial dimensions are divided
    paddings: const tensor<[2, 2], i32> (Required)
        * It must have shape `(2, 2)`
        * It defines the padding for each spatial dimensions

    Returns
    -------
    tensor<[new_n, C, new_H, new_W], T>
        * new_n = n * block_shape[0] * block_shape[1]
        * new_H = (H + paddings[0][0] + paddings[0][1]) / block_shape[0]
        * new_W = (W + paddings[1][0] + paddings[1][1]) / block_shape[1]
        * The output has the same rank as the input

    Attributes
    ----------
    T: fp16, fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        block_shape=TensorInputType(const=True, type_domain=types.int32),
        paddings=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def type_inference(self):
        x_shape = self.x.shape
        block_shape = self.block_shape.val
        paddings = self.paddings.val

        # Only rank-4 (n, C, H, W) inputs are supported.
        if self.x.rank != 4:
            msg = "Input to space_to_batch op must be rank 4. Instead got an input with rank {}".format(self.x.rank)
            raise ValueError(msg)

        if paddings.shape != (block_shape.shape[0], 2):
            msg = "block_shape and paddings must have shape [2], [2, 2] accordingly in the space_to_batch op. "\
                "Got {}, {}.".format(block_shape.shape, paddings.shape)
            raise ValueError(msg)

        m = block_shape.shape[0]
        if m != 2:
            msg = "space_to_batch op only supports spatial dimensions = 2. Got {}".format(m)
            raise ValueError(msg)

        b = x_shape[0]
        c = x_shape[1]
        spatial_shape = x_shape[2:2+m]

        # NOTE(review): given the rank-4 check above, self.x.rank == m + 2 always
        # holds when m == 2, so this branch looks unreachable -- confirm before
        # removing. The message also formats the block_shape *values* where a
        # length reads more naturally.
        if self.x.rank != m + 2:
            raise ValueError("The input rank of space_to_batch op must exactly be " \
                "len(block_shape){} + 2! Got {}".format(self.block_shape.val, self.x.rank))

        # Pad each spatial dim on both sides, then split it by the block size.
        padded_spatial_shape = [x + paddings[i][0] + paddings[i][1] for i, x in enumerate(spatial_shape)]
        new_b = b * np.prod(block_shape)
        # NOTE(review): true division yields float dims for concrete sizes
        # (e.g. 6 / 2 == 3.0); presumably downstream tolerates this or symbolic
        # dims are the common case -- confirm before changing to //.
        new_spatial_shape = [padded_spatial_shape[i]/block_shape[i] for i in range(m)]
        ret_shape = [new_b, c] + new_spatial_shape
        x_type = self.x.dtype

        return types.tensor(x_type, ret_shape)
@register_op
class batch_to_space(Operation):
    """
    Rearrange elements in a tensor from batch into spatial dimension.

    Parameters
    ----------
    x: tensor<[n, C, H, W], T> (Required)
        * Input tensor must have rank 4.
        * The first and the second dimension are batch, channel, respectively
        * The remaining dimensions (H, W) are treated as "spatial dimensions"
    block_shape: const tensor<[2], i32> (Required)
        * The length of the block_shape must be `2`
        * It defines the shapes of the block in which the spatial dimensions are multiplied
    crops: const tensor<[2, 2], i32> (Required)
        * It must have shape `(2, 2)`
        * It defines the amount to crop from each spatial dimensions

    Returns
    -------
    tensor<[new_n, C, new_H, new_W], T>
        * new_n = n / (block_shape[0] * block_shape[1])
        * new_H = (H * block_shape[0]) - crops[0][0] - crops[0][1]
        * new_W = (W * block_shape[1]) - crops[1][0] - crops[1][1]
        * The output has the same rank as the input

    Attributes
    ----------
    T: fp16, fp32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        block_shape=TensorInputType(const=True, type_domain=types.int32),
        crops=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def type_inference(self):
        x_shape = self.x.shape
        block_shape = self.block_shape.val
        crops = self.crops.val

        # Only rank-4 (n, C, H, W) inputs are supported.
        if self.x.rank != 4:
            msg = "Input to batch_to_space op must be rank 4. Instead got an input with rank {}".format(self.x.rank)
            raise ValueError(msg)

        if crops.shape != (block_shape.shape[0], 2):
            msg = "block_shape and crops must have shape [2], [2, 2] accordingly in the batch_to_space op. "\
                "Got {}, {}.".format(block_shape.shape, crops.shape)
            raise ValueError(msg)

        m = block_shape.shape[0]
        if m != 2:
            msg = "batch_to_space op only supports spatial dimensions = 2. Got {}".format(m)
            raise ValueError(msg)

        b = x_shape[0]
        c = x_shape[1]
        spatial_shape = x_shape[2:2+m]

        # NOTE(review): given the rank-4 check above, self.x.rank == m + 2 always
        # holds when m == 2, so this branch looks unreachable -- confirm before
        # removing.
        if self.x.rank != m + 2:
            raise ValueError("The input rank of batch_to_space op must exactly be " \
                "len(block_shape){} + 2! Got {}".format(self.block_shape.val, self.x.rank))

        # The batch dim is split by the block product; bail out early when a
        # concrete batch size is not divisible.
        if not is_symbolic(b) and b % np.prod(block_shape) != 0:
            msg = ("Batch size must be perfectly divided by the product of block_shape. Got batch size {}, and block_shape {}."
            ).format(b, block_shape)
            raise ValueError(msg)

        # NOTE(review): true division yields a float batch dim even after the
        # divisibility check (4 / 2 == 2.0) -- presumably tolerated downstream;
        # confirm before changing to //.
        new_b = b / np.prod(block_shape)
        # Each spatial dim grows by its block factor, then gets cropped.
        new_spatial_shape = [spatial_shape[i] * block_shape[i] for i in range(m)]
        cropped_spatial_shape = [x - crops[i][0] - crops[i][1] for i, x in enumerate(new_spatial_shape)]
        ret_shape = [new_b, c] + cropped_spatial_shape
        x_type = self.x.dtype

        return types.tensor(x_type, ret_shape)
@register_op
class squeeze(Operation):
    """
    Drop size-1 dimensions from a tensor that is at least 1-D.

    Parameters
    ----------
    x: tensor<\*?,T> (Required)
        * Must be at least 1-D.
    axes: const<K,i32> (Optional)
        * Axes to squeeze out.
        * When omitted, every size-1 dimension is removed.

    Returns
    -------
    tensor<\*(rank(x)-K),T>
        * Same element type as ``x``, with rank reduced by ``K``.

    Attributes
    ----------
    T: fp16, fp32, i32, bool
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        axes=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def default_inputs(self):
        return DefaultInputs(axes=None)

    def type_inference(self):
        out_shape = list(self.x.shape)
        if self.axes is None:
            # No axes given: drop every dim equal to 1. Symbolic dims are
            # assumed to be != 1, so they are always kept.
            out_shape = [dim for dim in out_shape if dim != 1]
        else:
            rank = self.x.rank
            normalized = [ax + rank if ax < 0 else ax for ax in self.axes.val]
            # Pop from the back so earlier indices stay valid.
            for ax in sorted(normalized, reverse=True):
                if ax >= len(out_shape):
                    raise ValueError(
                        "Cannot squeeze dim {} for shape {}".format(ax, out_shape)
                    )
                out_shape.pop(ax)
        if len(out_shape) == 0:
            # Everything squeezed away: the result is a scalar type.
            return self.x.dtype
        return types.tensor(self.x.dtype, tuple(out_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        if self.x.val is None:
            return None
        if self.axes is None:
            squeezed = np.squeeze(self.x.val)
        else:
            squeezed = np.squeeze(self.x.val, axis=tuple(self.axes.val))
        # A 0-d result is reported via x.val[0], matching the scalar return
        # of type_inference.
        return self.x.val[0] if squeezed.shape == () else squeezed
@register_op
class transpose(Operation):
    """
    Reorder the dimensions of ``x`` according to the constant ``perm`` vector.

    Parameters
    ----------
    x: tensor<\*?, T> (Required)
        * Must be at least 1-D. ``x`` may have a symbolic shape.
    perm: const<[rank(x)], i32> (Required)
        * Permutation order; each entry satisfies -rank(x) <= perm[I] < rank(x).

    Returns
    -------
    tensor<\*?,T>
        * Same rank and element type as ``x``, with permuted dimensions.

    Attributes
    ----------
    T: fp16, fp32, i32, bool

    References
    ----------
    `torch.Tensor.permute <https://pytorch.org/docs/stable/tensors.html#torch.Tensor.permute>`_
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        perm=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32, types.bool),
    }

    def type_inference(self):
        perm = self.perm.val
        rank = self.x.rank
        if len(perm) != rank:
            msg = "perm should have the same length as rank(x): {} != {}"
            raise ValueError(msg.format(len(perm), rank))
        if rank == 0:
            # A scalar has nothing to permute.
            return self.x.sym_type
        if any_variadic(self.x.shape):
            out_shape = get_new_variadic_symbol()
        else:
            # Fancy-index the shape array with perm to reorder the dims.
            out_shape = np.array(self.x.shape)[perm]
        return types.tensor(self.x.dtype, tuple(out_shape))

    @precondition(allow=VALUE)
    def value_inference(self):
        return np.transpose(self.x.val, axes=self.perm.val)
@register_op
class pixel_shuffle(Operation):
    """
    Unfold depth (channel) blocks of the input into the spatial dimensions.
    Equivalent to PyTorch's ``PixelShuffle``.

    Parameters
    ----------
    x: tensor<[n, C x f^2, H, W], T> (Required)
        * Input tensor of rank ``4``.
    upscale_factor: const<i32>
        * Factor by which spatial resolution is increased.

    Returns
    -------
    tensor<[n, C, H x f, W x f], T>
        * ``f`` is the upscale factor.

    Attributes
    ----------
    T: fp16, fp32

    References
    ----------
    `torch.nn.PixelShuffle <https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html?highlight=pixel%20shuffle#torch.nn.PixelShuffle>`_
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        upscale_factor=TensorInputType(const=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def type_inference(self):
        # Inverse of space_to_depth: channels shrink by f**2 while H and W
        # each grow by f.
        batch, channels, height, width = self.x.shape
        factor = self.upscale_factor.val
        out_shape = (
            batch,
            channels // (factor * factor),
            height * factor,
            width * factor,
        )
        return types.tensor(self.x.dtype, out_shape)
@register_op
class sliding_windows(Operation):
    """
    Return all windows of ``size`` elements, ``stride`` apart, taken along
    ``axis`` of the input.

    Parameters
    ----------
    x: tensor<[\*d0, d_axis, *dn], T>
        * Input tensor.
    axis: const<i32>
        * Axis along which windows are extracted.
    size: const<i32>
        * Number of elements per window.
    stride: const<i32> Optional
        * Defaults to ``1``.
        * Step between consecutive windows.

    Returns
    -------
    tensor<[\*d0, d_axis - size // stride + 1, size, \*dn], T>
        * Rank ``N+1`` where ``N`` is the rank of the input: a new window
          dimension of length ``size`` is inserted right after ``axis``.

    Attributes
    ----------
    T: fp16, fp32, int32
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        axis=TensorInputType(const=True, type_domain=types.int32),
        size=TensorInputType(const=True, type_domain=types.int32),
        stride=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32, types.int32),
    }

    def default_inputs(self):
        return DefaultInputs(stride=1)

    def type_inference(self):
        axis = self.axis.val
        win_size = self.size.val
        win_stride = self.stride.val
        dims = list(self.x.shape)
        # Number of windows that fit along the chosen axis.
        dims[axis] = (dims[axis] - win_size) // win_stride + 1
        # Insert the window dimension immediately after the (normalized) axis.
        pos = axis if axis >= 0 else axis + self.x.rank
        dims.insert(pos + 1, win_size)
        return types.tensor(self.x.dtype, tuple(dims))
| bsd-3-clause | c5b4eecbbeb875d12095b55bd14fbddb | 31.915809 | 147 | 0.55475 | 3.681034 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/mil/ops/defs/iOS15/random.py | 1 | 9058 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil import (get_new_symbol,
get_new_variadic_symbol, types)
from coremltools.converters.mil.mil.input_type import (DefaultInputs,
InputSpec,
TensorInputType)
from coremltools.converters.mil.mil.operation import Operation
from coremltools.converters.mil.mil.ops.defs._op_reqs import register_op
from coremltools.converters.mil.mil.types.symbolic import any_symbolic
class RandomDistribution(Operation):
    """
    Common base for the random_* ops: owns the shared ``shape`` input and the
    output-shape inference; subclasses set ``out_dtype`` before delegating here.
    """

    input_spec = InputSpec(
        shape=TensorInputType(type_domain=types.int32),
    )

    out_dtype = types.fp32

    def type_inference(self):
        if any_symbolic(self.shape.shape):
            # Variable-length shape vector: the output rank itself is unknown.
            return types.tensor(self.out_dtype, (get_new_variadic_symbol(),))
        # The rank is fixed here; dims may still be unknown.
        if self.shape.sym_val is None:
            sym_shape = tuple(get_new_symbol() for _ in range(self.shape.shape[0]))
            return types.tensor(self.out_dtype, sym_shape)
        return types.tensor(self.out_dtype, tuple(self.shape.sym_val.tolist()))
"""
Random Op Implementation(s)
"""
@register_op
class random_bernoulli(RandomDistribution):
    r"""
    Returns a tensor with the specified shape, with random values from a Bernoulli
    distribution.

    .. math::
       f(k) = \begin{cases}1-p  &\text{if } k = 0\\
                p    &\text{if } k = 1\end{cases}

    for :math:`k` in :math:`\{0, 1\}`.

    Parameters
    ----------
    shape: <K, i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor.
          ``shape[k] > 0`` for ``k = 0,..., K-1``.
    prob: const<T> (Optional)
        * The probability of sampling ``1``. Defaults to ``0.5``.
    seed: const<i32> (Optional)
        * Seed to create a reproducible sequence of values across multiple invokes.

    Returns
    -------
    <\*, T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_categorical, random_normal, random_uniform
    """

    # NOTE(review): `shape` is declared both here and in the base class's
    # input_spec, so the concatenation repeats it -- presumably InputSpec
    # tolerates the duplicate; confirm before simplifying.
    input_spec = (
        InputSpec(
            shape=TensorInputType(type_domain=types.int32),
            prob=TensorInputType(const=True, optional=True, type_domain="T"),
            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
        )
        + RandomDistribution.input_spec
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def default_inputs(self):
        # seed=-1 appears to mean "no fixed seed" -- TODO confirm backend contract.
        return super().default_inputs() + \
            DefaultInputs(
                seed=-1,
                prob=0.5,
            )

    def type_inference(self):
        # Output dtype follows `prob`; shape inference is shared with the
        # other random ops via the base class.
        self.out_dtype = self.prob.dtype
        return super().type_inference()
@register_op
class random_categorical(Operation):
    """
    Returns random values from a categorical distribution.

    Parameters
    ----------
    x: <\*D_in, T>
        * N-dimensional tensor, one of ``logits`` (event log-probabilities) or ``probs``
          (event probabilities). The first ``N - 1`` dimensions specifies distributions,
          and the last dimension represents a vector of probabilities.
    mode: const<str> (Optional)
        One of ``['logits', 'probs']``. Defaults to ``logits``.
    size: const<i32> (Optional)
        Number of samples to draw. Defaults to ``1``.
    seed: const<i32> (Optional)
        Seed to create a reproducible sequence of values across multiple invokes.

    Returns
    -------
    <\*D_in[:-1] + [size], T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_bernoulli, random_normal, random_uniform
    """

    input_spec = InputSpec(
        x=TensorInputType(type_domain="T"),
        mode=TensorInputType(const=True, optional=True, type_domain=types.str),
        size=TensorInputType(const=True, optional=True, type_domain=types.int32),
        seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def default_inputs(self):
        # seed=-1 appears to mean "no fixed seed" -- TODO confirm backend contract.
        return DefaultInputs(
            mode="logits",
            size=1,
            seed=-1,
        )

    def type_inference(self):
        # The last axis of x (the probability vector) is replaced by a
        # `size`-long sample axis.
        self.out_dtype = self.x.dtype
        output_shape = self.x.shape[:-1] + (self.size.val,)
        return types.tensor(self.out_dtype, output_shape)
@register_op
class random_normal(RandomDistribution):
    r"""
    Returns a tensor with the specified shape, with random values from a normal
    distribution.

    Parameters
    ----------
    shape: <K, i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor.
          ``shape[k] > 0`` for ``k = 0,..., K-1``.
    mean: const<T> (Optional)
        The mean (center) of the normal distribution. Defaults to 0.0.
    stddev: const<T> (Optional)
        The standard deviation (width) of the normal distribution. Defaults to ``1.0``.
    seed: const<i32> (Optional)
        Seed to create a reproducible sequence of values across multiple invokes.

    Returns
    -------
    <\*, T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_categorical, random_bernoulli, random_uniform
    """

    # NOTE(review): `shape` is declared both here and in the base class's
    # input_spec, so the concatenation repeats it -- presumably InputSpec
    # tolerates the duplicate; confirm before simplifying.
    input_spec = (
        InputSpec(
            shape=TensorInputType(type_domain=types.int32),
            mean=TensorInputType(const=True, optional=True, type_domain="T"),
            stddev=TensorInputType(const=True, optional=True, type_domain="T"),
            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
        )
        + RandomDistribution.input_spec
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def default_inputs(self):
        # seed=-1 appears to mean "no fixed seed" -- TODO confirm backend contract.
        return super().default_inputs() + \
            DefaultInputs(
                mean=0.,
                stddev=1.,
                seed=-1,
            )

    def type_inference(self):
        # mean and stddev must agree on dtype; the output follows it.
        if self.mean.dtype != self.stddev.dtype:
            raise ValueError("Incompatible primitive types in random_normal operation")
        self.out_dtype = self.mean.dtype
        return super().type_inference()
@register_op
class random_uniform(RandomDistribution):
    r"""
    Returns a tensor with the specified shape with random values from a uniform
    distribution. Samples are uniformly distributed over the half-open interval
    ``[low, high)`` (includes low, but excludes high).

    .. math::
       p(x) = \frac{1}{high - low}

    For a real number :math:`x`.

    When ``high == low``, values of ``low`` will be returned. If ``high < low``,
    the results are officially undefined and may eventually raise an error.

    Parameters
    ----------
    shape: <K, i32> (Required)
        * Target output tensor shape.
        * ``K`` is the rank of the output tensor.
          ``shape[k] > 0`` for ``k = 0,..., K-1``.
    low: const<T> (Optional)
        * Lower boundary of the output interval (inclusive). Defaults to ``0.0``.
    high: const<T> (Optional)
        * Upper boundary of the output interval (exclusive). Defaults to ``1.0``.
    seed: const<i32> (Optional)
        * Seed to create a reproducible sequence of values across multiple invokes.

    Returns
    -------
    <\*, T>
        * A tensor of the given target output shape filled with random values.

    Attributes
    ----------
    T: fp16, fp32

    See Also
    --------
    random_categorical, random_bernoulli, random_normal
    """

    # NOTE(review): `shape` is declared both here and in the base class's
    # input_spec, so the concatenation repeats it -- presumably InputSpec
    # tolerates the duplicate; confirm before simplifying.
    input_spec = (
        InputSpec(
            shape=TensorInputType(type_domain=types.int32),
            low=TensorInputType(const=True, optional=True, type_domain="T"),
            high=TensorInputType(const=True, optional=True, type_domain="T"),
            seed=TensorInputType(const=True, optional=True, type_domain=types.int32),
        )
        + RandomDistribution.input_spec
    )

    type_domains = {
        "T": (types.fp16, types.fp32),
    }

    def default_inputs(self):
        # seed=-1 appears to mean "no fixed seed" -- TODO confirm backend contract.
        return super().default_inputs() + \
            DefaultInputs(
                low=0.,
                high=1.,
                seed=-1,
            )

    def type_inference(self):
        # low and high must agree on dtype; the output follows it.
        if self.low.dtype != self.high.dtype:
            raise ValueError("Incompatible primitive types in random_uniform operation")
        self.out_dtype = self.low.dtype
        return super().type_inference()
| bsd-3-clause | c9f264146299fcbec2a3e35de344d980 | 29.809524 | 88 | 0.584235 | 3.978041 | false | false | false | false |
apple/coremltools | coremltools/converters/mil/backend/mil/passes/sanitize_name_strings.py | 1 | 1030 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.name_sanitization_utils import (
NameSanitizer, sanitize_block)
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
@register_pass(namespace="mil_backend")
class sanitize_name_strings(AbstractGraphPass):
    """
    Rewrite op and var names so every name matches the pattern accepted by the
    NameSanitizer class, i.e. [a-zA-Z_][a-zA-Z0-9_]*.
    """

    def apply(self, prog):
        for func in prog.functions.values():
            # Vars and ops get distinct prefixes when a name must be invented.
            var_sanitizer = NameSanitizer(prefix="var_")
            op_sanitizer = NameSanitizer(prefix="op_")
            sanitize_block(func, var_sanitizer, op_sanitizer, prog.main_input_types)
apple/coremltools | coremltools/converters/mil/frontend/tensorflow2/ssa_passes/remove_vacuous_cond.py | 1 | 4645 | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools import _logger as logger
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil.passes.helper import block_context_manager
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
@block_context_manager
def _remove_vacuous_cond_block(block):
    """
    Recursively remove vacuous ``cond`` ops inside ``block``.

    A ``cond`` is vacuous when both branches just forward a value unchanged,
    so the whole op (and its sub-graphs) can be replaced without changing
    semantics. Returns the number of ops changed, for pass logging.
    """
    num_changes = 0

    for op in list(block.operations):
        # Recurse into nested blocks first so inner conds are handled too.
        for b in op.blocks:
            num_changes += _remove_vacuous_cond_block(b)

        if op.op_type != "cond":
            continue

        then_ops = op.blocks[0].operations
        else_ops = op.blocks[1].operations

        if len(then_ops) > 1 or len(else_ops) > 1:
            continue

        # Pattern 1: dynamic length TensorList generates this pattern. See
        # conversion functions of TensorList* ops for details. TF2's graph
        # contains a tf.cond op with 2 sub-graphs. The condition is either
        # `less_equal` or `greater_equal` op. 1 sub-graph contains only an
        # identity op forwarding the original TensorList, another sub-graph
        # contains TensorListResize op to generate a new TensorList. But in
        # backend, list length is handled dynamically in list_write/scatter
        # and thus, the entire tf.cond and its sub-graphs can be removed.
        if len(then_ops) == 0 and len(else_ops) == 0:
            if op.pred.op.op_type not in {"less_equal", "greater_equal"}:
                continue

            # cond op must have pred
            pred_x = op.pred.op.x.op
            pred_y = op.pred.op.y.op
            if pred_x is None and pred_y is None:
                continue

            if op.pred.op.op_type == "less_equal":
                if pred_x.op_type != "list_length":
                    continue
                new_var = pred_x.ls
            else:  # op.pred.op.op_type == 'greater_equal'
                if pred_y.op_type != "list_length":
                    continue
                new_var = pred_y.ls

            op.enclosing_block.replace_uses_of_var_after_op(
                anchor_op=op, old_var=op.outputs[0], new_var=new_var
            )
            block.remove_ops([op])  # rely on DCE to remove extra cond inputs
            num_changes += 1

        # Pattern 2: both the then and else branch contain exactly 1 identity
        # op forwarding the same var.
        # Bug fix: the condition previously re-tested `then_ops` twice, so a
        # cond with 1 then-op and 0 else-ops raised IndexError on else_ops[0].
        if len(then_ops) == 1 and len(else_ops) == 1:
            if then_ops[0].op_type != "identity" or else_ops[0].op_type != "identity":
                continue
            if then_ops[0].x != else_ops[0].x:
                continue

            new_var = mb.identity(x=then_ops[0].x, before_op=op, name=op.name)
            op.enclosing_block.replace_uses_of_var_after_op(
                anchor_op=op, old_var=op.outputs[0], new_var=new_var
            )
            block.remove_ops([op])  # rely on DCE to remove extra cond inputs
            num_changes += 1

    return num_changes
@register_pass(namespace="tensorflow2")
class remove_vacuous_cond(AbstractGraphPass):
    """
    Eliminate ``cond`` ops (and their sub-graphs) whose branches are vacuous,
    i.e. both the then- and else-branch merely forward a value unchanged.
    A typical producer is the TensorListReverse op: Core ML resizes lists
    dynamically on write, so both branches of the generated cond end up
    being a skip (identity) op.

    Given:

        main(%a: (1, bool),
             %b: (2, 3, fp32)) {
          block0() {
            %squeeze_0: (bool) = squeeze(x=%a, name="squeeze_0")
            %cond_0: (2, 3, fp32) = cond(pred=%squeeze_0, name="cond_0")
              cond_0_true() {
                %identity_0: (2, 3, fp32) = identity(x=%b, name="identity_0")
              } -> (%identity_0)
              cond_0_false() {
                %identity_1: (2, 3, fp32) = identity(x=%b, name="identity_1")
              } -> (%identity_1)
          } -> (%cond_0)
        }

    Result:

        main(%a: (1, bool),
             %b: (2, 3, fp32)) {
          block0() {
            %squeeze_0: (bool) = squeeze(x=%a, name="squeeze_0")
            %cond_0: (2, 3, fp32) = identity(x=%b, name="cond_0")
          } -> (%cond_0)
        }
    """

    def apply(self, prog):
        for func_name, func in prog.functions.items():
            changed = _remove_vacuous_cond_block(func)
            msg = "remove_vacuous_cond: changed {} ops in function '{}'"
            logger.info(msg.format(changed, func_name))
| bsd-3-clause | 5ad63bd9a841754077d29ebdf38c8fda | 38.364407 | 86 | 0.572013 | 3.54851 | false | false | false | false |
apple/coremltools | deps/protobuf/python/google/protobuf/internal/generator_test.py | 69 | 14773 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): Flesh this out considerably. We focused on reflection_test.py
# first, since it's testing the subtler code, and since it provides decent
# indirect testing of the protocol compiler output.
"""Unittest that directly tests the output of the pure-Python protocol
compiler. See //google/protobuf/internal/reflection_test.py for a test which
further ensures that we can use Python protocol message objects as we expect.
"""
__author__ = 'robinson@google.com (Will Robinson)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf.internal import test_bad_identifiers_pb2
from google.protobuf import unittest_custom_options_pb2
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_import_public_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_mset_wire_format_pb2
from google.protobuf import unittest_no_generic_services_pb2
from google.protobuf import unittest_pb2
from google.protobuf import service
from google.protobuf import symbol_database
MAX_EXTENSION = 536870912
class GeneratorTest(unittest.TestCase):
  """Checks that the pure-Python protocol-compiler output (the ``*_pb2``
  modules) exposes descriptors, enums, default values, options, oneofs,
  and file metadata as expected."""

  def testNestedMessageDescriptor(self):
    # The descriptor reachable via the nested class must be the very object
    # the parent's field descriptor points to.
    field_name = 'optional_nested_message'
    proto_type = unittest_pb2.TestAllTypes
    self.assertEqual(
        proto_type.NestedMessage.DESCRIPTOR,
        proto_type.DESCRIPTOR.fields_by_name[field_name].message_type)

  def testEnums(self):
    # We test only module-level enums here.
    # TODO(robinson): Examine descriptors directly to check
    # enum descriptor output.
    self.assertEqual(4, unittest_pb2.FOREIGN_FOO)
    self.assertEqual(5, unittest_pb2.FOREIGN_BAR)
    self.assertEqual(6, unittest_pb2.FOREIGN_BAZ)
    # Nested enum values are exposed both on instances and on the class.
    proto = unittest_pb2.TestAllTypes()
    self.assertEqual(1, proto.FOO)
    self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
    self.assertEqual(2, proto.BAR)
    self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
    self.assertEqual(3, proto.BAZ)
    self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)

  def testExtremeDefaultValues(self):
    message = unittest_pb2.TestExtremeDefaultValues()

    # Python pre-2.6 does not have isinf() or isnan() functions, so we have
    # to provide our own.
    def isnan(val):
      # NaN is never equal to itself.
      return val != val
    def isinf(val):
      # Infinity times zero equals NaN.
      return not isnan(val) and isnan(val * 0)

    self.assertTrue(isinf(message.inf_double))
    self.assertTrue(message.inf_double > 0)
    self.assertTrue(isinf(message.neg_inf_double))
    self.assertTrue(message.neg_inf_double < 0)
    self.assertTrue(isnan(message.nan_double))

    self.assertTrue(isinf(message.inf_float))
    self.assertTrue(message.inf_float > 0)
    self.assertTrue(isinf(message.neg_inf_float))
    self.assertTrue(message.neg_inf_float < 0)
    self.assertTrue(isnan(message.nan_float))
    # Default containing C trigraph-like sequences must survive generation.
    self.assertEqual("? ? ?? ?? ??? ??/ ??-", message.cpp_trigraph)

  def testHasDefaultValues(self):
    desc = unittest_pb2.TestAllTypes.DESCRIPTOR

    expected_has_default_by_name = {
        'optional_int32': False,
        'repeated_int32': False,
        'optional_nested_message': False,
        'default_int32': True,
    }

    has_default_by_name = dict(
        [(f.name, f.has_default_value)
         for f in desc.fields
         if f.name in expected_has_default_by_name])
    self.assertEqual(expected_has_default_by_name, has_default_by_name)

  def testContainingTypeBehaviorForExtensions(self):
    # Extensions report the message they extend as their containing type.
    self.assertEqual(unittest_pb2.optional_int32_extension.containing_type,
                     unittest_pb2.TestAllExtensions.DESCRIPTOR)
    self.assertEqual(unittest_pb2.TestRequired.single.containing_type,
                     unittest_pb2.TestAllExtensions.DESCRIPTOR)

  def testExtensionScope(self):
    # Top-level extensions have no scope; nested ones are scoped to the
    # message they are declared inside.
    self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope,
                     None)
    self.assertEqual(unittest_pb2.TestRequired.single.extension_scope,
                     unittest_pb2.TestRequired.DESCRIPTOR)

  def testIsExtension(self):
    self.assertTrue(unittest_pb2.optional_int32_extension.is_extension)
    self.assertTrue(unittest_pb2.TestRequired.single.is_extension)

    message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR
    non_extension_descriptor = message_descriptor.fields_by_name['a']
    self.assertTrue(not non_extension_descriptor.is_extension)

  def testOptions(self):
    proto = unittest_mset_wire_format_pb2.TestMessageSet()
    self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format)

  def testMessageWithCustomOptions(self):
    proto = unittest_custom_options_pb2.TestMessageWithCustomOptions()
    enum_options = proto.DESCRIPTOR.enum_types_by_name['AnEnum'].GetOptions()
    self.assertTrue(enum_options is not None)
    # TODO(gps): We really should test for the presence of the enum_opt1
    # extension and for its value to be set to -789.

  def testNestedTypes(self):
    self.assertEqual(
        set(unittest_pb2.TestAllTypes.DESCRIPTOR.nested_types),
        set([
            unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
            unittest_pb2.TestAllTypes.OptionalGroup.DESCRIPTOR,
            unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR,
        ]))
    self.assertEqual(unittest_pb2.TestEmptyMessage.DESCRIPTOR.nested_types, [])
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.nested_types, [])

  def testContainingType(self):
    self.assertTrue(
        unittest_pb2.TestEmptyMessage.DESCRIPTOR.containing_type is None)
    self.assertTrue(
        unittest_pb2.TestAllTypes.DESCRIPTOR.containing_type is None)
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
        unittest_pb2.TestAllTypes.DESCRIPTOR)
    # NOTE(review): the next assertion duplicates the one above; it looks
    # like it was meant to check OptionalGroup — confirm against upstream.
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
        unittest_pb2.TestAllTypes.DESCRIPTOR)
    self.assertEqual(
        unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR.containing_type,
        unittest_pb2.TestAllTypes.DESCRIPTOR)

  def testContainingTypeInEnumDescriptor(self):
    self.assertTrue(unittest_pb2._FOREIGNENUM.containing_type is None)
    self.assertEqual(unittest_pb2._TESTALLTYPES_NESTEDENUM.containing_type,
                     unittest_pb2.TestAllTypes.DESCRIPTOR)

  def testPackage(self):
    # Every descriptor (messages, nested messages, enums) reports the
    # package of the .proto file it was declared in.
    self.assertEqual(
        unittest_pb2.TestAllTypes.DESCRIPTOR.file.package,
        'protobuf_unittest')
    desc = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR
    self.assertEqual(desc.file.package, 'protobuf_unittest')
    self.assertEqual(
        unittest_import_pb2.ImportMessage.DESCRIPTOR.file.package,
        'protobuf_unittest_import')

    self.assertEqual(
        unittest_pb2._FOREIGNENUM.file.package, 'protobuf_unittest')
    self.assertEqual(
        unittest_pb2._TESTALLTYPES_NESTEDENUM.file.package,
        'protobuf_unittest')
    self.assertEqual(
        unittest_import_pb2._IMPORTENUM.file.package,
        'protobuf_unittest_import')

  def testExtensionRange(self):
    self.assertEqual(
        unittest_pb2.TestAllTypes.DESCRIPTOR.extension_ranges, [])
    self.assertEqual(
        unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges,
        [(1, MAX_EXTENSION)])
    self.assertEqual(
        unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges,
        [(42, 43), (4143, 4244), (65536, MAX_EXTENSION)])

  def testFileDescriptor(self):
    self.assertEqual(unittest_pb2.DESCRIPTOR.name,
                     'google/protobuf/unittest.proto')
    self.assertEqual(unittest_pb2.DESCRIPTOR.package, 'protobuf_unittest')
    self.assertFalse(unittest_pb2.DESCRIPTOR.serialized_pb is None)
    self.assertEqual(unittest_pb2.DESCRIPTOR.dependencies,
                     [unittest_import_pb2.DESCRIPTOR])
    self.assertEqual(unittest_import_pb2.DESCRIPTOR.dependencies,
                     [unittest_import_public_pb2.DESCRIPTOR])
    self.assertEqual(unittest_import_pb2.DESCRIPTOR.public_dependencies,
                     [unittest_import_public_pb2.DESCRIPTOR])

  def testNoGenericServices(self):
    self.assertTrue(hasattr(unittest_no_generic_services_pb2, "TestMessage"))
    self.assertTrue(hasattr(unittest_no_generic_services_pb2, "FOO"))
    self.assertTrue(hasattr(unittest_no_generic_services_pb2, "test_extension"))

    # Make sure unittest_no_generic_services_pb2 has no services subclassing
    # Proto2 Service class.
    if hasattr(unittest_no_generic_services_pb2, "TestService"):
      self.assertFalse(issubclass(unittest_no_generic_services_pb2.TestService,
                                  service.Service))

  def testMessageTypesByName(self):
    file_type = unittest_pb2.DESCRIPTOR
    self.assertEqual(
        unittest_pb2._TESTALLTYPES,
        file_type.message_types_by_name[unittest_pb2._TESTALLTYPES.name])

    # Nested messages shouldn't be included in the message_types_by_name
    # dictionary (like in the C++ API).
    self.assertFalse(
        unittest_pb2._TESTALLTYPES_NESTEDMESSAGE.name in
        file_type.message_types_by_name)

  def testEnumTypesByName(self):
    file_type = unittest_pb2.DESCRIPTOR
    self.assertEqual(
        unittest_pb2._FOREIGNENUM,
        file_type.enum_types_by_name[unittest_pb2._FOREIGNENUM.name])

  def testExtensionsByName(self):
    file_type = unittest_pb2.DESCRIPTOR
    self.assertEqual(
        unittest_pb2.my_extension_string,
        file_type.extensions_by_name[unittest_pb2.my_extension_string.name])

  def testPublicImports(self):
    # Test public imports as embedded message.
    all_type_proto = unittest_pb2.TestAllTypes()
    self.assertEqual(0, all_type_proto.optional_public_import_message.e)

    # PublicImportMessage is actually defined in unittest_import_public_pb2
    # module, and is public imported by unittest_import_pb2 module.
    public_import_proto = unittest_import_pb2.PublicImportMessage()
    self.assertEqual(0, public_import_proto.e)
    self.assertTrue(unittest_import_public_pb2.PublicImportMessage is
                    unittest_import_pb2.PublicImportMessage)

  def testBadIdentifiers(self):
    # We're just testing that the code was imported without problems.
    message = test_bad_identifiers_pb2.TestBadIdentifiers()
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.message],
                     "foo")
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.descriptor],
                     "bar")
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.reflection],
                     "baz")
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.service],
                     "qux")

  def testOneof(self):
    desc = unittest_pb2.TestAllTypes.DESCRIPTOR

    self.assertEqual(1, len(desc.oneofs))
    self.assertEqual('oneof_field', desc.oneofs[0].name)
    self.assertEqual(0, desc.oneofs[0].index)
    self.assertIs(desc, desc.oneofs[0].containing_type)
    self.assertIs(desc.oneofs[0], desc.oneofs_by_name['oneof_field'])
    nested_names = set(['oneof_uint32', 'oneof_nested_message',
                        'oneof_string', 'oneof_bytes'])
    self.assertEqual(
        nested_names,
        set([field.name for field in desc.oneofs[0].fields]))
    # Fields inside the oneof point back to it; all other fields do not.
    for field_name, field_desc in desc.fields_by_name.items():
      if field_name in nested_names:
        self.assertIs(desc.oneofs[0], field_desc.containing_oneof)
      else:
        self.assertIsNone(field_desc.containing_oneof)
class SymbolDatabaseRegistrationTest(unittest.TestCase):
  """Checks that messages, enums and files are correctly registered."""

  def testGetSymbol(self):
    # Top-level and nested messages resolve by their full name; a nested
    # name without its parent prefix must not resolve.
    self.assertEqual(
        unittest_pb2.TestAllTypes, symbol_database.Default().GetSymbol(
            'protobuf_unittest.TestAllTypes'))
    self.assertEqual(
        unittest_pb2.TestAllTypes.NestedMessage,
        symbol_database.Default().GetSymbol(
            'protobuf_unittest.TestAllTypes.NestedMessage'))
    with self.assertRaises(KeyError):
      symbol_database.Default().GetSymbol('protobuf_unittest.NestedMessage')
    self.assertEqual(
        unittest_pb2.TestAllTypes.OptionalGroup,
        symbol_database.Default().GetSymbol(
            'protobuf_unittest.TestAllTypes.OptionalGroup'))
    self.assertEqual(
        unittest_pb2.TestAllTypes.RepeatedGroup,
        symbol_database.Default().GetSymbol(
            'protobuf_unittest.TestAllTypes.RepeatedGroup'))

  def testEnums(self):
    # Enum types are registered in the default descriptor pool.
    self.assertEqual(
        'protobuf_unittest.ForeignEnum',
        symbol_database.Default().pool.FindEnumTypeByName(
            'protobuf_unittest.ForeignEnum').full_name)
    self.assertEqual(
        'protobuf_unittest.TestAllTypes.NestedEnum',
        symbol_database.Default().pool.FindEnumTypeByName(
            'protobuf_unittest.TestAllTypes.NestedEnum').full_name)

  def testFindFileByName(self):
    self.assertEqual(
        'google/protobuf/unittest.proto',
        symbol_database.Default().pool.FindFileByName(
            'google/protobuf/unittest.proto').name)
if __name__ == '__main__':
  # Run the generator tests when this module is executed directly.
  unittest.main()
| bsd-3-clause | 1b45d99ef90b21aabe23b02a7f5aafdf | 41.329513 | 80 | 0.720639 | 3.984088 | false | true | false | false |
pallets/itsdangerous | src/itsdangerous/url_safe.py | 1 | 2402 | import typing as _t
import zlib
from ._json import _CompactJSON
from .encoding import base64_decode
from .encoding import base64_encode
from .exc import BadPayload
from .serializer import Serializer
from .timed import TimedSerializer
class URLSafeSerializerMixin(Serializer):
    """Mixin that makes a serializer produce URL-safe output.

    The payload is base64 encoded so it can safely be placed in a URL.
    Before encoding it is zlib compressed, and the compressed form is
    kept only when it is actually shorter; such payloads are marked
    with a leading ``.``.
    """

    default_serializer = _CompactJSON

    def load_payload(
        self,
        payload: bytes,
        *args: _t.Any,
        serializer: _t.Optional[_t.Any] = None,
        **kwargs: _t.Any,
    ) -> _t.Any:
        # A leading "." marks a payload that was zlib compressed before
        # being base64 encoded.
        was_compressed = payload.startswith(b".")

        if was_compressed:
            payload = payload[1:]

        try:
            json = base64_decode(payload)
        except Exception as e:
            raise BadPayload(
                "Could not base64 decode the payload because of an exception",
                original_error=e,
            ) from e

        if was_compressed:
            try:
                json = zlib.decompress(json)
            except Exception as e:
                raise BadPayload(
                    "Could not zlib decompress the payload before decoding the payload",
                    original_error=e,
                ) from e

        return super().load_payload(json, *args, **kwargs)

    def dump_payload(self, obj: _t.Any) -> bytes:
        json = super().dump_payload(obj)
        # Keep the compressed form only when it beats the raw payload by
        # more than the one-byte "." marker it costs.
        compressed = zlib.compress(json)
        use_compressed = len(compressed) < (len(json) - 1)
        data = compressed if use_compressed else json
        encoded = base64_encode(data)

        if use_compressed:
            encoded = b"." + encoded

        return encoded
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
    """Works like :class:`.Serializer` but dumps and loads into a URL
    safe string consisting of upper and lowercase characters of the
    alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
    """
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
    """Works like :class:`.TimedSerializer` but dumps and loads into a
    URL safe string consisting of upper and lowercase characters of
    the alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
    """
| bsd-3-clause | ad77daefe93673ce931f4e50fa15f630 | 29.025 | 88 | 0.601998 | 4.423573 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/examples/tsa/try_ar.py | 5 | 2517 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 21:45:24 2010
Author: josef-pktd
"""
import numpy as np
from scipy import signal
def armaloop(arcoefs, macoefs, x):
    '''get arma recursion in simple loop

    for simplicity assumes that ma polynomial is not longer than the
    ar-polynomial

    Parameters
    ----------
    arcoefs : array_like
        autoregressive coefficients in right hand side parameterization
    macoefs : array_like
        moving average coefficients, without leading 1
    x : array_like, 1d
        observed series

    Returns
    -------
    y : ndarray
        predicted values, initial values are the same as the observed values
    e : ndarray
        predicted residuals, zero for initial observations

    Notes
    -----
    Except for the treatment of initial observations this is the same as using
    scipy.signal.lfilter, which is much faster. Written for testing only
    '''
    arcoefs_r = np.asarray(arcoefs)
    macoefs_r = np.asarray(macoefs)
    x = np.asarray(x)
    nobs = x.shape[0]
    # assume ar longer than ma
    arlag = arcoefs_r.shape[0]
    malag = macoefs_r.shape[0]
    maxlag = max(arlag, malag)

    y = np.zeros(x.shape, float)
    e = np.zeros(x.shape, float)
    # initial observations are taken as given (zero residual)
    y[:maxlag] = x[:maxlag]

    # warm-up range between the AR and MA lag lengths (if malag > arlag)
    for t in range(arlag, maxlag):
        y[t] = (x[t-arlag:t] * arcoefs_r).sum(0) + (e[:t] * macoefs_r[:t]).sum(0)
        e[t] = x[t] - y[t]

    for t in range(maxlag, nobs):
        # NOTE: 1d series only; broadcasting would be wrong for 2d x
        y[t] = (x[t-arlag:t] * arcoefs_r).sum(0) + (e[t-malag:t] * macoefs_r).sum(0)
        e[t] = x[t] - y[t]

    return y, e
# Ad-hoc demonstration script (Python 2 print statements): compare the
# explicit armaloop recursion against scipy.signal.lfilter variants.
arcoefs, macoefs = -np.array([1, -0.8, 0.2])[1:], np.array([1., 0.5, 0.1])[1:]
print armaloop(arcoefs, macoefs, np.ones(10))
print armaloop([0.8], [], np.ones(10))
print armaloop([0.8], [], np.arange(2,10))
# residuals from armaloop should match lfilter output up to initial values
y, e = armaloop([0.1], [0.8], np.arange(2,10))
print e
print signal.lfilter(np.array([1, -0.1]), np.array([1., 0.8]), np.arange(2,10))
y, e = armaloop([], [0.8], np.ones(10))
print e
print signal.lfilter(np.array([1, -0.]), np.array([1., 0.8]), np.ones(10))
# lfiltic / lfilter_zi construct initial conditions for lfilter
ic=signal.lfiltic(np.array([1, -0.1]), np.array([1., 0.8]), np.ones([0]), np.array([1]))
print signal.lfilter(np.array([1, -0.1]), np.array([1., 0.8]), np.ones(10), zi=ic)
zi = signal.lfilter_zi(np.array([1, -0.8, 0.2]), np.array([1., 0, 0]))
print signal.lfilter(np.array([1, -0.1]), np.array([1., 0.8]), np.ones(10), zi=zi)
print signal.filtfilt(np.array([1, -0.8]), np.array([1.]), np.ones(10))
#todo write examples/test across different versions
| bsd-3-clause | ba51043d5d917859294656a7ed088010 | 30.4625 | 88 | 0.609853 | 2.726977 | false | false | false | false |
yarikoptic/pystatsmodels | statsmodels/sandbox/stats/runs.py | 3 | 20029 | '''runstest
formulas for mean and var of runs taken from SAS manual NPAR tests, also idea
for runstest_1samp and runstest_2samp
The descriptions in the NIST handbook and in dataplot do not explain the
expected values or the variance used there.
Note:
There are (at least) two definitions of runs used in literature. The classical
definition which is also used here, is that runs are sequences of identical
observations separated by observations with different realizations.
The second definition allows for overlapping runs, or runs where counting a
run is also started after a run of a fixed length of the same kind.
TODO
* add one-sided tests where possible or where it makes sense
'''
import warnings

import numpy as np
from scipy import stats
class Runs(object):
    '''class for runs in a binary sequence

    Parameters
    ----------
    x : array_like, 1d
        data array, with runs defined by consecutive identical values

    Notes
    -----
    This was written as a more general class for runs. This has some redundant
    calculations when only the runs_test is used.

    TODO: make it lazy

    The runs test could be generalized to more than 1d if there is a use case
    for it.

    This should be extended once I figure out what the distribution of runs
    of any length k is.

    The exact distribution for the runs test is also available but not yet
    verified.
    '''

    def __init__(self, x):
        # use the array version throughout so list input also works
        x = self.x = np.asarray(x)
        # indices where a new run starts (sentinels force first/last run)
        self.runstart = runstart = np.nonzero(np.diff(np.r_[[-np.inf], x, [np.inf]]))[0]
        self.runs = runs = np.diff(runstart)            # length of each run
        self.runs_sign = runs_sign = x[runstart[:-1]]   # value of each run
        self.runs_pos = runs[runs_sign == 1]
        self.runs_neg = runs[runs_sign == 0]
        self.runs_freqs = np.bincount(runs)
        self.n_runs = len(self.runs)
        self.n_pos = (x == 1).sum()

    def runs_test(self, correction=True):
        '''basic version of runs test

        Parameters
        ----------
        correction: bool
            Following the SAS manual, for samplesize below 50, the test
            statistic is corrected by 0.5. This can be turned off with
            correction=False, and was included to match R, tseries, which
            does not use any correction.

        Returns
        -------
        z : float
            test statistic, asymptotically standard normal under the null
        pval : float
            two-sided p-value based on the normal distribution
        '''
        self.npo = npo = (self.runs_pos).sum()
        self.nne = nne = (self.runs_neg).sum()

        n = npo + nne
        npn = npo * nne
        rmean = 2. * npn / n + 1
        rvar = 2. * npn * (2.*npn - n) / n**2. / (n-1.)
        rstd = np.sqrt(rvar)
        rdemean = self.n_runs - rmean
        if n >= 50 or not correction:
            z = rdemean
        else:
            # continuity correction: shrink the deviation by 0.5 toward zero
            if rdemean > 0.5:
                z = rdemean - 0.5
            # BUG FIX: condition was `rdemean < 0.5`, which over-corrected
            # small deviations and made the z = 0 branch unreachable
            elif rdemean < -0.5:
                z = rdemean + 0.5
            else:
                z = 0.

        z /= rstd
        pval = 2 * stats.norm.sf(np.abs(z))
        return z, pval
def runstest_1samp(x, cutoff='mean', correction=True):
    '''use runs test on binary discretized data above/below cutoff

    Parameters
    ----------
    x : array_like
        data, numeric
    cutoff : {'mean', 'median'} or number
        This specifies the cutoff to split the data into large and small
        values.
    correction: bool
        Following the SAS manual, for samplesize below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which
        does not use any correction.

    Returns
    -------
    z_stat : float
        test statistic, asymptotically normally distributed
    p-value : float
        p-value, reject the null hypothesis if it is below an type 1 error
        level, alpha .
    '''
    # resolve the string options to a numeric threshold
    if cutoff == 'mean':
        threshold = np.mean(x)
    elif cutoff == 'median':
        threshold = np.median(x)
    else:
        threshold = cutoff

    indicator = (x >= threshold).astype(int)
    return Runs(indicator).runs_test(correction=correction)
def runstest_2samp(x, y=None, groups=None, correction=True):
    '''Wald-Wolfowitz runstest for two samples

    This tests whether two samples come from the same distribution.

    Parameters
    ----------
    x : array_like
        data, numeric, contains either one group, if y is also given, or
        both groups, if additionally a group indicator is provided
    y : array_like (optional)
        data, numeric
    groups : array_like
        group labels or indicator the data for both groups is given in a
        single 1-dimensional array, x.
    correction: bool
        Following the SAS manual, for samplesize below 50, the test
        statistic is corrected by 0.5. This can be turned off with
        correction=False, and was included to match R, tseries, which
        does not use any correction.

    Returns
    -------
    z_stat : float
        test statistic, asymptotically normally distributed
    p-value : float
        p-value, reject the null hypothesis if it is below an type 1 error
        level, alpha .

    Notes
    -----
    Wald-Wolfowitz runs test.

    If there are ties, then the test statistic and p-value that is reported
    is based on the higher p-value between sorting all tied observations of
    the same group. This gives results close to the minimum number of runs
    but can be far away from the maximum. This test is intended for
    continuous distributions.

    This has not been verified against a reference implementation. In a short
    Monte Carlo simulation where both samples are normally distributed, the
    test seems to be correctly sized for larger number of observations (30 or
    larger), but conservative (i.e. reject less often than nominal) with a
    sample size of 10 in each group.

    See Also
    --------
    runs_test_1samp
    Runs
    RunsProb
    '''
    x = np.asarray(x)
    if y is not None:
        y = np.asarray(y)
        groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
        # note reassigning x to the pooled sample
        x = np.concatenate((x, y))
        gruni = np.arange(2)
    elif groups is not None:
        gruni = np.unique(groups)
        if gruni.size != 2:  # pylint: disable=E1103
            raise ValueError('not exactly two groups specified')
        #require groups to be numeric ???
    else:
        raise ValueError('either y or groups is necessary')

    xargsort = np.argsort(x)
    # check for ties
    x_sorted = x[xargsort]
    x_diff = np.diff(x_sorted)  # used for detecting and handling ties
    if x_diff.min() == 0:
        # FIX: was a Python-2-only bare print statement; the author's own
        # comment asked to replace it with a warning
        warnings.warn('ties detected')
        x_mindiff = x_diff[x_diff > 0].min()
        eps = x_mindiff / 2.
        # break the ties in favor of each group in turn and keep the result
        # with the higher p-value
        xx = x.copy()  # don't change original, just in case

        xx[groups == gruni[0]] += eps
        xargsort = np.argsort(xx)
        xindicator = groups[xargsort]
        z0, p0 = Runs(xindicator).runs_test(correction=correction)

        xx[groups == gruni[0]] -= eps  # restore xx = x
        xx[groups == gruni[1]] += eps
        xargsort = np.argsort(xx)
        xindicator = groups[xargsort]
        z1, p1 = Runs(xindicator).runs_test(correction=correction)

        idx = np.argmax([p0, p1])
        return [z0, z1][idx], [p0, p1][idx]
    else:
        xindicator = groups[xargsort]
        return Runs(xindicator).runs_test(correction=correction)
try:
from scipy import comb # pylint: disable=E0611
except ImportError:
from scipy.misc import comb
class TotalRunsProb(object):
    '''probability distribution of the total number of runs

    This is the exact probability distribution for the (Wald-Wolfowitz)
    runs test. The random variable is the total number of runs if the
    sample has (n0, n1) observations of groups 0 and 1.

    Notes
    -----
    Written as a class so temporary calculations can be stored, although
    little caching is actually done.

    Formulas taken from SAS manual for one-sided significance level.

    Could be converted to a full univariate distribution, subclassing
    scipy.stats.distributions.

    *Status*
    Not verified yet except for mean.
    '''

    def __init__(self, n0, n1):
        self.n0 = n0
        self.n1 = n1
        self.n = n = n0 + n1
        # total number of arrangements; denominator of all probabilities
        self.comball = comb(n, n1)

    def runs_prob_even(self, r):
        # probability of an even total r: both groups contribute r/2 runs
        half = r // 2
        ways0 = comb(self.n0 - 1, half - 1)
        ways1 = comb(self.n1 - 1, half - 1)
        return ways0 * ways1 * 2. / self.comball

    def runs_prob_odd(self, r):
        # probability of an odd total r: one group has one run more
        k = (r + 1) // 2
        first = comb(self.n0 - 1, k - 1) * comb(self.n1 - 1, k - 2)
        second = comb(self.n0 - 1, k - 2) * comb(self.n1 - 1, k - 1)
        return (first + second) / self.comball

    def pdf(self, r):
        # vectorized dispatch to the even/odd formulas
        r = np.asarray(r)
        is_odd = np.mod(r, 2) > 0
        probs = np.zeros(r.shape)
        probs[is_odd] = self.runs_prob_odd(r[is_odd])
        probs[~is_odd] = self.runs_prob_even(r[~is_odd])
        return probs

    def cdf(self, r):
        # sum P(2), ..., P(r), splitting even and odd counts
        support = np.arange(2, r + 1)
        total = self.runs_prob_even(support[::2]).sum()
        total += self.runs_prob_odd(support[1::2]).sum()
        return total
class RunsProb(object):
    '''distribution of success runs of length k or more (classical definition)

    The underlying process is assumed to be a sequence of Bernoulli trials
    of a given length n.

    not sure yet, how to interpret or use the distribution for runs
    of length k or more.

    Musseli also has longest success run, and waiting time distribution
    negative binomial of order k and geometric of order k

    need to compare with Godpole

    need a MonteCarlo function to do some quick tests before doing more
    '''

    def pdf(self, x, k, n, p):
        '''distribution of success runs of length k or more

        Parameters
        ----------
        x : float
            count of runs of length n
        k : int
            length of runs
        n : int
            total number of observations or trials
        p : float
            probability of success in each Bernoulli trial

        Returns
        -------
        pdf : float
            probability that x runs of length of k are observed

        Notes
        -----
        not yet vectorized

        References
        ----------
        Muselli 1996, theorem 3
        '''
        q = 1 - p
        # candidate numbers of runs; column vector for broadcasting
        m = np.arange(x, (n + 1) // (k + 1) + 1)[:, None]
        # inclusion-exclusion sum over m (Muselli 1996, theorem 3)
        sign = (-1)**(m - x)
        weight = comb(m, x) * p**(m*k) * q**(m-1)
        tail = comb(n - m*k, m - 1) + q * comb(n - m*k, m)
        terms = sign * weight * tail
        return terms.sum(0)

    def pdf_nb(self, x, k, n, p):
        # negative-binomial-of-order-k waiting-time variant: not implemented
        pass
#y = np.arange(m-1, n-mk+1
'''
>>> [np.sum([RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]) for k in range(16)]
[0.99999332193894064, 0.99999999999999367, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> [(np.arange(0,16) * [RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]).sum() for k in range(16)]
[6.9998931510341809, 4.1406249999999929, 2.4414062500000075, 1.4343261718749996, 0.83923339843749856, 0.48875808715820324, 0.28312206268310569, 0.1629814505577086, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> np.array([(np.arange(0,16) * [RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]).sum() for k in range(16)])/11
array([ 0.63635392, 0.37642045, 0.22194602, 0.13039329, 0.07629395,
0.04443255, 0.02573837, 0.0148165 , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. ])
>>> np.diff([(np.arange(0,16) * [RunsProb().pdf(xi, k, 16, 10/16.) for xi in range(0,16)]).sum() for k in range(16)][::-1])
array([ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0.16298145, 0.12014061, 0.20563602,
0.35047531, 0.59509277, 1.00708008, 1.69921875, 2.85926815])
'''
def median_test_ksample(x, groups):
    '''chisquare test for equality of median/location

    This tests whether all groups have the same fraction of observations
    above the median.

    Parameters
    ----------
    x : array_like
        data values stacked for all groups
    groups : array_like
        group labels or indicator

    Returns
    -------
    stat : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution
    others ????
        currently some test output, table and expected
    '''
    x = np.asarray(x)
    gruni = np.unique(groups)
    xli = [x[groups == group] for group in gruni]
    xmedian = np.median(x)
    counts_larger = np.array([(xg > xmedian).sum() for xg in xli])
    counts = np.array([len(xg) for xg in xli])
    counts_smaller = counts - counts_larger
    nobs = counts.sum()
    n_larger = (x > xmedian).sum()
    n_smaller = nobs - n_larger
    table = np.vstack((counts_smaller, counts_larger))

    # the following should be replaced by chisquare_contingency table
    expected = np.vstack((counts * 1. / nobs * n_smaller,
                          counts * 1. / nobs * n_larger))

    if (expected < 5).any():
        # FIX: the original message concatenated string pieces without
        # spaces ("expectedobservations", "goodapproximation")
        print('Warning: There are cells with less than 5 expected '
              'observations. The chisquare distribution might not be a good '
              'approximation for the true distribution.')

    # check ddof
    return stats.chisquare(table.ravel(), expected.ravel(), ddof=1), table, expected
def cochrans_q(x):
    '''Cochran's Q test for identical effect of k treatments

    Cochran's Q is a k-sample extension of the McNemar test. If there are only
    two treatments, then Cochran's Q test and McNemar test are equivalent.

    Test that the probability of success is the same for each treatment.
    The alternative is that at least two treatments have a different
    probability of success.

    Parameters
    ----------
    x : array_like, 2d (N,k)
        data with N cases and k variables

    Returns
    -------
    q_stat : float
        test statistic
    pvalue : float
        pvalue from the chisquare distribution

    Notes
    -----
    In Wikipedia terminology, rows are blocks and columns are treatments.
    The number of rows N, should be large for the chisquare distribution to be
    a good approximation.
    The Null hypothesis of the test is that all treatments have the
    same effect.

    References
    ----------
    http://en.wikipedia.org/wiki/Cochran_test
    SAS Manual for NPAR TESTS
    '''
    x = np.asarray(x)
    levels = np.unique(x)
    N, k = x.shape
    # "success" is the largest observed level
    success = (x == levels[-1])
    count_row_success = success.sum(1, float)
    count_col_success = success.sum(0, float)
    count_row_ss = count_row_success.sum()
    count_col_ss = count_col_success.sum()
    # row totals and column totals both count all successes
    assert count_row_ss == count_col_ss

    # SAS manual formula; the denominator is essentially k times the
    # variance of the column counts
    numer = (k - 1) * (k * np.sum(count_col_success**2) - count_col_ss**2)
    denom = k * count_row_ss - np.sum(count_row_success**2)
    q_stat = numer / denom

    return q_stat, stats.chi2.sf(q_stat, k - 1)
def mcnemar(x, y=None, exact=True, correction=True):
    '''McNemar test

    Parameters
    ----------
    x, y : array_like
        two paired data samples. If y is None, then x can be a 2 by 2
        contingency table. x and y can have more than one dimension, then
        the results are calculated under the assumption that axis zero
        contains the observation for the samples.
    exact : bool
        If exact is true, then the binomial distribution will be used.
        If exact is false, then the chisquare distribution will be used, which
        is the approximation to the distribution of the test statistic for
        large sample sizes.
    correction : bool
        If true, then a continuity correction is used for the chisquare
        distribution (if exact is false.)

    Returns
    -------
    stat : float or int, array
        The test statistic is the chisquare statistic if exact is false. If the
        exact binomial distribution is used, then this contains the min(n1, n2),
        where n1, n2 are cases that are zero in one sample but one in the other
        sample.
    pvalue : float or array
        p-value of the null hypothesis of equal effects.

    Notes
    -----
    This is a special case of Cochran's Q test. The results when the chisquare
    distribution is used are identical, except for continuity correction.
    '''
    x = np.asarray(x)
    if y is None and x.shape[0] == x.shape[1]:
        if x.shape[0] != 2:
            raise ValueError('table needs to be 2 by 2')
        # discordant cell counts of the contingency table
        n1, n2 = x[1, 0], x[0, 1]
    else:
        # paired samples: count discordant pairs directly
        # (this is also the paired sign test)
        n1 = np.sum(x < y, 0)
        n2 = np.sum(x > y, 0)

    if exact:
        stat = np.minimum(n1, n2)
        # binom is symmetric with p=0.5; cap at 1 for n1 == n2
        pval = np.minimum(stats.binom.cdf(stat, n1 + n2, 0.5) * 2, 1)
    else:
        corr = int(correction)  # convert bool to 0 or 1
        stat = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
        pval = stats.chi2.sf(stat, 1)

    return stat, pval
def symmetry_bowker(table):
    '''Test for symmetry of a (k, k) square contingency table

    This is an extension of the McNemar test to test the Null hypothesis
    that the contingency table is symmetric around the main diagonal, that is

    n_{i, j} = n_{j, i}  for all i, j

    Parameters
    ----------
    table : array_like, 2d, (k, k)
        a square contingency table that contains the count for k categories
        in rows and columns.

    Returns
    -------
    statistic : float
        chisquare test statistic
    p-value : float
        p-value of the test statistic based on chisquare distribution
    df : int
        degrees of freedom of the chisquare distribution

    Notes
    -----
    Implementation is based on the SAS documentation, R includes it in
    `mcnemar.test` if the table is not 2 by 2.

    The pvalue is based on the chisquare distribution which requires that the
    sample size is not very small to be a good approximation of the true
    distribution. For 2x2 contingency tables the exact distribution can be
    obtained with `mcnemar`

    See Also
    --------
    mcnemar
    '''
    table = np.asarray(table)
    k, k2 = table.shape
    if k != k2:
        raise ValueError('table needs to be square')

    # pair up each cell above the diagonal with its mirror below it
    upp_idx = np.triu_indices(k, 1)
    upper = table[upp_idx]    # upper triangle in row order
    lower = table.T[upp_idx]  # lower triangle in column order
    # small constant guards against 0/0 for empty symmetric pairs
    stat = ((lower - upper)**2 / (lower + upper + 1e-20)).sum()
    df = k * (k-1) / 2.
    pval = stats.chi2.sf(stat, df)

    return stat, pval, df
if __name__ == '__main__':
    # Ad-hoc smoke checks (Python 2 print statements), not a formal test suite.
    x1 = np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1])

    print Runs(x1).runs_test()
    print runstest_1samp(x1, cutoff='mean')
    print runstest_2samp(np.arange(16,0,-1), groups=x1)
    print TotalRunsProb(7,9).cdf(11)
    # the last two use random data, so output varies between executions
    print median_test_ksample(np.random.randn(100), np.random.randint(0,2,100))
    print cochrans_q(np.random.randint(0,2,(100,8)))
| bsd-3-clause | 13ec4a5a19336268ee5958e02dff65f0 | 30.944179 | 203 | 0.608917 | 3.504637 | false | true | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.