repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sergiocorreia/panflute | panflute/base.py | 1 | 8673 | """
Base classes and methods of all Pandoc elements
"""
# ---------------------------
# Imports
# ---------------------------
from operator import attrgetter
from collections.abc import MutableSequence, MutableMapping
from .containers import ListContainer, DictContainer
from .utils import check_type, encode_dict # check_group
# ---------------------------
# Meta Classes
# ---------------------------
class Element(object):
    """
    Base class of all Pandoc elements
    """
    # parent: the element that contains this one (None for a detached/root
    #         element); location/index describe where inside the parent it
    #         lives. All three are set by the containers, not by callers.
    __slots__ = ['parent', 'location', 'index']
    # Names of *attributes* that hold child elements; subclasses with
    # children override this (e.g. with ['_content']).
    _children = []
    def __new__(cls, *args, **kwargs):
        # This is just to initialize self.parent to None
        element = object.__new__(cls)
        element.parent = None
        element.location = None
        element.index = None
        return element
    @property
    def tag(self):
        """Return the Pandoc tag of the element (simply its class name)."""
        tag = type(self).__name__
        return tag
    def __eq__(self, other):
        # Doc has a different method b/c it uses __dict__ instead of slots
        # NOTE: self.__slots__ resolves to the slot list declared on the
        # concrete subclass, so only those attributes are compared.
        if type(self) != type(other):
            return False
        for key in self.__slots__:
            if getattr(self, key) != getattr(other, key):
                return False
        return True
    # ---------------------------
    # Base methods
    # ---------------------------
    # Should be overridden except for trivial elements (Space, Null, etc.)
    def __repr__(self):
        # This is just a convenience method
        # Override it for more complex elements
        extra = []
        for key in self.__slots__:
            if not key.startswith('_') and key != 'text':
                val = getattr(self, key)
                # Skip empty values so the repr stays short
                if val not in ([], dict(), ''):
                    extra.append([key, val])
        if extra:
            extra = ('{}={}'.format(k, repr(v)) for k, v in extra)
            extra = '; ' + ', '.join(x for x in extra)
        else:
            extra = ''
        if '_content' in self.__slots__:
            content = ' '.join(repr(x) for x in self.content)
            return '{}({}{})'.format(self.tag, content, extra)
        elif 'text' in self.__slots__:
            return '{}({}{})'.format(self.tag, self.text, extra)
        else:
            return self.tag
    def to_json(self):
        """Serialize the element to the dict structure Pandoc's JSON uses."""
        return encode_dict(self.tag, self._slots_to_json())
    def _slots_to_json(self):
        # Default when the element contains nothing
        return []
    # ---------------------------
    # .identifier .classes .attributes
    # ---------------------------
    def _set_ica(self, identifier, classes, attributes):
        # Validate and store the (identifier, classes, attributes) triple
        # shared by elements that accept Pandoc "attr" data.
        self.identifier = check_type(identifier, str)
        self.classes = [check_type(cl, str) for cl in classes]
        self.attributes = dict(attributes)
    def _ica_to_json(self):
        # Pandoc JSON represents attributes as a list of [key, value] pairs
        return [self.identifier, self.classes, list(self.attributes.items())]
    # ---------------------------
    # .content (setter and getter)
    # ---------------------------
    @property
    def content(self):
        """
        Sequence of :class:`Element` objects (usually either :class:`Block`
        or :class:`Inline`) that are "children" of the current element.
        Only available for elements that accept ``*args``.
        Note: some elements have children in attributes other than ``content``
        (such as :class:`.Table` that has children in the header and
        caption attributes).
        """
        return self._content
    @content.setter
    def content(self, value):
        # Re-wrap the assigned sequence in a fresh ListContainer so that
        # parent/oktypes bookkeeping stays correct.
        oktypes = self._content.oktypes
        value = value.list if isinstance(value, ListContainer) else list(value)
        self._content = ListContainer(*value, oktypes=oktypes, parent=self)
    def _set_content(self, value, oktypes):
        """
        Similar to content.setter but when there are no existing oktypes
        """
        if value is None:
            value = []
        self._content = ListContainer(*value, oktypes=oktypes, parent=self)
    # ---------------------------
    # Navigation
    # ---------------------------
    @property
    def container(self):
        """
        Rarely used attribute that returns the ``ListContainer`` or
        ``DictContainer`` that contains the element
        (or returns None if no such container exist)
        :rtype: ``ListContainer`` | ``DictContainer`` | ``None``
        """
        if self.parent is None:
            return None
        elif self.location is None:
            # Default location: the parent's .content container
            return self.parent.content
        else:
            container = getattr(self.parent, self.location)
            if isinstance(container, (ListContainer, DictContainer)):
                return container
            else:
                # The element is stored directly in a parent attribute,
                # not inside a container; implicitly returns None.
                assert self is container  # id(self) == id(container)
    def offset(self, n):
        """
        Return a sibling element offset by n
        :rtype: :class:`Element` | ``None``
        """
        # Returns None (implicitly) when the element has no index or the
        # offset falls outside the container.
        idx = self.index
        if idx is not None:
            sibling = idx + n
            container = self.container
            if 0 <= sibling < len(container):
                return container[sibling]
    @property
    def next(self):
        """
        Return the next sibling.
        Note that ``elem.offset(1) == elem.next``
        :rtype: :class:`Element` | ``None``
        """
        return self.offset(1)
    @property
    def prev(self):
        """
        Return the previous sibling.
        Note that ``elem.offset(-1) == elem.prev``
        :rtype: :class:`Element` | ``None``
        """
        return self.offset(-1)
    def ancestor(self, n):
        """
        Return the n-th ancestor.
        Note that ``elem.ancestor(1) == elem.parent``
        :rtype: :class:`Element` | ``None``
        """
        # Raises TypeError (not ValueError) for non-int or non-positive n
        if not isinstance(n, int) or n < 1:
            raise TypeError('Ancestor needs to be positive, received', n)
        if n == 1 or self.parent is None:
            return self.parent
        else:
            return self.parent.ancestor(n-1)
    # ---------------------------
    # Walking
    # ---------------------------
    @property
    def doc(self):
        """
        Return the root Doc element (if there is one)
        """
        guess = self
        while guess is not None and guess.tag != 'Doc':
            guess = guess.parent  # If no parent, this will be None
        return guess  # Returns either Doc or None
    def walk(self, action, doc=None, stop_if=None):
        """
        Walk through the element and all its children (sub-elements),
        applying the provided function ``action``.
        A trivial example would be:
        .. code-block:: python
           from panflute import *
           def no_action(elem, doc):
               pass
           doc = Doc(Para(Str('a')))
           altered = doc.walk(no_action)
        :param action: function that takes (element, doc) as arguments.
        :type action: :class:`function`
        :param doc: root document; used to access metadata,
            the output format (in ``.format``, other elements, and
            other variables). Only use this variable if for some reason
            you don't want to use the current document of an element.
        :type doc: :class:`.Doc`
        :param stop_if: function that takes (element) as argument.
        :type stop_if: :class:`function`, optional
        :rtype: :class:`Element` | ``[]`` | ``None``
        """
        # Infer the document thanks to .parent magic
        if doc is None:
            doc = self.doc
        # First iterate over children; unless the stop condition is met
        if stop_if is None or not stop_if(self):
            # self._children has property *names* so we need a bit of getattr/setattr magic to modify the objects themselves
            children = ((child_name, getattr(self, child_name)) for child_name in self._children)
            for child_name, child in children:
                if isinstance(child, (Element, ListContainer, DictContainer)):
                    # Depth-first: children are walked (and possibly
                    # replaced) before the action runs on this element.
                    child = child.walk(action, doc, stop_if)
                elif child is None:
                    child = None  # Empty table headers or captions
                else:
                    raise TypeError(type(child))
                setattr(self, child_name, child)
        # Then apply the action() to the root element
        altered = action(self, doc)
        # By convention, returning None from action keeps the element
        return self if altered is None else altered
class Inline(Element):
    """
    Base class of all inline elements
    """
    # Inline elements declare no additional slots of their own.
    __slots__ = []
class Block(Element):
    """
    Base class of all block elements
    """
    # Block elements declare no additional slots of their own.
    __slots__ = []
class MetaValue(Element):
    """
    Base class of all metadata elements
    """
    # Metadata elements declare no additional slots of their own.
    __slots__ = []
| bsd-3-clause | 88d233e663f8adbe629cade678cffd3e | 28.5 | 124 | 0.531189 | 4.484488 | false | false | false | false |
morepath/morepath | morepath/tests/test_predicates.py | 1 | 1348 | from morepath.app import App
from webtest import TestApp as Client
import morepath
from reg import KeyIndex
def test_view_predicates():
    """Views registered under the same name dispatch on request method."""
    class app(App):
        pass
    @app.path(path="")
    class Root:
        pass
    @app.view(model=Root, name="foo", request_method="GET")
    def get(self, request):
        return "GET"
    @app.view(model=Root, name="foo", request_method="POST")
    def post(self, request):
        return "POST"
    client = Client(app())
    # Each HTTP verb must reach its matching view.
    assert client.get("/foo").body == b"GET"
    assert client.post("/foo").body == b"POST"
def test_extra_predicates():
    """A custom view predicate can dispatch on model state."""
    class app(App):
        pass
    @app.path(path="{id}")
    class Model:
        def __init__(self, id):
            self.id = id
    @app.view(model=Model, name="foo", id="a")
    def get_a(self, request):
        return "a"
    @app.view(model=Model, name="foo", id="b")
    def get_b(self, request):
        return "b"
    # Register an 'id' predicate on view lookup, ordered after the
    # built-in request-method predicate.
    @app.predicate(
        morepath.App.get_view,
        name="id",
        default="",
        index=KeyIndex,
        after=morepath.request_method_predicate,
    )
    def id_predicate(self, obj, request):
        return obj.id
    client = Client(app())
    # The predicate routes each model instance to its matching view.
    assert client.get("/a/foo").body == b"a"
    assert client.get("/b/foo").body == b"b"
| bsd-3-clause | 09d0ef727a506341014df61aaa81d039 | 20.0625 | 60 | 0.568991 | 3.42132 | false | true | false | false |
morepath/morepath | morepath/predicate.py | 1 | 4328 | """
The :meth:`morepath.App.predicate` directive lets you install predicates
for function that use :func:`reg.dispatch_method`. This is
used by :mod:`morepath.core` to install the view predicates, and you can
also use it for your own functions.
This implements the functionality that drives Reg to install these
predicates.
See also :class:`morepath.directive.PredicateRegistry`
"""
from reg import Predicate
from .toposort import toposorted, Info
from collections import defaultdict
class PredicateRegistry:
    """A registry of what predicates are registered for which functions.
    It also keeps track of how predicates are to be ordered.
    """
    # NOTE(review): presumably signals to the configuration machinery that
    # this registry's constructor takes the app class — confirm against
    # the directive setup code.
    app_class_arg = True
    def __init__(self, app_class):
        # The App subclass whose dispatch methods receive the predicates.
        self.app_class = app_class
        # dispatch function -> list of PredicateInfo registrations
        self._predicate_infos = defaultdict(list)
        # dispatch function -> {predicate func -> fallback func}
        self._predicate_fallbacks = defaultdict(dict)
    def register_predicate(
        self, func, dispatch, name, default, index, before, after
    ):
        """Register a predicate for installation into the reg registry.
        See :meth:`morepath.App.predicate` for details.
        :param func: the function that implements the predicate.
        :param dispatch: the dispatch function to register the predicate on.
        :param name: name of the predicate.
        :param default: default value.
        :param index: index to use.
        :param before: predicate function to have priority over.
        :param after: predicate function that has priority over this one.
        """
        info = PredicateInfo(func, name, default, index, before, after)
        self._predicate_infos[dispatch].append(info)
    def register_predicate_fallback(self, dispatch, func, fallback_func):
        """Register a predicate fallback for installation into reg registry.
        See :meth:`morepath.App.predicate_fallback` for details.
        :param dispatch: the dispatch function to register fallback on.
        :param func: the predicate function to register fallback for.
        :param fallback_func: the fallback function.
        """
        self._predicate_fallbacks[dispatch][func] = fallback_func
    def install_predicates(self):
        """Install the predicates with reg.
        This should be called during configuration once all predicates
        and fallbacks are known. Uses
        :meth:`PredicateRegistry.get_predicates` to get out the
        predicates in the correct order.
        """
        for dispatch in self._predicate_infos.keys():
            # Look the dispatch method up on the app class by name so the
            # predicates attach to the bound dispatch, not the plain function.
            getattr(self.app_class, dispatch.__name__).add_predicates(
                self.get_predicates(dispatch)
            )
    def get_predicates(self, dispatch):
        """Create Reg predicates.
        This creates :class:`reg.Predicate` objects for a particular
        dispatch function.
        Uses :meth:`PredicateRegistry.sorted_predicate_infos` to sort
        the predicate infos.
        :param dispatch: the dispatch function to create the predicates for.
        :return: a list of :class:`reg.Predicate` instances in the
          correct order.
        """
        infos = self.sorted_predicate_infos(dispatch)
        result = []
        for info in infos:
            # fallback is None when no fallback was registered for this func
            fallback = self._predicate_fallbacks[dispatch].get(info.func)
            predicate = Predicate(
                info.name,
                info.index,
                adapt(info.func),
                fallback=fallback,
                default=info.default,
            )
            result.append(predicate)
        return result
    def sorted_predicate_infos(self, dispatch):
        """Topologically sort predicate infos for a dispatch function.
        :param dispatch: the dispatch function to sort for.
        :return: a list of sorted :class:`PredicateInfo` instances.
        """
        return toposorted(self._predicate_infos[dispatch])
def adapt(func):
    """Return a wrapper that calls *func* with a dict expanded as kwargs.

    Reg hands predicate callables a single dict argument; this adapter
    unpacks that dict into keyword arguments for *func*.
    """
    def wrapper(mapping):
        return func(**mapping)
    return wrapper
class PredicateInfo(Info):
    """Used by :class:`PredicateRegistry` internally.
    Is used to store registration information on a predicate
    before it is registered with Reg.
    """
    def __init__(self, func, name, default, index, before, after):
        # Info handles the before/after topological-sort bookkeeping.
        super().__init__(func, before, after)
        # func: the predicate callable itself
        self.func = func
        # name: predicate name as exposed to Reg
        self.name = name
        # default: value used when a caller does not supply the predicate
        self.default = default
        # index: Reg index class (e.g. KeyIndex) used for lookups
        self.index = index
| bsd-3-clause | 0e9cfceb537cb828fcec49f6f77defd2 | 32.292308 | 76 | 0.65134 | 4.536688 | false | false | false | false |
scikit-hep/root_numpy | examples/core/plot_bootstrap.py | 3 | 2604 | #!/usr/bin/env python
"""
============================
Bootstrap a TTree with NumPy
============================
This example demonstrates how to sample entries in a TTree with replacement
with the help of NumPy and root_numpy. This example depends on
`rootpy <http://www.rootpy.org/>`_ which can be installed with pip::
pip install --user rootpy
"""
from rootpy.extern.six.moves import range
from rootpy.tree import Tree, TreeModel, FloatCol
from rootpy.plotting import Canvas, Hist2D, set_style
from rootpy.io import root_open
from root_numpy import root2array, array2tree, rec2array, fill_hist
import ROOT
import numpy as np
from random import gauss
import random
import os
# Run ROOT in batch mode (no GUI windows) and fix both RNG seeds so the
# bootstrap output is reproducible across runs.
ROOT.gROOT.SetBatch()
set_style('ATLAS')
np.random.seed(0)
random.seed(0)
# create an example TTree dataset
class Sample(TreeModel):
    """Tree model for the toy dataset: two float branches, x and y."""
    x = FloatCol()
    y = FloatCol()
with root_open('sample.root', 'recreate'):
    # generate toy data in a TTree: 500 events of independent unit Gaussians
    tree = Tree('sample', model=Sample)
    for i in range(500):
        tree.x = gauss(0, 1)
        tree.y = gauss(0, 1)
        tree.Fill()
    tree.write()
# read in the TTree as a NumPy array
array = root2array('sample.root', 'sample')
# Remove a stale GIF so the '+' append syntax below starts fresh
if os.path.exists('bootstrap.gif'):
    os.remove('bootstrap.gif')
# Canvas name is set here to aid the automatic documentation generation
# It needs to take the GIF already saved instead of saving a png of the last
# frame.
canvas = Canvas(width=500, height=400, name='bootstrap.gif')
hist = Hist2D(10, -3, 3, 10, -3, 3, drawstyle='LEGO2')
output = root_open('bootstrap.root', 'recreate')
# bootstrap 10 times
for bootstrap_idx in range(10):
    # sample with replacement
    # http://docs.scipy.org/doc/numpy-dev/reference/generated/numpy.random.choice.html
    sample_idx = np.random.choice(len(array), size=len(array), replace=True)
    array_bootstrapped = array[sample_idx]
    # convert back to a TTree and write it out
    tree_bootstrapped = array2tree(
        array_bootstrapped,
        name='bootstrap_{0}'.format(bootstrap_idx))
    tree_bootstrapped.Write()
    tree_bootstrapped.Delete()
    # fill the ROOT histogram with the numpy array
    hist.Reset()
    fill_hist(hist, rec2array(array_bootstrapped))
    hist.Draw()
    hist.xaxis.title = 'x'
    hist.yaxis.title = 'y'
    hist.zaxis.title = 'Events'
    hist.xaxis.limits = (-2.5, 2.5)
    hist.yaxis.limits = (-2.5, 2.5)
    hist.zaxis.range_user = (0, 30)
    hist.xaxis.divisions = 5
    hist.yaxis.divisions = 5
    hist.zaxis.divisions = 5
    # '+50' appends this frame to the GIF with a 0.5 s delay
    canvas.Print('bootstrap.gif+50')
# loop the gif
canvas.Print('bootstrap.gif++')
output.Close()
flutter/buildroot | build/linux/unbundle/replace_gyp_files.py | 1 | 2938 | #!/usr/bin/env python3
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces gyp files in tree with files from here that
make the build use system libraries.
"""
import optparse
import os.path
import shutil
import sys
# Maps each 'use_system_*' gyp define to the bundled gyp file it replaces.
REPLACEMENTS = {
  'use_system_expat': 'third_party/expat/expat.gyp',
  'use_system_ffmpeg': 'third_party/ffmpeg/ffmpeg.gyp',
  'use_system_flac': 'third_party/flac/flac.gyp',
  'use_system_harfbuzz': 'third_party/harfbuzz-ng/harfbuzz.gyp',
  'use_system_icu': 'third_party/icu/icu.gyp',
  'use_system_jsoncpp': 'third_party/jsoncpp/jsoncpp.gyp',
  'use_system_libevent': 'third_party/libevent/libevent.gyp',
  'use_system_libjpeg': 'third_party/libjpeg/libjpeg.gyp',
  'use_system_libpng': 'third_party/libpng/libpng.gyp',
  'use_system_libusb': 'third_party/libusb/libusb.gyp',
  'use_system_libvpx': 'third_party/libvpx/libvpx.gyp',
  'use_system_libwebp': 'third_party/libwebp/libwebp.gyp',
  'use_system_libxml': 'third_party/libxml/libxml.gyp',
  'use_system_libxnvctrl' : 'third_party/libXNVCtrl/libXNVCtrl.gyp',
  'use_system_libxslt': 'third_party/libxslt/libxslt.gyp',
  'use_system_opus': 'third_party/opus/opus.gyp',
  'use_system_protobuf': 'third_party/protobuf/protobuf.gyp',
  'use_system_re2': 'third_party/re2/re2.gyp',
  'use_system_snappy': 'third_party/snappy/snappy.gyp',
  'use_system_speex': 'third_party/speex/speex.gyp',
  'use_system_sqlite': 'third_party/sqlite/sqlite.gyp',
  'use_system_v8': 'v8/tools/gyp/v8.gyp',
  'use_system_zlib': 'third_party/zlib/zlib.gyp',
}
def DoMain(argv):
  """Replaces (or, with --undo, restores) gyp files for system libraries.

  For each 'use_system_X=1' define in argv, the corresponding in-tree gyp
  file is backed up as '<path>.orig' and replaced with the copy shipped
  next to this script. '--undo' reverses the operation.

  Returns 0 on success.
  """
  my_dirname = os.path.dirname(__file__)
  source_tree_root = os.path.abspath(
    os.path.join(my_dirname, '..', '..', '..'))
  parser = optparse.OptionParser()
  # Accept arguments in gyp command-line syntax, so that the caller can re-use
  # command-line for this script and gyp.
  parser.add_option('-D', dest='defines', action='append')
  parser.add_option('--undo', action='store_true')
  options, args = parser.parse_args(argv)
  # optparse leaves 'defines' as None when no -D flag is passed; normalize
  # to an empty list so the membership test below cannot raise TypeError.
  defines = options.defines or []
  for flag, path in list(REPLACEMENTS.items()):
    if '%s=1' % flag not in defines:
      continue
    if options.undo:
      # Restore original file, and also remove the backup.
      # This is meant to restore the source tree to its original state.
      os.rename(os.path.join(source_tree_root, path + '.orig'),
                os.path.join(source_tree_root, path))
    else:
      # Create a backup copy for --undo.
      shutil.copyfile(os.path.join(source_tree_root, path),
                      os.path.join(source_tree_root, path + '.orig'))
      # Copy the gyp file from directory of this script to target path.
      shutil.copyfile(os.path.join(my_dirname, os.path.basename(path)),
                      os.path.join(source_tree_root, path))
  return 0
if __name__ == '__main__':
  sys.exit(DoMain(sys.argv))
| bsd-3-clause | bf8368a7feb743986b00e72b3402d021 | 34.39759 | 78 | 0.679374 | 3.007165 | false | false | false | false |
flutter/buildroot | build/fuchsia/fidl_gen_cpp.py | 1 | 3070 | #!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generate C/C++ headers and source files from the set of FIDL files specified
in the meta.json manifest.
"""
import argparse
import collections
import json
import os
import subprocess
import sys
def GetFIDLFilesRecursive(libraries, sdk_base, path):
  """Populates |libraries| with name -> FIDL source list, dependencies first.

  Reads the meta.json manifest at |path|, recurses into each listed
  dependency's manifest under |sdk_base|, then records this library's
  own sources.
  """
  with open(path) as manifest_file:
    manifest = json.load(manifest_file)
  for dependency in manifest['deps']:
    dependency_manifest = os.path.abspath(
        '%s/fidl/%s/meta.json' % (sdk_base, dependency))
    GetFIDLFilesRecursive(libraries, sdk_base, dependency_manifest)
  # Store a fresh list so callers cannot alias the parsed manifest data.
  libraries[manifest['name']] = list(manifest['sources'])
def GetFIDLFilesByLibraryName(sdk_base, root):
  """Returns an ordered mapping of library name -> FIDL source files.

  Libraries appear in dependency order (dependencies before dependents),
  starting from the manifest at |root|.
  """
  collected = collections.OrderedDict()
  GetFIDLFilesRecursive(collected, sdk_base, root)
  return collected
def main():
  """Runs fidlc (and optionally fidlgen) over all FIDL files in the SDK.

  Returns 0 on success; subprocess.check_call raises on tool failure.
  """
  parser = argparse.ArgumentParser();
  parser.add_argument('--fidlc-bin', dest='fidlc_bin', action='store', required=True)
  parser.add_argument('--fidlgen-bin', dest='fidlgen_bin', action='store', required=False)
  parser.add_argument('--sdk-base', dest='sdk_base', action='store', required=True)
  parser.add_argument('--root', dest='root', action='store', required=True)
  parser.add_argument('--json', dest='json', action='store', required=True)
  parser.add_argument('--fidlgen-output-root', dest='fidlgen_output_root', action='store', required=False)
  parser.add_argument('--output-c-tables', dest='output_c_tables', action='store', required=True)
  parser.add_argument('--target-api-level', dest='target_api_level', action='store', required=False)
  args = parser.parse_args()
  assert os.path.exists(args.fidlc_bin)
  # --fidlgen-bin and --fidlgen-output-root should be passed in together.
  assert os.path.exists(args.fidlgen_bin or '') == bool(args.fidlgen_output_root)
  fidl_files_by_name = GetFIDLFilesByLibraryName(args.sdk_base, args.root)
  fidlc_command = [
    args.fidlc_bin,
    '--tables',
    args.output_c_tables,
    '--json',
    args.json
  ]
  # Restrict compilation to a specific Fuchsia API level when requested.
  if args.target_api_level:
    fidlc_command += [
      '--available',
      'fuchsia:{api_level}'.format(api_level=args.target_api_level),
    ]
  # Create an iterator that works on both python3 and python2
  try:
    fidl_files_by_name_iter = list(fidl_files_by_name.items())
  except AttributeError:
    fidl_files_by_name_iter = iter(fidl_files_by_name.items())
  # Each library's sources are grouped behind their own '--files' flag.
  for _, fidl_files in fidl_files_by_name_iter:
    fidlc_command.append('--files')
    for fidl_file in fidl_files:
      fidl_abspath = os.path.abspath('%s/%s' % (args.sdk_base, fidl_file))
      fidlc_command.append(fidl_abspath)
  subprocess.check_call(fidlc_command)
  # Optionally run fidlgen on the JSON IR produced by fidlc above.
  if args.fidlgen_output_root:
    assert os.path.exists(args.json)
    fidlgen_command = [
      args.fidlgen_bin,
      '-json',
      args.json,
      '-root',
      args.fidlgen_output_root
    ]
    subprocess.check_call(fidlgen_command)
  return 0
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause | fcdbdce2c158a6d4681ff25757012829 | 30.326531 | 106 | 0.685016 | 3.234984 | false | false | false | false |
flutter/buildroot | build/symlink.py | 1 | 1176 | #!/usr/bin/env python3
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make a symlink and optionally touch a file (to handle dependencies)."""
import errno
import optparse
import os.path
import shutil
import sys
def Main(argv):
  """Symlinks each source into the target, optionally touching a stamp file.

  With a single source and a non-directory target, the target itself is
  the link path; otherwise each link is created inside the target
  directory. With --force, an existing link path is removed first.
  """
  parser = optparse.OptionParser()
  parser.add_option('-f', '--force', action='store_true')
  parser.add_option('--touch')
  options, args = parser.parse_args(argv[1:])
  if len(args) < 2:
    parser.error('at least two arguments required.')
  target = args[-1]
  sources = args[:-1]
  single_source = len(sources) == 1
  for source in sources:
    link_path = os.path.join(target, os.path.basename(source))
    if single_source and not os.path.isdir(target):
      link_path = target
    try:
      os.symlink(source, link_path)
    except OSError as error:
      # Only an already-existing path combined with --force is recoverable.
      if error.errno != errno.EEXIST or not options.force:
        raise
      if os.path.isdir(link_path):
        shutil.rmtree(link_path, ignore_errors=True)
      else:
        os.remove(link_path)
      os.symlink(source, link_path)
  if options.touch:
    # Create (or truncate) the stamp file used for build dependencies.
    with open(options.touch, 'w'):
      pass
if __name__ == '__main__':
  sys.exit(Main(sys.argv))
| bsd-3-clause | 247e7a324be9553912698a19e923e242 | 28.4 | 74 | 0.636054 | 3.458824 | false | false | false | false |
flutter/buildroot | build/get_syzygy_binaries.py | 1 | 18089 | #!/usr/bin/env python3
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script for downloading versioned Syzygy binaries."""
import hashlib
import errno
import json
import logging
import optparse
import os
import re
import shutil
import stat
import sys
import subprocess
import tempfile
import time
import zipfile
_LOGGER = logging.getLogger(os.path.basename(__file__))
# The relative path where official builds are archived in their GS bucket.
_SYZYGY_ARCHIVE_PATH = ('/builds/official/%(revision)s')
# A JSON file containing the state of the download directory. If this file and
# directory state do not agree, then the binaries will be downloaded and
# installed again.
_STATE = '.state'
# This matches an integer (an SVN revision number) or a SHA1 value (a GIT hash).
# The archive exclusively uses lowercase GIT hashes.
_REVISION_RE = re.compile('^(?:\d+|[a-f0-9]{40})$')
# This matches an MD5 hash.
_MD5_RE = re.compile('^[a-f0-9]{32}$')
# List of reources to be downloaded and installed. These are tuples with the
# following format:
# (basename, logging name, relative installation path, extraction filter)
_RESOURCES = [
('benchmark.zip', 'benchmark', '', None),
('binaries.zip', 'binaries', 'exe', None),
('symbols.zip', 'symbols', 'exe',
lambda x: x.filename.endswith('.dll.pdb'))]
def _LoadState(output_dir):
  """Returns the parsed state file contents for |output_dir|.

  Returns None when the state file is missing or holds invalid JSON.
  """
  state_path = os.path.join(output_dir, _STATE)
  if not os.path.exists(state_path):
    _LOGGER.debug('No state file found.')
    return None
  with open(state_path, 'rb') as state_file:
    _LOGGER.debug('Reading state file: %s', state_path)
    try:
      return json.load(state_file)
    except ValueError:
      _LOGGER.debug('Invalid state file.')
      return None
def _SaveState(output_dir, state, dry_run=False):
  """Saves the |state| dictionary to the given |output_dir| as a JSON file.

  Args:
    output_dir: directory the state file is written into.
    state: a JSON-serializable state dictionary.
    dry_run: if True, log the path but write nothing.
  """
  path = os.path.join(output_dir, _STATE)
  _LOGGER.debug('Writing state file: %s', path)
  if dry_run:
    return
  # json.dumps returns a str, so the file must be opened in text mode;
  # the previous 'wb' mode raised TypeError under Python 3.
  with open(path, 'w') as f:
    f.write(json.dumps(state, sort_keys=True, indent=2))
def _Md5(path):
"""Returns the MD5 hash of the file at |path|, which must exist."""
return hashlib.md5(open(path, 'rb').read()).hexdigest()
def _StateIsValid(state):
  """Returns true if the given state structure is valid."""
  if not isinstance(state, dict):
    _LOGGER.debug('State must be a dict.')
    return False
  revision = state.get('revision', None)
  if not isinstance(revision, str) or not _REVISION_RE.match(revision):
    _LOGGER.debug('State contains an invalid revision.')
    return False
  contents = state.get('contents', None)
  if not isinstance(contents, dict):
    _LOGGER.debug('State must contain a contents dict.')
    return False
  # Every entry must map a non-empty path string to an MD5 digest string.
  for (relpath, digest) in contents.items():
    if not isinstance(relpath, str) or len(relpath) == 0:
      _LOGGER.debug('State contents dict contains an invalid path.')
      return False
    if not isinstance(digest, str) or not _MD5_RE.match(digest):
      _LOGGER.debug('State contents dict contains an invalid MD5 digest.')
      return False
  return True
def _BuildActualState(stored, revision, output_dir):
  """Builds the actual state using the provided |stored| state as a template.

  Only files listed in the stored state are examined, so files added to the
  directories locally are ignored. |stored| must be a valid state dictionary.
  """
  actual_contents = {}
  for relpath in stored['contents']:
    full_path = os.path.abspath(os.path.join(output_dir, relpath))
    # Only hash paths that still exist as regular files.
    if os.path.isfile(full_path):
      actual_contents[relpath] = _Md5(full_path)
  return {'revision': revision, 'contents': actual_contents}
def _StatesAreConsistent(stored, actual):
  """Returns True when |actual| is consistent with |stored|.

  Both must be valid state dictionaries. Entries present only in |actual|
  are ignored.
  """
  if stored['revision'] != actual['revision']:
    _LOGGER.debug('Mismatched revision number.')
    return False
  actual_contents = actual['contents']
  for relpath, expected_md5 in stored['contents'].items():
    if relpath not in actual_contents:
      _LOGGER.debug('Missing content: %s', relpath)
      return False
    if actual_contents[relpath] != expected_md5:
      _LOGGER.debug('Modified content: %s', relpath)
      return False
  return True
def _GetCurrentState(revision, output_dir):
  """Loads the current state and checks to see if it is consistent. Returns
  a tuple (state, bool). The returned state will always be valid, even if an
  invalid state is present on disk.
  """
  stored = _LoadState(output_dir)
  if not _StateIsValid(stored):
    _LOGGER.debug('State is invalid.')
    # Return a valid but empty state.
    return ({'revision': '0', 'contents': {}}, False)
  actual = _BuildActualState(stored, revision, output_dir)
  # If the script has been modified consider the state invalid.
  # (Compares this script's mtime against the state file's mtime; the state
  # file is known to exist here because _LoadState succeeded.)
  path = os.path.join(output_dir, _STATE)
  if os.path.getmtime(__file__) > os.path.getmtime(path):
    return (stored, False)
  # Otherwise, explicitly validate the state.
  if not _StatesAreConsistent(stored, actual):
    return (stored, False)
  return (stored, True)
def _DirIsEmpty(path):
"""Returns true if the given directory is empty, false otherwise."""
for root, dirs, files in os.walk(path):
return not dirs and not files
def _RmTreeHandleReadOnly(func, path, exc):
  """An error handling function for use with shutil.rmtree. This will
  detect failures to remove read-only files, and will change their properties
  prior to removing them. This is necessary on Windows as os.remove will return
  an access error for read-only files, and git repos contain read-only
  pack/index files.
  """
  # exc is the sys.exc_info() triple passed by shutil.rmtree's onerror hook.
  excvalue = exc[1]
  if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
    _LOGGER.debug('Removing read-only path: %s', path)
    # Grant full permissions to everyone, then retry the failed operation.
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    func(path)
  else:
    # Re-raise the original exception (onerror runs inside rmtree's
    # except block, so a bare raise re-raises it).
    raise
def _RmTree(path):
  """A wrapper of shutil.rmtree that handles read-only files."""
  # onerror fixes up permissions and retries (see _RmTreeHandleReadOnly).
  shutil.rmtree(path, ignore_errors=False, onerror=_RmTreeHandleReadOnly)
def _CleanState(output_dir, state, dry_run=False):
  """Cleans up files/directories in |output_dir| that are referenced by
  the given |state|. Raises an error if there are local changes. Returns a
  dictionary of files that were deleted.
  """
  _LOGGER.debug('Deleting files from previous installation.')
  deleted = {}
  # Generate a list of files to delete, relative to |output_dir|.
  contents = state['contents']
  files = sorted(contents.keys())
  # Try to delete the files. Keep track of directories to delete as well.
  dirs = {}
  for relpath in files:
    fullpath = os.path.join(output_dir, relpath)
    fulldir = os.path.dirname(fullpath)
    dirs[fulldir] = True
    if os.path.exists(fullpath):
      # If somehow the file has become a directory complain about it.
      if os.path.isdir(fullpath):
        raise Exception('Directory exists where file expected: %s' % fullpath)
      # Double check that the file doesn't have local changes. If it does
      # then refuse to delete it.
      if relpath in contents:
        stored_md5 = contents[relpath]
        actual_md5 = _Md5(fullpath)
        if actual_md5 != stored_md5:
          raise Exception('File has local changes: %s' % fullpath)
      # The file is unchanged so it can safely be deleted.
      _LOGGER.debug('Deleting file "%s".', fullpath)
      deleted[relpath] = True
      if not dry_run:
        os.unlink(fullpath)
  # Sort directories from longest name to shortest. This lets us remove empty
  # directories from the most nested paths first.
  dirs = sorted(list(dirs.keys()), key=lambda x: len(x), reverse=True)
  for p in dirs:
    # Only remove a directory once nothing is left inside it.
    if os.path.exists(p) and _DirIsEmpty(p):
      _LOGGER.debug('Deleting empty directory "%s".', p)
      if not dry_run:
        _RmTree(p)
  return deleted
def _FindGsUtil():
"""Looks for depot_tools and returns the absolute path to gsutil.py."""
for path in os.environ['PATH'].split(os.pathsep):
path = os.path.abspath(path)
git_cl = os.path.join(path, 'git_cl.py')
gs_util = os.path.join(path, 'gsutil.py')
if os.path.exists(git_cl) and os.path.exists(gs_util):
return gs_util
return None
def _GsUtil(*cmd):
  """Runs the given command in gsutil with exponential backoff and retries."""
  gs_util = _FindGsUtil()
  cmd = [sys.executable, gs_util] + list(cmd)
  retries = 3
  timeout = 4  # Seconds.
  while True:
    _LOGGER.debug('Running %s', cmd)
    prog = subprocess.Popen(cmd, shell=False)
    prog.communicate()
    # Stop retrying on success.
    if prog.returncode == 0:
      return
    # Raise a permanent failure if retries have been exhausted.
    if retries == 0:
      raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))
    _LOGGER.debug('Sleeping %d seconds and trying again.', timeout)
    time.sleep(timeout)
    retries -= 1
    # Exponential backoff: double the wait between attempts.
    timeout *= 2
def _Download(resource):
  """Downloads the given GS resource to a temporary file, returning its path."""
  tmp = tempfile.mkstemp(suffix='syzygy_archive')
  # Close the OS-level handle returned by mkstemp; gsutil writes the file.
  os.close(tmp[0])
  url = 'gs://syzygy-archive' + resource
  _GsUtil('cp', url, tmp[1])
  return tmp[1]
def _InstallBinaries(options, deleted=None):
  """Installs Syzygy binaries. This assumes that the output directory has
  already been cleaned, as it will refuse to overwrite existing files.

  Args:
    options: Parsed command-line options.
    deleted: Optional dict whose keys are the relative paths of files that
        were (or, in a dry-run, would have been) removed by a prior cleanup.

  Returns:
    The new state dict with 'revision' and a 'contents' map of relative
    paths to MD5 digests.
  """
  # Previously this used the mutable default argument {}; use None as the
  # sentinel so the default is not shared between calls.
  if deleted is None:
    deleted = {}
  contents = {}
  state = { 'revision': options.revision, 'contents': contents }
  archive_path = _SYZYGY_ARCHIVE_PATH % { 'revision': options.revision }
  if options.resources:
    resources = [(resource, resource, '', None)
                 for resource in options.resources]
  else:
    resources = _RESOURCES
  for (base, name, subdir, filt) in resources:
    # Create the output directory if it doesn't exist.
    fulldir = os.path.join(options.output_dir, subdir)
    if os.path.isfile(fulldir):
      raise Exception('File exists where a directory needs to be created: %s' %
                      fulldir)
    if not os.path.exists(fulldir):
      _LOGGER.debug('Creating directory: %s', fulldir)
      if not options.dry_run:
        os.makedirs(fulldir)
    # Download and read the archive.
    resource = archive_path + '/' + base
    _LOGGER.debug('Retrieving %s archive at "%s".', name, resource)
    path = _Download(resource)
    _LOGGER.debug('Unzipping %s archive.', name)
    with open(path, 'rb') as data:
      archive = zipfile.ZipFile(data)
      for entry in archive.infolist():
        if not filt or filt(entry):
          fullpath = os.path.normpath(os.path.join(fulldir, entry.filename))
          relpath = os.path.relpath(fullpath, options.output_dir)
          if os.path.exists(fullpath):
            # If in a dry-run take into account the fact that the file *would*
            # have been deleted.
            if options.dry_run and relpath in deleted:
              pass
            else:
              raise Exception('Path already exists: %s' % fullpath)
          # Extract the file and update the state dictionary.
          _LOGGER.debug('Extracting "%s".', fullpath)
          if not options.dry_run:
            archive.extract(entry.filename, fulldir)
            md5 = _Md5(fullpath)
            contents[relpath] = md5
            if sys.platform == 'cygwin':
              # Cygwin's unzip does not preserve the executable bit.
              os.chmod(fullpath, os.stat(fullpath).st_mode | stat.S_IXUSR)
    _LOGGER.debug('Removing temporary file "%s".', path)
    os.remove(path)
  return state
def _ParseCommandLine():
  """Parses the command-line and returns an options structure."""
  option_parser = optparse.OptionParser()
  option_parser.add_option('--dry-run', action='store_true', default=False,
      help='If true then will simply list actions that would be performed.')
  option_parser.add_option('--force', action='store_true', default=False,
      help='Force an installation even if the binaries are up to date.')
  option_parser.add_option('--no-cleanup', action='store_true', default=False,
      help='Allow installation on non-Windows platforms, and skip the forced '
           'cleanup step.')
  option_parser.add_option('--output-dir', type='string',
      help='The path where the binaries will be replaced. Existing binaries '
           'will only be overwritten if not up to date.')
  option_parser.add_option('--overwrite', action='store_true', default=False,
      help='If specified then the installation will happily delete and rewrite '
           'the entire output directory, blasting any local changes.')
  option_parser.add_option('--revision', type='string',
      help='The SVN revision or GIT hash associated with the required version.')
  option_parser.add_option('--revision-file', type='string',
      help='A text file containing an SVN revision or GIT hash.')
  option_parser.add_option('--resource', type='string', action='append',
      dest='resources', help='A resource to be downloaded.')
  option_parser.add_option('--verbose', dest='log_level', action='store_const',
      default=logging.INFO, const=logging.DEBUG,
      help='Enables verbose logging.')
  option_parser.add_option('--quiet', dest='log_level', action='store_const',
      default=logging.INFO, const=logging.ERROR,
      help='Disables all output except for errors.')
  options, args = option_parser.parse_args()
  if args:
    option_parser.error('Unexpected arguments: %s' % args)
  if not options.output_dir:
    option_parser.error('Must specify --output-dir.')
  if not options.revision and not options.revision_file:
    option_parser.error('Must specify one of --revision or --revision-file.')
  if options.revision and options.revision_file:
    option_parser.error('Must not specify both --revision and --revision-file.')
  # Configure logging.
  logging.basicConfig(level=options.log_level)
  # If a revision file has been specified then read it. Open in text mode
  # (previously 'rb', which yields bytes under Python 3 and breaks the str
  # regex match below) and close the handle deterministically.
  if options.revision_file:
    with open(options.revision_file) as revision_file:
      options.revision = revision_file.read().strip()
    _LOGGER.debug('Parsed revision "%s" from file "%s".',
                 options.revision, options.revision_file)
  # Ensure that the specified SVN revision or GIT hash is valid.
  if not _REVISION_RE.match(options.revision):
    option_parser.error('Must specify a valid SVN or GIT revision.')
  # This just makes output prettier to read.
  options.output_dir = os.path.normpath(options.output_dir)
  return options
def _RemoveOrphanedFiles(options):
  """This is run on non-Windows systems to remove orphaned files that may have
  been downloaded by a previous version of this script.

  Args:
    options: Parsed command-line options; only output_dir and dry_run are
        consulted.
  """
  # Reconfigure logging to output info messages. This will allow inspection of
  # cleanup status on non-Windows buildbots.
  _LOGGER.setLevel(logging.INFO)
  output_dir = os.path.abspath(options.output_dir)
  # We only want to clean up the folder in 'src/third_party/syzygy', and we
  # expect to be called with that as an output directory. This is an attempt to
  # not start deleting random things if the script is run from an alternate
  # location, or not called from the gclient hooks.
  expected_syzygy_dir = os.path.abspath(os.path.join(
      os.path.dirname(__file__), '..', 'third_party', 'syzygy'))
  expected_output_dir = os.path.join(expected_syzygy_dir, 'binaries')
  if expected_output_dir != output_dir:
    _LOGGER.info('Unexpected output directory, skipping cleanup.')
    return
  if not os.path.isdir(expected_syzygy_dir):
    _LOGGER.info('Output directory does not exist, skipping cleanup.')
    return
  def OnError(function, path, excinfo):
    """Logs error encountered by shutil.rmtree."""
    _LOGGER.error('Error when running %s(%s)', function, path, exc_info=excinfo)
  _LOGGER.info('Removing orphaned files from %s', expected_syzygy_dir)
  if not options.dry_run:
    # ignore_errors=False with an onerror callback: failures are logged but
    # do not abort the cleanup.
    shutil.rmtree(expected_syzygy_dir, True, OnError)
def main():
  """Entry point: validates state and (re)installs the Syzygy binaries."""
  options = _ParseCommandLine()
  if options.dry_run:
    _LOGGER.debug('Performing a dry-run.')
  # We only care about Windows platforms, as the Syzygy binaries aren't used
  # elsewhere. However, there was a short period of time where this script
  # wasn't gated on OS types, and those OSes downloaded and installed binaries.
  # This will cleanup orphaned files on those operating systems.
  if sys.platform not in ('win32', 'cygwin'):
    if options.no_cleanup:
      _LOGGER.debug('Skipping usual cleanup for non-Windows platforms.')
    else:
      return _RemoveOrphanedFiles(options)
  # Load the current installation state, and validate it against the
  # requested installation.
  state, is_consistent = _GetCurrentState(options.revision, options.output_dir)
  # Decide whether or not an install is necessary.
  if options.force:
    _LOGGER.debug('Forcing reinstall of binaries.')
  elif is_consistent:
    # Avoid doing any work if the contents of the directory are consistent.
    _LOGGER.debug('State unchanged, no reinstall necessary.')
    return
  # Under normal logging this is the only message that will be reported.
  _LOGGER.info('Installing revision %s Syzygy binaries.',
               options.revision[0:12])
  # Clean up the old state to begin with.
  deleted = []
  if options.overwrite:
    if os.path.exists(options.output_dir):
      # If overwrite was specified then take a heavy-handed approach.
      _LOGGER.debug('Deleting entire installation directory.')
      if not options.dry_run:
        _RmTree(options.output_dir)
  else:
    # Otherwise only delete things that the previous installation put in place,
    # and take care to preserve any local changes.
    deleted = _CleanState(options.output_dir, state, options.dry_run)
  # Install the new binaries. In a dry-run this will actually download the
  # archives, but it won't write anything to disk.
  state = _InstallBinaries(options, deleted)
  # Build and save the state for the directory.
  _SaveState(options.output_dir, state, options.dry_run)
# Script entry point.
if __name__ == '__main__':
  main()
| bsd-3-clause | c2f4a9787dad59c23a43b5eecc5d995b | 36.067623 | 80 | 0.686273 | 3.807409 | false | false | false | false |
flutter/buildroot | build/landmine_utils.py | 1 | 3015 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import shlex
import sys
def memoize(default=None):
  """This decorator caches the return value of a parameterless pure function.

  If the function returns None, *default* is cached and returned instead.
  """
  def memoizer(func):
    cache = {}
    @functools.wraps(func)
    def inner():
      if 'value' not in cache:
        result = func()
        cache['value'] = default if result is None else result
        # Mirror the computed value when verbose logging is on (first
        # call only).
        if logging.getLogger().isEnabledFor(logging.INFO):
          print('%s -> %r' % (func.__name__, cache['value']))
      return cache['value']
    return inner
  return memoizer
@memoize()
def IsWindows():
  # True when running on a Windows host (including Cygwin's Python).
  return sys.platform in ['win32', 'cygwin']
@memoize()
def IsLinux():
  # True on Linux and the BSDs this build supports.
  return sys.platform.startswith(('linux', 'freebsd', 'openbsd'))
@memoize()
def IsMac():
  # True on macOS.
  return sys.platform == 'darwin'
@memoize()
def gyp_defines():
  """Parses and returns GYP_DEFINES env var as a dictionary."""
  # Entries are shell-style 'key=value' tokens; values may contain '='.
  return dict(arg.split('=', 1)
      for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
@memoize()
def gyp_generator_flags():
  """Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
  return dict(arg.split('=', 1)
      for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))
@memoize()
def gyp_msvs_version():
  # Raw GYP_MSVS_VERSION value, or '' when unset.
  return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
  """
  Returns a string which is the distributed build engine in use (if any).
  Possible values: 'goma', 'ib', ''
  """
  if 'goma' in gyp_defines():
    return 'goma'
  elif IsWindows():
    if 'CHROME_HEADLESS' in os.environ:
      return 'ib' # use (win and !goma and headless) as approximation of ib
  # No distributed build engine detected. Return '' explicitly so the result
  # matches the documented set of possible values instead of an implicit None.
  return ''
@memoize()
def platform():
  """
  Returns a string representing the platform this build is targetted for.
  Possible values: 'win', 'mac', 'linux', 'ios', 'android'
  """
  # An explicit OS in GYP_DEFINES (e.g. 'android', 'ios') wins; otherwise
  # fall back to the host platform.
  if 'OS' in gyp_defines():
    if 'android' in gyp_defines()['OS']:
      return 'android'
    else:
      return gyp_defines()['OS']
  elif IsWindows():
    return 'win'
  elif IsLinux():
    return 'linux'
  else:
    return 'mac'
@memoize()
def builder():
  """
  Returns a string representing the build engine (not compiler) to use.
  Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
  """
  if 'GYP_GENERATORS' in os.environ:
    # for simplicity, only support the first explicit generator
    generator = os.environ['GYP_GENERATORS'].split(',')[0]
    if generator.endswith('-android'):
      return generator.split('-')[0]
    elif generator.endswith('-ninja'):
      return 'ninja'
    else:
      return generator
  else:
    # No explicit generator: pick the conventional default per platform.
    if platform() == 'android':
      # Good enough for now? Do any android bots use make?
      return 'ninja'
    elif platform() == 'ios':
      return 'xcode'
    elif IsWindows():
      return 'ninja'
    elif IsLinux():
      return 'ninja'
    elif IsMac():
      return 'ninja'
    else:
      assert False, 'Don\'t know what builder we\'re using!'
| bsd-3-clause | a20e3ec2207e7828493ea344c2effe91 | 24.125 | 79 | 0.636816 | 3.663426 | false | false | false | false |
flutter/buildroot | tools/dart/dart_roll_helper.py | 1 | 13149 | #!/usr/bin/env python3 -u
#
# Copyright 2018 The Dart project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script automates the Dart SDK roll steps, including:
# - Updating the Dart revision in DEPS
# - Updating the Dart dependencies in DEPS
# - Syncing dependencies with 'gclient sync'
# - Generating GN files for relevant engine configurations
# - Building relevant engine configurations
# - Running tests in 'example/flutter_gallery' and 'packages/flutter'
# - Launching flutter_gallery in release and debug mode
# - Running license.sh and updating license files in
# 'flutter/ci/licenses_golden'
# - Generating a commit with relevant Dart SDK commit logs (optional)
#
# The following environment variables can be set instead of being passed as
# arguments:
# - FLUTTER_HOME: the absolute path to the 'flutter' directory
# - ENGINE_HOME: the absolute path to the 'engine/src' directory
# - DART_SDK_HOME: the absolute path to the root of a Dart SDK project
from dart_roll_utils import *
import argparse
import atexit
import datetime
import fileinput
import os
import platform
import signal
import shutil
import subprocess
import sys
# Key of the Dart revision entry in the engine DEPS file.
DART_REVISION_ENTRY = 'dart_revision'
# Common flutter tool invocations, rooted at the framework checkout.
FLUTTER = '{}/bin/flutter'.format(flutter_home())
FLUTTER_DOCTOR = [FLUTTER, 'doctor']
FLUTTER_RUN = [FLUTTER, 'run']
FLUTTER_TEST = [FLUTTER, 'test']
MAX_GCLIENT_RETRIES = 3
# Returned when licenses do not require updating.
LICENSE_SCRIPT_OKAY = 0
# Returned when licenses require updating.
LICENSE_SCRIPT_UPDATES = 1
# Returned when either 'pub' or 'dart' isn't in the path.
LICENSE_SCRIPT_EXIT_ERROR = 127
# The child process currently run by run_process, so signal/atexit handlers
# can terminate it; None when no child is running.
CURRENT_SUBPROCESS = None
def update_dart_revision(dart_revision):
  """Rewrites the 'dart_revision' entry in DEPS to *dart_revision*.

  Refuses (and exits) if the requested revision is older than the current
  one.

  Returns:
    The revision that was previously in DEPS ('' if no entry was found).
  """
  original_revision = ''
  print_status('Updating Dart revision to {}'.format(dart_revision))
  content = get_deps()
  for idx, line in enumerate(content):
    if DART_REVISION_ENTRY in line:
      # Entry has the form "  'dart_revision': '<hash>',"; strip quotes/comma.
      original_revision = line.strip().split(' ')[1][1:-2]
      if not is_ancestor_commit(original_revision,
                                dart_revision,
                                dart_sdk_home()):
        # BUG FIX: previously '.format(...)' bound only to the second string
        # operand of '+', so the '{}' placeholders were never substituted.
        # Rely on implicit string-literal concatenation so format applies to
        # the whole message.
        print_error('Dart revision {} is older than existing revision, {}.'
                    ' Aborting roll.'.format(dart_revision, original_revision))
        sys.exit(ERROR_OLD_COMMIT_PROVIDED)
      content[idx] = "  'dart_revision': '" + dart_revision + "',\n"
      break
  write_deps(content)
  return original_revision
def run_process(args, cwd=None, stdout=None):
  """Runs *args* as a child process and returns its exit code.

  The child is tracked in the module-level CURRENT_SUBPROCESS while it runs
  so signal/atexit handlers can terminate it.
  """
  global CURRENT_SUBPROCESS
  CURRENT_SUBPROCESS = subprocess.Popen(args, cwd=cwd, stdout=stdout)
  try:
    return CURRENT_SUBPROCESS.wait()
  finally:
    CURRENT_SUBPROCESS = None
def gclient_sync():
  """Runs 'gclient sync', retrying up to MAX_GCLIENT_RETRIES times.

  Exits the process with ERROR_GCLIENT_SYNC_FAILED when every attempt fails.
  """
  # NOTE: a dead 'global CURRENT_SUBPROCESS' declaration was removed here;
  # this function never rebinds that name (run_process manages it).
  exit_code = None
  num_retries = 0
  # exit_code is None on the first pass, so the loop always runs at least
  # once.
  while exit_code != 0 and num_retries < MAX_GCLIENT_RETRIES:
    print_status('Running gclient sync (Attempt {}/{})'
                 .format(num_retries + 1, MAX_GCLIENT_RETRIES))
    exit_code = run_process(['gclient', 'sync', '--delete_unversioned_trees'],
                            cwd=engine_home())
    if exit_code != 0:
      num_retries += 1
      if num_retries == MAX_GCLIENT_RETRIES:
        print_error('Max number of gclient sync retries attempted. Aborting roll.')
        sys.exit(ERROR_GCLIENT_SYNC_FAILED)
def get_deps():
  """Returns the engine DEPS file as a list of lines (newlines kept)."""
  with open(flutter_deps_path(), 'r') as f:
    content = f.readlines()
  return content
def update_deps():
  """Runs the engine's update-Dart-deps helper script."""
  print_status('Updating Dart dependencies')
  run_process([update_dart_deps_path()], cwd=engine_home())
def write_deps(newdeps):
  """Overwrites the engine DEPS file with *newdeps* (a list of lines)."""
  with open(flutter_deps_path(), 'w') as f:
    f.write(''.join(newdeps))
def run_gn():
  """Generates GN build files for every engine configuration the roll tests."""
  print_status('Generating build files')
  common = [os.path.join('flutter', 'tools', 'gn'), '--goma', '--full-dart-sdk']
  debug = ['--runtime-mode=debug']
  profile = ['--runtime-mode=profile']
  release = ['--runtime-mode=release']
  runtime_modes = [debug, profile, release]
  unopt = ['--unoptimized']
  android = ['--android']
  fuchsia = ['--fuchsia']
  for mode in runtime_modes:
    # Unoptimized android builds are not generated for release mode.
    if set(mode) != set(release):
      run_process(common + android + unopt + mode, cwd=engine_home())
    run_process(common + android + mode, cwd=engine_home())
    # Host builds: only debug is unoptimized.
    host = common[:]
    if set(mode) == set(debug):
      host += unopt
    run_process(host + mode, cwd=engine_home())
  run_process(common + fuchsia + debug + unopt, cwd=engine_home())
def build():
  """Builds all required Flutter engine configurations, aborting on failure."""
  print_status('Building Flutter engine')
  command = ['ninja', '-j1000']
  configs = [
      'host_debug_unopt',
      'host_release',
      'host_profile',
      'android_debug_unopt',
      'android_profile',
      'android_release',
      'fuchsia_debug_unopt_x64',
  ]
  # On macOS the GN output directory is 'xcodebuild' rather than 'out'.
  build_dir = 'out'
  if platform.system() == 'Darwin':
    build_dir = 'xcodebuild'
  for config in configs:
    error_code = run_process(command + ['-C', os.path.join(build_dir, config)],
                             cwd=engine_home())
    if error_code != 0:
      print_error('Build failure for configuration "' +
                  config +
                  '". Aborting roll.')
      sys.exit(ERROR_BUILD_FAILED)
def run_flutter_doctor():
  """Sanity-checks the local engine build via 'flutter doctor'."""
  print_status('Running flutter doctor')
  engine_src_path = '--local-engine-src-path={}'.format(engine_home())
  result = run_process(FLUTTER_DOCTOR + ['--local-engine=host_debug_unopt',
                                         engine_src_path],
                       cwd=package_flutter_path())
  if result != 0:
    print_error('flutter doctor failed. Aborting roll.')
    sys.exit(ERROR_FLUTTER_DOCTOR_FAILED)
def run_tests():
  """Runs host tests in packages/flutter and examples/flutter_gallery."""
  print_status('Running tests in packages/flutter')
  engine_src_path = '--local-engine-src-path={}'.format(engine_home())
  result = run_process(FLUTTER_TEST + ['--local-engine=host_debug_unopt',
                                       engine_src_path],
                       cwd=package_flutter_path())
  if result != 0:
    print_error('package/flutter tests failed. Aborting roll.')
    sys.exit(ERROR_PKG_FLUTTER_FAILED)
  print_status('Running tests in examples/flutter_gallery')
  result = run_process(FLUTTER_TEST + ['--local-engine=host_debug_unopt',
                                       engine_src_path,
                                       '--disable-service-auth-codes'],
                       cwd=flutter_gallery_path());
  if result != 0:
    print_error('flutter_gallery tests failed. Aborting roll.')
    sys.exit(ERROR_FLUTTER_GALLERY_FAILED)
def run_hot_reload_configurations():
  """Launches flutter_gallery on a device in release then debug mode.

  Interactive: the operator exercises hot reload manually and quits each run.
  """
  print_status('Running flutter gallery release')
  engine_src_path = '--local-engine-src-path={}'.format(engine_home())
  run_process(FLUTTER_RUN + ['--release',
                             '--local-engine=android_release',
                             engine_src_path],
              cwd=flutter_gallery_path())
  print_status('Running flutter gallery debug')
  run_process(FLUTTER_RUN + ['--local-engine=android_debug_unopt',
                             engine_src_path],
              cwd=flutter_gallery_path())
def update_licenses():
  """Regenerates the Flutter license goldens and the sky LICENSE file."""
  print_status('Updating Flutter licenses')
  result = run_process([engine_license_script_path()], cwd=engine_home())
  if result == LICENSE_SCRIPT_EXIT_ERROR:
    print_error('License script failed to run. Is the Dart SDK (specifically' +
                ' dart and pub) in your path? Aborting roll.')
    sys.exit(ERROR_LICENSE_SCRIPT_FAILED)
  elif (result != LICENSE_SCRIPT_OKAY) and (result != LICENSE_SCRIPT_UPDATES):
    print_error('Unknown license script error: {}. Aborting roll.'
                .format(result))
    sys.exit(ERROR_LICENSE_SCRIPT_FAILED)
  # Ignore 'licenses_skia' as they shouldn't change during a Dart SDK roll.
  src_files = ['licenses_flutter', 'licenses_third_party', 'tool_signature']
  for f in src_files:
    path = os.path.join(license_script_output_path(), f)
    if os.path.isfile(path):
      shutil.copy(path, engine_golden_licenses_path())
  run_process(['pub', 'get'], cwd=engine_license_script_package_path())
  gclient_sync()
  # Update the LICENSE file.
  with open(sky_license_file_path(), 'w') as sky_license:
    # The license tool writes its report to stdout; capture it directly
    # into the LICENSE file.
    run_process(['dart', os.path.join('lib', 'main.dart'),
                 '--release', '--src', engine_home(),
                 '--out', engine_license_script_output_path()],
                cwd=engine_license_script_package_path(),
                stdout=sky_license)
def get_commit_range(start, finish):
  """Returns the one-line-per-commit git log between two Dart SDK revisions.

  Each line is prefixed with 'dart-lang/sdk@' so the hashes link correctly
  when pasted into a GitHub commit message.
  """
  range_str = '{}..{}'.format(start, finish)
  command = ['git', 'log', '--oneline', range_str]
  # Run git directly in the Dart SDK checkout via cwd= rather than the old
  # os.chdir() dance, which left the process in the wrong directory if
  # check_output raised.
  result = subprocess.check_output(command, cwd=dart_sdk_home()).decode('utf-8')
  return '\n'.join('dart-lang/sdk@' + l for l in result.splitlines())
def get_short_rev(rev):
  """Returns the abbreviated git hash for *rev* in the Dart SDK checkout."""
  command = ['git', 'rev-parse', '--short', rev]
  # Use cwd= instead of os.chdir(): the previous implementation did not
  # restore the working directory when check_output raised.
  return subprocess.check_output(
      command, cwd=dart_sdk_home()).decode('utf-8').rstrip()
def git_commit(original_revision, updated_revision):
  """Commits the roll in the engine's flutter directory.

  The commit message embeds the short revision range, commit count, and the
  full one-line Dart SDK log.
  """
  print_status('Committing Dart SDK roll')
  # NOTE: an unused 'current_date' local (datetime.date.today()) was removed.
  sdk_log = get_commit_range(original_revision, updated_revision)
  num_commits = len(sdk_log.splitlines())
  commit_msg = ('Roll src/third_party/dart {}..{} ({} commits)'
                .format(get_short_rev(original_revision),
                        get_short_rev(updated_revision), num_commits))
  commit_msg += '\n\n' + sdk_log
  commit_cmd = ['git', 'commit', '-a', '-m', commit_msg]
  run_process(commit_cmd, cwd=engine_flutter_path())
def update_roots(args):
  """Applies command-line path overrides, then verifies all roots are set.

  Exits with ERROR_MISSING_ROOTS if any of the flutter/engine/dart-sdk roots
  is still unset after applying overrides.
  """
  if args.flutter_home:
    set_flutter_home(args.flutter_home)
  if args.engine_home:
    set_engine_home(args.engine_home)
  if args.dart_sdk_home:
    set_dart_sdk_home(args.dart_sdk_home)
  if flutter_home() == '':
    print_error('Either "--flutter-home" must be provided or FLUTTER_HOME must' +
                ' be set. Aborting roll.')
    sys.exit(ERROR_MISSING_ROOTS)
  if engine_home() == '':
    print_error('Either "--engine-home" must be provided or ENGINE_HOME must' +
                ' be set. Aborting roll.')
    sys.exit(ERROR_MISSING_ROOTS)
  if dart_sdk_home() == '':
    print_error('Either "--dart-sdk-home" must be provided or DART_SDK_HOME ' +
                'must be set. Aborting roll.')
    sys.exit(ERROR_MISSING_ROOTS)
def sys_exit(signal, frame):
  # SIGTERM handler: raise SystemExit so atexit handlers (cleanup_children)
  # still run. The parameter names follow the signal-handler signature and
  # intentionally shadow the 'signal' module within this scope.
  sys.exit()
def cleanup_children():
  """atexit hook: terminates the tracked child process, if one is running."""
  # Use the identity check 'is not None' (PEP 8) instead of '!= None'.
  if CURRENT_SUBPROCESS is not None:
    CURRENT_SUBPROCESS.terminate()
def main():
  """Parses arguments and drives the requested Dart SDK roll steps."""
  parser = argparse.ArgumentParser(description='Automate most Dart SDK roll tasks.')
  parser.add_argument('--dart-sdk-home', help='Path to the Dart SDK ' +
                      'repository. Overrides DART_SDK_HOME environment variable')
  parser.add_argument('dart_sdk_revision', help='Target Dart SDK revision')
  parser.add_argument('--create-commit', action='store_true',
                      help='Create the engine commit with Dart SDK commit log')
  parser.add_argument('--engine-home', help='Path to the Flutter engine ' +
                      'repository. Overrides ENGINE_HOME environment variable')
  parser.add_argument('--flutter-home', help='Path to the Flutter framework ' +
                      'repository. Overrides FLUTTER_HOME environment variable')
  parser.add_argument('--no-build', action='store_true',
                      help='Skip rebuilding the Flutter engine')
  parser.add_argument('--no-hot-reload', action='store_true',
                      help="Skip hot reload testing")
  parser.add_argument('--no-test', action='store_true',
                      help='Skip running host tests for package/flutter and ' +
                      'flutter_gallery')
  parser.add_argument('--no-update-deps', action='store_true',
                      help='Skip updating DEPS file')
  parser.add_argument('--no-update-licenses', action='store_true',
                      help='Skip updating licenses')
  args = parser.parse_args()
  # Ensure a tracked child process is killed on exit or SIGTERM.
  atexit.register(cleanup_children)
  signal.signal(signal.SIGTERM, sys_exit)
  original_revision = None
  updated_revision = args.dart_sdk_revision
  # Disable buffering of log output
  os.environ["PYTHONUNBUFFERED"] = "1"
  update_roots(args)
  print_status('Starting Dart SDK roll')
  if not args.no_update_deps:
    original_revision = update_dart_revision(updated_revision)
    gclient_sync()
    update_deps()
    gclient_sync()
  if not args.no_build:
    run_gn()
    build()
  # flutter doctor is only needed if any on-device/host testing will happen.
  if ((not args.no_test) or (not args.no_hot_reload)):
    run_flutter_doctor()
  if not args.no_test:
    run_tests()
  if not args.no_hot_reload:
    run_hot_reload_configurations()
  if not args.no_update_licenses:
    update_licenses()
  if args.create_commit:
    if original_revision == None:
      print_warning('"original_revision" not specified. Skipping commit.')
      print_warning('This happens when the "--no_update_deps" argument is ' +
                    'provided')
    else:
      git_commit(original_revision, updated_revision)
  print_status('Dart SDK roll complete!')
# Script entry point.
if __name__ == '__main__':
  main()
| bsd-3-clause | 1bd0a4afdfba0605df11f46601cb0baf | 34.92623 | 84 | 0.63617 | 3.594587 | false | false | false | false |
niwinz/django-jinja | django_jinja/views/__init__.py | 2 | 1916 | from django import http
from django.template import loader
from django.views import View
from ..base import get_match_extension
class GenericView(View):
    """Class-based view that renders ``tmpl_name`` with the template loader.

    ``tmpl_name`` may be a template name or a zero-argument callable
    returning one; it is resolved lazily on each request so subclasses can
    compute it dynamically.
    """

    response_cls = http.HttpResponse  # response class wrapping the output
    content_type = "text/html"        # MIME type sent with the response
    tmpl_name = None                  # template name or zero-arg callable

    def get_context_data(self):
        # Minimal context; subclasses are expected to extend this.
        return {"view": self}

    def get(self, request, *args, **kwargs):
        context = self.get_context_data()
        # and/or idiom: call tmpl_name if it is callable, otherwise use it
        # as-is. NOTE(review): a falsy callable result would fall through to
        # tmpl_name itself — presumably never the case here.
        template_name = callable(self.tmpl_name) and self.tmpl_name() or self.tmpl_name
        output = loader.render_to_string(template_name, context, request=request)
        return self.response_cls(output, content_type=self.content_type)
class ErrorView(GenericView):
    """Base view for error pages: every HTTP method renders the same page.

    Django dispatches each HTTP verb to its own handler, so each one simply
    delegates to get().
    """

    def head(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def options(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
class PageNotFound(ErrorView):
    """Renders the '404' template with an HTTP 404 response."""

    response_cls = http.HttpResponseNotFound

    def tmpl_name(self):
        # Use the extension registered for the Jinja backend, else '.jinja'.
        return "404" + (get_match_extension() or ".jinja")
class PermissionDenied(ErrorView):
    """Renders the '403' template with an HTTP 403 response."""

    response_cls = http.HttpResponseForbidden

    def tmpl_name(self):
        return "403" + (get_match_extension() or ".jinja")
class BadRequest(ErrorView):
    """Renders the '400' template with an HTTP 400 response."""

    response_cls = http.HttpResponseBadRequest

    def tmpl_name(self):
        return "400" + (get_match_extension() or ".jinja")
class ServerError(ErrorView):
    """Renders the '500' template with an HTTP 500 response."""

    response_cls = http.HttpResponseServerError

    def tmpl_name(self):
        return "500" + (get_match_extension() or ".jinja")
| bsd-3-clause | 7be6ed9492694f9f77825a707104383c | 27.176471 | 87 | 0.66023 | 3.691715 | false | false | false | false |
dask/dask-ml | tests/test_spectral_clustering.py | 1 | 3325 | from functools import partial
import numpy as np
import pytest
import sklearn.cluster
from dask_ml import metrics
from dask_ml.cluster import SpectralClustering
from dask_ml.datasets import make_blobs
# Shared module-level fixture: a small chunked dask array of blob data.
X, y = make_blobs(n_samples=200, chunks=100, random_state=0)
@pytest.mark.parametrize("as_ndarray", [False, True])
@pytest.mark.parametrize("persist_embedding", [True, False])
def test_basic(as_ndarray, persist_embedding):
    # Smoke test: fitting yields one label per sample, for both dask-array
    # and in-memory inputs, with and without persisting the embedding.
    sc = SpectralClustering(
        n_components=25, random_state=0, persist_embedding=persist_embedding
    )
    if as_ndarray:
        X_ = X.compute()
    else:
        X_ = X
    sc.fit(X_)
    assert len(sc.labels_) == len(X_)
@pytest.mark.parametrize(
    "assign_labels", [sklearn.cluster.KMeans(n_init=2), "sklearn-kmeans"]
)
def test_sklearn_kmeans(assign_labels):
    # Both an estimator instance and the string alias should resolve to a
    # fitted sklearn KMeans label assigner.
    sc = SpectralClustering(
        n_components=25,
        random_state=0,
        assign_labels=assign_labels,
        kmeans_params={"n_clusters": 8},
    )
    sc.fit(X)
    assert isinstance(sc.assign_labels_, sklearn.cluster.KMeans)
@pytest.mark.skip(reason="Can't reproduce CI failure.")
def test_callable_affinity():
    # A user-supplied callable should be accepted as the affinity metric.
    affinity = partial(
        metrics.pairwise.pairwise_kernels,
        metric="rbf",
        filter_params=True,
        gamma=1.0 / len(X),
    )
    sc = SpectralClustering(affinity=affinity, gamma=None)
    sc.fit(X)
def test_n_components_raises():
    # n_components equal to the number of samples is invalid.
    sc = SpectralClustering(n_components=len(X))
    with pytest.raises(ValueError) as m:
        sc.fit(X)
    assert m.match("n_components")
def test_assign_labels_raises():
    # Unknown string aliases and wrong-typed values are both rejected.
    sc = SpectralClustering(assign_labels="foo")
    with pytest.raises(ValueError) as m:
        sc.fit(X)
    assert m.match("Unknown 'assign_labels' 'foo'")

    sc = SpectralClustering(assign_labels=dict())
    with pytest.raises(TypeError) as m:
        sc.fit(X)
    assert m.match("Invalid type ")
def test_affinity_raises():
    # Unknown affinity names and non-callable non-string values are rejected.
    sc = SpectralClustering(affinity="foo")
    with pytest.raises(ValueError) as m:
        sc.fit(X)
    assert m.match("Unknown affinity metric name 'foo'")

    sc = SpectralClustering(affinity=np.array([]))
    with pytest.raises(TypeError) as m:
        sc.fit(X)
    assert m.match("Unexpected type for affinity 'ndarray'")
def test_spectral_clustering(Xl_blobs_easy):
    # On well-separated, standardized blobs each true cluster should map to
    # exactly one predicted label.
    X, y = Xl_blobs_easy
    X = (X - X.mean(0)) / X.std(0)
    model = SpectralClustering(
        random_state=0, n_clusters=3, n_components=5, gamma=None
    ).fit(X)
    labels = model.labels_.compute()
    y = y.compute()
    # First index of each true class, then all indices sharing that class.
    idx = [(y == i).argmax() for i in range(3)]
    grouped_idx = [np.where(y == y[idx[i]])[0] for i in range(3)]
    for indices in grouped_idx:
        assert len(set(labels[indices])) == 1
@pytest.mark.parametrize("keep", [[4, 7], [4, 5], [0, 3], [1, 9], [0, 1, 5, 8, 9]])
def test_slice_mostly_sorted(keep):
    # Reordering rows into "kept" followed by "rest" and slicing back should
    # round-trip to the original array with no empty chunks.
    import dask.array as da
    import numpy as np
    from dask.array.utils import assert_eq

    from dask_ml.cluster.spectral import _slice_mostly_sorted

    X = np.arange(10).reshape(-1, 1)
    dX = da.from_array(X, chunks=5)
    keep = np.array(keep).ravel()
    rest = ~np.isin(X, keep).ravel()
    array = dX[np.concatenate([X[keep], X[rest]]).ravel()]

    result = _slice_mostly_sorted(array, keep, rest)
    assert_eq(result, X)
    assert all(x > 0 for x in result.chunks[0])
| bsd-3-clause | 8d28f150874ae3f8e3a10d4d23eb7c40 | 26.254098 | 83 | 0.64391 | 3.234436 | false | true | false | false |
dials/dials | src/dials/algorithms/merging/merge.py | 1 | 16432 | """Merging functions for experiment lists and reflection tables."""
from __future__ import annotations

import logging
from contextlib import contextmanager
from dataclasses import dataclass
from io import StringIO
from typing import Optional, Tuple

import numpy as np

from dxtbx.model import ExperimentList
from iotbx import mtz, phil
from mmtbx.scaling import data_statistics

from dials.algorithms.merging.reporting import (
    MergeJSONCollector,
    MergingStatisticsData,
    make_dano_table,
)
from dials.algorithms.scaling.Ih_table import (
    _reflection_table_to_iobs,
    map_indices_to_asu,
)
from dials.algorithms.scaling.scaling_library import (
    merging_stats_from_scaled_array,
    scaled_data_as_miller_array,
)
from dials.algorithms.scaling.scaling_utilities import DialsMergingStatisticsError
from dials.algorithms.symmetry.absences.run_absences_checks import (
    run_systematic_absences_checks,
)
from dials.array_family import flex
from dials.util.export_mtz import MADMergedMTZWriter, MergedMTZWriter
from dials.util.filter_reflections import filter_reflection_table

from .french_wilson import french_wilson
logger = logging.getLogger("dials")
@contextmanager
def collect_html_data_from_merge():
    """Yield a MergeJSONCollector for gathering HTML report data.

    The collector is reset on exit so state does not leak between merges.
    """
    try:
        html_collector = MergeJSONCollector()
        yield html_collector
    finally:
        html_collector.reset()
def prepare_merged_reflection_table(
    experiments,
    reflection_table,
    d_min=None,
    d_max=None,
    partiality_threshold=0.99,
):
    """Filter the data and prepare a reflection table with merged data.

    The best available intensity type is chosen in preference order:
    scaled, profile-fitted, then summation-integrated.

    Args:
        experiments: ExperimentList; the first experiment supplies the
            crystal symmetry used for merging.
        reflection_table: Integrated (and possibly scaled) reflections.
        d_min: Optional high-resolution cutoff.
        d_max: Optional low-resolution cutoff.
        partiality_threshold: Minimum partiality for inclusion.

    Returns:
        A reflection table with merged 'intensity', 'variance' and
        'miller_index' columns.
    """
    if (
        "inverse_scale_factor" in reflection_table
        and "intensity.scale.value" in reflection_table
    ):
        logger.info("Performing systematic absence checks on scaled data")
        reflections = filter_reflection_table(
            reflection_table,
            intensity_choice=["scale"],
            d_min=d_min,
            d_max=d_max,
            partiality_threshold=partiality_threshold,
        )
        reflections["intensity"] = reflections["intensity.scale.value"]
        reflections["variance"] = reflections["intensity.scale.variance"]
    elif "intensity.prf.value" in reflection_table:
        logger.info(
            "Performing systematic absence checks on unscaled profile-integrated data"
        )
        reflections = filter_reflection_table(
            reflection_table,
            intensity_choice=["profile"],
            d_min=d_min,
            d_max=d_max,
            partiality_threshold=partiality_threshold,
        )
        reflections["intensity"] = reflections["intensity.prf.value"]
        reflections["variance"] = reflections["intensity.prf.variance"]
    else:
        logger.info(
            "Performing systematic absence checks on unscaled summation-integrated data"
        )
        reflections = filter_reflection_table(
            reflection_table,
            intensity_choice=["sum"],
            d_min=d_min,
            d_max=d_max,
            partiality_threshold=partiality_threshold,
        )
        reflections["intensity"] = reflections["intensity.sum.value"]
        reflections["variance"] = reflections["intensity.sum.variance"]

    # now merge: map indices to the asymmetric unit and merge equivalents
    # with unit scale factors.
    space_group = experiments[0].crystal.get_space_group()
    reflections["asu_miller_index"] = map_indices_to_asu(
        reflections["miller_index"], space_group
    )
    reflections["inverse_scale_factor"] = flex.double(reflections.size(), 1.0)
    merged = (
        _reflection_table_to_iobs(
            reflections, experiments[0].crystal.get_unit_cell(), space_group
        )
        .merge_equivalents(use_internal_variance=False)
        .array()
    )
    merged_reflections = flex.reflection_table()
    merged_reflections["intensity"] = merged.data()
    merged_reflections["variance"] = flex.pow2(merged.sigmas())
    merged_reflections["miller_index"] = merged.indices()
    return merged_reflections
@dataclass
class MTZDataClass:
    """Container class for a per-wavelength mtz dataset.

    Converted to a real dataclass (the docstring previously described it as
    "i.e. Python 3.7 dataclass"). Field order matches the original __init__
    signature, so positional and keyword construction by existing callers is
    unchanged; __repr__ and __eq__ are gained for free. The array fields
    hold cctbx miller arrays (or None when not yet computed).
    """

    wavelength: float = 0.0
    project_name: str = "AUTOMATIC"
    dataset_name: str = "NATIVE"
    crystal_name: str = "XTAL"
    merged_array: Optional[object] = None
    merged_anomalous_array: Optional[object] = None
    amplitudes: Optional[object] = None
    anomalous_amplitudes: Optional[object] = None
    dano: Optional[object] = None
    multiplicities: Optional[object] = None
def make_merged_mtz_file(mtz_datasets):
    """
    Build an iotbx mtz file object from the merged data.

    Each MTZDataClass in mtz_datasets (one per wavelength) is added as a
    separate crystal; more than one dataset selects the MAD-style writer.

    Args:
        mtz_datasets: A list of MTZDataClass objects, one per wavelength of the
            experiment.

    Returns:
        An iotbx mtz file object.
    """
    writer_class = MADMergedMTZWriter if len(mtz_datasets) > 1 else MergedMTZWriter

    reference_array = mtz_datasets[0].merged_array
    mtz_writer = writer_class(
        reference_array.space_group(), reference_array.unit_cell()
    )

    # One crystal, one empty dataset and one data block per wavelength.
    for dataset in mtz_datasets:
        mtz_writer.add_crystal(
            crystal_name=dataset.crystal_name, project_name=dataset.project_name
        )
        mtz_writer.add_empty_dataset(dataset.wavelength, name=dataset.dataset_name)
        mtz_writer.add_dataset(
            dataset.merged_array,
            dataset.merged_anomalous_array,
            dataset.amplitudes,
            dataset.anomalous_amplitudes,
            dataset.dano,
            dataset.multiplicities,
        )
    return mtz_writer.mtz_file
def merge_scaled_array(
    experiments,
    scaled_array,
    anomalous=True,
    use_internal_variance=False,
    assess_space_group=False,
    n_bins=20,
):
    """Merge a scaled miller array and compute merging statistics.

    Assumes filtering has already been done and the data converted to a
    combined scaled array.  Note that merge_equivalents does not raise an
    error if the data are already unique.

    Returns:
        A tuple (merged, merged_anomalous, stats_data) of the two
        merge_equivalents objects (the anomalous one may be None) and a
        MergingStatisticsData summary.
    """
    merged = scaled_array.merge_equivalents(use_internal_variance=use_internal_variance)
    if anomalous:
        merged_anom = scaled_array.as_anomalous_array().merge_equivalents(
            use_internal_variance=use_internal_variance
        )
    else:
        merged_anom = None

    # Assess the space group before reporting the merging statistics.
    if assess_space_group:
        merged_array = merged.array()
        absence_refl = flex.reflection_table()
        absence_refl["intensity"] = merged_array.data()
        absence_refl["variance"] = flex.pow2(merged_array.sigmas())
        absence_refl["miller_index"] = merged_array.indices()
        logger.info("Running systematic absences check")
        run_systematic_absences_checks(experiments, absence_refl)

    stats_data = MergingStatisticsData(experiments, scaled_array)
    try:
        stats, anom_stats = merging_stats_from_scaled_array(
            scaled_array,
            n_bins,
            use_internal_variance,
        )
    except DialsMergingStatisticsError as e:
        logger.error(e, exc_info=True)
    else:
        # Only record the statistics when the calculation succeeded.
        stats_data.merging_statistics_result = stats
        stats_data.anom_merging_statistics_result = anom_stats
    return merged, merged_anom, stats_data
def merge(
    experiments,
    reflections,
    d_min=None,
    d_max=None,
    combine_partials=True,
    partiality_threshold=0.4,
    best_unit_cell=None,
    anomalous=True,
    use_internal_variance=False,
    assess_space_group=False,
    n_bins=20,
):
    """
    Merge reflection table data and generate a summary of merging statistics.

    Filters the input data, merges (normal and optionally anomalous),
    assesses the space group symmetry and generates a merging-statistics
    summary.

    Returns:
        Two merge_equivalents objects and a statistics summary, as produced
        by merge_scaled_array.
    """
    logger.info("\nMerging scaled reflection data\n")
    # Remove unsuitable reflections first, using dials.util.filter methods.
    filtered = filter_reflection_table(
        reflections,
        intensity_choice=["scale"],
        d_min=d_min,
        d_max=d_max,
        combine_partials=combine_partials,
        partiality_threshold=partiality_threshold,
    )
    # The scale factor has already been applied by the filter, so reset it
    # to 1.0 - fine as the scale factor is not written to the merged mtz.
    filtered["inverse_scale_factor"] = flex.double(filtered.size(), 1.0)
    scaled_array = scaled_data_as_miller_array([filtered], experiments, best_unit_cell)
    return merge_scaled_array(
        experiments,
        scaled_array,
        anomalous,
        use_internal_variance,
        assess_space_group,
        n_bins,
    )
def show_wilson_scaling_analysis(merged_intensities, n_residues=200):
    """
    Report the Wilson statistics for a merged intensity array.

    Args:
        merged_intensities: A merged miller intensity array.
        n_residues: The number of residues to use for the Wilson analysis.

    Returns:
        The isotropic Wilson B value, or None when the space group is
        centric or the calculation fails.
    """
    # Wilson analysis is only attempted for non-centric space groups.
    if merged_intensities.space_group().is_centric():
        return None
    try:
        wilson_scaling = data_statistics.wilson_scaling(
            miller_array=merged_intensities, n_residues=n_residues
        )
    except (IndexError, RuntimeError) as e:
        logger.error(
            "\n"
            "Error encountered during Wilson statistics calculation:\n"
            "Perhaps there are too few unique reflections.\n"
            "%s",
            e,
            exc_info=True,
        )
        return None
    # Divert output through the logger - a StringIO is used rather than
    # info_handle, which produces far too much whitespace in the output.
    report = StringIO()
    wilson_scaling.show(out=report)
    logger.info(report.getvalue())
    return wilson_scaling.iso_b_wilson
def truncate(
    merged_intensities,
    implementation: str = "dials",
    min_reflections: int = 200,
    fallback_to_flat_prior: bool = True,
):
    """
    Perform French-Wilson truncation procedure on merged intensities.

    Args:
        merged_intensities (miller.array): A merged miller intensity array (normal or anomalous)
        implementation (str): Choice of implementation of French & Wilson algorithm, either
            "dials" or "cctbx"
        min_reflections (int): Minimum number of reflections to perform the French & Wilson
            procedure
        fallback_to_flat_prior (bool): Fallback to assumption of a flat, positive prior,
            if the number of reflections are fewer than min_reflections,
            i.e. |F| = sqrt((Io+sqrt(Io**2 +2sigma**2))/2.0)

    Returns:
        (tuple): tuple containing:
            amplitudes: A normal all-positive miller amplitude array
            anom_amplitudes: An anomalous all-positive amplitude array, if the
                input array has the anomalous_flag set, else None.
            dano: The array of anomalous differences, if the input array has the
                anomalous_flag set, else None.

    Raises:
        ValueError: If there are fewer than min_reflections reflections and
            fallback_to_flat_prior is False.
    """
    logger.info("\nPerforming French-Wilson treatment of scaled intensities")
    out = StringIO()
    n_refl = merged_intensities.size()

    # Select the truncation routine.  Named "def"s are used rather than
    # lambdas assigned to a name (PEP 8 E731).
    if n_refl < min_reflections and fallback_to_flat_prior:
        logger.info(
            "Insufficient reflections for French & Wilson procedure, "
            "falling back to assumption of a flat, positive prior, i.e.: "
            " |F| = sqrt((Io+sqrt(Io**2 +2sigma**2))/2.0)"
        )

        def do_french_wilson(ma):
            # Flat positive prior fallback.
            return ma.enforce_positive_amplitudes()

    elif n_refl < min_reflections:
        raise ValueError(
            "Insufficient reflections for French & Wilson procedure. "
            "Either set fallback_to_flat_prior=True or truncate=False."
        )
    elif implementation == "cctbx":

        def do_french_wilson(ma):
            # cctbx implementation; its report goes to `out`, logged below.
            return ma.french_wilson(log=out)

    else:
        do_french_wilson = french_wilson

    if merged_intensities.anomalous_flag():
        anom_amplitudes = do_french_wilson(merged_intensities)
        n_removed = merged_intensities.size() - anom_amplitudes.size()
        assert anom_amplitudes.is_xray_amplitude_array()
        amplitudes = anom_amplitudes.as_non_anomalous_array()
        amplitudes = amplitudes.merge_equivalents(use_internal_variance=False).array()
        dano = anom_amplitudes.anomalous_differences()
    else:
        anom_amplitudes = None
        dano = None
        amplitudes = do_french_wilson(merged_intensities)
        n_removed = merged_intensities.size() - amplitudes.size()
    logger.info("Total number of rejected intensities %s", n_removed)
    logger.debug(out.getvalue())
    return amplitudes, anom_amplitudes, dano
def merge_scaled_array_to_mtz_with_report_collection(
    params: phil.scope_extract,
    experiments: ExperimentList,
    scaled_array,
    wavelength: Optional[float] = None,
) -> Tuple[mtz.object, dict]:
    """Merge a scaled array into an mtz object plus a JSON report dict.

    Merges the data (collecting html report data as a side effect),
    populates a single MTZDataClass dataset and writes it to an mtz file
    object.

    Returns:
        A tuple of the iotbx mtz file object and the collected JSON data.
    """
    if wavelength is None:
        # np.float was removed in numpy 1.24; use the builtin float instead.
        wavelength = float(
            np.mean([expt.beam.get_wavelength() for expt in experiments])
        )
    with collect_html_data_from_merge() as collector:
        mtz_dataset = MTZDataClass(
            wavelength=wavelength,
            project_name=params.output.project_name,
            dataset_name=params.output.dataset_names[0],
            crystal_name=params.output.crystal_names[0],
        )
        merged, merged_anomalous, stats_summary = merge_scaled_array(
            experiments,
            scaled_array,
            anomalous=params.anomalous,
            assess_space_group=params.assess_space_group,
            n_bins=params.merging.n_bins,
            use_internal_variance=params.merging.use_internal_variance,
        )
        process_merged_data(
            params, mtz_dataset, merged, merged_anomalous, stats_summary
        )
        # Named mtz_file so as not to shadow the iotbx `mtz` module used in
        # the return annotation.
        mtz_file = make_merged_mtz_file([mtz_dataset])
        json_data = collector.create_json()
    return mtz_file, json_data
def process_merged_data(params, mtz_dataset, merged, merged_anomalous, stats_summary):
    """Populate mtz_dataset from the merged data and log analysis output."""
    merged_array = merged.array()
    # IMEAN/SIGIMEAN columns.
    mtz_dataset.merged_array = merged_array
    if merged_anomalous:
        merged_anomalous_array = merged_anomalous.array()
        # I(+), I(-), SIGI(+), SIGI(-), N(+), N(-) columns.
        mtz_dataset.merged_anomalous_array = merged_anomalous_array
        mtz_dataset.multiplicities = merged_anomalous.redundancies()
    else:
        merged_anomalous_array = None
        # N column.
        mtz_dataset.multiplicities = merged.redundancies()

    merged_intensities = (
        merged_anomalous_array if params.anomalous else merged_array
    )

    anom_amplitudes = None
    if params.truncate:
        amplitudes, anom_amplitudes, dano = truncate(
            merged_intensities,
            implementation=params.french_wilson.implementation,
            min_reflections=params.french_wilson.min_reflections,
            fallback_to_flat_prior=params.french_wilson.fallback_to_flat_prior,
        )
        mtz_dataset.amplitudes = amplitudes  # F, SIGF columns
        mtz_dataset.anomalous_amplitudes = anom_amplitudes  # F(+/-), SIGF(+/-)
        mtz_dataset.dano = dano  # DANO, SIGDANO columns

    # Print out analysis statistics.
    try:
        b_iso = show_wilson_scaling_analysis(merged_intensities)
    except Exception as e:
        logger.info(e)
    else:
        stats_summary.Wilson_B_iso = b_iso
    if anom_amplitudes:
        logger.info(make_dano_table(anom_amplitudes))
    if stats_summary.merging_statistics_result:
        logger.info(stats_summary)
    if MergeJSONCollector.initiated:
        stats_summary.anomalous_amplitudes = anom_amplitudes
        MergeJSONCollector.data[mtz_dataset.wavelength] = stats_summary
| bsd-3-clause | e18aa00b4f16c3a00a6a5d5d5a3e3148 | 34.111111 | 99 | 0.645691 | 3.679355 | false | false | false | false |
dials/dials | tests/algorithms/integration/test_interface.py | 1 | 18674 | from __future__ import annotations
import random
import pytest
def test_split_blocks_1_frame():
    """Single-frame jobs: every reflection is split into one partial per
    frame of its bbox, with the data columns carried over unchanged."""
    from dials.algorithms.integration.integrator import JobList
    from dials.array_family import flex

    refl = flex.reflection_table()
    refl["value1"] = flex.double()
    refl["value2"] = flex.int()
    refl["value3"] = flex.double()
    refl["bbox"] = flex.int6()
    refl["id"] = flex.int()

    expected_rows = []
    for idx in range(100):
        x0 = random.randint(0, 100)
        x1 = x0 + random.randint(1, 10)
        y0 = random.randint(0, 100)
        y1 = y0 + random.randint(1, 10)
        z0 = random.randint(0, 100)
        z1 = z0 + random.randint(1, 10)
        values = {
            "value1": random.uniform(0, 100),
            "value2": random.randint(0, 100),
            "value3": random.uniform(0, 100),
        }
        refl.append(dict(values, id=0, bbox=(x0, x1, y0, y1, z0, z1)))
        # Each frame of the bbox becomes its own partial reflection.
        expected_rows.extend(
            dict(
                values,
                id=0,
                bbox=(x0, x1, y0, y1, frame, frame + 1),
                partial_id=idx,
            )
            for frame in range(z0, z1)
        )

    jobs = JobList()
    jobs.add((0, 1), (0, 111), 1, 0)
    jobs.split(refl)

    assert len(refl) == len(expected_rows)
    eps = 1e-7
    for actual, wanted in zip(refl.rows(), expected_rows):
        assert actual["bbox"] == wanted["bbox"]
        assert actual["partial_id"] == wanted["partial_id"]
        assert actual["value2"] == wanted["value2"]
        assert abs(actual["value1"] - wanted["value1"]) < eps
        assert abs(actual["value3"] - wanted["value3"]) < eps
def test_split_blocks_non_overlapping():
    """Non-overlapping blocks: each bbox is clipped to every block it
    intersects, preserving the other columns."""
    from scitbx.array_family import shared

    from dials.algorithms.integration.integrator import JobList
    from dials.array_family import flex

    block_limits = [
        (0, 10),
        (10, 20),
        (20, 30),
        (30, 35),
        (35, 40),
        (40, 50),
        (50, 60),
        (60, 70),
        (70, 80),
        (80, 90),
        (90, 100),
        (100, 110),
    ]
    blocks = shared.tiny_int_2(block_limits)
    jobs = JobList((0, 1), blocks)

    refl = flex.reflection_table()
    refl["value1"] = flex.double()
    refl["value2"] = flex.int()
    refl["value3"] = flex.double()
    refl["bbox"] = flex.int6()
    refl["id"] = flex.int()

    expected_rows = []
    for idx in range(100):
        x0 = random.randint(0, 100)
        x1 = x0 + random.randint(1, 10)
        y0 = random.randint(0, 100)
        y1 = y0 + random.randint(1, 10)
        z0 = random.randint(0, 100)
        z1 = z0 + random.randint(1, 10)
        values = {
            "value1": random.uniform(0, 100),
            "value2": random.randint(0, 100),
            "value3": random.uniform(0, 100),
        }
        refl.append(dict(values, id=0, bbox=(x0, x1, y0, y1, z0, z1)))
        for b0, b1 in block_limits:
            # Half-open intervals [z0, z1) and [b0, b1) intersect iff
            # z0 < b1 and z1 > b0 (equivalent to the original contained /
            # right-overlap / left-overlap case analysis, since z1 > z0).
            if z0 < b1 and z1 > b0:
                expected_rows.append(
                    dict(
                        values,
                        id=0,
                        bbox=(x0, x1, y0, y1, max(b0, z0), min(b1, z1)),
                        partial_id=idx,
                    )
                )

    jobs.split(refl)

    assert len(refl) == len(expected_rows)
    eps = 1e-7
    for actual, wanted in zip(refl.rows(), expected_rows):
        assert actual["bbox"] == wanted["bbox"]
        assert actual["partial_id"] == wanted["partial_id"]
        assert actual["value2"] == wanted["value2"]
        assert abs(actual["value1"] - wanted["value1"]) < eps
        assert abs(actual["value3"] - wanted["value3"]) < eps
def test_split_blocks_overlapping():
    """Overlapping blocks: after the split, every partial must fit entirely
    inside at least one block and keep its parent reflection's data."""
    from scitbx.array_family import shared

    from dials.algorithms.integration.integrator import JobList
    from dials.array_family import flex

    # (0, 10), (5, 15), ..., (100, 110): stride 5, width 10.
    block_limits = [(5 * i, 5 * i + 10) for i in range(21)]
    blocks = shared.tiny_int_2(block_limits)
    jobs = JobList((0, 1), blocks)

    refl = flex.reflection_table()
    refl["value1"] = flex.double()
    refl["value2"] = flex.int()
    refl["value3"] = flex.double()
    refl["bbox"] = flex.int6()
    refl["id"] = flex.int()

    parents = []
    for _ in range(100):
        x0 = random.randint(0, 100)
        x1 = x0 + random.randint(1, 10)
        y0 = random.randint(0, 100)
        y1 = y0 + random.randint(1, 10)
        z0 = random.randint(0, 90)
        z1 = z0 + random.randint(1, 20)
        row = {
            "id": 0,
            "value1": random.uniform(0, 100),
            "value2": random.randint(0, 100),
            "value3": random.uniform(0, 100),
            "bbox": (x0, x1, y0, y1, z0, z1),
        }
        refl.append(row)
        parents.append(row)

    jobs.split(refl)
    assert len(refl) > 100
    for actual in refl.rows():
        bbox = actual["bbox"]
        z0, z1 = bbox[4], bbox[5]
        # Each partial must lie wholly within at least one block.
        assert any(b0 <= z0 and z1 <= b1 for b0, b1 in block_limits)
        parent = parents[actual["partial_id"]]
        assert actual["value1"] == parent["value1"]
        assert actual["value2"] == parent["value2"]
        assert actual["value3"] == parent["value3"]
        # x/y extents are untouched by the split.
        assert bbox[:4] == parent["bbox"][:4]
def test_reflection_manager():
    """Exercise JobList/ReflectionManager split and accumulate round trip."""
    from dials.algorithms.integration.integrator import JobList, ReflectionManager
    from dials.array_family import flex

    reflections = flex.reflection_table()
    reflections["panel"] = flex.size_t()
    reflections["bbox"] = flex.int6()
    reflections["miller_index"] = flex.miller_index()
    reflections["s1"] = flex.vec3_double()
    reflections["xyzcal.px"] = flex.vec3_double()
    reflections["xyzcal.mm"] = flex.vec3_double()
    reflections["entering"] = flex.bool()
    reflections["id"] = flex.int()
    reflections["flags"] = flex.size_t()

    width = 1000
    height = 1000
    nrefl = 10000
    array_range = (0, 130)
    block_size = 20
    block_overlap = 10

    random.seed(0)
    centres = list(range(10, 130, 10))  # one centre per expected job
    processed = [[] for _ in centres]
    for i in range(nrefl):
        x0 = random.randint(0, width - 10)
        y0 = random.randint(0, height - 10)
        zs = random.randint(2, 9)
        x1 = x0 + random.randint(2, 10)
        y1 = y0 + random.randint(2, 10)
        for k, centre in enumerate(centres):
            m = k + i * 12
            side = random.choice(["left", "right", "centre"])
            if side == "left":
                z0, z1 = centre - zs, centre
            elif side == "right":
                z0, z1 = centre, centre + zs
            else:
                z0, z1 = centre - zs // 2, centre + zs // 2
            reflections.append(
                {
                    "panel": random.randint(0, 1),
                    "bbox": (x0, x1, y0, y1, z0, z1),
                    "flags": flex.reflection_table.flags.reference_spot,
                }
            )
            processed[k].append(m)

    jobs = JobList()
    jobs.add((0, 1), array_range, block_size, block_overlap)

    # Create the executor.
    executor = ReflectionManager(jobs, reflections)

    # Twelve jobs of 20 frames, offset by 10 frames each.
    assert len(executor) == 12
    assert not executor.finished()
    job_list = [executor.job(i) for i in range(len(executor))]
    assert len(job_list) == 12
    for index, job in enumerate(job_list):
        assert job.frames() == (index * 10, index * 10 + 20)

    # Split out the per-job data and check the expected sizes.
    split_data = [executor.split(i) for i in range(12)]
    for data, ids in zip(split_data, processed):
        assert len(data) == len(ids)

    # Attach a distinct result value per job, then accumulate it all back.
    for value, data in enumerate(split_data, start=1):
        data["data"] = flex.double(len(data), value)
    assert not executor.finished()
    for index, data in enumerate(split_data):
        executor.accumulate(index, data)
    assert executor.finished()

    # Every reflection must carry the value of the job it was assigned to.
    result = executor.data()["data"]
    for job_index, ids in enumerate(processed):
        for m in ids:
            assert result[m] == job_index + 1
@pytest.mark.parametrize("nproc", [1, 2])
def test_integrator_3d(dials_data, nproc):
    """Run the 3D integrator (no profile fitting) on the centroid data."""
    from math import pi

    from dxtbx.model.experiment_list import ExperimentListFactory

    from dials.algorithms.profile_model.gaussian_rs import Model
    from dials.array_family import flex

    experiments = ExperimentListFactory.from_json_file(
        dials_data("centroid_test_data", pathlib=True) / "experiments.json"
    )
    experiments[0].profile = Model(
        None, n_sigma=3, sigma_b=0.024 * pi / 180.0, sigma_m=0.044 * pi / 180.0
    )

    predicted = flex.reflection_table.from_predictions(experiments[0])
    predicted["id"] = flex.int(len(predicted), 0)
    predicted.compute_bbox(experiments)
    predicted.compute_zeta_multi(experiments)
    predicted.compute_d(experiments)

    from libtbx.phil import parse

    from dials.algorithms.integration.integrator import Integrator3D, phil_scope

    block_size = 5
    params = phil_scope.fetch(
        parse(
            f"""
      integration.block.size={block_size}
      integration.mp.nproc={nproc}
      integration.profile_fitting=False
    """
        )
    ).extract()
    Integrator3D(experiments, predicted, params).integrate()
def test_summation(dials_data):
    """Summation integration must give consistent results across all four
    integrator implementations, and 2D partials must sum to the 3D totals."""
    from math import pi

    from dxtbx.model.experiment_list import ExperimentListFactory

    from dials.algorithms.profile_model.gaussian_rs import Model
    from dials.array_family import flex

    path = dials_data("centroid_test_data", pathlib=True) / "experiments.json"
    experiments = ExperimentListFactory.from_json_file(path)
    experiments[0].profile = Model(
        None, n_sigma=3, sigma_b=0.024 * pi / 180.0, sigma_m=0.044 * pi / 180.0
    )

    predicted = flex.reflection_table.from_predictions(experiments[0])
    predicted["id"] = flex.int(len(predicted), 0)

    def integrate(integrator_type, rlist):
        # Run summation integration with the requested implementation.
        from libtbx.phil import parse

        from dials.algorithms.integration.integrator import create_integrator
        from dials.algorithms.integration.integrator import (
            phil_scope as master_phil_scope,
        )

        working = rlist.copy()
        user_phil = parse(
            f"""
      integration.background.algorithm=null
      integration.intensity.algorithm=sum
      integration.intensity.sum.integrator={integrator_type}
      integration.block.size=0.5
      integration.profile_fitting=False
    """
        )
        params = master_phil_scope.fetch(source=user_phil).extract()
        return create_integrator(params, experiments, working).integrate()

    from libtbx.test_utils import approx_equal

    def approx_equal_dict(a, b, k):
        return approx_equal(a[k], b[k])

    # Do summation by all the different methods (same order as before).
    results = {
        kind: integrate(kind, predicted) for kind in ("3d", "flat3d", "2d", "single2d")
    }
    for res in results.values():
        assert len(res) >= len(predicted)

    exact_keys = (
        "partial_id",
        "bbox",
        "entering",
        "flags",
        "id",
        "miller_index",
        "panel",
    )
    approx_keys = (
        "d",
        "intensity.sum.value",
        "intensity.sum.variance",
        "lp",
        "partiality",
        "s1",
        "xyzcal.mm",
        "xyzcal.px",
        "zeta",
    )

    # 3d and flat3d should agree row for row.
    result_3d, result_flat = results["3d"], results["flat3d"]
    assert len(result_3d) == len(result_flat)
    for a, b in zip(result_3d.rows(), result_flat.rows()):
        for key in exact_keys:
            assert a[key] == b[key]
        for key in approx_keys:
            assert approx_equal_dict(a, b, key)

    # 2d and single2d should agree, including the observed centroids.
    result_2d, result_single = results["2d"], results["single2d"]
    assert len(result_2d) == len(result_single)
    for a, b in zip(result_2d.rows(), result_single.rows()):
        for key in exact_keys:
            assert a[key] == b[key]
        for key in approx_keys + ("xyzobs.px.value", "xyzobs.px.variance"):
            assert approx_equal_dict(a, b, key)

    def accumulate_partials(result):
        # Sum partial intensities back onto the parent predictions, checking
        # that the invariant columns survived the split.
        totals = predicted.copy()
        totals["intensity.sum.value"] = flex.double(len(predicted), 0)
        totals["intensity.sum.variance"] = flex.double(len(predicted), 0)
        for row in result.rows():
            pid = row["partial_id"]
            parent = totals[pid]
            for key in ("entering", "id", "miller_index", "panel"):
                assert row[key] == parent[key]
            for key in ("s1", "xyzcal.mm", "xyzcal.px"):
                assert approx_equal_dict(row, parent, key)
            totals["intensity.sum.value"][pid] += row["intensity.sum.value"]
            totals["intensity.sum.variance"][pid] += row["intensity.sum.variance"]
        return totals

    # The 2d partials must add up to the 3d totals.
    assert len(result_2d) >= len(result_3d)
    totals_3d = accumulate_partials(result_3d)
    totals_2d = accumulate_partials(result_2d)
    for a, b in zip(totals_3d.rows(), totals_2d.rows()):
        assert approx_equal_dict(a, b, "intensity.sum.value")
        assert approx_equal_dict(a, b, "intensity.sum.variance")
| bsd-3-clause | 7906468b4f2b313c98ad1ff8d8097dd3 | 31.085911 | 83 | 0.532826 | 3.229678 | false | false | false | false |
dials/dials | tests/algorithms/scaling/test_scaler_factory.py | 1 | 14719 | """
Tests for the scaler factory classes and helper functions.
"""
from __future__ import annotations
from unittest.mock import MagicMock, Mock
import pytest
from dxtbx.model import Crystal
from libtbx import phil
from dials.algorithms.scaling.error_model.error_model import BasicErrorModel
from dials.algorithms.scaling.scaler import (
MultiScaler,
NullScaler,
SingleScaler,
TargetScaler,
)
from dials.algorithms.scaling.scaler_factory import (
MultiScalerFactory,
SingleScalerFactory,
TargetScalerFactory,
create_scaler,
)
from dials.array_family import flex
from dials.util.options import ArgumentParser
def generated_refl(not_integrated=False, idval=0):
    """Generate a four-row test reflection table with identifier idval."""
    table = flex.reflection_table()
    table["intensity.prf.value"] = flex.double([1.0, 10.0, 100.0, 1.0])
    table["intensity.prf.variance"] = flex.double([1.0, 10.0, 100.0, 1.0])
    table["intensity.sum.value"] = flex.double([12.0, 120.0, 1200.0, 21.0])
    table["intensity.sum.variance"] = flex.double([12.0, 120.0, 2100.0, 1.0])
    table["miller_index"] = flex.miller_index(
        [(1, 0, 0), (0, 0, 1), (2, 0, 0), (2, 2, 2)]
    )  # don't change
    table["d"] = flex.double([0.8, 2.0, 2.0, 0.0])  # don't change
    table["partiality"] = flex.double(4, 1.0)
    table["xyzobs.px.value"] = flex.vec3_double(
        [(0.0, 0.0, 0.0), (0.0, 0.0, 5.0), (0.0, 0.0, 10.0), (0.0, 0.0, 10.0)]
    )
    table["s1"] = flex.vec3_double([(0.0, 0.1, 1.0)] * 4)
    if not_integrated:
        integrated_sel = flex.bool(4, False)
    else:
        integrated_sel = flex.bool([True, True, False, False])
    table.set_flags(integrated_sel, table.flags.integrated)
    table.set_flags(
        flex.bool([False, False, True, True]), table.flags.bad_for_scaling
    )
    table["id"] = flex.int(4, idval)
    table.experiment_identifiers()[idval] = str(idval)
    return table
@pytest.fixture
def refl_to_filter():
    """A six-row reflection table for exercising reflection filtering."""
    table = flex.reflection_table()
    table["partiality"] = flex.double([0.1] + [1.0] * 5)
    table["id"] = flex.int(6, 0)
    table["intensity.sum.value"] = flex.double(6, 1.0)
    table["intensity.sum.variance"] = flex.double(6, 1.0)
    # The second reflection is not summation-integrated.
    table.set_flags(
        flex.bool([True, False, True, True, True, True]),
        table.flags.integrated_sum,
    )
    return table
@pytest.fixture
def prf_sum_refl_to_filter():
    """A five-row table mixing profile- and summation-integrated flags."""
    table = flex.reflection_table()
    table["partiality"] = flex.double(5, 1.0)
    table["id"] = flex.int(5, 0)
    table.experiment_identifiers()[0] = "0"
    table["intensity.sum.value"] = flex.double([1.0, 2.0, 3.0, 4.0, 5.0])
    table["intensity.sum.variance"] = flex.double(5, 1.0)
    table["intensity.prf.value"] = flex.double([11.0, 12.0, 13.0, 14.0, 15.0])
    table["intensity.prf.variance"] = flex.double(5, 1.0)
    table["miller_index"] = flex.miller_index([(0, 0, 1)] * 5)
    # Rows 3-5 are summation-integrated; rows 1, 4 and 5 profile-integrated.
    table.set_flags(
        flex.bool([False, False, True, True, True]),
        table.flags.integrated_sum,
    )
    table.set_flags(
        flex.bool([True, False, False, True, True]),
        table.flags.integrated_prf,
    )
    return table
def test_refl_and_exp(mock_scaling_component, idval=0):
    """Return a (reflection table, mock experiment) pair sharing idval."""
    return test_refl(idval=idval), mock_exp(mock_scaling_component, idval=idval)
def test_refl_and_exp_list(mock_scaling_component, n=1):
    """Return parallel lists of n reflection tables and n mock experiments."""
    rlist = [test_refl(idval=i) for i in range(n)]
    explist = [mock_exp(mock_scaling_component, idval=i) for i in range(n)]
    return rlist, explist
def test_refl(idval=0):
    """Generate a test reflection table with its ``id`` column set to idval.

    NOTE(review): despite the ``test_`` prefix this is used as a helper by
    other tests in this module, not as a standalone test.
    """
    return generated_refl(idval=idval)
@pytest.fixture
def refl_list():
    """Make a list of three reflection tables."""
    return [generated_refl() for _ in range(3)]
@pytest.fixture
def generated_param():
    """Generate a scaling-options phil parameters object for the tests."""
    scope = phil.parse(
        """
      include scope dials.algorithms.scaling.scaling_options.phil_scope
  """,
        process_includes=True,
    )
    parser = ArgumentParser(phil=scope, check_format=False)
    params, _ = parser.parse_args(args=[], quick_parse=True, show_diff_phil=False)
    # Settings the scaler-factory tests rely on.
    params.__inject__("model", "KB")
    params.scaling_options.free_set_percentage = 50.0
    params.scaling_options.emax = 0
    params.reflection_selection.method = "use_all"
    return params
@pytest.fixture
def mock_scaling_component():
    """A MagicMock standing in for a scaling-model component."""
    mock_component = MagicMock()
    mock_component.n_params = 2
    mock_component.inverse_scales = flex.double([0.9, 1.1])
    return mock_component
def mock_exp(mock_scaling_component, idval=0):
    """Mock experiments object for initialising a scaler."""

    def configure_table(*args):
        # Stand-in for scaling_model.configure_reflection_table during
        # initialisation: pass the table straight through unchanged.
        return args[0]

    experiment = MagicMock()
    experiment.identifier = str(idval)
    experiment.scaling_model.components = {"scale": mock_scaling_component}
    experiment.scaling_model.consecutive_refinement_order = ["scale"]
    experiment.scaling_model.is_scaled = False
    experiment.scaling_model.error_model = BasicErrorModel()
    experiment.scaling_model.configure_reflection_table.side_effect = configure_table
    experiment.crystal = Crystal.from_dict(
        {
            "__id__": "crystal",
            "real_space_a": [1.0, 0.0, 0.0],
            "real_space_b": [0.0, 1.0, 0.0],
            "real_space_c": [0.0, 0.0, 2.0],
            "space_group_hall_symbol": " C 2y",
        }
    )
    experiment.scan.get_oscillation.return_value = (0, 1.0)
    experiment.beam.get_sample_to_source_direction.return_value = (0.0, 0.0, -1.0)
    experiment.goniometer.get_rotation_axis.return_value = (0.0, 0.0, 1.0)
    return experiment
def mock_explist_3exp(mock_scaling_component):
    """A mock experimentlist containing three mock experiment instances."""
    return [mock_exp(mock_scaling_component) for _ in range(3)]
@pytest.fixture
def mock_scaled_exp():
    """A mock experiments object with scaling_model.is_scaled = True"""
    experiment = Mock()
    experiment.scaling_model.is_scaled = True
    return experiment
@pytest.fixture
def mock_unscaled_exp():
    """A mock experiments object with scaling_model.is_scaled = False"""
    experiment = Mock()
    experiment.scaling_model.is_scaled = False
    return experiment
@pytest.fixture
def mock_experimentlist(mock_scaled_exp, mock_unscaled_exp):
    """A mock experimentlist mixing scaled and unscaled mock experiments."""
    return [
        mock_scaled_exp,
        mock_scaled_exp,
        mock_unscaled_exp,
        mock_scaled_exp,
        mock_unscaled_exp,
    ]
def test_SingleScalerFactory(generated_param, refl_to_filter, mock_scaling_component):
    """Test the single scaler factory."""
    refl, exp = test_refl_and_exp(mock_scaling_component)
    derived_columns = ("inverse_scale_factor", "intensity", "variance")
    # The derived columns are only added during scaler creation.
    assert not any(col in refl for col in derived_columns)
    # Default creation (no split into a free set).
    scaler = SingleScalerFactory.create(generated_param, exp, refl)
    assert isinstance(scaler, SingleScaler)
    assert all(col in scaler.reflection_table for col in derived_columns)
    # Reflections with low partiality, or without the integrated flag, get
    # excluded for scaling.
    filtered = SingleScalerFactory.filter_bad_reflections(refl_to_filter)
    assert list(filtered.get_flags(filtered.flags.excluded_for_scaling)) == [
        True,
        True,
        False,
        False,
        False,
        False,
    ]
def test_selection_of_profile_or_summation_intensities(
    generated_param, prf_sum_refl_to_filter, mock_scaling_component
):
    """Test that profile intensities are preferred over summation.

    Reflections with a valid profile-fitted intensity should use it;
    those without fall back to the summation intensity, and reflections
    with neither are excluded from scaling.
    """
    _, exp = test_refl_and_exp(mock_scaling_component)
    # Test that all required attributes get added with standard params.
    assert all(
        (i not in prf_sum_refl_to_filter)
        for i in ["inverse_scale_factor", "intensity", "variance"]
    )
    # Test default, (no split into free set)
    ss = SingleScalerFactory.create(generated_param, exp, prf_sum_refl_to_filter)
    assert isinstance(ss, SingleScaler)
    rt = ss.reflection_table
    assert all(i in rt for i in ["inverse_scale_factor", "intensity", "variance"])
    # Only the second reflection (no usable intensity) should be excluded.
    assert list(rt.get_flags(rt.flags.excluded_for_scaling)) == [
        False,
        True,
        False,
        False,
        False,
    ]
    # test correct initial intensities have been chosen - should be prf then sum
    assert list(rt["intensity"]) == [11.0, 2.0, 3.0, 14.0, 15.0]
def test_TargetScalerFactory(generated_param, mock_scaling_component):
    """Test the target scaler factory.

    Covers: standard scaled/unscaled partitioning, conversion of a
    TargetScaler to a MultiScaler, scaling against a reference model,
    and removal of datasets with no integrated reflections.
    """
    refl_list, explist = test_refl_and_exp_list(mock_scaling_component, 3)
    # Test standard initialisation.
    assert generated_param.scaling_options.use_free_set is False  # just to check
    explist[0].scaling_model.is_scaled = True
    explist[1].scaling_model.is_scaled = True
    target = TargetScalerFactory.create(generated_param, explist, refl_list)
    assert isinstance(target, TargetScaler)
    # Two scaled datasets become single scalers; one unscaled remains.
    assert len(target.single_scalers) == 2
    assert len(target.unscaled_scalers) == 1
    assert set(target.single_scalers[0].reflection_table["id"]) == {0}
    assert set(target.single_scalers[1].reflection_table["id"]) == {1}
    assert set(target.unscaled_scalers[0].reflection_table["id"]) == {2}
    # Now test converting targetscaler to multiscaler
    multiscaler = MultiScalerFactory.create_from_targetscaler(target)
    assert isinstance(multiscaler, MultiScaler)
    assert len(multiscaler.single_scalers) == 3
    # Test for correct initialisation when scaling against a target model.
    generated_param.scaling_options.reference = True
    target = TargetScalerFactory.create_for_target_against_reference(
        generated_param, explist, refl_list
    )
    # When scaling against a reference, the target is a NullScaler.
    assert isinstance(target.single_scalers[0], NullScaler)
    # This time make one dataset bad, and check it gets removed
    refl_list, explist = test_refl_and_exp_list(mock_scaling_component, 3)
    generated_param.scaling_options.reference = False
    refl_list[1].unset_flags(flex.bool(4, True), refl_list[1].flags.integrated_prf)
    refl_list[1].unset_flags(flex.bool(4, True), refl_list[1].flags.integrated_sum)
    explist[0].scaling_model.is_scaled = True
    target = TargetScalerFactory.create(generated_param, explist, refl_list)
    assert isinstance(target, TargetScaler)
    # Dataset 1 (no integrated reflections) should have been dropped.
    assert len(target.single_scalers) == 1
    assert len(target.unscaled_scalers) == 1
    assert set(target.single_scalers[0].reflection_table["id"]) == {0}
    assert set(target.unscaled_scalers[0].reflection_table["id"]) == {2}
    refl_list, explist = test_refl_and_exp_list(mock_scaling_component, 3)
    refl_list[0].unset_flags(flex.bool(4, True), refl_list[0].flags.integrated_prf)
    refl_list[0].unset_flags(flex.bool(4, True), refl_list[0].flags.integrated_sum)
    explist[0].scaling_model.is_scaled = True
    explist[1].scaling_model.is_scaled = True
    target = TargetScalerFactory.create(generated_param, explist, refl_list)
    assert isinstance(target, TargetScaler)
    # Dataset 0 (bad) removed: dataset 1 stays scaled, dataset 2 unscaled.
    assert len(target.single_scalers) == 1
    assert len(target.unscaled_scalers) == 1
    assert set(target.single_scalers[0].reflection_table["id"]) == {1}
    assert set(target.unscaled_scalers[0].reflection_table["id"]) == {2}
def test_MultiScalerFactory(generated_param, mock_scaling_component, refl_list):
    """Test the MultiScalerFactory."""
    # NOTE(review): the refl_list fixture argument is immediately shadowed
    # below — presumably requested only for its fixture setup; confirm.
    refl_list, explist = test_refl_and_exp_list(mock_scaling_component, 3)
    multiscaler = MultiScalerFactory.create(generated_param, explist, refl_list)
    assert isinstance(multiscaler, MultiScaler)
    assert len(multiscaler.single_scalers) == 3
    for i in range(3):
        assert set(multiscaler.single_scalers[i].reflection_table["id"]) == {i}
    # This time make one dataset bad, and check it gets removed
    r1 = generated_refl(not_integrated=True)
    r2 = generated_refl()
    r3 = generated_refl()
    new_list = [r1, r2, r3]
    multiscaler = MultiScalerFactory.create(
        generated_param, mock_explist_3exp(mock_scaling_component), new_list
    )
    assert isinstance(multiscaler, MultiScaler)
    # Only the two integrated datasets should survive.
    assert len(multiscaler.single_scalers) == 2
    r1 = multiscaler.single_scalers[0].reflection_table
    assert list(r1.get_flags(r1.flags.integrated)) == [True, True, False, False]
    r2 = multiscaler.single_scalers[1].reflection_table
    assert list(r2.get_flags(r2.flags.integrated)) == [True, True, False, False]
def test_scaler_factory_helper_functions(
    mock_experimentlist, generated_param, refl_list, mock_scaling_component
):
    """Test the helper functions.

    create_scaler should dispatch to SingleScaler, MultiScaler or
    TargetScaler depending on how many datasets are present and which
    of them are already scaled, and raise for empty input.
    """
    test_refl, exp = test_refl_and_exp(mock_scaling_component)
    # Test create_scaler
    # Test case for single refl and exp
    scaler = create_scaler(generated_param, [exp], [test_refl])
    assert isinstance(scaler, SingleScaler)
    # If none or allscaled
    explist = mock_explist_3exp(mock_scaling_component)
    scaler = create_scaler(generated_param, explist, refl_list)
    assert isinstance(scaler, MultiScaler)
    explist[0].scaling_model.is_scaled = False
    # ^ changes all in list as same instance of exp.
    scaler = create_scaler(
        generated_param, mock_explist_3exp(mock_scaling_component), refl_list
    )
    assert isinstance(scaler, MultiScaler)
    # If only some scaled
    explist = []
    explist.append(mock_exp(mock_scaling_component))
    explist.append(mock_exp(mock_scaling_component))
    explist[1].scaling_model.is_scaled = True
    r1 = generated_refl()
    r2 = generated_refl()
    refl_list = [r1, r2]
    scaler = create_scaler(generated_param, explist, refl_list)
    assert isinstance(scaler, TargetScaler)
    # If no reflections passed in.
    with pytest.raises(ValueError):
        scaler = create_scaler(
            generated_param, mock_explist_3exp(mock_scaling_component), []
        )
| bsd-3-clause | 22cd3eced5a8cb9f11878688ef376287 | 35.889724 | 87 | 0.671377 | 3.279635 | false | true | false | false |
dials/dials | tests/command_line/test_indexed_as_integrated.py | 1 | 1697 | from __future__ import annotations
import procrunner
import iotbx.merging_statistics
def test_indexed_as_integrated(dials_data, tmp_path):
    """End-to-end test of dials.indexed_as_integrated.

    Converts indexed insulin data to an 'integrated'-style reflection
    file, then checks the output can be taken through dials.symmetry and
    dials.scale, producing merged data with reasonable CC1/2.
    """
    data_dir = dials_data("insulin_processed", pathlib=True)
    refl = data_dir / "indexed.refl"
    expt = data_dir / "indexed.expt"
    command = [
        "dials.indexed_as_integrated",
        refl,
        expt,
        "output.reflections=output.refl",
    ]
    result = procrunner.run(command, working_directory=tmp_path)
    assert not result.returncode and not result.stderr
    assert (tmp_path / "output.refl").is_file()
    # now make sure we can run dials.symmetry and dials.scale successfully
    sym_command = ["dials.symmetry", tmp_path / "output.refl", expt]
    result = procrunner.run(sym_command, working_directory=tmp_path)
    assert not result.returncode and not result.stderr
    assert (tmp_path / "symmetrized.refl").is_file()
    assert (tmp_path / "symmetrized.expt").is_file()
    scale_command = [
        "dials.scale",
        tmp_path / "symmetrized.refl",
        tmp_path / "symmetrized.expt",
        "unmerged_mtz=scaled.mtz",
        "d_min=2.0",
    ]
    result = procrunner.run(scale_command, working_directory=tmp_path)
    assert not result.returncode and not result.stderr
    assert (tmp_path / "scaled.refl").is_file()
    assert (tmp_path / "scaled.expt").is_file()
    assert (tmp_path / "scaled.mtz").is_file()
    i_obs = iotbx.merging_statistics.select_data(str(tmp_path / "scaled.mtz"), None)
    result = iotbx.merging_statistics.dataset_statistics(
        i_obs=i_obs,
        use_internal_variance=False,
        eliminate_sys_absent=False,
    )
    # Sanity threshold: scaled insulin data should merge well.
    assert result.overall.cc_one_half > 0.8
| bsd-3-clause | 34affdffd1066f520793174a88933d6f | 32.94 | 84 | 0.657042 | 3.20794 | false | false | false | false |
dials/dials | src/dials/algorithms/refinement/parameterisation/scan_varying_crystal_parameters.py | 1 | 7512 | from __future__ import annotations
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationMixin,
CrystalUnitCellMixin,
)
from dials.algorithms.refinement.parameterisation.scan_varying_model_parameters import (
GaussianSmoother,
ScanVaryingModelParameterisation,
ScanVaryingParameterSet,
)
from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose
class ScanVaryingCrystalOrientationParameterisation(
    ScanVaryingModelParameterisation, CrystalOrientationMixin
):
    """Scan-varying parameterisation for crystal orientation, with angles
    expressed in mrad"""
    def __init__(self, crystal, t_range, num_intervals, experiment_ids=None):
        """Initialise from a crystal model.

        :param crystal: the crystal model whose orientation is parameterised
        :param t_range: (start, end) range of image numbers for the smoother
        :param num_intervals: number of smoother intervals over t_range
        :param experiment_ids: experiment indices this model applies to
            (defaults to [0])
        """
        if experiment_ids is None:
            experiment_ids = [0]
        # The state of a scan varying crystal orientation parameterisation
        # is an orientation
        # matrix '[U](t)', expressed as a function of image number 't'
        # in a sequential scan.
        #
        # The initial state is a snapshot of the crystal orientation
        # at the point of initialisation '[U0]', which is independent of
        # image number.
        #
        # Future states are composed by
        # rotations around axes of the phi-axis frame by Tait-Bryan angles.
        #
        # [U](t) = [Phi3](t)[Phi2](t)[Phi1](t)[U0]
        # Set up the smoother
        smoother = GaussianSmoother(t_range, num_intervals)
        nv = smoother.num_values()
        # Set up the initial state
        istate = matrix.sqr(crystal.get_U())
        self._U_at_t = istate
        # Factory function to provide to _build_p_list
        def parameter_type(value, axis, ptype, name):
            return ScanVaryingParameterSet(value, nv, axis, ptype, name)
        # Build the parameter list
        p_list = self._build_p_list(parameter_type)
        # Set up the base class
        ScanVaryingModelParameterisation.__init__(
            self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
        )
        return
    def compose(self, t):
        """calculate state and derivatives for model at image number t"""
        # Extract orientation from the initial state
        U0 = self._initial_state
        # extract parameter sets from the internal list
        phi1_set, phi2_set, phi3_set = self._param
        # extract angles and other data at time t using the smoother
        phi1, phi1_weights, phi1_sumweights = self._smoother.value_weight(t, phi1_set)
        phi2, phi2_weights, phi2_sumweights = self._smoother.value_weight(t, phi2_set)
        phi3, phi3_weights, phi3_sumweights = self._smoother.value_weight(t, phi3_set)
        # calculate derivatives of angles wrt underlying parameters.
        # The smoothed angle is a weighted mean, so d(angle)/d(param) is
        # just the normalised weight of each parameter.
        dphi1_dp = phi1_weights * (1.0 / phi1_sumweights)
        dphi2_dp = phi2_weights * (1.0 / phi2_sumweights)
        dphi3_dp = phi3_weights * (1.0 / phi3_sumweights)
        # calculate state and derivatives using the helper class
        coc = CrystalOrientationCompose(
            U0, phi1, phi1_set.axis, phi2, phi2_set.axis, phi3, phi3_set.axis
        )
        self._U_at_t = coc.U()
        dU_dphi1 = coc.dU_dphi1()
        dU_dphi2 = coc.dU_dphi2()
        dU_dphi3 = coc.dU_dphi3()
        # calculate derivatives of state wrt underlying parameters
        # (chain rule; the weight vectors are sparse so entries not visited
        # below remain None)
        dU_dp1 = [None] * dphi1_dp.size
        for (i, v) in dphi1_dp:
            dU_dp1[i] = dU_dphi1 * v
        dU_dp2 = [None] * dphi2_dp.size
        for (i, v) in dphi2_dp:
            dU_dp2[i] = dU_dphi2 * v
        dU_dp3 = [None] * dphi3_dp.size
        for (i, v) in dphi3_dp:
            dU_dp3[i] = dU_dphi3 * v
        # store derivatives as list-of-lists
        self._dstate_dp = [dU_dp1, dU_dp2, dU_dp3]
        return
    def get_state(self):
        """Return crystal orientation matrix [U] at image number t"""
        # only a single crystal is parameterised here, so no multi_state_elt
        # argument is allowed
        return self._U_at_t
class ScanVaryingCrystalUnitCellParameterisation(
    ScanVaryingModelParameterisation, CrystalUnitCellMixin
):
    """Scan-varying parameterisation for the crystal unit cell"""
    def __init__(
        self,
        crystal,
        t_range,
        num_intervals,
        experiment_ids=None,
        set_state_uncertainties=False,
    ):
        """Initialise from a crystal model.

        :param crystal: the crystal model whose unit cell is parameterised
        :param t_range: (start, end) range of image numbers for the smoother
        :param num_intervals: number of smoother intervals over t_range
        :param experiment_ids: experiment indices this model applies to
            (defaults to [0])
        :param set_state_uncertainties: if True, pass calculated B matrix
            variance-covariances back to the crystal model
        """
        self._set_state_uncertainties = set_state_uncertainties
        # NOTE: 'matrix' is imported at module level; no local import needed.
        if experiment_ids is None:
            experiment_ids = [0]
        # The state of a scan-varying unit cell parameterisation is the
        # reciprocal space orthogonalisation matrix '[B](t)', expressed as a
        # function of image number 't' in a sequential scan.
        # Other comments from CrystalUnitCellParameterisation are relevant here
        # Set up the smoother
        smoother = GaussianSmoother(t_range, num_intervals)
        nv = smoother.num_values()
        # Set up the initial state
        istate = None
        self._B_at_t = matrix.sqr(crystal.get_B())
        # Factory function to provide to _build_p_list
        def parameter_type(value, name):
            return ScanVaryingParameterSet(value, nv, name=name)
        # Build the parameter list
        p_list = self._build_p_list(crystal, parameter_type)
        # Set up the base class
        ScanVaryingModelParameterisation.__init__(
            self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
        )
        return
    def compose(self, t):
        """calculate state and derivatives for model at image number t"""
        # extract values and weights at time t using the smoother
        vals, weights, sumweights = zip(
            *(self._smoother.value_weight(t, pset) for pset in self._param)
        )
        # calculate derivatives of metrical matrix parameters wrt underlying
        # scan-varying parameters
        inv_sumw = [1.0 / sw for sw in sumweights]
        dvals_dp = [e * isw for e, isw in zip(weights, inv_sumw)]
        # calculate new B and derivatives
        self._B_at_t, dB_dval = self._compose_core(vals)
        # calculate derivatives of state wrt underlying parameters.
        # (A previous dense-vector calculation of self._dstate_dp here was
        # dead code: it was unconditionally overwritten by the sparse loop
        # below, so it has been removed.)
        self._dstate_dp = [[None] * e.size for e in dvals_dp]
        for i, (dv, dB) in enumerate(zip(dvals_dp, dB_dval)):
            for j, e in dv:
                self._dstate_dp[i][j] = e * dB
        return
    def get_state(self):
        """Return crystal orthogonalisation matrix [B] at image number t"""
        # only a single crystal is parameterised here, so no multi_state_elt
        # argument is allowed
        return self._B_at_t
    def set_state_uncertainties(self, var_cov_list):
        """Send the calculated variance-covariance of the elements of the B matrix
        for all scan points back to the crystal model, if required
        """
        if not self._set_state_uncertainties:
            return
        # Convert list of 9*9 matrices to a 3d array
        from scitbx.array_family import flex
        B_cov = flex.double(flex.grid(len(var_cov_list), 9, 9))
        for i, v in enumerate(var_cov_list):
            v = v.as_flex_double_matrix()
            v.reshape(flex.grid(1, 9, 9))
            B_cov[i : (i + 1), :, :] = v
        # Pass it back to the model
        self._model.set_B_covariance_at_scan_points(B_cov)
| bsd-3-clause | fbf7cc4df184544d16825101321f6b7c | 33.617512 | 88 | 0.622204 | 3.500466 | false | false | false | false |
dials/dials | src/dials/command_line/show.py | 1 | 22945 | from __future__ import annotations
import os
import sys
import numpy as np
import iotbx.phil
from cctbx import uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from scitbx.math import five_number_summary
import dials.util
from dials.array_family import flex
from dials.util import Sorry, tabulate
help_message = """
Examples::
dials.show models.expt
dials.show image_*.cbf
dials.show observations.refl
"""
phil_scope = iotbx.phil.parse(
"""\
show_scan_varying = False
.type = bool
.help = "Whether or not to show the crystal at each scan point."
show_shared_models = False
.type = bool
.help = "Show which models are linked to which experiments"
show_all_reflection_data = False
.type = bool
.help = "Whether or not to print individual reflections"
show_intensities = False
.type = bool
show_centroids = False
.type = bool
show_profile_fit = False
.type = bool
show_flags = False
.type = bool
.help = "Show a summary table of reflection flags"
show_identifiers = False
.type = bool
.help = "Show experiment identifiers map if set"
image_statistics{
show_corrected = False
.type = bool
.help = "Show statistics on the distribution of values in each corrected image"
show_raw = False
.type = bool
.help = "Show statistics on the distribution of values in each raw image"
}
max_reflections = None
.type = int
.help = "Limit the number of reflections in the output."
""",
process_includes=True,
)
def beam_centre_mm(detector, s0):
    """Return (panel_id, (x, y)) in mm where beam vector s0 hits the detector.

    Panels for which the ray intersection fails or lies outside the panel
    are skipped; if no panel is hit, the last panel_id is returned with
    coordinates (None, None).
    """
    for panel_id, panel in enumerate(detector):
        try:
            x, y = panel.get_ray_intersection(s0)
        except RuntimeError:
            # Ray does not intersect this panel's plane; try the next one.
            continue
        if panel.is_coord_valid_mm((x, y)):
            return panel_id, (x, y)
    return panel_id, (None, None)
def beam_centre_raw_image_px(detector, s0):
    """Return the beam centre in raw-image pixel coordinates.

    Converts the mm intersection on the hit panel to pixels and applies
    that panel's raw image offset.
    """
    panel_id, (x_mm, y_mm) = beam_centre_mm(detector, s0)
    panel = detector[panel_id]
    px, py = panel.millimeter_to_pixel((x_mm, y_mm))
    offset = panel.get_raw_image_offset()
    return px + offset[0], py + offset[1]
def show_beam(detector, beam):
    """Return a text description of the beam model, including beam centres.

    For multi-panel detectors the raw-image beam centre is also reported;
    for scan-varying beams the range of beam centres over the scan is shown.
    """
    # standard static beam model string
    s = str(beam)
    # report whether the beam is scan-varying
    if beam.num_scan_points > 0:
        s += " s0 sampled at " + str(beam.num_scan_points) + " scan points\n"
    # add static model beam centres
    panel_id, (x, y) = beam_centre_mm(detector, beam.get_s0())
    if panel_id >= 0 and x is not None and y is not None:
        x_px, y_px = detector[panel_id].millimeter_to_pixel((x, y))
        if len(detector) > 1:
            beam_centre_mm_str = " mm: panel %i, (%.2f,%.2f)" % (panel_id, x, y)
            beam_centre_px_str = " px: panel %i, (%.2f,%.2f)" % (
                panel_id,
                x_px,
                y_px,
            )
            x_raw_px, y_raw_px = beam_centre_raw_image_px(detector, beam.get_s0())
            beam_centre_raw_px_str = " px, raw image: ({:.2f},{:.2f})".format(
                x_raw_px,
                y_raw_px,
            )
            x_raw_mm, y_raw_mm = detector[panel_id].pixel_to_millimeter(
                (x_raw_px, y_raw_px)
            )
            beam_centre_raw_mm_str = " mm, raw image: ({:.2f},{:.2f})".format(
                x_raw_mm,
                y_raw_mm,
            )
        else:
            # single panel: no need to report panel id or raw-image values
            beam_centre_mm_str = f" mm: ({x:.2f},{y:.2f})"
            beam_centre_px_str = f" px: ({x_px:.2f},{y_px:.2f})"
            beam_centre_raw_px_str = ""
            beam_centre_raw_mm_str = ""
        s += "\nBeam centre: \n"
        s += beam_centre_mm_str + "\n" + beam_centre_px_str + "\n"
        if beam_centre_raw_mm_str:
            s += beam_centre_raw_mm_str + "\n"
        if beam_centre_raw_px_str:
            s += beam_centre_raw_px_str + "\n"
    # report range of scan-varying model beam centres
    if beam.num_scan_points > 0:
        # get scan-varying beam centres, ensuring all on same panel
        sv_s0 = beam.get_s0_at_scan_points()
        impacts = [beam_centre_mm(detector, s0) for s0 in sv_s0]
        pnl, xy = zip(*impacts)
        uniq_pnls = set(pnl)
        # bail out of the range report if centres fall on different panels
        # or any scan point missed the detector entirely
        if len(uniq_pnls) > 1 or min(uniq_pnls) < 0:
            return s
        if any(e == (None, None) for e in xy):
            return s
        pnl = list(uniq_pnls)[0]
        x_mm, y_mm = zip(*xy)
        # convert to pixels
        xy = [detector[pnl].millimeter_to_pixel(e) for e in xy]
        x_px, y_px = zip(*xy)
        s += "Beam centre range (mm): ([{:.2f},{:.2f}],[{:.2f},{:.2f}])\n".format(
            min(x_mm),
            max(x_mm),
            min(y_mm),
            max(y_mm),
        )
        s += "Beam centre range (px): ([{:.2f},{:.2f}],[{:.2f},{:.2f}])\n".format(
            min(x_px),
            max(x_px),
            min(y_px),
            max(y_px),
        )
    return s
def show_goniometer(goniometer):
    """Return a text description of the goniometer model.

    Appends a note when the setting rotation is scan-varying.
    """
    description = str(goniometer)
    n_points = goniometer.num_scan_points
    if n_points > 0:
        description += f"    Setting rotation sampled at {n_points} scan points\n"
    return description
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Entry point for dials.show.

    Parses experiment/reflection files from the command line and prints
    summaries of the models and/or reflection tables found.
    """
    import dials.util.log
    dials.util.log.print_banner()
    from dials.util.options import (
        ArgumentParser,
        reflections_and_experiments_from_files,
    )
    usage = "dials.show [options] models.expt | image_*.cbf"
    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_experiments_from_images=True,
        read_reflections=True,
        check_format=False,
        epilog=help_message,
    )
    params, options = parser.parse_args(args=args, show_diff_phil=True)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )
    # Nothing to show: print usage and stop.
    if len(experiments) == 0 and len(reflections) == 0:
        parser.print_help()
        exit()
    if len(experiments):
        if not all(e.detector for e in experiments):
            sys.exit("Error: experiment has no detector")
        if not all(e.beam for e in experiments):
            sys.exit("Error: experiment has no beam")
        print(show_experiments(experiments, show_scan_varying=params.show_scan_varying))
        if params.image_statistics.show_raw:
            show_image_statistics(experiments, "raw")
        if params.image_statistics.show_corrected:
            show_image_statistics(experiments, "corrected")
        if params.show_shared_models:
            print()
            print(model_connectivity(experiments))
    if len(reflections):
        print(
            show_reflections(
                reflections,
                show_intensities=params.show_intensities,
                show_profile_fit=params.show_profile_fit,
                show_centroids=params.show_centroids,
                show_all_reflection_data=params.show_all_reflection_data,
                show_flags=params.show_flags,
                max_reflections=params.max_reflections,
                show_identifiers=params.show_identifiers,
            )
        )
def show_experiments(experiments, show_scan_varying=False):
    """Return a text summary of each experiment's models.

    :param experiments: an ExperimentList
    :param show_scan_varying: if True, include the crystal model at each
        scan point rather than just the static/average model
    """
    text = []
    for i_expt, expt in enumerate(experiments):
        text.append("Experiment %i:" % i_expt)
        if expt.imageset:
            format_class = expt.imageset.get_format_class()
            if not format_class.is_abstract():
                text.append(f"Format class: {format_class.__name__}")
        if expt.identifier != "":
            text.append(f"Experiment identifier: {expt.identifier}")
        # get_template() only exists for ImageSequence, not ImageSet
        try:
            text.append(f"Image template: {expt.imageset.get_template()}")
        except AttributeError:
            pass
        text.append(str(expt.detector))
        text.append(
            "Max resolution (at corners): %f"
            % (expt.detector.get_max_resolution(expt.beam.get_s0()))
        )
        text.append(
            "Max resolution (inscribed):  %f"
            % (expt.detector.get_max_inscribed_resolution(expt.beam.get_s0()))
        )
        text.append("")
        text.append(show_beam(expt.detector, expt.beam))
        if expt.scan is not None:
            text.append(str(expt.scan))
        if expt.goniometer is not None:
            text.append(show_goniometer(expt.goniometer))
        if expt.crystal is not None:
            text.append(expt.crystal.as_str(show_scan_varying=show_scan_varying))
            if expt.crystal.num_scan_points:
                # Average the scan-varying unit cell parameters for a summary
                abc = flex.vec3_double()
                angles = flex.vec3_double()
                for n in range(expt.crystal.num_scan_points):
                    (
                        a,
                        b,
                        c,
                        alpha,
                        beta,
                        gamma,
                    ) = expt.crystal.get_unit_cell_at_scan_point(n).parameters()
                    abc.append((a, b, c))
                    angles.append((alpha, beta, gamma))
                a, b, c = abc.mean()
                alpha, beta, gamma = angles.mean()
                mean_unit_cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma))
                text.append(f"  Average unit cell: {mean_unit_cell}")
        if expt.profile is not None:
            text.append(str(expt.profile))
        if expt.scaling_model is not None:
            text.append(str(expt.scaling_model))
    return "\n".join(text)
def show_image_statistics(experiments, im_type):
    """Print a five-number summary of pixel values for each image.

    :param experiments: an ExperimentList
    :param im_type: either "raw" or "corrected"
    :raises ValueError: for any other im_type
    :raises Sorry: if the underlying image files cannot be read
    """
    if im_type == "raw":
        raw = True
    elif im_type == "corrected":
        raw = False
    else:
        raise ValueError(f"Unknown im_type: {im_type}")
    # To show image statistics, check_format has to be true. So we have to reinstatiate
    # the experiment list here
    try:
        experiments = ExperimentListFactory.from_json(
            experiments.as_json(), check_format=True
        )
    except OSError as e:
        raise Sorry(
            f"Unable to read image data. Please check {e.filename} is accessible"
        )
    print(f"Five number summary of the {im_type} images")
    for i_expt, expt in enumerate(experiments):
        for i in range(len(expt.imageset)):
            identifier = os.path.basename(expt.imageset.get_image_identifier(i))
            if raw:
                pnl_data = expt.imageset.get_raw_data(i)
            else:
                pnl_data = expt.imageset.get_corrected_data(i)
            if not isinstance(pnl_data, tuple):
                pnl_data = (pnl_data,)
            # Flatten all panels into a single 1d array for the summary
            flat_data = pnl_data[0].as_1d()
            for p in pnl_data[1:]:
                flat_data.extend(p.as_1d())
            fns = five_number_summary(flat_data)
            print(
                "{}: Min: {:.1f} Q1: {:.1f} Med: {:.1f} Q3: {:.1f} Max: {:.1f}".format(
                    identifier, *fns
                )
            )
def model_connectivity(experiments):
    """Return a table showing which models are shared between experiments.

    One sub-table per model type (detector, crystal, beam); an 'x' marks
    the model instance used by each experiment. Returns "" for a single
    experiment, where sharing is not meaningful.
    """

    def _connectivity_table(experiments, model):
        lines = [""]
        lines.append(f"{model.capitalize()}:")
        models = getattr(experiments, f"{model}s")()
        rows = [[""] + [str(idx) for idx in range(len(models))]]
        for idx, experiment in enumerate(experiments):
            row = ["Experiment %d" % idx]
            for candidate in models:
                row.append("x" if getattr(experiment, model) is candidate else ".")
            rows.append(row)
        lines.append(tabulate(rows, tablefmt="plain"))
        return lines

    if len(experiments) == 1:
        return ""
    text = ["Experiment / Models"]
    for model_name in ("detector", "crystal", "beam"):
        text.extend(_connectivity_table(experiments, model_name))
    return "\n".join(text)
def _create_flag_count_table(table):
    """Generate a summary table of flag values in a reflection table.
    :param table: A reflection table
    :returns: A string of the formatted flags table
    """
    # Calculate the counts of entries that match each flag.
    # Note precedence: '&' binds tighter than '!=', so this is
    # (numpy_flags & value) != 0, i.e. a bitwise membership test.
    numpy_flags = table["flags"].as_numpy_array()
    flag_count = {
        flag: np.sum(numpy_flags & value != 0)
        for value, flag in table.flags.values.items()
    }
    # Work out the numeric-value order of the flags
    flag_order = sorted(table.flags.values.values(), key=lambda x: x.real)
    # Build the actual table
    flag_rows = [["Flag", "Count", "%"]]
    max_count_len = max(5, len(str(max(flag_count.values()))))
    last_flag = None
    for flag in flag_order:
        indent = ""
        # As a hint for reading, indent any 'summary' flags.
        # A summary flag is any flag which overlaps with the previous one.
        if last_flag and (last_flag.real & flag.real):
            indent = "  "
        last_flag = flag
        # Add the row to the table we're building
        flag_rows.append(
            [
                indent + flag.name,
                "{:{:d}d}".format(flag_count[flag], max_count_len),
                f"{100 * flag_count[flag] / len(table):5.01f}",
            ]
        )
    # Build the array of output strings
    text = []
    text.append("Reflection flags:")
    text.append(tabulate(flag_rows, headers="firstrow"))
    return "\n".join(text)
def show_reflections(
    reflections,
    show_intensities=False,
    show_profile_fit=False,
    show_centroids=False,
    show_all_reflection_data=False,
    show_flags=False,
    max_reflections=None,
    show_identifiers=False,
):
    """Return a text summary of one or more reflection tables.

    Always prints per-column min/max/mean statistics; the boolean flags
    additionally select groups of columns to print per-reflection, up to
    max_reflections rows.
    """
    text = []
    from orderedset import OrderedSet
    # printf-style format for each known column; entries without a '%'
    # are treated as opaque (printed literally in the stats table).
    formats = {
        "miller_index": "%i, %i, %i",
        "d": "%.2f",
        "qe": "%.3f",
        "dqe": "%.3f",
        "id": "%i",
        "imageset_id": "%i",
        "panel": "%i",
        "flags": "%i",
        "background.mean": "%.1f",
        "background.dispersion": "%.1f",
        "background.mse": "%.1f",
        "background.sum.value": "%.1f",
        "background.sum.variance": "%.1f",
        "intensity.prf.value": "%.1f",
        "intensity.prf.variance": "%.1f",
        "intensity.sum.value": "%.1f",
        "intensity.sum.variance": "%.1f",
        "intensity.cor.value": "%.1f",
        "intensity.cor.variance": "%.1f",
        "intensity.scale.value": "%.1f",
        "intensity.scale.variance": "%.1f",
        "Ih_values": "%.1f",
        "lp": "%.3f",
        "num_pixels.background": "%i",
        "num_pixels.background_used": "%i",
        "num_pixels.foreground": "%i",
        "num_pixels.valid": "%i",
        "partial_id": "%i",
        "partiality": "%.4f",
        "profile.correlation": "%.3f",
        "profile.rmsd": "%.3f",
        "xyzcal.mm": "%.2f, %.2f, %.2f",
        "xyzcal.px": "%.2f, %.2f, %.2f",
        "delpsical.rad": "%.3f",
        "delpsical2": "%.3f",
        "delpsical.weights": "%.3f",
        "xyzobs.mm.value": "%.2f, %.2f, %.2f",
        "xyzobs.mm.variance": "%.4e, %.4e, %.4e",
        "xyzobs.px.value": "%.2f, %.2f, %.2f",
        "xyzobs.px.variance": "%.4f, %.4f, %.4f",
        "s1": "%.4f, %.4f, %.4f",
        "s2": "%.4f, %.4f, %.4f",
        "shoebox": "%.1f",
        "rlp": "%.4f, %.4f, %.4f",
        "zeta": "%.3f",
        "x_resid": "%.3f",
        "x_resid2": "%.3f",
        "y_resid": "%.3f",
        "y_resid2": "%.3f",
        "kapton_absorption_correction": "%.3f",
        "kapton_absorption_correction_sigmas": "%.3f",
        "inverse_scale_factor": "%.3f",
        "inverse_scale_factor_variance": "%.3f",
    }
    for rlist in reflections:
        from dials.algorithms.shoebox import MaskCode
        foreground_valid = MaskCode.Valid | MaskCode.Foreground
        text.append("")
        text.append(f"Reflection list contains {len(rlist)} reflections")
        if len(rlist) == 0:
            continue
        # Per-column summary statistics table
        rows = [["Column", "min", "max", "mean"]]
        for k, col in rlist.cols():
            if k in formats and "%" not in formats.get(k, "%s"):
                # Allow blanking out of entries that wouldn't make sense
                rows.append(
                    [
                        k,
                        formats.get(k, "%s"),
                        formats.get(k, "%s"),
                        formats.get(k, "%s"),
                    ]
                )
            elif type(col) in (flex.double, flex.int, flex.size_t):
                if type(col) in (flex.int, flex.size_t):
                    col = col.as_double()
                rows.append(
                    [
                        k,
                        formats.get(k, "%s") % flex.min(col),
                        formats.get(k, "%s") % flex.max(col),
                        formats.get(k, "%s") % flex.mean(col),
                    ]
                )
            elif type(col) in (flex.vec3_double, flex.miller_index):
                if isinstance(col, flex.miller_index):
                    col = col.as_vec3_double()
                rows.append(
                    [
                        k,
                        formats.get(k, "%.2f, %.2f, %.2f") % col.min(),
                        formats.get(k, "%.2f, %.2f, %.2f") % col.max(),
                        formats.get(k, "%.2f, %.2f, %.2f") % col.mean(),
                    ]
                )
            elif isinstance(col, flex.shoebox):
                # Shoeboxes get several derived summary rows instead
                rows.append([k, "", "", ""])
                si = col.summed_intensity().observed_value()
                rows.append(
                    [
                        "  summed I",
                        formats.get(k, "%s") % flex.min(si),
                        formats.get(k, "%s") % flex.max(si),
                        formats.get(k, "%s") % flex.mean(si),
                    ]
                )
                x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
                bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
                rows.append(
                    [
                        "  N pix",
                        formats.get(k, "%s") % flex.min(bbox_sizes),
                        formats.get(k, "%s") % flex.max(bbox_sizes),
                        formats.get(k, "%s") % flex.mean(bbox_sizes),
                    ]
                )
                fore_valid = col.count_mask_values(foreground_valid).as_double()
                rows.append(
                    [
                        "  N valid foreground pix",
                        formats.get(k, "%s") % flex.min(fore_valid),
                        formats.get(k, "%s") % flex.max(fore_valid),
                        formats.get(k, "%s") % flex.mean(fore_valid),
                    ]
                )
        text.append(tabulate(rows, headers="firstrow"))
        if show_flags:
            text.append(_create_flag_count_table(rlist))
        if show_identifiers:
            if rlist.experiment_identifiers():
                text.append(
                    """Experiment identifiers id-map values:\n%s"""
                    % (
                        "\n".join(
                            "id:"
                            + str(k)
                            + " -> experiment identifier:"
                            + str(rlist.experiment_identifiers()[k])
                            for k in rlist.experiment_identifiers().keys()
                        )
                    )
                )
    # Column groups selected by the show_* keyword arguments
    intensity_keys = (
        "miller_index",
        "d",
        "intensity.prf.value",
        "intensity.prf.variance",
        "intensity.sum.value",
        "intensity.sum.variance",
        "background.mean",
        "profile.correlation",
        "profile.rmsd",
    )
    profile_fit_keys = ("miller_index", "d")
    centroid_keys = (
        "miller_index",
        "d",
        "xyzcal.mm",
        "xyzcal.px",
        "xyzobs.mm.value",
        "xyzobs.mm.variance",
        "xyzobs.px.value",
        "xyzobs.px.variance",
    )
    keys_to_print = OrderedSet()
    if show_intensities:
        for k in intensity_keys:
            keys_to_print.add(k)
    if show_profile_fit:
        for k in profile_fit_keys:
            keys_to_print.add(k)
    if show_centroids:
        for k in centroid_keys:
            keys_to_print.add(k)
    if show_all_reflection_data:
        for k in formats:
            keys_to_print.add(k)
    def format_column(key, data, format_strings=None):
        """Format one column as right-aligned strings, one per reflection."""
        if isinstance(data, flex.vec3_double):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.parts())
            ]
        elif isinstance(data, flex.miller_index):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.as_vec3_double().parts())
            ]
        elif isinstance(data, flex.size_t):
            c_strings = [data.as_int().as_string(format_strings[0].strip())]
        elif isinstance(data, flex.shoebox):
            # Shoeboxes are summarised by their bounding-box pixel count
            x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
            bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
            c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
            key += " (N pix)"
        else:
            c_strings = [data.as_string(format_strings[0].strip())]
        column = flex.std_string()
        max_element_lengths = [c.max_element_length() for c in c_strings]
        for i in range(len(c_strings[0])):
            column.append(
                f"%{len(key)}s"
                % ", ".join(
                    ("%%%is" % max_element_lengths[j]) % c_strings[j][i]
                    for j in range(len(c_strings))
                )
            )
        return column
    if keys_to_print:
        keys = [k for k in keys_to_print if k in rlist]
        if max_reflections is not None:
            max_reflections = min(len(rlist), max_reflections)
        else:
            max_reflections = len(rlist)
        columns = []
        for k in keys:
            columns.append(
                format_column(k, rlist[k], format_strings=formats[k].split(","))
            )
        text.append("")
        text.append("Printing %i of %i reflections:" % (max_reflections, len(rlist)))
        # Header row: pad each key to its column's width
        line = []
        for j in range(len(columns)):
            key = keys[j]
            if key == "shoebox":
                key += " (N pix)"
            width = max(len(key), columns[j].max_element_length())
            line.append("%%%is" % width % key)
        text.append(" ".join(line))
        for i in range(max_reflections):
            line = (c[i] for c in columns)
            text.append(" ".join(line))
    return "\n".join(text)
if __name__ == "__main__":
    # Allow the module to be executed directly as a script.
    run()
| bsd-3-clause | ccb5b7cf5216cdaca4c4f2d9b2cbb8bf | 32.014388 | 88 | 0.507823 | 3.544724 | false | false | false | false |
douban/dpark | dpark/utils/tdigest.py | 1 | 7283 | from math import isnan, ceil, pi
from six.moves import range
class Centroid(object):
    """A single t-digest centroid: a weighted mean with its point count."""

    def __init__(self, x, w=1):
        self.__mean = float(x)
        self.__count = float(w)

    @property
    def mean(self):
        """The centroid's (weighted) mean value."""
        return self.__mean

    @property
    def count(self):
        """Total weight of the points merged into this centroid."""
        return self.__count

    def __repr__(self):
        return "<Centroid: mean==%.8f, count=%d>" % (self.__mean, self.__count)

    def __eq__(self, other):
        if not isinstance(other, Centroid):
            return False
        return (self.__mean, self.__count) == (other.mean, other.count)
class TDigest(object):
    """Streaming estimator of quantiles/CDF using Dunning's t-digest.

    Incoming values are buffered in temporary lists and periodically merged
    into a compressed list of weighted centroids (``_mean`` / ``_weight``),
    so memory stays bounded by ``compression`` while accuracy remains
    highest near the distribution tails.
    """

    def __init__(self, compression=100, size=None):
        # Observed extremes; updated on every merge.
        self._min = None
        self._max = None
        self.compression = compression
        # Weight already merged into centroids.
        self._total_weight = 0
        self._weight = []
        self._mean = []
        # Buffered (not yet merged) points and their total weight.
        self._unmerge_weight = 0
        self._tmp_weight = []
        self._tmp_mean = []
        if size is None:
            size = int(2 * ceil(compression)) + 10
        self._size = size

    @staticmethod
    def _weighted_average(x1, w1, x2, w2):
        # Weighted mean of x1 and x2, clamped to [min(x1, x2), max(x1, x2)]
        # so interpolation can never leave the bracketing interval.
        a, b = min(x1, x2), max(x1, x2)
        x = float(x1 * w1 + x2 * w2) / (w1 + w2)
        return max(a, min(b, x))

    def __len__(self):
        """Total (weighted) number of points seen, merged or buffered."""
        return int(self._total_weight + self._unmerge_weight)

    def __add__(self, other):
        """Merge another TDigest into this one (in place) and return self."""
        if not isinstance(other, TDigest):
            # Deliberately raises rather than returning NotImplemented.
            raise TypeError('Can not add {} with {}'.format(
                self.__class__.__name__,
                other.__class__.__name__,
            ))
        if len(other) == 0:
            return self
        other.compress()
        self._tmp_mean.extend(other._mean)
        self._tmp_weight.extend(other._weight)
        total = sum(other._weight)
        # Bug fix: accumulate rather than overwrite, so points already
        # buffered in self._tmp_* are still counted by the next compress()
        # (previously ``=`` made _total_weight disagree with sum(_weight)).
        self._unmerge_weight += total
        self.compress()
        return self

    def add(self, x, w=1):
        """Buffer a value ``x`` with weight ``w``; compress when full.

        Raises:
            ValueError: If ``x`` is NaN.
        """
        x = float(x)
        w = float(w)
        if isnan(x):
            raise ValueError('Cannot add NaN')
        if len(self._tmp_weight) + len(self._weight) >= self._size - 1:
            self.compress()
        self._tmp_weight.append(w)
        self._tmp_mean.append(x)
        self._unmerge_weight += w

    def compress(self):
        """Merge all buffered points into the centroid list."""
        if self._unmerge_weight > 0:
            self._merge(self._tmp_weight, self._tmp_mean)
            self._tmp_weight = []
            self._tmp_mean = []
            self._unmerge_weight = 0

    def _merge(self, incoming_weight, incoming_mean):
        """Merge incoming (weight, mean) pairs with the existing centroids.

        Neighbouring points are combined whenever the resulting centroid
        stays below the t-digest size bound z^2 <= q(1-q), which keeps
        centroids small near the tails (q near 0 or 1).
        """
        def _argsort(seq):
            return sorted(range(len(seq)), key=seq.__getitem__)

        incoming_weight = incoming_weight + self._weight
        incoming_mean = incoming_mean + self._mean
        assert incoming_weight
        incoming_order = _argsort(incoming_mean)
        self._total_weight += self._unmerge_weight
        normalizer = self.compression / (pi * self._total_weight)
        mean = []
        weight = []
        mean.append(incoming_mean[incoming_order[0]])
        weight.append(incoming_weight[incoming_order[0]])
        w_so_far = 0.
        for ix in incoming_order[1:]:
            proposed_weight = weight[-1] + incoming_weight[ix]
            z = proposed_weight * normalizer
            q0 = w_so_far / self._total_weight
            q2 = (w_so_far + proposed_weight) / self._total_weight
            if z * z <= q0 * (1 - q0) and z * z <= q2 * (1 - q2):
                # Absorb into the current centroid (running-mean update).
                weight[-1] += incoming_weight[ix]
                mean[-1] = mean[-1] + (incoming_mean[ix] - mean[-1]) * incoming_weight[ix] / weight[-1]
            else:
                # Close the current centroid and start a new one.
                w_so_far += weight[-1]
                mean.append(incoming_mean[ix])
                weight.append(incoming_weight[ix])
        self._mean = mean
        self._weight = weight
        # assert sum(weight) == self._total_weight
        if self._total_weight > 0:
            self._min = mean[0] if self._min is None else min(self._min, mean[0])
            self._max = mean[-1] if self._max is None else max(self._max, mean[-1])

    def quantile(self, q):
        """Return an estimate of the q-th quantile (q in [0, 1]).

        Returns NaN for an empty digest.

        Raises:
            ValueError: If ``q`` is outside [0, 1].
        """
        q = float(q)
        if not 0 <= q <= 1:
            raise ValueError('q should be in [0, 1], got {}'.format(q))
        self.compress()
        weight = self._weight
        mean = self._mean
        if not weight:
            return float('nan')
        elif len(weight) == 1:
            return mean[0]
        index = q * self._total_weight
        if index < weight[0] / 2:
            # Below the first centroid's half-weight: interpolate from _min.
            return self._min + 2. * index / weight[0] * (mean[0] - self._min)
        weight_so_far = weight[0] / 2.
        for i in range(len(weight) - 1):
            dw = (weight[i] + weight[i + 1]) / 2.
            if weight_so_far + dw > index:
                # Interpolate between the two bracketing centroid means.
                z1 = index - weight_so_far
                z2 = weight_so_far + dw - index
                return self._weighted_average(mean[i], z2, mean[i + 1], z1)
            weight_so_far += dw
        assert index <= self._total_weight
        assert index >= self._total_weight - weight[-1] / 2.
        # NOTE(review): the sign of ``weight[-1] / 2.`` below looks suspect
        # (z1 comes out negative for all valid ``index``); compare with a
        # reference t-digest implementation before trusting extreme upper
        # quantiles.  Preserved as-is to keep behaviour unchanged.
        z1 = index - self._total_weight - weight[-1] / 2.
        z2 = weight[-1] / 2. - z1
        return self._weighted_average(mean[-1], z1, self._max, z2)

    def cdf(self, x):
        """Return an estimate of P(X <= x); NaN for an empty digest."""
        x = float(x)
        self.compress()
        weight = self._weight
        mean = self._mean
        if not weight:
            return float('nan')
        elif len(weight) == 1:
            # Single centroid: step function around its mean.
            width = self._max - self._min
            if x < self._min:
                return 0.
            elif x > self._max:
                return 1.
            elif x - self._min <= width:
                # With one centroid _min == _max, so this branch always
                # fires for x in range; the final ``else`` is unreachable.
                return 0.5
            else:
                return (x - self._min) / (self._max - self._min)
        if x < self._min:
            return 0.
        if x > self._max:
            return 1.
        if x <= mean[0]:
            # Left tail: linear between _min and the first centroid.
            if mean[0] - self._min > 0:
                return (x - self._min) / (mean[0] - self._min) * weight[0] / self._total_weight / 2.
            else:
                return 0.
        if x >= mean[-1]:
            # Right tail: linear between the last centroid and _max.
            if self._max - mean[-1] > 0:
                return 1. - (self._max - x) / (self._max - mean[-1]) * weight[-1] / self._total_weight / 2.
            else:
                return 1.
        weight_so_far = weight[0] / 2.
        for it in range(len(weight) - 1):
            if mean[it] == x:
                # Exact hit (possibly on a run of equal means): return the
                # midpoint of the spanned cumulative weight.
                w0 = weight_so_far
                weight_so_far += sum(
                    weight[i] + weight[i + 1]
                    for i in range(it, len(weight) - 1)
                    if mean[i + 1] == x
                )
                return (w0 + weight_so_far) / 2. / self._total_weight
            if mean[it] <= x < mean[it + 1]:
                if mean[it + 1] - mean[it] > 0:
                    dw = (weight[it] + weight[it + 1]) / 2.
                    return (weight_so_far +
                            dw * (x - mean[it]) / (mean[it + 1] - mean[it])) / self._total_weight
                else:
                    dw = (weight[it] + weight[it + 1]) / 2.
                    return weight_so_far + dw / self._total_weight
            weight_so_far += (weight[it] + weight[it + 1]) / 2.
        assert False

    @property
    def centroids(self):
        """List of Centroid objects after compressing buffered points."""
        self.compress()
        weight = self._weight
        mean = self._mean
        return [Centroid(mean[i], weight[i]) for i in range(len(self._weight))]
| bsd-3-clause | acaa15c58ccbd1eeb1c42a46c511c01c | 29.219917 | 107 | 0.481395 | 3.670867 | false | false | false | false |
dials/dials | src/dials/command_line/cluster_exec.py | 1 | 1592 | # LIBTBX_SET_DISPATCHER_NAME cluster.dials.exec
from __future__ import annotations
import pickle
import dials.util
def get_cwd():
    """Return the job working directory, passed as the first CLI argument."""
    import sys

    argv = sys.argv
    return argv[1]
def get_tid():
    """Return the array-job task id for this process.

    FIXME There is currently no portable way to obtain the task id through
    drmaa, which is really annoying, so we fall back on the SGE_TASK_ID
    environment variable.  This therefore only works under SGE; support for
    other schedulers can be added as and when needed.
    """
    import os

    tid = os.environ.get("SGE_TASK_ID")
    if tid is None:
        raise KeyError("Could not find task id")
    return tid
@dials.util.show_mail_handle_errors()
def run(_=None):
    """Execute one task of a cluster array job.

    Polls the working directory (from argv) for a pickled ``<tid>.input``
    file containing ``(function, element)``, evaluates ``function(element)``
    and pickles the result — or, on failure, the raised exception with its
    formatted traceback as the message — to ``<tid>.output``.
    """
    import traceback
    from os.path import exists, join
    from time import sleep

    # Get the task id and the current working directory
    tid = get_tid()
    cwd = get_cwd()

    # Set the paths
    input_fn = join(cwd, f"{tid}.input")
    output_fn = join(cwd, f"{tid}.output")

    # Wait until it exists
    while not exists(input_fn):
        sleep(1)

    # Try to run the function, otherwise return an exception
    try:
        with open(input_fn, "rb") as infile:
            function, element = pickle.load(infile)
        result = function(element)
    except Exception as e:
        # Replace the exception args with the full traceback so the caller
        # can see where the remote task failed when it unpickles the result.
        e.args = [traceback.format_exc()]
        result = e

    # Dump the result
    with open(output_fn, "wb") as outfile:
        pickle.dump(result, outfile, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    run()
| bsd-3-clause | 1458942fd2c7c727aa66112d75ab01c6 | 22.072464 | 79 | 0.625 | 3.668203 | false | false | false | false |
dials/dials | src/dials/util/image_viewer/slip_viewer/calibration_frame.py | 1 | 7837 | from __future__ import annotations
import os
import wx
from scitbx.matrix import col
class SBSettingsFrame(wx.MiniFrame):
    """Floating mini-frame hosting the quadrant-calibration settings panel."""

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        szr = wx.BoxSizer(wx.VERTICAL)
        panel = SBSettingsPanel(self)
        self.SetSizer(szr)
        szr.Add(panel, 1, wx.EXPAND)
        szr.Fit(panel)
        self.panel = panel
        self.sizer = szr
        self.Fit()
        # Destroy (rather than merely hide) the frame when it is closed.
        self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy(), self)
# XXX Could have a set_image() function instead of referring back to
# the frame all the time?
class SBSettingsPanel(wx.Panel):
    """Panel of controls to translate detector quadrants and save the result."""

    # XXX Names: they're not really settings. XXX Allow for setting
    # rotation, and provide a hierarchical drop-down menu to play with
    # detector, panel, sensor and ASIC.

    def __init__(self, *args, **kwds):
        """Build fast/slow origin spinners for each of the four quadrants,
        an increment control, and save/restore buttons."""
        super().__init__(*args, **kwds)

        sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sizer)

        # Number of decimal digits for distances.
        self.digits = 2

        # Quad translation controls
        from wx.lib.agw.floatspin import EVT_FLOATSPIN, FloatSpin

        img = self.GetParent().GetParent().pyslip.tiles.raw_image
        d = img.get_detector()
        self._quad_spinners = []
        for serial in range(4):
            # Initialise each spinner pair from the quadrant's current origin.
            fast, slow = d.hierarchy()[serial].get_origin()[0:2]
            name_quadrant = ["Q0", "Q1", "Q2", "Q3"][serial]

            box = wx.BoxSizer(wx.HORIZONTAL)
            for (name_direction, value) in [("fast", fast), ("slow", slow)]:
                name_ctrl = name_quadrant + "_" + name_direction + "_ctrl"

                spinner = FloatSpin(
                    self, digits=self.digits, name=name_ctrl, value=value
                )
                self.Bind(EVT_FLOATSPIN, self.OnUpdateQuad, spinner)

                box.Add(
                    spinner,
                    0,
                    wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
                    5,
                )
                box.Add(
                    wx.StaticText(self, label=name_quadrant + " " + name_direction),
                    0,
                    wx.ALL | wx.ALIGN_CENTER_VERTICAL,
                    5,
                )

                # Keep both a named attribute (e.g. self._Q0_fast_ctrl) and a
                # flat list so OnSpinAmount can update increments in one pass.
                setattr(self, "_" + name_ctrl, spinner)
                self._quad_spinners.append(spinner)

            sizer.Add(box)

        # Spinner amount control
        box = wx.BoxSizer(wx.HORIZONTAL)
        self._spinner_amt_control = FloatSpin(
            self,
            digits=self.digits,
            name="spin_amount",
            value=1,
            min_val=0.1,
            increment=0.1,
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinAmount, self._spinner_amt_control)
        box.Add(
            self._spinner_amt_control,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="Spinner increment (mm)"),
            0,
            wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        sizer.Add(box)

        box = wx.BoxSizer(wx.HORIZONTAL)

        btn = wx.Button(self, label="Restore metrology")
        box.Add(btn, flag=wx.ALL, border=5)
        self.Bind(wx.EVT_BUTTON, self.OnRestoreMetrology, btn)

        btn = wx.Button(self, label="Save current metrology")
        box.Add(btn, flag=wx.ALL, border=5)
        self.Bind(wx.EVT_BUTTON, self.OnSaveMetrology, btn)

        sizer.Add(box, flag=wx.ALIGN_CENTER)

    # XXX Rename to metrology tool?
    def OnRestoreMetrology(self, event):
        """Placeholder: restoring saved metrology is not yet implemented."""
        print("Not implemented")

    def OnSaveMetrology(self, event):
        """Prompt for a path and write the current (adjusted) metrology as a
        CBF file, after stripping frame-specific categories."""
        import pycbf

        dialog = wx.FileDialog(
            self,
            defaultDir=os.curdir,
            defaultFile="quadrants.def",
            message="Save metrology file",
            style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
            wildcard="Phil files (*.def)|*.def",
        )

        if dialog.ShowModal() == wx.ID_OK:
            path = str(dialog.GetPath())
            if path != "":
                # The detector object of the format instance is adjusted when the quadrant calibration
                # arrows are clicked. Sync those adjustments to a new cbf handle, drop unneeded categories
                # (categories frame specific but not metrology specific) and write the file.
                frame = self.GetParent().GetParent()
                img = frame.pyslip.tiles.raw_image
                header = img.image_set.get_format_class()(img.full_path)
                header.sync_detector_to_cbf(img.get_detector())
                cbf = header._cbf_handle
                cbf.find_category("array_data")
                cbf.remove_category()
                cbf.find_category("array_structure")
                cbf.remove_category()
                cbf.find_category("array_intensities")
                cbf.remove_category()
                cbf.find_category("diffrn_radiation")
                cbf.remove_category()
                cbf.find_category("diffrn_radiation_wavelength")
                cbf.remove_category()
                cbf.find_category("diffrn_measurement")
                cbf.remove_category()
                cbf.find_category("diffrn_scan")
                cbf.remove_category()
                cbf.find_category("diffrn_scan_frame")
                cbf.remove_category()

                cbf.write_widefile(
                    path,
                    pycbf.CBF,
                    pycbf.MIME_HEADERS | pycbf.MSG_DIGEST | pycbf.PAD_4K,
                    0,
                )

                print("Saved cbf header to", path)

    def OnUpdateQuad(self, event):
        """Translate the quadrant bound to the spinner that fired, then
        refresh the displayed image."""
        # Get the name of the spinner and its delta, the deviation from
        # the default value. Update the default for the next event.
        obj = event.EventObject
        name = obj.GetName()
        value = obj.GetValue()
        delta = float(value - obj.GetDefaultValue())
        obj.SetDefaultValue(value)

        # Update the frame's effective metrology parameters.
        frame = self.GetParent().GetParent()
        img = frame.pyslip.tiles.raw_image
        quads = img.get_detector().hierarchy()

        # Map the control name to (quadrant, translation vector in mm).
        if name == "Q0_fast_ctrl":
            quad, delta = (quads[0], col((delta, 0, 0)))
        elif name == "Q0_slow_ctrl":
            quad, delta = (quads[0], col((0, delta, 0)))
        elif name == "Q1_fast_ctrl":
            quad, delta = (quads[1], col((delta, 0, 0)))
        elif name == "Q1_slow_ctrl":
            quad, delta = (quads[1], col((0, delta, 0)))
        elif name == "Q2_fast_ctrl":
            quad, delta = (quads[2], col((delta, 0, 0)))
        elif name == "Q2_slow_ctrl":
            quad, delta = (quads[2], col((0, delta, 0)))
        elif name == "Q3_fast_ctrl":
            quad, delta = (quads[3], col((delta, 0, 0)))
        elif name == "Q3_slow_ctrl":
            quad, delta = (quads[3], col((0, delta, 0)))
        else:
            raise RuntimeError("Unknown control name " + name)

        # Shift the quadrant's local origin by delta, keeping its axes.
        ldm = quad.get_local_d_matrix()
        fast = (ldm[0], ldm[3], ldm[6])
        slow = (ldm[1], ldm[4], ldm[7])
        orig = col((ldm[2], ldm[5], ldm[8])) + delta
        quad.set_local_frame(fast, slow, orig)

        # Update the view, trigger redraw.
        tiles = frame.pyslip.tiles
        tiles.set_image(tiles.raw_image)
        tiles.flex_image.adjust(color_scheme=tiles.current_color_scheme)
        tiles.reset_the_cache()
        tiles.tile_cache = tiles.cache[tiles.zoom_level]
        tiles.tile_list = tiles.lru[tiles.zoom_level]
        frame.pyslip.Update()

    def OnSpinAmount(self, event):
        """Propagate the chosen step size to every quadrant spin control."""
        obj = event.EventObject
        for spinner in self._quad_spinners:
            spinner.SetIncrement(obj.GetValue())
| bsd-3-clause | c86963b03f7b8e30a80d62aff02f9b7b | 34.301802 | 107 | 0.535281 | 3.769601 | false | false | false | false |
dials/dials | src/dials/util/resolution_analysis.py | 1 | 25324 | """
Algorithms for analysis of resolution limits.
"""
from __future__ import annotations
import enum
import logging
import math
import typing
import iotbx.merging_statistics
import iotbx.mtz
import iotbx.phil
from cctbx import miller, uctbx
from cctbx.array_family import flex
from iotbx.reflection_file_utils import label_table
from scitbx.math import curve_fitting, five_number_summary
from dials.algorithms.scaling.scaling_library import determine_best_unit_cell
from dials.report import plots
from dials.util import Sorry, tabulate
from dials.util.batch_handling import (
assign_batches_to_reflections,
calculate_batch_offsets,
)
from dials.util.filter_reflections import filter_reflection_table
from dials.util.normalisation import quasi_normalisation
logger = logging.getLogger(__name__)
class metrics(enum.Enum):
    """Supported metrics for estimation of resolution limits."""

    # For most members the value is the name of the corresponding per-bin
    # attribute on iotbx merging-statistics bins (used via getattr in
    # resolution_fit_from_merging_stats); CC_HALF and CC_REF are handled by
    # dedicated code paths instead.
    CC_HALF = "cc_half"
    CC_REF = "cc_ref"
    ISIGMA = "unmerged_i_over_sigma_mean"
    MISIGMA = "i_over_sigma_mean"
    I_MEAN_OVER_SIGMA_MEAN = "i_mean_over_sigi_mean"
    RMERGE = "r_merge"
    COMPLETENESS = "completeness"
def polynomial_fit(x, y, degree=5):
    """Fit a polynomial of the given degree to y(x) and return the fitted values.

    ``x`` and ``y`` are equal-length iterables of floats; suitable for smooth
    merging statistics such as I/sigma.
    """
    params = curve_fitting.univariate_polynomial_fit(
        x, y, degree=degree, max_iterations=100
    ).params
    model = curve_fitting.univariate_polynomial(*params)
    return model(x)
def tanh_fit(x, y, iqr_multiplier=None):
    """
    Fit a tanh function to the values y(x) and return this fit

    x, y should be iterables containing floats of the same size. This is used for
    fitting a curve to CC½.  If ``iqr_multiplier`` is given, points whose
    residual from a first-pass fit lies more than iqr_multiplier * IQR outside
    the interquartile range are discarded and the curve is refitted.
    """
    tf = curve_fitting.tanh_fit(x, y)
    f = curve_fitting.tanh(*tf.params)
    if iqr_multiplier:
        assert iqr_multiplier > 0
        # Residuals of the observed values from the first-pass fit.
        yc = f(x)
        dy = y - yc
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(dy)
        iqr_x = q3_x - q1_x
        cut_x = iqr_multiplier * iqr_x
        outliers = (dy > q3_x + cut_x) | (dy < q1_x - cut_x)
        if outliers.count(True) > 0:
            # Refit with the outlying points removed.
            xo = x.select(~outliers)
            yo = y.select(~outliers)
            tf = curve_fitting.tanh_fit(xo, yo)
            f = curve_fitting.tanh(*tf.params)
    # Fitted values are always evaluated on the full, unfiltered x.
    return f(x)
def log_fit(x, y, degree=5):
    """Fit a polynomial to log(y(x)) and return exp() of the fitted values.

    ``x`` and ``y`` are equal-length iterables of floats; suited to quantities
    that decay roughly exponentially with resolution, e.g. I/sigma.
    """
    fitted = curve_fitting.univariate_polynomial_fit(
        x, flex.log(y), degree=degree, max_iterations=100
    )
    model = curve_fitting.univariate_polynomial(*fitted.params)
    return flex.exp(model(x))
def log_inv_fit(x, y, degree=5):
    """Fit a polynomial to log(1 / y(x)) and return the inverse of the fit.

    ``x`` and ``y`` are equal-length iterables of floats; useful for
    statistics that grow with resolution, e.g. Rmerge.
    """
    fitted = curve_fitting.univariate_polynomial_fit(
        x, flex.log(1 / y), degree=degree, max_iterations=100
    )
    model = curve_fitting.univariate_polynomial(*fitted.params)
    return 1 / flex.exp(model(x))
def resolution_fit_from_merging_stats(merging_stats, metric, model, limit, sel=None):
    """Estimate a resolution limit from a pre-computed merging statistic.

    The function ``model`` is fit to the per-bin values of ``metric`` taken
    from ``merging_stats``; the estimated limit is the ``d_star_sq`` at which
    the fitted curve crosses ``limit``.

    Args:
        merging_stats (iotbx.merging_statistics.dataset_statistics):
            Pre-calculated merging statistics object.
        metric (str): Name of a per-bin attribute on ``merging_stats.bins``.
        model: Callable taking x (d_star_sq) and y values and returning the
            fitted y(x) values.
        limit (float): The resolution limit criterion.
        sel (scitbx.array_family.flex.bool): Optional selection applied to
            the bins.

    Returns: The estimated resolution limit in units of Å^-1
    """
    # Bins are stored high-to-low resolution; reverse so that d_star_sq
    # increases along the arrays.
    ordered_bins = list(merging_stats.bins)
    ordered_bins.reverse()
    y_obs = flex.double(getattr(b, metric) for b in ordered_bins)
    d_star_sq = flex.double(uctbx.d_as_d_star_sq(b.d_min) for b in ordered_bins)
    return resolution_fit(d_star_sq, y_obs, model, limit, sel=sel)
def resolution_fit(d_star_sq, y_obs, model, limit, sel=None):
    """Estimate a resolution limit based on the input merging statistics

    The function defined by `model` will be fit to the input `d_star_sq` and `y_obs`.
    The estimated resolution limit is chosen as the `d_star_sq` value at which the
    fitted function equals `limit`.

    Args:
        d_star_sq (scitbx.array_family.flex.double): The high resolution limits of the
            resolution bins in units 1/d*2
        y_obs (scitbx.array_family.flex.double): The statistic against which to fit the
            function `model`
        model: The function to fit against `y_obs`. Must be callable, taking as input x
            (d_star_sq) and y (the metric to be fitted) values, returning the fitted
            y(x) values.
        limit (float): The resolution limit criterion.
        sel (scitbx.array_family.flex.bool): An optional selection to apply to the
            `d_star_sq` and `y_obs` values.

    Returns: The estimated resolution limit in units of Å^-1

    Raises:
        RuntimeError: Raised if no `y_obs` values remain after application of the
            selection `sel`
    """
    # NOTE(review): ``not sel`` treats both None and an *empty* selection as
    # "select everything" (flex arrays fall back to length for truthiness);
    # ``sel is None`` may be the real intent here — confirm before changing.
    if not sel:
        sel = flex.bool(len(d_star_sq), True)
    # Additionally restrict the fit to bins with positive observed values.
    sel &= y_obs > 0
    y_obs = y_obs.select(sel)
    d_star_sq = d_star_sq.select(sel)

    if not len(y_obs):
        raise RuntimeError("No reflections left for fitting")
    # The hard-coded 6 is the model's third parameter: the polynomial degree
    # for the polynomial/log fits, or the IQR multiplier for tanh_fit.
    y_fit = model(d_star_sq, y_obs, 6)
    logger.debug(
        tabulate(
            [("d*2", "d", "obs", "fit")]
            + [
                (ds2, uctbx.d_star_sq_as_d(ds2), yo, yf)
                for ds2, yo, yf in zip(d_star_sq, y_obs, y_fit)
            ],
            headers="firstrow",
        )
    )

    if flex.min(y_obs) > limit:
        # Every bin is above the cut-off: no limit imposed, keep all data.
        d_min = 1.0 / math.sqrt(flex.max(d_star_sq))
    else:
        try:
            d_min = 1.0 / math.sqrt(interpolate_value(d_star_sq, y_fit, limit))
        except RuntimeError as e:
            # The fitted curve never crosses the cut-off; report no limit.
            logger.debug(f"Error interpolating value: {e}")
            d_min = None

    return ResolutionResult(d_star_sq, y_obs, y_fit, d_min)
def _get_cc_half_significance(merging_stats, cc_half_method):
    """Get the CC½ significance values from the input merging_stats object"""
    # Flags are returned ordered from low to high resolution (reversed bins).
    # Implicitly returns None when the statistics carry no significance
    # information for the requested method.
    if (
        cc_half_method == "sigma_tau"
        and merging_stats.overall.cc_one_half_sigma_tau_significance is not None
    ):
        return flex.bool(
            b.cc_one_half_sigma_tau_significance for b in merging_stats.bins
        ).reversed()
    elif merging_stats.overall.cc_one_half_significance is not None:
        return flex.bool(
            b.cc_one_half_significance for b in merging_stats.bins
        ).reversed()
def _get_cc_half_critical_values(merging_stats, cc_half_method):
    """Get the CC½ critical values from the input merging_stats object"""
    # Values are returned ordered from low to high resolution (reversed
    # bins); implicitly returns None when no critical values are available.
    if (
        cc_half_method == "sigma_tau"
        and merging_stats.overall.cc_one_half_sigma_tau_critical_value is not None
    ):
        return flex.double(
            b.cc_one_half_sigma_tau_critical_value for b in merging_stats.bins
        ).reversed()
    elif merging_stats.overall.cc_one_half_critical_value is not None:
        # Per-bin values may be None; substitute 0.0 so a flex.double can be
        # built.  NOTE(review): the sigma_tau branch above has no such guard
        # — presumably its per-bin values are never None; confirm.
        critical = [
            b.cc_one_half_critical_value
            if b.cc_one_half_critical_value is not None
            else 0.0
            for b in merging_stats.bins
        ]
        return flex.double(critical).reversed()
def resolution_cc_half(
    merging_stats, limit, cc_half_method="half_dataset", model=tanh_fit
):
    """Estimate a resolution limit based on CC½

    The function defined by `model` will be fit to the CC½ values that have been
    pre-calculated by the `merging_stats` object. The estimated resolution limit is
    chosen as the `d_star_sq` value at which the fitted function equals `limit`.

    Args:
        merging_stats (iotbx.merging_statistics.dataset_statistics): Pre-calculated
            merging statistics object
        cc_half_method (str): The method for calculating CC½. Either "half_dataset" or
            "sigma_tau" (See Assmann et al., J. Appl. Cryst. (2016). 49, 1021–1028).
        model: The function to fit to the selected metric. Must be callable, taking as
            input x (d_star_sq) and y (the metric to be fitted) values, returning the
            fitted y(x) values. Default is `tanh_fit`.
        limit (float): The resolution limit criterion.

    Returns: The estimated resolution limit in units of Å^-1
    """
    # Restrict the fit to bins where CC½ is statistically significant
    # (sel may be None when no significance information is available).
    sel = _get_cc_half_significance(merging_stats, cc_half_method)
    metric = "cc_one_half_sigma_tau" if cc_half_method == "sigma_tau" else "cc_one_half"
    result = resolution_fit_from_merging_stats(
        merging_stats, metric, model, limit, sel=sel
    )
    # Attach the per-bin critical values (used for plotting) when present;
    # the truthiness test relies on flex-array length (empty/None -> skip).
    critical_values = _get_cc_half_critical_values(merging_stats, cc_half_method)
    if critical_values:
        result = result._replace(critical_values=critical_values.select(sel))
    return result
def interpolate_value(x, y, t):
    """Return the x at which the piecewise-linear function y(x) equals t.

    ``x`` and ``y`` are equal-length sequences of floats describing line
    segments between consecutive points; the first segment whose y-range
    contains ``t`` is interpolated.

    Raises:
        RuntimeError: If ``t`` lies outside [min(y), max(y)].
    """
    if t > max(y) or t < min(y):
        raise RuntimeError(f"t outside of [{min(y):f}, {max(y):f}]")
    for j in range(1, len(x)):
        x0 = x[j - 1]
        y0 = y[j - 1]
        x1 = x[j]
        y1 = y[j]
        # <= 0 (rather than < 0) so that a sample exactly equal to t is
        # matched; previously such a point fell through every segment and
        # the function silently returned None even though t was in range.
        if (y0 - t) * (y1 - t) <= 0:
            if y1 == y0:
                # Degenerate flat segment with y0 == y1 == t.
                return x0
            return x0 + (t - y0) * (x1 - x0) / (y1 - y0)
def miller_array_from_mtz(unmerged_mtz, anomalous=False, labels=None):
    """Extract an unmerged intensity array (and batch array) from an MTZ file.

    Args:
        unmerged_mtz (str): Path to the unmerged MTZ file.
        anomalous (bool): Keep anomalous pairs separate.
        labels (list): Optional column labels; if several intensity arrays are
            present, ``labels[0]`` selects which one to use.

    Returns:
        tuple: ``(i_obs, batches)`` miller arrays; ``batches`` is None if no
        BATCH column is present.

    Raises:
        Sorry: If no intensity array is found, or several are present and the
            given labels do not select one.
    """
    mtz_object = iotbx.mtz.object(file_name=unmerged_mtz)
    miller_arrays = mtz_object.as_miller_arrays(
        merge_equivalents=False, anomalous=anomalous
    )
    i_obs = None
    batches = None
    all_i_obs = []
    for array in miller_arrays:
        # Bug fix: use a distinct name here; this previously clobbered the
        # ``labels`` argument, so the label-based selection below compared
        # against the last array's label string (and indexed its first
        # character) instead of the caller-supplied label.
        labels_string = array.info().label_string()
        if array.is_xray_intensity_array():
            all_i_obs.append(array)
        if labels_string == "BATCH":
            assert batches is None
            batches = array
    if i_obs is None:
        if len(all_i_obs) == 0:
            raise Sorry("No intensities found")
        elif len(all_i_obs) > 1:
            if labels is not None:
                lab_tab = label_table(all_i_obs)
                i_obs = lab_tab.select_array(
                    label=labels[0], command_line_switch="labels"
                )
            if i_obs is None:
                raise Sorry(
                    "Multiple intensity arrays - please specify one:\n%s"
                    % "\n".join(
                        [" labels=%s" % a.info().label_string() for a in all_i_obs]
                    )
                )
        else:
            i_obs = all_i_obs[0]
    # need original miller indices otherwise we don't get correct anomalous
    # merging statistics
    if "M_ISYM" in mtz_object.column_labels():
        indices = mtz_object.extract_original_index_miller_indices()
        i_obs = i_obs.customized_copy(indices=indices, info=i_obs.info())
    return i_obs, batches
phil_str = """
rmerge = None
.type = float(value_min=0)
.help = "Maximum value of Rmerge in the outer resolution shell"
.short_caption = "Outer shell Rmerge"
.expert_level = 1
completeness = None
.type = float(value_min=0)
.help = "Minimum completeness in the outer resolution shell"
.short_caption = "Outer shell completeness"
.expert_level = 1
cc_ref = 0.1
.type = float(value_min=0)
.help = "Minimum value of CC vs reference data set in the outer resolution shell"
.short_caption = "Outer shell CCref"
.expert_level = 1
cc_half = 0.3
.type = float(value_min=0)
.help = "Minimum value of CC½ in the outer resolution shell"
.short_caption = "Outer shell CC½"
.expert_level = 1
cc_half_method = *half_dataset sigma_tau
.type = choice
.short_caption = "CC½ method"
cc_half_significance_level = 0.1
.type = float(value_min=0, value_max=1)
.expert_level = 1
.short_caption = "CC½ significance level"
cc_half_fit = polynomial *tanh
.type = choice
.expert_level = 1
.short_caption = "CC½ fit"
isigma = None
.type = float(value_min=0)
.help = "Minimum value of the unmerged <I/sigI> in the outer resolution shell"
.short_caption = "Outer shell unmerged <I/sigI>"
.expert_level = 1
misigma = None
.type = float(value_min=0)
.help = "Minimum value of the merged <I/sigI> in the outer resolution shell"
.short_caption = "Outer shell merged <I/sigI>"
.expert_level = 1
i_mean_over_sigma_mean = None
.type = float(value_min=0)
.help = "Minimum value of the unmerged <I>/<sigI> in the outer resolution shell"
.short_caption = "Outer shell unmerged <I>/<sigI>"
.expert_level = 2
nbins = 100
.type = int
.help = "Maximum number of resolution bins to use for estimation of resolution limit."
.short_caption = "Number of resolution bins."
.expert_level = 1
reflections_per_bin = 10
.type = int
.help = "Minimum number of reflections per bin."
.short_caption = "Minimum number of reflections per bin"
binning_method = *counting_sorted volume
.type = choice
.help = "Use equal-volume bins or bins with approximately equal numbers of reflections per bin."
.short_caption = "Equal-volume or equal #ref binning."
.expert_level = 1
anomalous = False
.type = bool
.help = "Keep anomalous pairs separate in merging statistics"
.short_caption = "Anomalous"
.expert_level = 1
labels = None
.type = strings
.short_caption = "Labels"
space_group = None
.type = space_group
.expert_level = 1
.short_caption = "Space group"
reference = None
.type = path
.short_caption = "Reference"
emax = 4
.type = float(value_min = 0)
.help = "Reject reflections with normalised intensities E^2 > emax^2"
.short_caption = "Maximum normalised intensity"
"""
phil_defaults = iotbx.phil.parse(
"""
resolution {
%s
batch_range = None
.type = ints(size=2, value_min=0)
}
"""
% phil_str
)
def plot_result(metric, result):
    """Build a plotly-style dict visualising a ResolutionResult.

    CC½ uses the dedicated plot from dials.report.plots; every other metric
    is drawn as observed values, fitted curve, and (when estimated) a dotted
    vertical d_min marker, against 1/d^2 with d-spacing tick labels.
    """
    if metric == metrics.CC_HALF:
        return plots.cc_half_plot(
            result.d_star_sq,
            result.y_obs,
            cc_half_critical_values=result.critical_values,
            cc_half_fit=result.y_fit,
            d_min=result.d_min,
        )
    else:
        # Human-readable axis/title labels for the remaining metrics.
        d = {
            metrics.MISIGMA: "Merged <I/σ(I)>",
            metrics.ISIGMA: "Unmerged <I/σ(I)>",
            metrics.I_MEAN_OVER_SIGMA_MEAN: "<I>/<σ(I)>",
            metrics.RMERGE: "R<sub>merge</sub> ",
            metrics.COMPLETENESS: "Completeness",
        }
        d_star_sq_tickvals, d_star_sq_ticktext = plots.d_star_sq_to_d_ticks(
            result.d_star_sq, 5
        )
        return {
            "data": [
                {
                    "x": list(result.d_star_sq),  # d_star_sq
                    "y": list(result.y_obs),
                    "type": "scatter",
                    "name": "y_obs",
                },
                # Fitted curve; empty dict when no fit is available.
                (
                    {
                        "x": list(result.d_star_sq),
                        "y": list(result.y_fit),
                        "type": "scatter",
                        "name": "y_fit",
                        "line": {"color": "rgb(47, 79, 79)"},
                    }
                    if result.y_fit
                    else {}
                ),
                # Vertical marker at the estimated limit; empty when no
                # limit was estimated.
                (
                    {
                        "x": [uctbx.d_as_d_star_sq(result.d_min)] * 2,
                        "y": [
                            0,
                            max(
                                1,
                                flex.max(result.y_obs),
                                flex.max(result.y_fit) if result.y_fit else 0,
                            ),
                        ],
                        "type": "scatter",
                        "name": f"d_min = {result.d_min:.2f} Å",
                        "mode": "lines",
                        "line": {"color": "rgb(169, 169, 169)", "dash": "dot"},
                    }
                    if result.d_min
                    else {}
                ),
            ],
            "layout": {
                "title": f"{d.get(metric)} vs. resolution",
                "xaxis": {
                    "title": "Resolution (Å)",
                    "tickvals": d_star_sq_tickvals,
                    "ticktext": d_star_sq_ticktext,
                },
                "yaxis": {"title": d.get(metric), "rangemode": "tozero"},
            },
        }
class ResolutionResult(typing.NamedTuple):
    """Result of a resolution-limit fit.

    ``d_star_sq``/``y_obs`` are the fitted data points, ``y_fit`` the model
    values, ``d_min`` the estimated limit in Å (None when the fit never
    crossed the cut-off), and ``critical_values`` the per-bin CC½
    significance thresholds (populated for CC½ fits only).
    """

    d_star_sq: flex.double
    y_obs: flex.double
    y_fit: flex.double
    d_min: float
    critical_values: flex.double = None
class Resolutionizer:
"""A class to calculate things from merging reflections."""
def __init__(self, i_obs, params, batches=None, reference=None):
self._params = params
self._reference = reference
if self._reference is not None:
self._reference = self._reference.merge_equivalents(
use_internal_variance=False
).array()
i_obs = i_obs.customized_copy(
anomalous_flag=params.anomalous, info=i_obs.info()
)
if self._params.batch_range is not None and batches is not None:
batch_min, batch_max = self._params.batch_range
assert batches is not None
sel = (batches.data() >= batch_min) & (batches.data() <= batch_max)
i_obs = i_obs.select(sel).set_info(i_obs.info())
if self._params.space_group is not None:
i_obs = i_obs.customized_copy(
space_group_info=self._params.space_group, info=i_obs.info()
)
if self._params.emax:
normalised = quasi_normalisation(i_obs)
e2_cutoff = self._params.emax**2
sel = normalised.data() < e2_cutoff
logger.info(
f"Removing {sel.count(False)} Wilson outliers with E^2 >= {e2_cutoff}"
)
i_obs = i_obs.select(sel)
self._intensities = i_obs
self._merging_statistics = iotbx.merging_statistics.dataset_statistics(
i_obs=i_obs,
n_bins=self._params.nbins,
reflections_per_bin=self._params.reflections_per_bin,
cc_one_half_significance_level=self._params.cc_half_significance_level,
cc_one_half_method=self._params.cc_half_method,
binning_method=self._params.binning_method,
anomalous=params.anomalous,
use_internal_variance=False,
eliminate_sys_absent=False,
assert_is_not_unique_set_under_symmetry=False,
)
@classmethod
def from_unmerged_mtz(cls, scaled_unmerged, params):
"""Construct the resolutionizer from an mtz file."""
i_obs, batches = miller_array_from_mtz(
scaled_unmerged, anomalous=params.anomalous, labels=params.labels
)
if params.reference is not None:
reference, _ = miller_array_from_mtz(
params.reference, anomalous=params.anomalous, labels=params.labels
)
else:
reference = None
return cls(i_obs, params, batches=batches, reference=reference)
@classmethod
def from_reflections_and_experiments(cls, reflection_tables, experiments, params):
"""Construct the resolutionizer from native dials datatypes."""
# add some assertions about data
# do batch assignment (same functions as in dials.export)
offsets = calculate_batch_offsets(experiments)
reflection_tables = assign_batches_to_reflections(reflection_tables, offsets)
batches = flex.int()
intensities = flex.double()
indices = flex.miller_index()
variances = flex.double()
for table in reflection_tables:
if "intensity.scale.value" in table:
table = filter_reflection_table(
table, ["scale"], partiality_threshold=0.4
)
intensities.extend(table["intensity.scale.value"])
variances.extend(table["intensity.scale.variance"])
else:
table = filter_reflection_table(
table, ["profile"], partiality_threshold=0.4
)
intensities.extend(table["intensity.prf.value"])
variances.extend(table["intensity.prf.variance"])
indices.extend(table["miller_index"])
batches.extend(table["batch"])
crystal_symmetry = miller.crystal.symmetry(
unit_cell=determine_best_unit_cell(experiments),
space_group=experiments[0].crystal.get_space_group(),
assert_is_compatible_unit_cell=False,
)
miller_set = miller.set(crystal_symmetry, indices, anomalous_flag=False)
i_obs = miller.array(miller_set, data=intensities, sigmas=flex.sqrt(variances))
i_obs.set_observation_type_xray_intensity()
i_obs.set_info(miller.array_info(source="DIALS", source_type="refl"))
ms = i_obs.customized_copy()
batch_array = miller.array(ms, data=batches)
if params.reference is not None:
reference, _ = miller_array_from_mtz(
params.reference, anomalous=params.anomalous, labels=params.labels
)
else:
reference = None
return cls(i_obs, params, batches=batch_array, reference=reference)
def resolution(self, metric, limit=None):
if metric == metrics.CC_HALF:
return resolution_cc_half(
self._merging_statistics,
limit,
cc_half_method=self._params.cc_half_method,
model=tanh_fit
if self._params.cc_half_fit == "tanh"
else polynomial_fit,
)
elif metric == metrics.CC_REF:
return self._resolution_cc_ref(limit=self._params.cc_ref)
else:
model = {
metrics.RMERGE: log_inv_fit,
metrics.COMPLETENESS: polynomial_fit,
metrics.ISIGMA: log_fit,
metrics.MISIGMA: log_fit,
metrics.I_MEAN_OVER_SIGMA_MEAN: log_fit,
}[metric]
return resolution_fit_from_merging_stats(
self._merging_statistics, metric.value, model, limit
)
def resolution_auto(self):
"""Compute resolution limits based on the current self._params set."""
metric_to_output = {
metrics.ISIGMA: "I/sig",
metrics.MISIGMA: "Mn(I/sig)",
metrics.I_MEAN_OVER_SIGMA_MEAN: "Mn(I)/Mn(sig)",
}
plot_d = {}
for metric in metrics:
name = metric.name.lower()
limit = getattr(self._params, name)
if metric == metrics.CC_REF and not self._reference:
limit = None
if limit:
try:
result = self.resolution(metric, limit=limit)
except RuntimeError as e:
logger.info(f"Resolution fit against {name} failed: {e}")
continue
pretty_name = metric_to_output.get(metric, name)
if result.d_min:
logger.info(
f"Resolution {pretty_name}:{result.d_min:{18 - len(pretty_name)}.2f}"
)
plot_d[name] = plot_result(metric, result)
return plot_d
def _resolution_cc_ref(self, limit=None):
    """Compute a resolution limit where cc_ref < 0.5 (limit if
    set) or the full extent of the data."""
    if limit is None:
        limit = self._params.cc_ref
    # Merge symmetry equivalents before correlating against the reference.
    intensities = self._intensities.merge_equivalents(
        use_internal_variance=False
    ).array()
    # Correlation with the reference computed per merging-statistics bin.
    cc_s = flex.double()
    for b in self._merging_statistics.bins:
        cc = intensities.resolution_filter(
            d_min=b.d_min, d_max=b.d_max
        ).correlation(
            self._reference.resolution_filter(d_min=b.d_min, d_max=b.d_max),
            assert_is_similar_symmetry=False,
        )
        cc_s.append(cc.coefficient())
    # Reverse so values run from low to high resolution, matching d_star_sq.
    cc_s = cc_s.reversed()

    fit = tanh_fit if self._params.cc_half_fit == "tanh" else polynomial_fit
    d_star_sq = flex.double(
        1 / b.d_min**2 for b in self._merging_statistics.bins
    ).reversed()
    return resolution_fit(d_star_sq, cc_s, fit, limit)
| bsd-3-clause | ff984efb1ae36e06979baa491456eafe | 35.198856 | 100 | 0.581156 | 3.57387 | false | false | false | false |
dials/dials | src/dials/algorithms/symmetry/origin.py | 1 | 3885 | """
Analysis of the origin of the diffraction pattern based on indexed and
measured intensities.
"""
from __future__ import annotations
def cctbx_crystal_from_dials(crystal):
    """Convert a dxtbx crystal model into a cctbx crystal symmetry object."""
    from cctbx.crystal import symmetry as crystal_symmetry

    return crystal_symmetry(
        crystal.get_unit_cell(),
        crystal.get_space_group().type().lookup_symbol(),
    )
def cctbx_i_over_sigi_ms_from_dials_data(reflections, cctbx_crystal_symmetry):
    """Build a cctbx miller array of summation intensities and sigmas from a
    DIALS reflection table, dropping reflections with non-positive variance."""
    from cctbx.miller import set as miller_set

    from dials.array_family import flex

    # Reflections with zero/negative variance would give undefined sigmas.
    refl = reflections.select(reflections["intensity.sum.variance"] > 0)
    return miller_set(cctbx_crystal_symmetry, refl["miller_index"]).array(
        data=refl["intensity.sum.value"],
        sigmas=flex.sqrt(refl["intensity.sum.variance"]),
    )
def offset_miller_indices(indices, offset):
    """Return ``indices`` shifted by the (h, k, l) integer ``offset``."""
    from dials.array_family import flex

    # Add in floating point then round each component back to an integer.
    return flex.miller_index(
        *[mi.iround() for mi in (indices.as_vec3_double() + offset).parts()]
    )
def compute_miller_set_correlation(
    ms_a, ms_b, map_to_asu=False, merge_equivalents=False
):
    """Compute correlation between two miller arrays.

    Args:
        ms_a (cctbx.miller.array): Input miller.array `a`.
        ms_b (cctbx.miller.array): Input miller.array `b`.
        map_to_asu (bool): If ``True``, then map miller indices to the asymmetric
            unit before matching miller indices between input miller arrays.
        merge_equivalents (bool): If ``True`` then merge symmetry equivalent
            reflections before matching miller indices between input miller arrays.

    Returns:
        tuple[int, float]: A tuple of the number of observations and the correlation
        coefficient.
    """
    if map_to_asu:
        # not obvious that this will help for the reasons stated below
        ms_a = ms_a.map_to_asu()
        ms_b = ms_b.map_to_asu()

    if merge_equivalents:
        # only want to do this if we have essentially "scaled" the data - if not
        # then we will get a smooth Wilson plot and about CC=1 (due to general
        # fall off with resolution)
        ms_a = ms_a.merge_equivalents().array()
        ms_b = ms_b.merge_equivalents().array()

    # Restrict both arrays to their common reflections before correlating.
    common_a, common_b = ms_a.common_sets(ms_b)
    return common_a.size(), common_a.correlation(common_b).coefficient()
def get_hkl_offset_correlation_coefficients(
    dials_reflections,
    dials_crystal,
    map_to_asu=False,
    grid_h=0,
    grid_k=0,
    grid_l=0,
    reference=None,
):
    """Correlate the data with itself (inverted) or a reference data set for
    every miller-index offset on a (2*grid_h+1, 2*grid_k+1, 2*grid_l+1) grid.

    :returns: (offsets, correlation coefficients, number of reflections),
        one entry per grid point, as flex arrays.
    """
    # N.B. deliberately ignoring d_min, d_max as these are inconsistent with
    # changing the miller indices

    from cctbx import sgtbx
    from cctbx.miller import set as miller_set

    from dials.array_family import flex

    cs = cctbx_crystal_from_dials(dials_crystal)
    ms = cctbx_i_over_sigi_ms_from_dials_data(dials_reflections, cs)
    if reference:
        reference_ms = cctbx_i_over_sigi_ms_from_dials_data(reference, cs)
    else:
        reference_ms = None

    ccs = flex.double()
    offsets = flex.vec3_int()
    nref = flex.size_t()

    # With a reference, compare directly; without one, compare against the
    # inverted (-x,-y,-z) copy of the data itself.
    if reference:
        cb_op = sgtbx.change_of_basis_op("x,y,z")
    else:
        cb_op = sgtbx.change_of_basis_op("-x,-y,-z")

    hkl_test = [
        (h, k, l)
        for h in range(-grid_h, grid_h + 1)
        for k in range(-grid_k, grid_k + 1)
        for l in range(-grid_l, grid_l + 1)
    ]

    for hkl in hkl_test:
        indices = offset_miller_indices(ms.indices(), hkl)
        reindexed_indices = cb_op.apply(indices)
        rms = miller_set(cs, reindexed_indices).array(ms.data())
        if reference_ms:
            _ms = reference_ms
        else:
            _ms = miller_set(cs, indices).array(ms.data())
        n, cc = compute_miller_set_correlation(_ms, rms, map_to_asu=map_to_asu)
        ccs.append(cc)
        offsets.append(hkl)
        nref.append(n)

    return offsets, ccs, nref
| bsd-3-clause | d4b0d982fdf6cfe0ab0d0de5baa7101d | 29.590551 | 82 | 0.645302 | 3.176615 | false | false | false | false |
dials/dials | src/dials/algorithms/background/modeller.py | 1 | 11518 | from __future__ import annotations
import logging
from dials_algorithms_background_modeller_ext import (
BackgroundStatistics,
MultiPanelBackgroundStatistics,
)
__all__ = [
"BackgroundModeller",
"BackgroundModellerExecutor",
"BackgroundModellerResult",
"BackgroundStatistics",
"FinalizeModel",
"MultiPanelBackgroundStatistics",
]
logger = logging.getLogger(__name__)
class FinalizeModel:
    """
    A class to finalize the background model
    """

    def __init__(self, experiments, filter_type="median", kernel_size=10, niter=100):
        """
        Initialize the finalizer

        :param experiments: The experiment list (exactly one experiment with a
            single-panel detector)
        :param filter_type: Noise filter to apply: "median" or "mean"
        :param kernel_size: The median filter kernel size
        :param niter: The number of iterations for filling holes
        """
        from dials.algorithms.background.gmodel import PolarTransform

        # Set some parameters
        self.filter_type = filter_type
        self.kernel_size = kernel_size
        self.niter = niter

        # Check the input
        assert len(experiments) == 1
        experiment = experiments[0]
        assert len(experiment.detector) == 1

        # Save the experiment
        self.experiment = experiment

        # Create the transform object
        self.transform = PolarTransform(
            experiment.beam, experiment.detector[0], experiment.goniometer
        )

    @staticmethod
    def _log_stats(title, data, mask):
        """Log min/max/mean of the masked pixels under the given title.

        Factored out of finalize(), where the same stanza was repeated six
        times; the rendered log output is unchanged.
        """
        from dials.array_family import flex

        sub_data = data.as_1d().select(mask.as_1d())
        logger.info("%s:", title)
        logger.info(" min: %d", int(flex.min(sub_data)))
        logger.info(" max: %d", int(flex.max(sub_data)))
        logger.info(" mean: %d", int(flex.mean(sub_data)))
        logger.info("")

    def finalize(self, data, mask):
        """
        Finalize the model

        :param data: The data array
        :param mask: The mask array
        :returns: The finished background model image, with the static
            detector mask applied (masked pixels zeroed)
        """
        from dials.algorithms.image.fill_holes import diffusion_fill, simple_fill
        from dials.algorithms.image.filter import mean_filter, median_filter
        from dials.array_family import flex

        # Print some image properties
        self._log_stats("Raw image statistics", data, mask)

        # Transform to polar
        logger.info("Transforming image data to polar grid")
        result = self.transform.to_polar(data, mask)
        data = result.data()
        mask = result.mask()
        self._log_stats("Polar image statistics", data, mask)

        # Filter the image to remove noise
        if self.kernel_size > 0:
            if self.filter_type == "median":
                logger.info("Applying median filter")
                data = median_filter(data, mask, (self.kernel_size, 0), periodic=True)
                self._log_stats("Median polar image statistics", data, mask)
            elif self.filter_type == "mean":
                logger.info("Applying mean filter")
                # mean_filter wants the mask as an int array of the same shape.
                mask_as_int = mask.as_1d().as_int()
                mask_as_int.reshape(mask.accessor())
                data = mean_filter(data, mask_as_int, (self.kernel_size, 0), 1)
                self._log_stats("Mean polar image statistics", data, mask)
            else:
                raise RuntimeError(f"Unknown filter_type: {self.filter_type}")

        # Fill any remaining holes
        logger.info("Filling holes")
        data = simple_fill(data, mask)
        data = diffusion_fill(data, mask, self.niter)
        mask = flex.bool(data.accessor(), True)
        self._log_stats("Filled polar image statistics", data, mask)

        # Transform back
        logger.info("Transforming image data from polar grid")
        result = self.transform.from_polar(data, mask)
        data = result.data()
        mask = result.mask()
        self._log_stats("Final image statistics", data, mask)

        # Fill in any discontinuities
        mask = ~self.transform.discontinuity()[:-1, :-1]
        data = diffusion_fill(data, mask, self.niter)

        # Get and apply the mask
        mask = self.experiment.imageset.get_mask(0)[0]
        mask = mask.as_1d().as_int().as_double()
        mask.reshape(data.accessor())
        data *= mask

        # Return the result
        return data
class BackgroundModellerResult:
    """
    A class to contain the modelling result
    """

    # Attribute names in the order accepted by the constructor.
    _FIELDS = (
        "mean",
        "variance",
        "dispersion",
        "mask",
        "min_image",
        "max_image",
        "model",
        "polar_model",
    )

    def __init__(
        self,
        mean=None,
        variance=None,
        dispersion=None,
        mask=None,
        min_image=None,
        max_image=None,
        model=None,
        polar_model=None,
    ):
        """
        Init the result: store each supplied statistic as an attribute of
        the same name (all default to None).
        """
        values = locals()
        for field in self._FIELDS:
            setattr(self, field, values[field])
class BackgroundModellerExecutor:
    """Executor plugged into the image processor: computes per-block
    background statistics, accumulates them across jobs, and finalizes
    them into one model per panel."""

    def __init__(self, experiments, params):
        assert len(experiments) == 1
        # Clamp min_images to the number of images actually in the scan.
        self.min_images = params.modeller.min_images
        if self.min_images > len(experiments[0].imageset):
            self.min_images = len(experiments[0].imageset)
        # Which statistic image to turn into the model: "min" or "mean".
        self.image_type = params.modeller.image_type
        self.finalizer = FinalizeModel(
            experiments=experiments,
            filter_type=params.modeller.filter_type,
            kernel_size=params.modeller.kernel_size,
            niter=params.modeller.niter,
        )
        self.result = None

    def process(self, image_volume, experiments, reflections):
        """Compute background statistics for one block of images."""
        from dials.algorithms.integration.processor import job

        # Write some output
        logger.info(
            " Background modelling; job: %d; frames: %d -> %d; # Reflections: %d",
            job.index,
            image_volume.frame0(),
            image_volume.frame1(),
            len(reflections),
        )

        # Compute the shoebox mask
        reflections.compute_mask(experiments=experiments, image_volume=image_volume)

        # Compute the sum, sum^2 and the number of contributing pixels
        return MultiPanelBackgroundStatistics(image_volume)

    def accumulate(self, index, data):
        # Statistics objects support in-place addition across jobs.
        if self.result is None:
            self.result = data
        else:
            self.result += data

    def finalize_model(self):
        """Turn the accumulated statistics into a result for each panel."""
        logger.info("")
        logger.info("=" * 80)
        logger.info("Finalizing model")
        logger.info("")

        result = []
        for i in range(len(self.result)):
            # Get the statistics
            stats = self.result.get(i)
            mean = stats.mean(self.min_images)
            variance = stats.variance(self.min_images)
            dispersion = stats.dispersion(self.min_images)
            mask = stats.mask(self.min_images)
            min_image = stats.min()
            max_image = stats.max()

            # Create the model
            if self.image_type == "min":
                model = self.finalizer.finalize(min_image, mask)
            elif self.image_type == "mean":
                model = self.finalizer.finalize(mean, mask)
            else:
                raise RuntimeError(f"Unknown image_type: {self.image_type}")

            # Add to the list
            result.append(
                BackgroundModellerResult(
                    mean=mean,
                    variance=variance,
                    dispersion=dispersion,
                    mask=mask,
                    min_image=min_image,
                    max_image=max_image,
                    model=model,
                )
            )

        return result
class BackgroundModeller:
    """
    A class to help with background modelling
    """

    def __init__(self, experiments, reflections, params):
        """
        Initialize the modeller

        :param experiments: The experiment list
        :param reflections: The reflections to process
        :param params: The parameters to use
        """
        # Check all reflections have same imageset and get it
        imageset = experiments[0].imageset
        for expr in experiments:
            assert expr.imageset == imageset, "All experiments must share and imageset"

        # Save some stuff
        self.experiments = experiments
        self.reflections = reflections
        self.params = params
        self.model = None

    def compute(self):
        """
        Integrate the data

        :returns: the list of per-panel BackgroundModellerResult objects
        """
        from dials.algorithms.integration.image_integrator import ProcessorImage
        from dials.util.command_line import heading

        # Init the report
        self.profile_model_report = None
        self.integration_report = None

        # Create summary format
        fmt = (
            " Processing the following experiments:\n"
            "\n"
            " Experiments: %d\n"
            " Beams: %d\n"
            " Detectors: %d\n"
            " Goniometers: %d\n"
            " Scans: %d\n"
            " Crystals: %d\n"
            " Imagesets: %d\n"
        )

        # Print the summary
        logger.info(
            fmt,
            len(self.experiments),
            len(self.experiments.beams()),
            len(self.experiments.detectors()),
            len(self.experiments.goniometers()),
            len(self.experiments.scans()),
            len(self.experiments.crystals()),
            len(self.experiments.imagesets()),
        )

        # Print a heading
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Modelling background"))
        logger.info("")

        # Expand n_sigma so the shoebox masks cover a wider region
        for expt in self.experiments:
            expt.profile._n_sigma += 2

        # Compute some reflection properties
        self.reflections.compute_zeta_multi(self.experiments)
        self.reflections.compute_d(self.experiments)
        self.reflections.compute_bbox(self.experiments)

        # Construct the image integrator processor
        processor = ProcessorImage(self.experiments, self.reflections, self.params)
        processor.executor = BackgroundModellerExecutor(self.experiments, self.params)

        # Do the processing
        _, time_info = processor.process()

        # Compute the model
        self.model = processor.executor.finalize_model()

        # Print the time info
        logger.info(str(time_info))
        logger.info("")

        # Return the reflections
        return self.model
| bsd-3-clause | 709b2ad9eace00e1bc936f02907ad899 | 31.908571 | 87 | 0.566765 | 4.014639 | false | false | false | false |
dials/dials | src/dials/util/normalisation.py | 1 | 1483 | from __future__ import annotations
from cctbx import uctbx
from scitbx.array_family import flex
def quasi_normalisation(intensities):
    """Quasi-normalisation of the input intensities.

    Args:
        intensities (cctbx.miller.array): The intensities to be normalised.

    Returns:
        cctbx.miller.array: The normalised intensities.
    """
    # handle negative reflections to minimise effect on mean I values.
    work = intensities.deep_copy()
    work.data().set_selected(work.data() < 0.0, 0.0)

    # set up binning objects: more shells for larger data sets
    if work.size() > 20000:
        n_refl_shells = 20
    elif work.size() > 15000:
        n_refl_shells = 15
    else:
        n_refl_shells = 10

    d_star_sq = work.d_star_sq().data()
    d_star_sq_max = flex.max(d_star_sq)
    d_star_sq_min = flex.min(d_star_sq)
    # Widen the range fractionally so the extreme reflections fall inside bins.
    span = d_star_sq_max - d_star_sq_min
    d_star_sq_max += span * 1e-6
    d_star_sq_min -= span * 1e-6

    d_star_sq_step = (d_star_sq_max - d_star_sq_min) / n_refl_shells
    # NOTE(review): d_min is derived from d_star_sq_min (and vice versa),
    # which looks inverted but matches the upstream workaround referenced
    # below — confirm before changing.
    work.setup_binner_d_star_sq_step(
        d_min=uctbx.d_star_sq_as_d(d_star_sq_min),  # cctbx/cctbx_project#588
        d_max=uctbx.d_star_sq_as_d(d_star_sq_max),  # cctbx/cctbx_project#588
        d_star_sq_step=d_star_sq_step,
        auto_binning=False,
    )
    normalisations = work.intensity_quasi_normalisations()
    return intensities.customized_copy(
        data=(intensities.data() / normalisations.data()),
        sigmas=(intensities.sigmas() / normalisations.data()),
    )
| bsd-3-clause | 591fc044f57ed28c8dc2b76d55aac9a5 | 32.704545 | 77 | 0.636548 | 2.74122 | false | false | false | false |
douban/dpark | examples/kmeans.py | 1 | 1581 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import sys, os, os.path
from six.moves import map
from six.moves import range
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import random
from dpark import DparkContext
from vector import Vector
dpark = DparkContext()
def parseVector(line):
    """Parse a line of space-separated floats into a Vector."""
    return Vector([float(field) for field in line.strip().split(' ')])
def closestCenter(p, centers):
    """Return the index of the center nearest to point ``p``.

    Distances are squared Euclidean (via ``p.squaredDist``); ties resolve
    to the lowest index, matching the original strict ``<`` comparison.
    ``centers`` must be non-empty.
    """
    # Idiomatic argmin over indices replaces the manual best-so-far loop.
    return min(range(len(centers)), key=lambda i: p.squaredDist(centers[i]))
if __name__ == '__main__':
    D = 4  # dimensionality of each point
    K = 3  # number of clusters
    IT = 10  # maximum number of iterations
    MIN_DIST = 0.01  # convergence threshold on center movement

    centers = [Vector([random.random() for j in range(D)]) for i in range(K)]
    points = dpark.textFile('kmeans_data.txt').map(parseVector).cache()

    for it in range(IT):
        print('iteration', it)
        mappedPoints = points.map(lambda p: (closestCenter(p, centers), (p, 1)))
        # BUG FIX: the original reducer used tuple-parameter unpacking
        # ``lambda (s1, c1), (s2, c2): ...`` which was removed in Python 3
        # (PEP 3113) and is a SyntaxError there; unpack by index instead.
        ncenters = mappedPoints.reduceByKey(
            lambda sc1, sc2: (sc1[0] + sc2[0], sc1[1] + sc2[1])
        ).map(
            lambda id_sum_count: (id_sum_count[0], id_sum_count[1][0] / id_sum_count[1][1])
        ).collectAsMap()

        updated = False
        for i in ncenters:
            # Only move a center when it shifted by more than MIN_DIST.
            if centers[i].dist(ncenters[i]) > MIN_DIST:
                centers[i] = ncenters[i]
                updated = True
        if not updated:
            break
        print(centers)
    print('final', centers)
| bsd-3-clause | 4067864e4f4f76c678f069afc2f4f08c | 26.736842 | 91 | 0.590765 | 3.349576 | false | false | false | false |
dials/dials | src/dials/util/rebin_images.py | 1 | 3341 | from __future__ import annotations
import binascii
import os
import random
from dxtbx.ext import compress
def gz_open(filename, mode):
    """Open ``filename`` as a gzip-compressed file object in ``mode``."""
    from gzip import GzipFile

    return GzipFile(filename, mode)
def split_counts(image, split):
    """Randomly redistribute the counts of ``image`` across ``split`` images.

    Each individual count of every positive pixel is assigned to one of the
    output images uniformly at random; negative pixel values (bad-pixel
    markers) are copied verbatim into every output image.
    """
    from scitbx.array_family import flex

    new_images = [flex.int(flex.grid(image.focus()), 0) for k in range(split)]

    negative = image.as_1d() < 0
    positive = image.as_1d() > 0

    # Replicate the negative (masked) pixels into all output images.
    for new_image in new_images:
        new_image.as_1d().set_selected(negative, image.as_1d())

    # Scatter each count of each positive pixel to a random output image.
    for p in positive.iselection():
        counts = image[p]
        for j in range(counts):
            new_images[random.randint(0, split - 1)][p] += 1

    return new_images
def merge_counts(images):
    """Sum a list of images pixel-wise.

    Pixels that are negative in the first image (bad-pixel markers) are
    restored to the first image's value instead of the accumulated sum.
    """
    from scitbx.array_family import flex

    image = flex.int(flex.grid(images[0].focus()), 0)
    negative = images[0].as_1d() < 0

    for i in images:
        image += i

    # The loop above also summed the marker pixels; overwrite them back.
    image.as_1d().set_selected(negative, images[0].as_1d())

    return image
def read_image(in_image):
    """Read a gzipped CBF image.

    :returns: (pixel_values, cbf_header_bytes) where the header is everything
        before the CBF binary start tag in the decompressed file.
    """
    from dxtbx import load

    assert os.path.exists(in_image)
    # The binary section of a CBF file starts after this 4-byte magic tag.
    start_tag = binascii.unhexlify("0c1a04d5")
    data = gz_open(in_image, "rb").read()
    data_offset = data.find(start_tag)
    cbf_header = data[:data_offset]
    pixel_values = load(in_image).get_raw_data()
    return pixel_values, cbf_header
def write_image(out_image, pixel_values, header, nn=1):
    """Write a gzipped CBF image from pixel data and a template header.

    :param out_image: output path (must not already exist)
    :param pixel_values: pixel data, CBF-compressed via ``compress``
    :param header: original CBF header bytes to rewrite
    :param nn: number of summed source images; scales the Count_cutoff
    """
    assert not os.path.exists(out_image)
    start_tag = binascii.unhexlify("0c1a04d5")
    compressed = compress(pixel_values)

    fixed_header = ""
    header = header.decode()
    for record in header.split("\n")[:-1]:
        if "X-Binary-Size:" in record:
            # The compressed payload size changed, so rewrite this field.
            fixed_header += f"X-Binary-Size: {len(compressed)}\r\n"
        elif "Content-MD5" in record:
            # Drop the stale checksum; we do not recompute it.
            pass
        elif "Count_cutoff" in record:
            # Scale the overload cutoff by the number of summed images.
            cutoff = int(record.split()[2]) * nn
            fixed_header += "# Count_cutoff %d counts\n" % cutoff
        else:
            fixed_header += f"{record}\n"

    tailer = "\r\n--CIF-BINARY-FORMAT-SECTION----\r\n;\r\n"

    # BUG FIX: the file was written without ever being closed, relying on
    # GC; a GzipFile must be closed to flush its compression buffer.
    f = gz_open(out_image, "wb")
    try:
        f.write(fixed_header.encode() + start_tag + compressed + tailer.encode())
    finally:
        f.close()
def _read_images(in_images):
    """Read every input image; return (pixel_data_list, header_list).

    Factored out of main() and main_sum(), which contained identical loops.
    """
    in_image_data = []
    in_image_headers = []
    for i in in_images:
        print(f"Reading {i}")
        pixel, header = read_image(i)
        in_image_data.append(pixel)
        in_image_headers.append(header)
    return in_image_data, in_image_headers


def main(in_images, out_images):
    """Sum the counts of ``in_images`` and randomly redistribute them
    across the same number of output images."""
    assert len(in_images) == len(out_images)
    n = len(in_images)
    for i in in_images:
        assert os.path.exists(i)
    for o in out_images:
        assert not os.path.exists(o)
    in_image_data, in_image_headers = _read_images(in_images)
    sum_image = merge_counts(in_image_data)
    rebin_images = split_counts(sum_image, n)
    for o, pixel, header in zip(out_images, rebin_images, in_image_headers):
        print(f"Writing {o}")
        write_image(o, pixel, header)


def main_sum(in_images, out_image):
    """Sum the counts of ``in_images`` into a single output image,
    scaling the header Count_cutoff by the number of inputs."""
    for i in in_images:
        assert os.path.exists(i)
    assert not os.path.exists(out_image)
    in_image_data, in_image_headers = _read_images(in_images)
    sum_image = merge_counts(in_image_data)
    print(f"Writing {out_image}")
    write_image(out_image, sum_image, in_image_headers[0], nn=len(in_images))
| bsd-3-clause | 9455e05411d582283550c38a3956b876 | 24.7 | 78 | 0.613289 | 3.191022 | false | false | false | false |
dials/dials | tests/command_line/test_ssx_reduction.py | 1 | 2808 | # test running data reduction programs on ssx data
from __future__ import annotations
import procrunner
def test_ssx_reduction(dials_data, tmp_path):
    """
    Check that dials.cosym, dials.scale, dials.export and dials.merge run
    successfully on ssx data.

    Also test a few smaller analysis programs.
    """
    ssx = dials_data("cunir_serial_processed", pathlib=True)
    ssx_data = dials_data("cunir_serial", pathlib=True)
    refls = ssx / "integrated.refl"
    expts = ssx / "integrated.expt"

    # cosym: symmetry determination / reindexing
    result = procrunner.run(
        ["dials.cosym", expts, refls],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    cosym_expts = tmp_path / "symmetrized.expt"
    cosym_refls = tmp_path / "symmetrized.refl"
    assert cosym_expts.is_file()
    assert cosym_refls.is_file()
    assert (tmp_path / "dials.cosym.html").is_file()

    # scaling without a reference
    result = procrunner.run(
        ["dials.scale", cosym_expts, cosym_refls],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    scale_expts = tmp_path / "scaled.expt"
    scale_refls = tmp_path / "scaled.refl"
    assert scale_expts.is_file()
    assert scale_refls.is_file()
    assert (tmp_path / "dials.scale.html").is_file()

    # run scaling with reference model / cif
    for reference in [
        ssx_data / "2BW4.pdb",
        ssx_data / "2bw4.cif",
        ssx_data / "2bw4-sf.cif",
    ]:
        result = procrunner.run(
            [
                "dials.scale",
                cosym_expts,
                cosym_refls,
                f"reference={reference}",
                "output.experiments=scaled_ref.expt",
                "output.reflections=scaled_ref.refl",
            ],
            working_directory=tmp_path,
        )
        assert not result.returncode and not result.stderr

    # export to MTZ
    result = procrunner.run(
        ["dials.export", scale_expts, scale_refls],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    assert (tmp_path / "scaled.mtz").is_file()

    # merge
    result = procrunner.run(
        ["dials.merge", scale_expts, scale_refls],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    assert (tmp_path / "merged.mtz").is_file()

    # smaller analysis programs on the scaled data
    result = procrunner.run(
        ["dials.damage_analysis", scale_expts, scale_refls],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    assert (tmp_path / "dials.damage_analysis.html").is_file()

    result = procrunner.run(
        ["dials.compute_delta_cchalf", scale_expts, scale_refls],
        working_directory=tmp_path,
    )
    assert not result.returncode and not result.stderr
    assert (tmp_path / "compute_delta_cchalf.html").is_file()
| bsd-3-clause | 053a0b5d963455746a4a6e33460b2e07 | 32.035294 | 73 | 0.624288 | 3.395405 | false | false | false | false |
douban/dpark | dpark/shuffle.py | 1 | 26136 | from __future__ import absolute_import
from __future__ import print_function
import os
import os.path
import random
import six
from six.moves import urllib, queue, range, zip, reduce, cPickle as pickle
import marshal
import struct
import time
import heapq
import itertools
from operator import itemgetter
from itertools import islice
from functools import wraps
try:
import cStringIO as StringIO
except ImportError:
from six import BytesIO as StringIO
import dpark.conf
from dpark.utils import compress, decompress, spawn, atomic_file
from dpark.utils.memory import ERROR_TASK_OOM
from dpark.utils.log import get_logger
from dpark.env import env
from dpark.tracker import GetValueMessage, SetValueMessage
from dpark.utils.heaponkey import HeapOnKey
from dpark.dependency import AggregatorBase
from dpark.utils.nested_groupby import GroupByNestedIter, cogroup_no_dup
logger = get_logger(__name__)
# readable
# Flag byte encoding the (is_marshal, is_sorted) pair of a batch:
# 'M'/'P' mean sorted, 'm'/'p' unsorted; 'M'/'m' marshal, 'P'/'p' pickle.
F_MAPPING = {
    (True, True): b'M',
    (False, True): b'P',
    (True, False): b'm',
    (False, False): b'p'
}
F_MAPPING_R = {flag: pair for pair, flag in F_MAPPING.items()}


def pack_header(length, is_marshal, is_sorted):
    """Build the 5-byte batch header: flag byte + native-order uint32 length."""
    return F_MAPPING[(is_marshal, is_sorted)] + struct.pack("I", length)


def unpack_header(head):
    """Parse a 5-byte batch header; return (length, is_marshal, is_sorted)."""
    if len(head) != 5:
        raise IOError("fetch bad head length %d" % (len(head),))
    is_marshal, is_sorted = F_MAPPING_R[head[:1]]
    return struct.unpack("I", head[1:5])[0], is_marshal, is_sorted


def write_buf(stream, buf, is_marshal):
    """Compress ``buf`` and write it to ``stream`` as one sorted batch.

    NOTE(review): the header is 5 bytes but the returned size counts only
    4 of them — preserved as-is since callers only use it as an estimate.
    """
    body = compress(buf)
    size = len(body)
    stream.write(pack_header(size, is_marshal, True))
    stream.write(body)
    return size + 4
class AutoBatchedSerializer(object):
    """
    Choose the size of batch automatically based on the size of object
    """

    # Total decompressed bytes loaded across all instances (class-level).
    size_loaded = 0

    def __init__(self, best_size=1 << 17):
        # Target serialized batch size in bytes (default 128 KiB).
        self.best_size = best_size
        self.max_num = 0    # largest batch item count reached
        self.max_size = 0   # largest serialized batch size seen
        # Prefer marshal until it fails once; then stick to pickle.
        self.use_marshal = True
        self.num_batch = 0
        self.file_size = 0  # approximate bytes written so far

    def load_stream(self, stream):
        """Yield items from a stream of sorted batches until EOF."""
        while True:
            head = stream.read(5)
            if not head:
                return
            length, is_marshal, is_sorted = unpack_header(head)
            # Only sorted batches are valid in this format.
            assert (is_sorted)
            buf = stream.read(length)
            if len(buf) < length:
                raise IOError("length not match: expected %d, but got %d" % (length, len(buf)))
            buf = decompress(buf)
            AutoBatchedSerializer.size_loaded += len(buf)
            # The flag byte records which serializer wrote the batch.
            if is_marshal:
                vs = marshal.loads(buf)
            else:
                vs = pickle.loads(buf)
            for v in vs:
                yield v

    def dump_stream(self, iterator, stream):
        """Serialize all items of ``iterator`` to ``stream`` in batches."""
        self._dump_stream(iter(iterator), stream)
        logger.debug("max batch num = %d, max batch size = %d", self.max_num, self.max_size)

    def _dump_stream(self, iterator, stream):
        # Start with one item per batch; _dump_batch adapts the count.
        batch_num = 1
        while True:
            vs = list(itertools.islice(iterator, batch_num))
            self.num_batch += 1
            if not vs:
                break
            batch_num = self._dump_batch(stream, vs, batch_num)

    def _dump_batch(self, stream, vs, batch_num):
        """Write one batch; return the adjusted item count for the next one."""
        if self.use_marshal:
            try:
                buf = marshal.dumps(vs)
            except:
                # marshal cannot serialize this type; fall back to pickle
                # permanently for this serializer instance.
                buf = pickle.dumps(vs, -1)
                self.use_marshal = False
        else:
            buf = pickle.dumps(vs, -1)
        mem_size = len(buf)
        self.file_size += write_buf(stream, buf, self.use_marshal)
        # Grow the batch while it serializes below target size; shrink it
        # when it serializes to more than twice the target.
        if mem_size < self.best_size:
            batch_num *= 2
            if batch_num > self.max_num:
                self.max_num = batch_num
        else:
            if mem_size > self.best_size * 2 and batch_num > 1:
                batch_num //= 2
            if mem_size > self.max_size:
                self.max_size = mem_size
        return batch_num
class GroupByAutoBatchedSerializer(AutoBatchedSerializer):
    """Batched serializer for (key, values-list) pairs from a group-by.

    Counts batch size in *values* rather than pairs, and splits the value
    list of an oversized key across several single-key batches.
    """

    def _dump_stream(self, iterator, stream):
        batch_num = 1

        def _batching():
            # NOTE: ``batch_num`` is read late-bound from the enclosing
            # scope, so each yielded batch reflects the latest adjustment
            # made by _dump_batch below.
            batch = []
            num = 0
            for k, vs in iterator:
                n = len(vs)
                if n + num <= batch_num:
                    # This key's values still fit into the current batch.
                    batch.append((k, vs))
                    num += n
                else:
                    # Flush what we have before handling the large key.
                    if batch:
                        yield batch
                        batch = []
                        num = 0
                    if n >= batch_num:
                        # Split a single huge value list into chunks, each
                        # emitted as its own one-key batch.
                        sub_it = iter(vs)
                        while True:
                            sub_vs = list(itertools.islice(sub_it, batch_num))
                            if not sub_vs:
                                break
                            yield [(k, sub_vs)]
                    else:
                        batch.append((k, vs))
                        num = n
            if batch:
                yield batch

        for k_vs in _batching():
            self.num_batch += 1
            batch_num = self._dump_batch(stream, k_vs, batch_num)
def get_serializer(rddconf):
    """Pick the batch serializer appropriate for the shuffle configuration."""
    wants_group = rddconf.iter_group and (rddconf.is_groupby or rddconf.is_cogroup)
    return GroupByAutoBatchedSerializer() if wants_group else AutoBatchedSerializer()
def fetch_with_retry(f):
    """Decorator for RemoteFile generator methods: retry the HTTP fetch on
    failure, resuming after the batches already delivered.

    On each retry the wrapped generator is restarted and ``islice`` skips
    the first ``num_batch_done`` batches, so callers never see duplicates.
    404 responses fail fast by raising FetchFailed so the scheduler can
    re-run the corresponding map task.
    """
    MAX_RETRY = 3
    RETRY_INTERVALS = [1, 10]  # seconds slept before retries 1 and 2

    @wraps(f)
    def _(self):
        self.num_batch_done = 0
        while True:
            try:
                # Skip batches that were delivered before a previous failure.
                for items in islice(f(self), self.num_batch_done, None):
                    self.num_batch_done += 1
                    yield items
                if self.num_retry > 0:
                    logger.info("Fetch retry %d success for url %s, num_batch %d ", self.num_retry, self.url,
                                self.num_batch_done)
                break
            except Exception as e:
                self.num_retry += 1
                msg = "Fetch failed for url %s, tried %d/%d times. Exception: %s. " % (
                    self.url, self.num_retry, MAX_RETRY, e)
                fail_fast = False
                emsg = str(e)
                # Errors whose message contains one of these substrings are
                # considered permanent and not retried.
                if any([emsg.find(s) >= 0 for s in ["404"]]):
                    # "many open file",
                    fail_fast = True
                    msg += "no need to retry."
                if fail_fast or self.num_retry >= MAX_RETRY:
                    logger.warning(msg)
                    from dpark.task import FetchFailed
                    raise FetchFailed(self.uri, self.sid, self.mid, self.rid)
                else:
                    sleep_time = RETRY_INTERVALS[self.num_retry - 1]
                    msg += "sleep %d secs" % (sleep_time,)
                    logger.debug(msg)
                    time.sleep(sleep_time)

    return _
class RemoteFile(object):
    """One map-output file fetched over HTTP for a given reduce partition."""

    # NOTE(review): ``sorted_items`` does ``self.num_open += 1``, which
    # creates an *instance* attribute shadowing this class attribute, so the
    # count is per-instance rather than global — confirm which was intended.
    num_open = 0

    def __init__(self, uri, shuffle_id, map_id, reduce_id):
        self.uri = uri
        self.sid = shuffle_id
        self.mid = map_id
        self.rid = reduce_id
        # Full URL of the shuffle output on the remote worker.
        self.url = ShuffleWorkDir(shuffle_id, map_id, reduce_id).restore(uri)
        logger.debug("fetch %s", self.url)
        # Retry bookkeeping used by the fetch_with_retry decorator.
        self.num_retry = 0
        self.num_batch_done = 0

    def open(self):
        """Open the URL; return (response, expected content length)."""
        f = urllib.request.urlopen(self.url)
        if f.code == 404:
            f.close()
            raise IOError("not found")
        exp_size = int(f.headers['content-length'])
        return f, exp_size

    @fetch_with_retry
    def unsorted_batches(self):
        """Yield lists of items from an unsorted shuffle output file."""
        f = None
        # TEST_RETRY = True
        try:
            f, exp_size = self.open()
            total_size = 0
            while True:
                head = f.read(5)
                if len(head) == 0:
                    break
                length, is_marshal, is_sorted = unpack_header(head)
                # This code path only handles hash-shuffle (unsorted) files.
                assert (not is_sorted)
                total_size += length + 5
                d = f.read(length)
                if length != len(d):
                    raise IOError(
                        "length not match: expected %d, but got %d" %
                        (length, len(d)))
                d = decompress(d)
                if is_marshal:
                    items = marshal.loads(d)
                else:
                    try:
                        items = pickle.loads(d)
                    except:
                        # transient failure seen in practice; retry once
                        time.sleep(1)
                        items = pickle.loads(d)
                yield items
                # if TEST_RETRY and self.num_retry == 0:
                #     raise Exception("test_retry")
            # Verify we consumed exactly the advertised number of bytes.
            if total_size != exp_size:
                raise IOError(
                    "fetch size not match: expected %d, but got %d" %
                    (exp_size, total_size))
            env.task_stats.bytes_fetch += exp_size
        finally:
            if f:
                f.close()

    @fetch_with_retry
    def sorted_items(self):
        """Yield individual items from a sorted shuffle output file."""
        f = None
        try:
            serializer = AutoBatchedSerializer()
            self.num_open += 1
            f, exp_size = self.open()
            for obj in serializer.load_stream(f):
                yield obj
            env.task_stats.bytes_fetch += exp_size
        finally:
            # rely on GC to close if generator not exhausted
            # so Fetcher must not be an attr of RDD
            if f:
                f.close()
            self.num_open -= 1
class ShuffleFetcher(object):
    """Base class for fetching the map outputs of one reduce partition."""

    @classmethod
    def _get_uris(cls, shuffle_id):
        """Return [(map_id, uri)] pairs, shuffled to spread fetch load."""
        uris = MapOutputTracker.get_locs(shuffle_id)
        # enumerate() pairs each uri with its map id directly, replacing the
        # equivalent but clunkier zip(range(len(uris)), uris).
        mapid_uris = list(enumerate(uris))
        random.shuffle(mapid_uris)
        return mapid_uris

    @classmethod
    def get_remote_files(cls, shuffle_id, reduce_id):
        """Create one RemoteFile per map output of this shuffle/reduce."""
        uris = cls._get_uris(shuffle_id)
        return [RemoteFile(uri, shuffle_id, map_id, reduce_id) for map_id, uri in uris]

    def fetch(self, shuffle_id, reduce_id, merge_func):
        raise NotImplementedError

    def stop(self):
        pass
class SimpleShuffleFetcher(ShuffleFetcher):
    """Fetch all map outputs sequentially in the calling thread."""

    def fetch(self, shuffle_id, reduce_id, merge_func):
        """Fetch every map output and feed each batch to ``merge_func``."""
        logger.debug(
            "Fetching outputs for shuffle %d, reduce %d",
            shuffle_id, reduce_id)
        # BUG FIX: get_remote_files() requires the shuffle and reduce ids;
        # calling it with no arguments raised TypeError at runtime.
        for f in self.get_remote_files(shuffle_id, reduce_id):
            for items in f.unsorted_batches():
                merge_func(items)
class ParallelShuffleFetcher(SimpleShuffleFetcher):
    """Fetch map outputs concurrently with a pool of worker threads."""

    def __init__(self, nthreads):
        self.nthreads = nthreads
        self._started = False

    def start(self):
        """Lazily create the queues and worker threads (idempotent)."""
        if self._started:
            return

        self._started = True
        self.requests = queue.Queue()
        self.results = queue.Queue(self.nthreads)
        self.threads = [spawn(self._fetch_thread)
                        for i in range(self.nthreads)]

    def _fetch_thread(self):
        """Worker loop: fetch RemoteFiles from the request queue."""
        from dpark.task import FetchFailed
        while True:
            f = self.requests.get()
            if f is None:  # shutdown sentinel from stop()
                break

            try:
                for items in f.unsorted_batches():
                    if not self._started:
                        break
                    self.results.put((items, f.mid))
                if not self._started:
                    break
                self.results.put(1)  # marker: this file completed
            except FetchFailed as e:
                if not self._started:
                    break
                self.results.put(e)
                break

    def fetch(self, shuffle_id, reduce_id, merge_func):
        """Enqueue all map outputs, then merge results as they arrive."""
        self.start()
        files = self.get_remote_files(shuffle_id, reduce_id)
        for f in files:
            self.requests.put(f)

        t = time.time()
        from dpark.task import FetchFailed
        num_done = 0
        while num_done < len(files):
            r = self.results.get()
            if r == 1:
                num_done += 1
            elif isinstance(r, FetchFailed):
                self.stop()
                raise r
            else:
                items, map_id = r
                merge_func(items, map_id)
        env.task_stats.secs_fetch = time.time() - t

    def stop(self):
        """Drain the queues and try to join the worker threads."""
        if not self._started:
            return

        logger.debug("stop parallel shuffle fetcher ...")
        self._started = False
        while not self.requests.empty():
            self.requests.get_nowait()
        for i in range(self.nthreads):
            self.requests.put(None)

        N = 5
        for _ in range(N):
            # Keep draining results so no worker blocks on a full queue.
            while not self.results.empty():
                self.results.get_nowait()
            for t in self.threads:
                t.join(1)
            # BUG FIX: Thread.isAlive() was removed in Python 3.9;
            # is_alive() has existed since Python 2.6, so this is safe
            # on every interpreter this code supports.
            if all([not t.is_alive() for t in self.threads]):
                return
        else:
            logger.info("FIXME: fail to join fetcher threads")
class SortShuffleFetcher(ShuffleFetcher):
    """Fetch sorted map outputs and hand their iterators to the merger."""

    def get_iters(self, shuffle_id, reduce_id):
        """Return one sorted-item iterator per remote map output file."""
        remote_files = self.get_remote_files(shuffle_id, reduce_id)
        return [remote.sorted_items() for remote in remote_files]

    def fetch(self, shuffle_id, reduce_id, merge_func):
        """Pass the list of sorted iterators to ``merge_func``."""
        iters = self.get_iters(shuffle_id, reduce_id)
        merge_func(iters)
def heap_merged(items_lists, combiner):
    """Merge several key-sorted iterators of (key, value) pairs into one
    sorted stream, combining the values of equal keys.

    :param items_lists: list of iterators, each yielding (key, value)
        pairs in ascending key order
    :param combiner: two-argument function merging values of equal keys;
        called as ``combiner(accumulated, new)``
    """
    heap = []

    def pushback(it, i):
        """Push iterator ``i``'s next entry onto the heap, if any."""
        try:
            k, v = next(it)
            # put i before value, so the heap never compares the values
            # BUG FIX: the original pushed the enclosing-scope loop variable
            # instead of this function's index parameter; it only worked
            # because the two happened to coincide at every call site.
            heapq.heappush(heap, (k, i, v))
        except StopIteration:
            pass

    for i, it in enumerate(items_lists):
        pushback(it, i)

    if not heap:
        return
    last_key, i, last_value = heapq.heappop(heap)
    pushback(items_lists[i], i)

    while heap:
        k, i, v = heapq.heappop(heap)
        if k != last_key:
            yield last_key, last_value
            last_key, last_value = k, v
        else:
            last_value = combiner(last_value, v)
        # Refill from the iterator we just consumed to keep the merge going.
        pushback(items_lists[i], i)

    yield last_key, last_value
class SortedItemsOnDisk(object):
    """Sort (key, value) items in memory and spill them to a temporary
    file, from which they can be re-read in sorted order."""

    def __init__(self, items, rddconf):
        self.path = path = env.workdir.alloc_tmp("sorted_items")
        with atomic_file(path, bufsize=4096) as f:
            if not isinstance(items, list):
                items = list(items)
            items.sort(key=itemgetter(0))
            serializer = get_serializer(rddconf)
            serializer.dump_stream(items, f)
            self.size = f.tell()
            self.num_batch = serializer.num_batch

    def __iter__(self):
        """Stream the sorted items back from disk."""
        serializer = AutoBatchedSerializer()
        with open(self.path, 'rb') as f:
            for obj in serializer.load_stream(f):
                yield obj

    def __del__(self):
        # BUG FIX: this was named ``__dealloc__``, which is a Cython-only
        # hook that CPython never invokes, so the temporary file was never
        # removed.  ``__del__`` runs when the object is garbage collected.
        try:
            if os.path.exists(self.path):
                os.remove(self.path)
        except Exception:
            pass
class Merger(object):
    """Base class for reduce-side mergers; holds the shared configuration."""

    def __init__(self, rddconf, aggregator=None, size=None, api_callsite=None):
        self.rddconf = rddconf
        self.aggregator = aggregator
        self.size = size
        self.api_callsite = api_callsite

    @classmethod
    def get(cls, rddconf, aggregator=None, size=0, api_callsite=None):
        """Select and build the concrete merger class matching *rddconf*."""
        if rddconf.sort_merge:
            # all sort-based mergers keep key order
            if rddconf.is_cogroup:
                chosen = IterCoGroupSortMerger if rddconf.iter_group else SortMerger
            elif rddconf.is_groupby:
                chosen = IterGroupBySortMerger if rddconf.iter_group else SortMerger
            else:
                chosen = SortMerger
        else:
            if rddconf.is_groupby:
                chosen = (OrderedGroupByDiskHashMerger
                          if rddconf.ordered_group else DiskHashMerger)
            elif rddconf.is_cogroup:
                chosen = (OrderedCoGroupDiskHashMerger
                          if rddconf.ordered_group else CoGroupDiskHashMerger)
            else:
                chosen = DiskHashMerger
        logger.debug("%s %s", chosen, rddconf)
        return chosen(rddconf, aggregator, size, api_callsite)
class DiskHashMerger(Merger):
    """Hash-based merger that spills sorted runs to disk under memory pressure.

    Values are combined into ``self.combined`` (a dict); when resident
    memory crosses the soft limit, the dict is dumped to a
    ``SortedItemsOnDisk`` archive ("rotate") and cleared.  Iteration
    performs a k-way merge of the archives plus the in-memory remainder.
    """

    def __init__(self, rddconf, aggregator=None, size=None, api_callsite=None):
        Merger.__init__(self, rddconf, aggregator, size, api_callsite)
        self.combined = {}
        self.use_disk = rddconf.disk_merge
        if self.use_disk:
            env.meminfo.ratio = rddconf.dump_mem_ratio
        self.archives = []          # SortedItemsOnDisk spills
        self.rotate_time = 0
        self.last_rotate_ts = time.time()
        self.rotate_num = 0
        self.total_size = 0         # total bytes spilled to disk

    def _rotate(self):
        """Dump the in-memory dict to disk and log the rotation stats."""
        total_size = self.total_size
        t0 = time.time()
        time_since_last = t0 - self.last_rotate_ts
        dict_size = len(self.combined)
        rss_before = env.meminfo.rss_rt
        size = self._dump()
        self.total_size += size
        rss_after = env.meminfo.rss_rt
        t1 = time.time()
        rotate_time = t1 - t0
        self.last_rotate_ts = t1
        self.rotate_time += rotate_time
        self.rotate_num += 1
        max_rotate = 1000
        if self.rotate_num > max_rotate:
            # BUG FIX: was ``logger.warnging`` (typo), which raised
            # AttributeError on the very path meant to report OOM.
            logger.warning('more than %d rotation. exit!', max_rotate)
            os._exit(ERROR_TASK_OOM)
        env.meminfo.after_rotate()
        _log = logger.info if dpark.conf.LOG_ROTATE else logger.debug
        _log('rotate %d: use %.2f sec, since last %.2f secs, dict_size 0x%x,'
             'mem %d -> %d MB, disk size +%d = %d MB',
             self.rotate_num, rotate_time, time_since_last, dict_size,
             rss_before >> 20, rss_after >> 20, size >> 20, total_size >> 20)
        return env.meminfo.mem_limit_soft

    def disk_size(self):
        """Total bytes currently held by on-disk archives."""
        return sum([a.size for a in self.archives])

    def _dump(self):
        """Spill ``self.combined`` to a sorted file; return its size."""
        import gc
        items = self.combined.items()
        f = SortedItemsOnDisk(items, self.rddconf)
        self.archives.append(f)
        del items
        if self.rddconf.is_groupby:
            # BUG FIX (py3): dict.itervalues() is Python-2 only; use six,
            # consistent with six.iteritems() in __iter__ below.
            for v in six.itervalues(self.combined):
                del v[:]
        self.combined.clear()
        gc.collect()
        return f.size

    def merge(self, items, map_id, dep_id=0):
        mem_limit = env.meminfo.mem_limit_soft
        use_disk = self.use_disk
        try:
            env.meminfo.check = use_disk
            self._merge(items, map_id, dep_id, use_disk, env.meminfo, mem_limit)
        finally:
            env.meminfo.check = True

    def _get_merge_function(self):
        return self.aggregator.mergeCombiners

    def _merge(self, items, map_id, dep_id, use_disk, meminfo, mem_limit):
        combined = self.combined
        merge_combiner = self.aggregator.mergeCombiners
        for k, v in items:
            o = combined.get(k)
            combined[k] = merge_combiner(o, v) if o is not None else v
            if use_disk and meminfo.rss > mem_limit:
                mem_limit = self._rotate()

    def __iter__(self):
        if not self.archives:
            return six.iteritems(self.combined)
        # BUG FIX (py3): dict.items() returns a view with no .sort();
        # build a sorted list instead (same result on py2).
        combined = sorted(self.combined.items(), key=itemgetter(0))
        self.archives.append(iter(combined))
        iters = list(map(iter, self.archives))
        if self.rddconf.is_groupby and self.rddconf.iter_group:
            heap = HeapOnKey(key=lambda x: x[0], min_heap=True)
            it = GroupByNestedIter(heap.merge(iters), "")
        else:
            it = heap_merged(iters, self._get_merge_function())
        return it
class OrderedGroupByDiskHashMerger(DiskHashMerger):
    """Group-by merger that preserves upstream (map-output) order per group.

    Each value is stored tagged as ``(map_id, value)`` so that after the
    hash merge the values inside a group can be re-sorted by map id.
    """

    def _merge(self, items, map_id, dep_id, use_disk, meminfo, mem_limit):
        combined = self.combined
        for k, v in items:
            o = combined.get(k)
            iv = (map_id, v)  # tag with map_id for later reordering
            if o is None:
                combined[k] = [iv]
            else:
                o.append(iv)
            if use_disk and meminfo.rss > mem_limit:
                mem_limit = self._rotate()

    def __iter__(self):
        # BUG FIX (py3): ``reduce`` is no longer a builtin; import it
        # locally (a harmless re-binding on py2).
        from functools import reduce
        it = DiskHashMerger.__iter__(self)
        merge_combiner = self.aggregator.mergeCombiners
        for k, ivs in it:
            ivs.sort(key=itemgetter(0))  # restore map-output order
            cb = reduce(merge_combiner, (v for _, v in ivs))
            yield k, cb
class CoGroupDiskHashMerger(DiskHashMerger):
    """Cogroup merger: one value-list per upstream dependency for each key."""

    def __init__(self, rddconf, aggregator=None, size=None, api_callsite=None):
        DiskHashMerger.__init__(self, rddconf, aggregator, size, api_callsite)
        self.direct_upstreams = []

    def _get_merge_function(self):
        def _merge_groups(left, right):
            # concatenate each upstream's list when combining spilled runs
            for idx in range(self.size):
                left[idx].extend(right[idx])
            return left
        return _merge_groups

    def _merge(self, items, map_id, dep_id, use_disk, meminfo, mem_limit):
        combined = self.combined

        def _slot(key):
            # one tuple of ``size`` empty lists per key, created on demand
            slot = combined.get(key)
            if slot is None:
                combined[key] = slot = tuple([[] for _ in range(self.size)])
            return slot

        if map_id < 0:
            # single values from a direct (narrow) upstream
            for k, v in items:
                _slot(k)[dep_id].append(v)
                if use_disk and meminfo.rss > mem_limit:
                    mem_limit = self._rotate()
        else:
            # shuffled upstream delivers a list of values per key
            for k, vs in items:
                _slot(k)[dep_id].extend(vs)
class OrderedCoGroupDiskHashMerger(CoGroupDiskHashMerger):
    """Cogroup merger that keeps each shuffled upstream's values ordered
    by map id; direct upstreams keep their natural arrival order."""

    def _merge(self, items, map_id, dep_id, use_disk, meminfo, mem_limit):
        combined = self.combined
        if map_id < 0:
            # BUG FIX: was ``self.upstreams.append(...)`` which raised
            # AttributeError — the attribute initialized by
            # CoGroupDiskHashMerger is ``direct_upstreams`` (and it is the
            # one consulted by __iter__ below).
            self.direct_upstreams.append(dep_id)
            for k, v in items:
                t = combined.get(k)
                if t is None:
                    combined[k] = t = tuple([[] for _ in range(self.size)])
                t[dep_id].append(v)
                if use_disk and meminfo.rss > mem_limit:
                    mem_limit = self._rotate()
        else:
            for k, vs in items:
                t = combined.get(k)
                if t is None:
                    combined[k] = t = tuple([[] for _ in range(self.size)])
                # tag with map_id so the group can be sorted on output
                t[dep_id].append((map_id, vs))
                if use_disk and meminfo.rss > mem_limit:
                    mem_limit = self._rotate()

    def __iter__(self):
        it = DiskHashMerger.__iter__(self)
        direct_upstreams = self.direct_upstreams
        for k, groups in it:
            t = list([[] for _ in range(self.size)])
            for i, g in enumerate(groups):
                if g:
                    if i in direct_upstreams:
                        # direct upstream: values were appended untagged
                        t[i] = g
                    else:
                        # shuffled upstream: sort by map_id, then flatten
                        g.sort(key=itemgetter(0))
                        g1 = []
                        for _, vs in g:
                            g1.extend(vs)
                        t[i] = g1
            yield k, tuple(t)
class SortMergeAggregator(AggregatorBase):
    """Aggregator for sort-merge where every incoming item is already a
    combiner, so merging a value is exactly merging two combiners."""

    def __init__(self, mergeCombiners):
        # delegate value merging straight to the combiner merge
        self.mergeValue = mergeCombiners

    def createCombiner(self, v):
        # identity: the value *is* the combiner
        return v
class CoGroupSortMergeAggregator(AggregatorBase):
    """Aggregator gathering values into one list per upstream RDD."""

    def __init__(self, size):
        self.size = size  # number of cogrouped RDDs

    def createCombiner(self, v):
        # v is a (rdd_index, values) pair
        rdd_index, vals = v
        groups = tuple([[] for _ in range(self.size)])
        groups[rdd_index].extend(vals)
        return groups

    def mergeValue(self, c, v):
        rdd_index, vals = v
        c[rdd_index].extend(vals)
        return c
class SortMerger(Merger):
    """Merger for already-sorted inputs, with optional disk-backed batching.

    When too many input iterators would be open at once (or disk merge is
    forced), inputs are merged in batches of 100, each batch spilled to a
    file, and the spill files are merged in a final pass.
    """

    def __init__(self, rddconf, aggregator=None, size=None, api_callsite=None):
        Merger.__init__(self, rddconf, aggregator, size, api_callsite)
        if aggregator:
            self.aggregator = SortMergeAggregator(self.aggregator.mergeCombiners)
        else:
            self.aggregator = CoGroupSortMergeAggregator(size)
        self.combined = iter([])
        self.paths = []   # spill files created by _disk_merge_sorted

    def _merge_sorted(self, iters):
        heap = HeapOnKey(key=lambda x: x[0], min_heap=True)
        merged = heap.merge(iters)
        return self.aggregator.aggregate_sorted(merged)

    def _disk_merge_sorted(self, iters):
        t = time.time()
        s = AutoBatchedSerializer()
        iters = iter(iters)
        while True:
            batch = list(islice(iters, 100))
            if not batch:
                break
            path = env.workdir.alloc_tmp_file("sort_merger")
            with open(path, 'wb') as f:
                s.dump_stream(self._merge_sorted(batch), f)
            self.paths.append(path)
            env.task_stats.num_fetch_rotate += 1
        # BUG FIX: the spill files are written in binary mode but were
        # reopened in text mode, which breaks deserialization on Python 3;
        # open them with 'rb'.
        files = [s.load_stream(open(p, 'rb')) for p in self.paths]
        env.task_stats.secs_fetch = time.time() - t
        return self._merge_sorted(files)

    def merge(self, iters):
        if self.rddconf.disk_merge or len(iters) > dpark.conf.MAX_OPEN_FILE:
            merged = self._disk_merge_sorted(iters)
        else:
            merged = self._merge_sorted(iters)
        self.combined = merged

    def __iter__(self):
        return self.combined
class IterGroupBySortMerger(SortMerger):
    """Sort merger yielding lazily-grouped (key, values-iterator) pairs."""

    def _merge_sorted(self, iters):
        merged = HeapOnKey(key=lambda x: x[0], min_heap=True).merge(iters)
        return GroupByNestedIter(merged, self.api_callsite)
class IterCoGroupSortMerger(SortMerger):
    """Sort merger that cogroups sorted, duplicate-free streams."""

    def _merge_sorted(self, iters):
        # each stream yields <key, values> pairs with no duplicate keys
        return cogroup_no_dup([iter(each) for each in iters])
class MapOutputTracker(object):
    """Publish and look up shuffle-output locations via the tracker server."""

    @classmethod
    def get_key(cls, shuffle_id):
        """Tracker key under which one shuffle stage's locations live."""
        return 'shuffle:%s' % (shuffle_id,)

    @classmethod
    def set_locs(cls, shuffle_id, locs):
        env.trackerServer.set(cls.get_key(shuffle_id), locs)

    @classmethod
    def get_locs(cls, shuffle_id):
        k = cls.get_key(shuffle_id)
        server = env.trackerServer
        if server:
            # running inside the master process: query directly
            return server.get(k)
        # worker process: go through the tracker client RPC
        return env.trackerClient.call(GetValueMessage(k))
class ShuffleWorkDir(object):
    """Paths for one shuffle output: <shuffle_id>/<input_id>/<output_id>."""

    def __init__(self, shuffle_id, input_id, output_id):
        parts = (str(shuffle_id), str(input_id), str(output_id))
        self.subpath = os.path.join(*parts)

    def get(self):
        """Absolute path of this shuffle file inside the work dir."""
        return env.workdir.get_path(self.subpath)

    @classmethod
    def alloc_tmp(cls, mem_first=True, datasize=0):
        """Allocate a temp file for shuffle output, preferring memory-backed dirs."""
        return env.workdir.alloc_tmp_file("shuffle", mem_first, datasize)

    def export(self, tmppath):
        """Atomically publish *tmppath* under this shuffle's subpath."""
        return env.workdir.export(tmppath, self.subpath)

    def restore(self, uri):
        """URL for fetching this file from the server at *uri*."""
        if uri == env.server_uri:
            # urllib can open local file
            return 'file://' + self.get()
        return "%s/%s" % (uri, self.subpath)
| bsd-3-clause | dc5560e712c048c7104ed1948d03914c | 29.748235 | 109 | 0.529232 | 3.831135 | false | false | false | false |
douban/dpark | dpark/utils/frame.py | 1 | 5431 | import os
import inspect
from collections import defaultdict
import linecache
def get_path(p):
    """Canonical absolute form of path *p*, with symlinks resolved."""
    absolute = os.path.abspath(p)
    return os.path.realpath(absolute)
src_dir = os.path.dirname(os.path.dirname(get_path(__file__)))
class Frame(object):
    """Immutable snapshot of an interpreter frame: which function is
    running (``func_name``) and exactly where it is executing (``pos``)."""

    def __init__(self, f):
        code = f.f_code
        self.path = get_path(code.co_filename)
        self.lineno = f.f_lineno
        self.lasti = f.f_lasti
        self.func_name = code.co_name

    @property
    def pos(self):
        """(path, lineno, lasti) triple identifying the exact call site."""
        return self.path, self.lineno, self.lasti
def frame_tuple(f):
    """Hashable identity of a raw frame: (filename, line, instruction offset)."""
    code = f.f_code
    return code.co_filename, f.f_lineno, f.f_lasti
def func_info(f):
    """Short ``name@file:line`` description of a callable; falls back to
    ``str(f)`` for builtins, which carry no code object."""
    code = getattr(f, "__code__", None)
    if code is None:
        return "{}".format(f)  # builtin_function_or_method
    return "{}@{}:{}".format(code.co_name, code.co_filename, code.co_firstlineno)
def summary_stack(frames):
    """Summarize raw frames as ``[{"pos": "file:line, in func", "line": src}, ...]``.

    ``line`` is the stripped source line, or '' when the source is
    unavailable (``linecache.getline`` returns '' for unknown files).
    """
    result = []
    for f in frames:
        co = f.f_code
        pos = '{}:{}, in {}'.format(co.co_filename, f.f_lineno, co.co_name)
        # strip once; the original stripped an already-stripped line again,
        # which was a redundant no-op
        line = linecache.getline(co.co_filename, f.f_lineno).strip()
        result.append({"pos": pos, "line": line})
    return result
class Scope(object):
    """Identity of a dpark API call site, deduplicated by full call stack.

    A Scope is created once per unique Python call stack that reaches a
    dpark API.  Several class-level registries index scopes by id, by
    stack hash, and by the user-visible call site of the dpark API.
    """

    # id -> Scope
    scopes_by_id = {}
    # hash(full stack) -> Scope (dedup key for get())
    scopes_by_stackhash = {}
    # api_callsite_id -> [Scope, ...] (several stacks may share a callsite)
    scopes_by_api_callsite_id = {}
    # api_callsite string -> dense integer id
    api_callsites = {}
    calls_in_oneline = defaultdict(dict)  # (path, line_no, fname) -> [lasti...]
    gid = 0

    def __init__(self, name_fmt, stack, stackhash, api, api_callsite, stack_above_api):
        self.id = Scope.gid
        Scope.gid += 1
        self.name = name_fmt.format(api=api)
        self.stack = stack
        self.stackhash = stackhash
        self.api = api
        self.api_callsite = api_callsite
        self.key = "{}@{}".format(api, self.api_callsite)
        self.api_callsite_id = self.api_callsites.get(api_callsite)
        self.stack_above_api = stack_above_api
        if self.api_callsite_id is None:
            # first time this callsite is seen: assign the next dense id
            self.api_callsite_id = self.api_callsites[api_callsite] = len(self.api_callsites)
            self.scopes_by_api_callsite_id[self.api_callsite_id] = [self]
        else:
            self.scopes_by_api_callsite_id[self.api_callsite_id].append(self)
        # print(self.id, self.api_callsite_id, api_callsite, self.name)

    @classmethod
    def reset(cls):
        """Clear every class-level registry (e.g. between jobs or tests)."""
        cls.scopes_by_id = {}
        cls.scopes_by_stackhash = {}
        cls.scopes_by_api_callsite_id = {}
        cls.api_callsites = {}
        cls.calls_in_oneline = defaultdict(dict)
        cls.gid = 0

    @classmethod
    def get_callsite(cls, caller, callee):
        """
        Deal with usage like "rdd.map(_).map(_)", distinguish same dpark api called in one line by lasti.
        To be comprehensible, replace lasti with order of calling of same api in this line , starts with 0.
        """
        callee = Frame(callee)  # the dpark api called by user, DparkContext.xxx() or RDD.xxx()
        caller = Frame(caller)  # the first callsite out of dpark package, where user call dpark api
        key = caller.path, caller.lineno, callee.func_name
        calls = cls.calls_in_oneline.setdefault(key, [])
        # find this lasti's position among earlier calls on the same line;
        # for-else appends it as a new entry when not seen before
        i = -1
        for i, lasti in enumerate(calls):
            if lasti == caller.lasti:
                seq = i
                break
        else:
            seq = i + 1
            calls.append(caller.lasti)
        api = callee.func_name
        api_callsite = "{}:{}@{}:{}".format(callee.func_name, seq, caller.path, caller.lineno)
        return api, api_callsite

    @classmethod
    def get(cls, name_fmt):
        """Return the (possibly cached) Scope for the current call stack.

        Walks frames outward from the caller; the first frame whose file
        lives outside the dpark source tree marks the user/API boundary.
        """
        callee = inspect.currentframe()
        caller = callee.f_back
        stack = []
        stack_above_api = []
        api_caller = None
        api_callee = None
        while caller is not None:
            stack.append(frame_tuple(caller))
            if api_callee is None:
                # still inside dpark: look for the first user-code frame
                if src_dir != os.path.dirname(get_path(caller.f_code.co_filename)):
                    api_callee = callee  # the dpark api called by user, DparkContext.xxx() or RDD.xxx()
                    api_caller = caller  # the first callsite out of dpark package, where user call dpark api
                    stack_above_api.append(caller)
            else:
                stack_above_api.append(caller)
            callee = caller
            caller = caller.f_back
        stack = tuple(stack)
        stackhash = hash(stack)
        scope = cls.scopes_by_stackhash.get(stackhash)
        if scope is None:
            # new stack: summarize the user-side frames and register the scope
            stack_above_api = summary_stack(stack_above_api)
            api, api_callsite = cls.get_callsite(api_caller, api_callee)
            scope = Scope(name_fmt, stack, stackhash, api, api_callsite, stack_above_api)
            cls.scopes_by_stackhash[stackhash] = scope
            cls.scopes_by_id[scope.id] = scope
        return scope
def get_stacks_of_threads():
    """Snapshot the Python stack of every live thread, keyed by thread name.

    Returns ``{thread_name: {"stack": [...], "f_locals": str,
    "f_back.f_locals": str}}``.
    """
    import threading
    import sys
    import traceback
    threads = {}
    # take one consistent snapshot of all frames
    frames = sys._current_frames()
    for t in threading.enumerate():
        f = frames.get(t.ident)
        if f is None:
            # thread died between enumerate() and the snapshot
            continue
        # BUG FIX: the original called traceback.format_stack() with no
        # argument, which formats the *current* thread's stack for every
        # entry; pass the target thread's frame instead.
        stack = traceback.format_stack(f)
        v = {
            "stack": stack,
            "f_locals": "{}".format(f.f_locals),
            # f_back is None for a thread's outermost frame
            "f_back.f_locals": "{}".format(f.f_back.f_locals if f.f_back else None),
        }
        threads[t.name] = v
    return threads
| bsd-3-clause | e39d29b6b3316c096f6b63477778bb7e | 31.915152 | 109 | 0.573559 | 3.428662 | false | false | false | false |
dials/dials | src/dials/util/export_shelx.py | 1 | 6338 | from __future__ import annotations
import logging
from math import isclose
from cctbx import crystal, miller
from iotbx.shelx.write_ins import LATT_SYMM
from dials.algorithms.scaling.scaling_library import determine_best_unit_cell
from dials.array_family import flex
from dials.util import Sorry
from dials.util.filter_reflections import filter_reflection_table
logger = logging.getLogger(__name__)
def export_shelx(scaled_data, experiment_list, params):
    """Export scaled data corresponding to experiment_list to
    a SHELX HKL formatted text file."""
    # Handle requesting profile intensities (default via auto) but no column
    if "profile" in params.intensity and "intensity.prf.value" not in scaled_data:
        raise Sorry(
            "Requested profile intensity data but only summed present. Use intensity=sum."
        )
    # use supplied best unit cell or that determined from experiment list to define d in reflection table.
    best_unit_cell = params.mtz.best_unit_cell
    if best_unit_cell is None:
        best_unit_cell = determine_best_unit_cell(experiment_list)
    else:
        logger.info("Using supplied unit cell across experiments : %s", best_unit_cell)
    scaled_data["d"] = best_unit_cell.d(scaled_data["miller_index"])
    # Clean up reflection table with mtz defaults (as in export_xds_ascii)
    scaled_data = filter_reflection_table(
        scaled_data,
        intensity_choice=params.intensity,
        partiality_threshold=params.mtz.partiality_threshold,
        combine_partials=params.mtz.combine_partials,
        min_isigi=params.mtz.min_isigi,
        filter_ice_rings=params.mtz.filter_ice_rings,
        d_min=params.mtz.d_min,
    )
    # Check that all experiments have the same space group
    if len({x.crystal.get_space_group().make_tidy() for x in experiment_list}) != 1:
        raise ValueError("Experiments do not have a unique space group")
    # Create miller set with space group from 1st crystal in experiment list and best unit cell
    miller_set = miller.set(
        crystal_symmetry=crystal.symmetry(
            unit_cell=best_unit_cell,
            space_group=experiment_list[0].crystal.get_space_group(),
        ),
        indices=scaled_data["miller_index"],
        anomalous_flag=False,
    )
    # "profile" maps onto the "prf" column prefix of the reflection table
    intensity_choice = (
        params.intensity[0] if params.intensity[0] != "profile" else "prf"
    )
    intensities, variances = (
        scaled_data["intensity." + intensity_choice + ".value"],
        scaled_data["intensity." + intensity_choice + ".variance"],
    )
    # filter_reflection_table above should guarantee positive variances
    assert variances.all_gt(0)
    i_obs = miller.array(miller_set, data=intensities)
    i_obs.set_observation_type_xray_intensity()
    i_obs.set_sigmas(flex.sqrt(variances))
    # If requested scale up data to maximise use of scale_range
    if params.shelx.scale:
        # the single scale factor must keep both intensities and sigmas
        # inside scale_range, so take the more restrictive of the two
        max_val = max(
            i_obs.data().min_max_mean().max, i_obs.sigmas().min_max_mean().max
        )
        min_val = min(
            i_obs.data().min_max_mean().min, i_obs.sigmas().min_max_mean().min
        )
        min_scale = params.shelx.scale_range[0] / min_val
        max_scale = params.shelx.scale_range[1] / max_val
        scale = min(min_scale, max_scale)
        i_obs = i_obs.apply_scaling(factor=scale)
    # write the SHELX HKL file
    hkl_file = params.shelx.hklout
    with open(hkl_file, "w") as hkl_file_object:
        i_obs.export_as_shelx_hklf(
            file_object=hkl_file_object,
            scale_range=params.shelx.scale_range,
            normalise_if_format_overflow=True,
        )
    logger.info(f"Written {i_obs.size()} reflections to {hkl_file}")
    # and a stub of an .ins file with information from the .expt file
    _write_ins(
        experiment_list,
        best_unit_cell=params.mtz.best_unit_cell,
        ins_file=params.shelx.ins,
    )
    logger.info(f"Written {params.shelx.ins}")
def _write_ins(experiment_list, best_unit_cell, ins_file):
    """Write a stub SHELX .ins file (TITL/CELL/ZERR/LATT/SYMM records).

    Requires a single wavelength across experiments; raises ValueError
    otherwise.  Cell esds (ZERR) are written only when a single consistent
    cell with standard deviations is available.
    """
    sg = experiment_list[0].crystal.get_space_group()
    unit_cells = []
    wavelengths = []
    # Check for single wavelength
    for exp in experiment_list:
        wl = exp.beam.get_wavelength()
        # collect distinct wavelengths to 1e-4 Angstrom tolerance
        if not any([isclose(wl, w, abs_tol=1e-4) for w in wavelengths]):
            wavelengths.append(wl)
    if len(wavelengths) > 1:
        raise ValueError("Experiments have more than one wavelength")
    else:
        wl = wavelengths[0]
    # if user has supplied best_unit_cell use it
    if best_unit_cell is not None:
        uc = best_unit_cell
        uc_sd = None  # no esds available for a user-supplied cell
    else:
        for exp in experiment_list:
            unit_cells.append(
                exp.crystal.get_recalculated_unit_cell() or exp.crystal.get_unit_cell()
            )
        if len(unit_cells) > 1:
            if (
                len({uc.parameters() for uc in unit_cells}) > 1
            ):  # have different cells so no esds
                uc = determine_best_unit_cell(experiment_list)
                uc_sd = None
            else:  # identical (recalculated?) unit cell with esds
                uc = (
                    experiment_list[0].crystal.get_recalculated_unit_cell()
                    or experiment_list[0].crystal.get_unit_cell()
                )
                uc_sd = (
                    experiment_list[0].crystal.get_recalculated_cell_parameter_sd()
                    or experiment_list[0].crystal.get_cell_parameter_sd()
                )
        else:  # single unit cell
            uc = (
                experiment_list[0].crystal.get_recalculated_unit_cell()
                or experiment_list[0].crystal.get_unit_cell()
            )
            uc_sd = (
                experiment_list[0].crystal.get_recalculated_cell_parameter_sd()
                or experiment_list[0].crystal.get_cell_parameter_sd()
            )
    with open(ins_file, "w") as f:
        # TITL records the space group number and (compacted) symbol
        f.write(
            f"TITL {sg.type().number()} in {sg.type().lookup_symbol().replace(' ','')}\n"
        )
        # CELL: wavelength followed by the six cell parameters
        f.write(
            "CELL {:8.5f} {:8.4f} {:8.4f} {:8.4f} {:8.3f} {:8.3f} {:8.3f}\n".format(
                wl, *uc.parameters()
            )
        )
        if uc_sd:
            # ZERR: Z value followed by the cell parameter esds
            f.write(
                "ZERR {:8.3f} {:8.4f} {:8.4f} {:8.4f} {:8.3f} {:8.3f} {:8.3f}\n".format(
                    sg.order_z(), *uc_sd
                )
            )
        # emit LATT and SYMM cards for the space group
        LATT_SYMM(f, sg)
| bsd-3-clause | 1d422dbf01a60dd156ce6f54117cb4e8 | 36.502959 | 106 | 0.5994 | 3.40204 | false | false | false | false |
dials/dials | tests/algorithms/refinement/test_restraints_parameterisation.py | 1 | 14559 | """Tests for RestraintsParameterisation and associated classes used in refinement"""
from __future__ import annotations
import os
import random
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx.phil import parse
from dials.algorithms.refinement import RefinerFactory
from dials.algorithms.refinement.restraints import RestraintsParameterisation
from dials.array_family import flex
def test_single_crystal_restraints_gradients():
    """Simple test with a single triclinic crystal restrained to a target unit cell.

    Checks that the analytical restraint gradients from
    RestraintsParameterisation match central finite differences.
    """
    from dxtbx.model.experiment_list import Experiment, ExperimentList

    from dials.algorithms.refinement.parameterisation.beam_parameters import (
        BeamParameterisation,
    )
    from dials.algorithms.refinement.parameterisation.crystal_parameters import (
        CrystalOrientationParameterisation,
        CrystalUnitCellParameterisation,
    )
    from dials.algorithms.refinement.parameterisation.detector_parameters import (
        DetectorParameterisationSinglePanel,
    )
    from dials.algorithms.refinement.parameterisation.prediction_parameters import (
        XYPhiPredictionParameterisation,
    )

    from . import geometry_phil
    from .setup_geometry import Extract

    overrides = """geometry.parameters.crystal.a.length.range = 10 50
    geometry.parameters.crystal.b.length.range = 10 50
    geometry.parameters.crystal.c.length.range = 10 50"""
    master_phil = parse(geometry_phil)
    # build random-but-reproducible models within the overridden ranges
    models = Extract(master_phil, overrides)
    mydetector = models.detector
    mygonio = models.goniometer
    mycrystal = models.crystal
    mybeam = models.beam
    # Build a mock scan for a 72 degree sequence
    from dxtbx.model import ScanFactory

    sf = ScanFactory()
    myscan = sf.make_scan(
        image_range=(1, 720),
        exposure_times=0.1,
        oscillation=(0, 0.1),
        epochs=list(range(720)),
        deg=True,
    )
    # Create parameterisations of these models
    det_param = DetectorParameterisationSinglePanel(mydetector)
    s0_param = BeamParameterisation(mybeam, mygonio)
    xlo_param = CrystalOrientationParameterisation(mycrystal)
    xluc_param = CrystalUnitCellParameterisation(mycrystal)
    # Create an ExperimentList
    experiments = ExperimentList()
    experiments.append(
        Experiment(
            beam=mybeam,
            detector=mydetector,
            goniometer=mygonio,
            scan=myscan,
            crystal=mycrystal,
            imageset=None,
        )
    )
    # Build a prediction parameterisation
    pred_param = XYPhiPredictionParameterisation(
        experiments,
        detector_parameterisations=[det_param],
        beam_parameterisations=[s0_param],
        xl_orientation_parameterisations=[xlo_param],
        xl_unit_cell_parameterisations=[xluc_param],
    )
    # Build a restraints parameterisation
    rp = RestraintsParameterisation(
        detector_parameterisations=[det_param],
        beam_parameterisations=[s0_param],
        xl_orientation_parameterisations=[xlo_param],
        xl_unit_cell_parameterisations=[xluc_param],
    )
    # make a unit cell target perturbed ~1 sigma from the current cell
    sigma = 1.0
    uc = mycrystal.get_unit_cell().parameters()
    target_uc = [random.gauss(e, sigma) for e in uc]
    rp.add_restraints_to_target_xl_unit_cell(
        experiment_id=0, values=target_uc, sigma=[sigma] * 6
    )
    # get analytical values and gradients
    vals, grads, weights = rp.get_residuals_gradients_and_weights()
    assert len(vals) == rp.num_residuals
    # get finite difference gradients (central differences)
    p_vals = pred_param.get_param_vals()
    deltas = [1.0e-7] * len(p_vals)
    fd_grad = []
    for i, delta in enumerate(deltas):
        val = p_vals[i]
        p_vals[i] -= delta / 2.0
        pred_param.set_param_vals(p_vals)
        rev_state, foo, bar = rp.get_residuals_gradients_and_weights()
        rev_state = flex.double(rev_state)
        p_vals[i] += delta
        pred_param.set_param_vals(p_vals)
        fwd_state, foo, bar = rp.get_residuals_gradients_and_weights()
        fwd_state = flex.double(fwd_state)
        p_vals[i] = val  # restore the unperturbed parameter value
        fd = (fwd_state - rev_state) / delta
        fd_grad.append(fd)
    # for comparison, fd_grad is a list of flex.doubles, each of which corresponds
    # to a column of the sparse matrix grads.
    for i, fd in enumerate(fd_grad):
        # extract dense column from the sparse matrix
        an = grads.col(i).as_dense_vector()
        assert an == pytest.approx(fd, abs=1e-5)
def test_two_triclinic_crystals():
    """Simple test with two triclinic crystals restrained to a target unit cell.

    Same finite-difference check as the single-crystal test, but with the
    crystal duplicated into a second experiment with its own unit-cell
    parameterisation.
    """
    from dxtbx.model.experiment_list import Experiment, ExperimentList

    from dials.algorithms.refinement.parameterisation.beam_parameters import (
        BeamParameterisation,
    )
    from dials.algorithms.refinement.parameterisation.crystal_parameters import (
        CrystalOrientationParameterisation,
        CrystalUnitCellParameterisation,
    )
    from dials.algorithms.refinement.parameterisation.detector_parameters import (
        DetectorParameterisationSinglePanel,
    )

    #### Import model parameterisations
    from dials.algorithms.refinement.parameterisation.prediction_parameters import (
        XYPhiPredictionParameterisation,
    )

    from . import geometry_phil
    from .setup_geometry import Extract

    overrides = """geometry.parameters.crystal.a.length.range = 10 50
    geometry.parameters.crystal.b.length.range = 10 50
    geometry.parameters.crystal.c.length.range = 10 50"""
    master_phil = parse(geometry_phil)
    models = Extract(master_phil, overrides)
    mydetector = models.detector
    mygonio = models.goniometer
    mycrystal = models.crystal
    # duplicate the crystal
    from copy import deepcopy

    mycrystal2 = deepcopy(mycrystal)
    mybeam = models.beam
    # Build a mock scan for a 72 degree sequence
    from dxtbx.model import ScanFactory

    sf = ScanFactory()
    myscan = sf.make_scan(
        image_range=(1, 720),
        exposure_times=0.1,
        oscillation=(0, 0.1),
        epochs=list(range(720)),
        deg=True,
    )
    # Create parameterisations of these models
    det_param = DetectorParameterisationSinglePanel(mydetector)
    s0_param = BeamParameterisation(mybeam, mygonio)
    xlo_param = CrystalOrientationParameterisation(mycrystal)
    xluc_param = CrystalUnitCellParameterisation(mycrystal)
    # second cell parameterisation belongs to experiment id 1
    xluc_param2 = CrystalUnitCellParameterisation(mycrystal2, experiment_ids=[1])
    # Create an ExperimentList with the crystal duplicated
    experiments = ExperimentList()
    experiments.append(
        Experiment(
            beam=mybeam,
            detector=mydetector,
            goniometer=mygonio,
            scan=myscan,
            crystal=mycrystal,
            imageset=None,
        )
    )
    experiments.append(
        Experiment(
            beam=mybeam,
            detector=mydetector,
            goniometer=mygonio,
            scan=myscan,
            crystal=mycrystal2,
            imageset=None,
        )
    )
    # Build a prediction parameterisation
    pred_param = XYPhiPredictionParameterisation(
        experiments,
        detector_parameterisations=[det_param],
        beam_parameterisations=[s0_param],
        xl_orientation_parameterisations=[xlo_param],
        xl_unit_cell_parameterisations=[xluc_param, xluc_param2],
    )
    # Build a restraints parameterisation
    rp = RestraintsParameterisation(
        detector_parameterisations=[det_param],
        beam_parameterisations=[s0_param],
        xl_orientation_parameterisations=[xlo_param],
        xl_unit_cell_parameterisations=[xluc_param, xluc_param2],
    )
    # make a unit cell target and restrain both crystals to it
    sigma = 1.0
    uc = mycrystal.get_unit_cell().parameters()
    target_uc = [random.gauss(e, sigma) for e in uc]
    rp.add_restraints_to_target_xl_unit_cell(
        experiment_id=0, values=target_uc, sigma=[sigma] * 6
    )
    rp.add_restraints_to_target_xl_unit_cell(
        experiment_id=1, values=target_uc, sigma=[sigma] * 6
    )
    # get analytical values and gradients
    vals, grads, weights = rp.get_residuals_gradients_and_weights()
    assert len(vals) == rp.num_residuals
    # get finite difference gradients (central differences)
    p_vals = pred_param.get_param_vals()
    deltas = [1.0e-7] * len(p_vals)
    fd_grad = []
    for i, delta in enumerate(deltas):
        val = p_vals[i]
        p_vals[i] -= delta / 2.0
        pred_param.set_param_vals(p_vals)
        rev_state, foo, bar = rp.get_residuals_gradients_and_weights()
        rev_state = flex.double(rev_state)
        p_vals[i] += delta
        pred_param.set_param_vals(p_vals)
        fwd_state, foo, bar = rp.get_residuals_gradients_and_weights()
        fwd_state = flex.double(fwd_state)
        p_vals[i] = val  # restore the unperturbed parameter value
        fd = (fwd_state - rev_state) / delta
        fd_grad.append(fd)
    # for comparison, fd_grad is a list of flex.doubles, each of which corresponds
    # to a column of the sparse matrix grads.
    for i, fd in enumerate(fd_grad):
        # extract dense column from the sparse matrix
        an = grads.col(i).as_dense_vector()
        assert an == pytest.approx(fd, abs=1e-5)
def test_10_crystals_with_stills_parameterisation(dials_regression):
    """Test with multiple crystals, and a stills refiner.

    Uses the tie_to_target unit-cell restraint for ten crystals and checks
    the analytical restraint gradients against finite differences.
    """
    # The phil scope
    from dials.algorithms.refinement.refiner import phil_scope

    user_phil = parse(
        """
    refinement
    {
      parameterisation
      {
        crystal
        {
          unit_cell
          {
            restraints
            {
              tie_to_target
              {
                values=95,95,132,90,90,120
                sigmas=1,1,1,0,0,0
                id=0,1,2,3,4,5,6,7,8,9
              }
            }
          }
        }
      }
    }
    """
    )
    working_phil = phil_scope.fetch(source=user_phil)
    working_params = working_phil.extract()
    # use the multi stills test data
    data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
    experiments_path = os.path.join(data_dir, "combined_experiments.json")
    pickle_path = os.path.join(data_dir, "combined_reflections.pickle")
    experiments = ExperimentListFactory.from_json_file(
        experiments_path, check_format=False
    )
    reflections = flex.reflection_table.from_file(pickle_path)
    refiner = RefinerFactory.from_parameters_data_experiments(
        working_params, reflections, experiments
    )
    # hack to extract the objects needed from the Refiner
    rp = refiner._target._restraints_parameterisation
    pred_param = refiner._pred_param
    # get analytical values and gradients
    vals, grads, weights = rp.get_residuals_gradients_and_weights()
    assert len(vals) == rp.num_residuals
    # get finite difference gradients (central differences)
    p_vals = pred_param.get_param_vals()
    deltas = [1.0e-7] * len(p_vals)
    fd_grad = []
    for i, delta in enumerate(deltas):
        val = p_vals[i]
        p_vals[i] -= delta / 2.0
        pred_param.set_param_vals(p_vals)
        rev_state, foo, bar = rp.get_residuals_gradients_and_weights()
        rev_state = flex.double(rev_state)
        p_vals[i] += delta
        pred_param.set_param_vals(p_vals)
        fwd_state, foo, bar = rp.get_residuals_gradients_and_weights()
        fwd_state = flex.double(fwd_state)
        p_vals[i] = val  # restore the unperturbed parameter value
        fd = (fwd_state - rev_state) / delta
        fd_grad.append(fd)
    # for comparison, fd_grad is a list of flex.doubles, each of which corresponds
    # to a column of the sparse matrix grads.
    pnames = pred_param.get_param_names()
    for i, (pname, fd) in enumerate(zip(pnames, fd_grad)):
        # extract dense column from the sparse matrix
        an = grads.col(i).as_dense_vector()
        # print pname
        # print list(an.round(6))
        # print list(fd.round(6))
        # print
        assert an == pytest.approx(fd, abs=1e-5)
def test_group_restraint_with_multiple_crystals_and_a_stills_refiner(dials_regression):
    """Check tie_to_group unit-cell restraint gradients against finite
    differences, using the multi-stills regression data."""
    # The phil scope
    from dials.algorithms.refinement.refiner import phil_scope

    user_phil = parse(
        """
    refinement
    {
      parameterisation
      {
        crystal
        {
          unit_cell
          {
            restraints
            {
              tie_to_group
              {
                sigmas=1,0,2,0,0,0
              }
            }
          }
        }
      }
    }
    """
    )
    working_phil = phil_scope.fetch(source=user_phil)
    working_params = working_phil.extract()
    # use the multi stills test data
    data_dir = os.path.join(dials_regression, "refinement_test_data", "multi_stills")
    experiments_path = os.path.join(data_dir, "combined_experiments.json")
    pickle_path = os.path.join(data_dir, "combined_reflections.pickle")
    experiments = ExperimentListFactory.from_json_file(
        experiments_path, check_format=False
    )
    reflections = flex.reflection_table.from_file(pickle_path)
    refiner = RefinerFactory.from_parameters_data_experiments(
        working_params, reflections, experiments
    )
    # hack to extract the objects needed from the Refiner
    rp = refiner._target._restraints_parameterisation
    pred_param = refiner._pred_param
    # get analytical values and gradients
    vals, grads, weights = rp.get_residuals_gradients_and_weights()
    assert len(vals) == rp.num_residuals
    # get finite difference gradients (central differences)
    p_vals = pred_param.get_param_vals()
    deltas = [1.0e-7] * len(p_vals)
    fd_grad = []
    for i, delta in enumerate(deltas):
        val = p_vals[i]
        p_vals[i] -= delta / 2.0
        pred_param.set_param_vals(p_vals)
        rev_state, foo, bar = rp.get_residuals_gradients_and_weights()
        rev_state = flex.double(rev_state)
        p_vals[i] += delta
        pred_param.set_param_vals(p_vals)
        fwd_state, foo, bar = rp.get_residuals_gradients_and_weights()
        fwd_state = flex.double(fwd_state)
        p_vals[i] = val  # restore the unperturbed parameter value
        fd = (fwd_state - rev_state) / delta
        fd_grad.append(fd)
    # for comparison, fd_grad is a list of flex.doubles, each of which corresponds
    # to the gradients of the residuals wrt to a single parameter.
    pnames = pred_param.get_param_names()
    for i, (pname, fd) in enumerate(zip(pnames, fd_grad)):
        # extract dense column from the sparse matrix
        an = grads.col(i).as_dense_vector()
        # print pname
        # print list(an.round(6))
        # print list(fd.round(6))
        # print
        assert an == pytest.approx(fd, abs=1e-5)
| bsd-3-clause | 82f8544ad45ee0fd5c28325c33bdd28f | 28.895277 | 87 | 0.647984 | 3.462307 | false | false | false | false |
dials/dials | tests/algorithms/scaling/test_scaling_restraints.py | 1 | 7745 | """
Tests for the scaling restraints module.
"""
from __future__ import annotations
from unittest.mock import Mock
import pytest
from scitbx import sparse
from dials.algorithms.scaling.scaling_restraints import (
ScalingRestraintsCalculator,
SingleScalingRestraintsCalculator,
)
from dials.array_family import flex
@pytest.fixture
def mock_restrained_component():
    """Return a Mock component that reports restraint residuals and gradients."""
    n = 3
    restraints_jacobian = sparse.matrix(n, n)
    restraints_jacobian[0, 0] = 1.0
    comp = Mock()
    comp.n_params = n
    comp.calculate_restraints.return_value = [
        flex.double([1.0, 2.0, 3.0]),
        flex.double([0.1, 0.2, 0.3]),
    ]
    comp.calculate_jacobian_restraints.return_value = [
        flex.double([1.0, 1.1, 1.2]),
        restraints_jacobian,
        flex.double([1.0, 2.0, 3.0]),
    ]
    return comp
@pytest.fixture
def mock_unrestrained_component():
    """Return a Mock component that declares no restraints."""
    comp = Mock()
    comp.n_params = 5
    # Both restraint entry points signal "nothing to restrain" with None.
    comp.calculate_restraints.return_value = None
    comp.calculate_jacobian_restraints.return_value = None
    return comp
@pytest.fixture
def mock_parameter_manager(mock_restrained_component, mock_unrestrained_component):
    """Mock a parameter manager exposing one restrained and one unrestrained
    component, as required by the ScalingRestraints class."""
    n_restrained = mock_restrained_component.n_params
    n_unrestrained = mock_unrestrained_component.n_params
    apm = Mock()
    apm.components = {
        "restrained": {
            "object": mock_restrained_component,
            "n_params": n_restrained,
            "start_idx": 0,
        },
        "unrestrained": {
            "object": mock_unrestrained_component,
            "n_params": n_unrestrained,
            "start_idx": n_restrained,
        },
    }
    apm.n_active_params = n_restrained + n_unrestrained
    return apm
@pytest.fixture
def mock_unrestrained_apm(mock_unrestrained_component):
    """Mock a parameter manager whose only component is unrestrained."""
    n = mock_unrestrained_component.n_params
    manager = Mock()
    manager.components = {
        "unrestrained": {
            "object": mock_unrestrained_component,
            "n_params": n,
            "start_idx": 0,
        }
    }
    manager.n_active_params = n
    return manager
@pytest.fixture
def mock_multi_apm(mock_parameter_manager):
    """Mock a multi-dataset parameter manager spanning two identical datasets."""
    n_per_dataset = mock_parameter_manager.n_active_params
    multi = Mock()
    multi.apm_list = [mock_parameter_manager] * 2
    multi.n_active_params = 2 * n_per_dataset
    multi.apm_data = {
        0: {"start_idx": 0, "end_idx": n_per_dataset},
        1: {"start_idx": n_per_dataset, "end_idx": 2 * n_per_dataset},
    }
    return multi
@pytest.fixture
def mock_multi_unrestrained_apm(mock_unrestrained_apm):
    """Mock a multi-dataset parameter manager with no restrained components."""
    multi = Mock()
    multi.apm_list = [mock_unrestrained_apm] * 2
    return multi
def test_unrestrained_ScalingRestraints(
    mock_unrestrained_apm, mock_multi_unrestrained_apm
):
    """With no restrained components, every calculator method returns None."""
    single = SingleScalingRestraintsCalculator
    multi = ScalingRestraintsCalculator
    assert single.calculate_restraints(mock_unrestrained_apm) is None
    assert single.calculate_jacobian_restraints(mock_unrestrained_apm) is None
    assert multi.calculate_restraints(mock_multi_unrestrained_apm) is None
    assert multi.calculate_jacobian_restraints(mock_multi_unrestrained_apm) is None
def test_ScalingRestraints(
    mock_parameter_manager, mock_restrained_component, mock_unrestrained_component
):
    """Test for the single scaling restraints manager."""
    n_restrained = mock_restrained_component.n_params
    n_unrestrained = mock_unrestrained_component.n_params

    # calculate_restraints: the residual vector should match the restrained
    # component's restraints, and the gradient vector should span all active
    # parameters, zero-padded over the unrestrained component's parameters.
    result = SingleScalingRestraintsCalculator.calculate_restraints(
        mock_parameter_manager
    )
    expected = mock_restrained_component.calculate_restraints()
    assert list(result[0]) == list(expected[0])
    assert list(result[1]) == list(expected[1]) + [0.0] * n_unrestrained

    # calculate_jacobian_restraints: the jacobian has one row per restrained
    # parameter and one column per active parameter; the component's restraints
    # jacobian must be copied into the top-left block, all other entries zero.
    jacobian_result = SingleScalingRestraintsCalculator.calculate_jacobian_restraints(
        mock_parameter_manager
    )
    expected = mock_restrained_component.calculate_jacobian_restraints()
    assert list(jacobian_result[0]) == list(expected[0])
    jacobian = jacobian_result[1]
    assert jacobian.n_cols == mock_parameter_manager.n_active_params
    assert jacobian.n_rows == n_restrained
    for row in range(n_restrained):
        for col in range(n_restrained):
            assert jacobian[row, col] == expected[1][row, col]
    # No entries outside the restrained block should have been set.
    assert jacobian.non_zeroes == expected[1].non_zeroes
def test_MultiScalingRestraints(
    mock_multi_apm, mock_restrained_component, mock_unrestrained_component
):
    """Test for the multi-dataset scaling restraints manager."""
    n_restrained = mock_restrained_component.n_params
    zero_pad = [0.0] * mock_unrestrained_component.n_params

    # calculate_restraints: expected return is the single-dataset residual and
    # gradient vectors concatenated, one copy per dataset.
    result = ScalingRestraintsCalculator.calculate_restraints(mock_multi_apm)
    single = mock_restrained_component.calculate_restraints()
    assert list(result[0]) == list(single[0]) * 2
    assert list(result[1]) == (list(single[1]) + zero_pad) * 2

    # calculate_jacobian_restraints: the per-dataset restraints vectors are
    # concatenated and each dataset's restraints jacobian sits on the block
    # diagonal of the combined jacobian.
    jacobian_result = ScalingRestraintsCalculator.calculate_jacobian_restraints(
        mock_multi_apm
    )
    single = mock_restrained_component.calculate_jacobian_restraints()
    assert list(jacobian_result[0]) == list(single[0]) * 2
    n_dataset_params = mock_multi_apm.apm_data[0]["end_idx"]
    jacobian = jacobian_result[1]
    assert jacobian.n_cols == mock_multi_apm.n_active_params
    assert jacobian.n_rows == 2 * n_restrained
    for row in range(n_restrained):
        for col in range(n_restrained):
            assert jacobian[row, col] == single[1][row, col]
            assert (
                jacobian[row + n_restrained, col + n_dataset_params]
                == single[1][row, col]
            )
    assert jacobian.non_zeroes == 2 * single[1].non_zeroes
| bsd-3-clause | ae08ee0232d71403a2ef04b2e7f4f72f | 34.856481 | 85 | 0.680568 | 3.477773 | false | true | false | false |
dials/dials | src/dials/command_line/export_bitmaps.py | 1 | 10013 | from __future__ import annotations
import os
import sys
from PIL import Image
import iotbx.phil
from dxtbx.model.detector_helpers import get_detector_projection_2d_axes
from dials.algorithms.image.threshold import DispersionThresholdDebug
from dials.array_family import flex
from dials.util import Sorry, show_mail_handle_errors
from dials.util.image_viewer.slip_viewer.flex_image import (
get_flex_image,
get_flex_image_multipanel,
)
from dials.util.options import ArgumentParser, flatten_experiments
help_message = """
Export raw diffraction image files as bitmap images, optionally exporting
images from intermediate spot-finding steps (local mean and variance maps,
or sigma_b, sigma_s or threshold-filtered images). Appearance of the images
can be altered via the brightness and colour_scheme parameters, and optionally
binning of pixels can be used to reduce image sizes.
Examples::
dials.export_bitmaps image.cbf
dials.export_bitmaps models.expt
dials.export_bitmaps image.cbf display=variance colour_scheme=inverse_greyscale
"""
phil_scope = iotbx.phil.parse(
"""
binning = 1
.type = int(value_min=1)
brightness = 100
.type = float(value_min=0.0)
colour_scheme = *greyscale rainbow heatmap inverse_greyscale
.type = choice
projection = lab *image
.type = choice
padding = 4
.type = int(value_min=0)
imageset_index = None
.type = int
.multiple = True
.help = "The index/indices from an imageset to export. The first image of "
"the set is 1."
.expert_level=2
display = *image mean variance dispersion sigma_b \
sigma_s threshold global_threshold
.type = choice
nsigma_b = 6
.type = float(value_min=0)
nsigma_s = 3
.type = float(value_min=0)
global_threshold = 0
.type = float(value_min=0)
kernel_size = 3,3
.type = ints(size=2, value_min=1)
min_local = 2
.type = int
gain = 1
.type = float(value_min=0)
saturation = 0
.type = int
show_mask = False
.type = bool
png {
compress_level = 1
.type = int(value_min=0, value_max=9)
.help = "ZLIB compression level, a number between 0 and 9: 1 gives best "
"speed, 9 gives best compression, 0 gives no compression at all."
}
jpeg {
quality = 75
.type = int(value_min=1, value_max=95)
.help = "The image quality, on a scale from 1 (worst) to 95 (best)"
}
include scope dials.util.options.format_phil_scope
output {
prefix = "image"
.type = str
directory = None
.type = path
file = None
.type = str
.help = "Full name of the output file. Overrides 'prefix' and the default "
"file extension. Only makes sense if a single file is written."
format = jpeg *png tiff
.type = choice
}""",
process_includes=True,
)
colour_schemes = {"greyscale": 0, "rainbow": 1, "heatmap": 2, "inverse_greyscale": 3}
@show_mail_handle_errors()
def run(args=None):
    """Parse command-line arguments and export bitmaps for every imageset.

    :param args: optional list of command-line arguments (defaults to sys.argv)
    """
    parser = ArgumentParser(
        usage="dials.export_bitmaps [options] models.expt | image.cbf",
        phil=phil_scope,
        read_experiments=True,
        read_experiments_from_images=True,
        check_format=True,
        epilog=help_message,
    )
    params, _ = parser.parse_args(args, show_diff_phil=True)

    experiments = flatten_experiments(params.input.experiments)
    if not experiments:
        parser.print_help()
        exit(0)

    for imageset in experiments.imagesets():
        imageset_as_bitmaps(imageset, params)
def imageset_as_bitmaps(imageset, params):
    """Export the selected images of an imageset as bitmap files.

    :param imageset: the imageset to export images from
    :param params: the extracted phil parameters controlling appearance,
        image selection and output location
    :return: list of paths of the bitmap files written
    """
    brightness = params.brightness / 100
    # NOTE(review): placeholder vendortype passed to get_flex_image;
    # presumably only used for display heuristics — confirm.
    vendortype = "made up"
    # check that binning is a power of 2
    binning = params.binning
    if not (binning > 0 and ((binning & (binning - 1)) == 0)):
        raise Sorry("binning must be a power of 2")
    output_dir = params.output.directory
    if output_dir is None:
        output_dir = "."
    elif not os.path.exists(output_dir):
        os.makedirs(output_dir)
    output_files = []
    detector = imageset.get_detector()
    # Furnish detector with 2D projection axes
    detector.projected_2d = get_detector_projection_2d_axes(detector)
    detector.projection = params.projection
    # The saturation default is taken from the first panel's trusted range.
    panel = detector[0]
    scan = imageset.get_scan()
    # XXX is this inclusive or exclusive?
    saturation = panel.get_trusted_range()[1]
    if params.saturation:
        saturation = params.saturation
    if scan is not None and not scan.is_still() and not params.imageset_index:
        start, end = scan.get_image_range()
    else:
        start, end = 1, len(imageset)
    # If the user specified an image range index, only export those
    image_range = [
        i
        for i in range(start, end + 1)
        if not params.imageset_index or i in params.imageset_index
    ]
    if params.output.file and len(image_range) != 1:
        sys.exit("output.file can only be specified if a single image is exported")
    for i_image in image_range:
        # get_raw_data/get_mask expect a zero-based offset from the range start.
        image = imageset.get_raw_data(i_image - start)
        mask = imageset.get_mask(i_image - start)
        if mask is None:
            mask = [p.get_trusted_range_mask(im) for im, p in zip(image, detector)]
        if params.show_mask:
            # NOTE(review): -2 appears to flag masked pixels for display —
            # confirm against the flex_image untrusted-pixel convention.
            for rd, m in zip(image, mask):
                rd.set_selected(~m, -2)
        # Optionally replace the raw image with a spot-finding intermediate
        # (mean/variance/dispersion map or a threshold mask).
        image = image_filter(
            image,
            mask,
            display=params.display,
            gain_value=params.gain,
            nsigma_b=params.nsigma_b,
            nsigma_s=params.nsigma_s,
            global_threshold=params.global_threshold,
            min_local=params.min_local,
            kernel_size=params.kernel_size,
        )
        show_untrusted = params.show_mask
        if len(detector) > 1:
            # FIXME This doesn't work properly, as flex_image.size2() is incorrect
            # also binning doesn't work
            flex_image = get_flex_image_multipanel(
                brightness=brightness,
                detector=detector,
                image_data=image,
                binning=binning,
                beam=imageset.get_beam(),
                show_untrusted=show_untrusted,
            )
        else:
            flex_image = get_flex_image(
                brightness=brightness,
                data=image[0],
                binning=binning,
                saturation=saturation,
                vendortype=vendortype,
                show_untrusted=show_untrusted,
            )
        flex_image.setWindow(0, 0, 1)
        flex_image.adjust(color_scheme=colour_schemes.get(params.colour_scheme))
        # now export as a bitmap
        flex_image.prep_string()
        # XXX is size//binning safe here?
        pil_img = Image.frombytes(
            "RGB", (flex_image.ex_size2(), flex_image.ex_size1()), flex_image.as_bytes()
        )
        if params.output.file:
            path = os.path.join(output_dir, params.output.file)
        else:
            path = os.path.join(
                output_dir,
                "{prefix}{image:0{padding}}.{format}".format(
                    image=i_image,
                    prefix=params.output.prefix,
                    padding=params.padding,
                    format=params.output.format,
                ),
            )
        print(f"Exporting {path}")
        output_files.append(path)
        with open(path, "wb") as tmp_stream:
            # PIL silently ignores save options that do not apply to the
            # chosen format, so png/jpeg options can both be passed here.
            pil_img.save(
                tmp_stream,
                format=params.output.format,
                compress_level=params.png.compress_level,
                quality=params.jpeg.quality,
            )
    return output_files
def image_filter(
    raw_data,
    mask,
    display,
    gain_value,
    nsigma_b,
    nsigma_s,
    global_threshold,
    min_local,
    kernel_size,
):
    """Optionally replace raw image data with a spot-finding intermediate.

    :param raw_data: list of per-panel flex arrays of raw counts
    :param mask: list of per-panel boolean masks of trusted pixels
    :param display: which image to return; "image" returns raw_data unchanged
    :param gain_value: constant gain applied to every pixel (must be > 0)
    :param nsigma_b: dispersion threshold parameter sigma_b
    :param nsigma_s: dispersion threshold parameter sigma_s
    :param global_threshold: global threshold of the dispersion algorithm
    :param min_local: minimum number of local pixels for thresholding
    :param kernel_size: 2-sequence kernel size for the local statistics
    :return: list of per-panel flex.double arrays for display
    """
    if display == "image":
        return raw_data

    assert gain_value > 0
    gain_map = [flex.double(rd.accessor(), gain_value) for rd in raw_data]
    # One debug object per panel exposes all intermediate images of the
    # dispersion spot-finding algorithm.
    kabsch_debug_list = [
        DispersionThresholdDebug(
            data.as_double(),
            mask[i_panel],
            gain_map[i_panel],
            kernel_size,
            nsigma_b,
            nsigma_s,
            global_threshold,
            min_local,
        )
        for i_panel, data in enumerate(raw_data)
    ]

    if display == "mean":
        return [kabsch.mean() for kabsch in kabsch_debug_list]
    if display == "variance":
        return [kabsch.variance() for kabsch in kabsch_debug_list]
    if display == "dispersion":
        return [kabsch.index_of_dispersion() for kabsch in kabsch_debug_list]

    # The remaining display modes all return a boolean mask; the only
    # difference between them is which mask accessor is queried, so dispatch
    # on the method name instead of repeating the conversion for each branch.
    mask_method = {
        "sigma_b": "cv_mask",
        "sigma_s": "value_mask",
        "global_threshold": "global_mask",
        "threshold": "final_mask",
    }[display]
    display_data = []
    for kabsch in kabsch_debug_list:
        # Convert the boolean mask to doubles, restoring the 2D grid shape.
        as_double = getattr(kabsch, mask_method)().as_1d().as_double()
        as_double.reshape(kabsch.index_of_dispersion().accessor())
        display_data.append(as_double)
    return display_data
if __name__ == "__main__":
    # Entry point when invoked directly rather than via the dials dispatcher.
    run()
| bsd-3-clause | d906256aaa0051ae98491e7035c95672 | 30.487421 | 88 | 0.613602 | 3.508409 | false | false | false | false |
dials/dials | src/dials/model/experiment/profile.py | 1 | 2922 | from __future__ import annotations
class ProfileModelExt:
    """
    The definition of the profile model extension interface.

    Every method here is a no-op stub (returning None or doing nothing);
    concrete profile model implementations are expected to override them.
    """

    @staticmethod
    def create(
        params, reflections, crystal, beam, detector, goniometer=None, scan=None
    ):
        """
        Create the profile model from data.

        :param params: The phil parameters
        :param reflections: The reflections
        :param crystal: The crystal model
        :param beam: The beam model
        :param detector: The detector model
        :param goniometer: The goniometer model
        :param scan: The scan model
        :return: An instance of the profile model (None in this stub)
        """
        return None

    def predict_reflections(
        self, imageset, crystal, beam, detector, goniometer=None, scan=None, **kwargs
    ):
        """
        Given an experiment, predict the reflections.  Stub: does nothing.

        :param imageset: The imageset
        :param crystal: The crystal model
        :param beam: The beam model
        :param detector: The detector model
        :param goniometer: The goniometer model
        :param scan: The scan model
        """
        pass

    def compute_partiality(
        self, reflections, crystal, beam, detector, goniometer=None, scan=None, **kwargs
    ):
        """
        Given an experiment and list of reflections, compute the partiality of
        the reflections.  Stub: does nothing.

        :param reflections: The reflection table
        :param crystal: The crystal model
        :param beam: The beam model
        :param detector: The detector model
        :param goniometer: The goniometer model
        :param scan: The scan model
        """
        pass

    def compute_bbox(
        self, reflections, crystal, beam, detector, goniometer=None, scan=None, **kwargs
    ):
        """Given an experiment and list of reflections, compute the
        bounding box of the reflections on the detector (and image frames).
        Stub: does nothing.

        :param reflections: The reflection table
        :param crystal: The crystal model
        :param beam: The beam model
        :param detector: The detector model
        :param goniometer: The goniometer model
        :param scan: The scan model
        """
        pass

    def compute_mask(
        self, reflections, crystal, beam, detector, goniometer=None, scan=None, **kwargs
    ):
        """
        Given an experiment and list of reflections, compute the
        foreground/background mask of the reflections.  Stub: does nothing.

        :param reflections: The reflection table
        :param crystal: The crystal model
        :param beam: The beam model
        :param detector: The detector model
        :param goniometer: The goniometer model
        :param scan: The scan model
        """
        pass

    def fitting_class(self):
        """
        Get the profile fitting algorithm associated with this profile model.

        :return: The profile fitting class (None in this stub)
        """
        return None
| bsd-3-clause | 82caa9878e814e0762e4bfac9d61ac63 | 29.757895 | 88 | 0.619781 | 4.297059 | false | false | false | false |
dials/dials | src/dials/algorithms/scaling/scale_and_filter.py | 1 | 19957 | """Definitions of functions and classes for scaling and filtering algorithm."""
from __future__ import annotations
import math
from libtbx import phil
from scitbx.array_family import flex
from dials.report.plots import d_star_sq_to_d_ticks
phil_scope = phil.parse(
"""
filtering {
method = None deltacchalf
.type = choice
.help = "Choice of whether to do any filtering cycles, default None."
deltacchalf {
max_cycles = 6
.type = int(value_min=1)
max_percent_removed = 10
.type = float
min_completeness = None
.type = float(value_min=0, value_max=100)
.help = "Desired minimum completeness, as a percentage (0 - 100)."
mode = *dataset image_group
.type = choice
.help = "Perform analysis on whole datasets or batch groups"
group_size = 10
.type = int(value_min=1)
.help = "The number of images to group together when calculating delta"
"cchalf in image_group mode"
stdcutoff = 4.0
.type = float
.help = "Datasets with a ΔCC½ below (mean - stdcutoff*std) are removed"
}
output {
scale_and_filter_results = "scale_and_filter_results.json"
.type = str
.help = "Filename for output json of scale and filter results."
}
}
"""
)
def ordinal(n):
    """Return n with its English ordinal suffix, e.g. 1 -> "1st", 12 -> "12th".

    The previous one-liner used true division (``n / 10``) — a Python 2 to 3
    porting bug — so the teens were not detected and e.g. 11 became "11st".
    Floor division is required for the teens check.
    """
    if n // 10 % 10 == 1 or n % 10 > 3:
        # Teens (11-13) and last digits 0, 4-9 always take "th".
        suffix = "th"
    else:
        suffix = {0: "th", 1: "st", 2: "nd", 3: "rd"}[n % 10]
    return "%d%s" % (n, suffix)
def log_cycle_results(results, scaling_script, filter_script):
    """Log results from the scripts for this cycle and add to the results.

    :param results: an AnalysisResults object accumulating per-cycle data
    :param scaling_script: the scaling script; its merging_statistics_result
        and scaled_miller_array are read here
    :param filter_script: the filtering script; its results_summary dict is
        read here
    :return: the updated AnalysisResults object
    """
    cycle_results = {"merging_stats": scaling_script.merging_statistics_result}
    # On the first cycle, record the starting reflection count so that the
    # cumulative percentage removed can be computed on later cycles.
    if not results.get_cycle_results():
        results.initial_n_reflections = scaling_script.scaled_miller_array.size()
    cycle_results["delta_cc_half_values"] = filter_script.results_summary[
        "per_dataset_delta_cc_half_values"
    ]["delta_cc_half_values"]
    cycle_results["mean_cc_half"] = filter_script.results_summary["mean_cc_half"]
    removal_summary = filter_script.results_summary["dataset_removal"]
    # image_ranges_removed is only present when filtering in image_group mode.
    if removal_summary["mode"] == "image_group":
        cycle_results["image_ranges_removed"] = removal_summary["image_ranges_removed"]
    cycle_results["removed_datasets"] = removal_summary["experiments_fully_removed"]
    cycle_results["removed_ids"] = removal_summary["experiment_ids_fully_removed"]
    cycle_results["n_removed"] = filter_script.results_summary["dataset_removal"][
        "n_reflections_removed"
    ]
    # Cumulative count: reflections removed in all previous cycles plus this one.
    n_removed = (
        sum(res["n_removed"] for res in results.get_cycle_results())
        + cycle_results["n_removed"]
    )
    percent_removed = n_removed / results.initial_n_reflections * 100
    cycle_results["cumul_percent_removed"] = percent_removed
    results.add_cycle_results(cycle_results)
    return results
class AnalysisResults:
    """Class to store results from scaling and filtering."""

    def __init__(self):
        self.termination_reason = None
        self.cycle_results = []
        self.initial_n_reflections = None
        self.initial_expids_and_image_ranges = None
        self.expids_and_image_ranges = None
        self.final_stats = None

    def add_cycle_results(self, results_dict):
        """Add the results dict from a scale and filter cycle.

        The "merging_stats" entry is converted from a merging statistics
        object into a plain dict so that the results are JSON-serialisable.
        """
        merging_stats_dict = self._parse_merging_stats(results_dict["merging_stats"])
        results_dict["merging_stats"] = merging_stats_dict
        self.cycle_results.append(results_dict)

    @staticmethod
    def _parse_merging_stats(merging_stats_obj):
        """Extract per-bin and overall values into a plain dict."""
        merging_stats = {}
        overall = merging_stats_obj.overall
        merging_stats["ccs"] = [b.cc_one_half for b in merging_stats_obj.bins]
        merging_stats["rmerge"] = [b.r_merge for b in merging_stats_obj.bins]
        merging_stats["rpim"] = [b.r_pim for b in merging_stats_obj.bins]
        merging_stats["d_min"] = [b.d_min for b in merging_stats_obj.bins]
        merging_stats["overall"] = {
            "cc_one_half": overall.cc_one_half,
            "r_merge": overall.r_merge,
            "r_pim": overall.r_pim,
            "i_over_sigma_mean": overall.i_over_sigma_mean,
            # completeness is stored as a percentage (0 - 100)
            "completeness": 100 * overall.completeness,
            "n_obs": overall.n_obs,
        }
        return merging_stats

    def get_cycle_results(self):
        """Get the results from all cycles."""
        return self.cycle_results

    def get_last_cycle_results(self):
        """Get the results from the latest recorded cycle."""
        return self.cycle_results[-1]

    def add_final_stats(self, final_stats):
        """Add additional final merging stats from final rescale."""
        self.final_stats = self._parse_merging_stats(final_stats)

    def get_merging_stats(self):
        """Get all merging stats, including additional final stats if present."""
        stats = [res["merging_stats"] for res in self.cycle_results]
        if self.final_stats:
            stats += [self.final_stats]
        return stats

    def finish(self, termination_reason):
        """Set the termination reason (must be one of the known values)."""
        assert termination_reason in [
            "no_more_removed",
            "max_cycles",
            "max_percent_removed",
            "below_completeness_limit",
        ]
        self.termination_reason = termination_reason

    def to_dict(self):
        """Return the stored data as a dictionary."""
        return {
            "termination_reason": self.termination_reason,
            "initial_n_reflections": self.initial_n_reflections,
            "initial_expids_and_image_ranges": self.initial_expids_and_image_ranges,
            "expids_and_image_ranges": self.expids_and_image_ranges,
            # cycles are keyed 1-based by cycle number
            "cycle_results": {i + 1: val for i, val in enumerate(self.cycle_results)},
            "final_stats": self.final_stats,
        }

    @staticmethod
    def from_dict(dictionary):
        """Configure the class from its dictionary form (inverse of to_dict).

        JSON round-tripping turns the integer cycle keys into strings, hence
        the int/str conversion when rebuilding the ordered cycle list.
        """
        results = AnalysisResults()
        results.termination_reason = dictionary["termination_reason"]
        results.initial_expids_and_image_ranges = dictionary[
            "initial_expids_and_image_ranges"
        ]
        results.expids_and_image_ranges = dictionary["expids_and_image_ranges"]
        results.cycle_results = [
            dictionary["cycle_results"][str(key)]
            for key in sorted(int(k) for k in dictionary["cycle_results"].keys())
        ]
        results.initial_n_reflections = dictionary["initial_n_reflections"]
        results.final_stats = dictionary["final_stats"]
        return results

    def __str__(self):
        """Make summary of results."""
        msg = "\nSummary of data removed:\n"
        for i, res in enumerate(self.get_cycle_results()):
            msg += f"Cycle number: {i + 1}\n"
            if "image_ranges_removed" in res:
                if res["image_ranges_removed"]:
                    removed = "\n    ".join(
                        str(t[0]) + ", dataset " + str(t[1])
                        for t in res["image_ranges_removed"]
                    )
                    msg += f"  Removed image ranges: \n    {removed}"
            else:
                if res["removed_ids"]:
                    msg += f"  Removed datasets: {res['removed_ids']}\n"
            msg += (
                "  cumulative %% of reflections removed: %.3f\n"
                % res["cumul_percent_removed"]
            )
        return msg
# Palette cycled over when colouring per-cycle traces in the plots below;
# the plotting functions override the final entry with black.
color_list = [
    "#F44336",
    "#FFC107",
    "#FFEB3B",
    "#8BC34A",
    "#03A9F4",
    "#3F51B5",
    "#607D8B",
]
def make_scaling_filtering_plots(data):
    """Assemble all plotting dicts for the scale-and-filter report."""
    plots = make_filtering_merging_stats_plots(data["merging_stats"])
    plots.update(make_histogram_plots(data["cycle_results"]))
    if data["mode"] == "image_group":
        reduction = make_reduction_plots(
            data["initial_expids_and_image_ranges"], data["expids_and_image_ranges"]
        )
        plots.update(reduction)
    return plots
def make_filtering_merging_stats_plots(merging_stats):
    """Generate plotting dicts for merging statistics.

    :param merging_stats: list of parsed merging-stats dicts, one per scaling
        cycle (see AnalysisResults._parse_merging_stats for the layout)
    :return: dict of plotly-style plot definitions
    """
    n_datasets = len(merging_stats)
    # Repeat the palette as needed to cover all cycles; the final cycle is
    # drawn in black to stand out.
    colors = [
        (color_list * int(math.ceil(n_datasets / len(color_list))))[i]
        for i in range(n_datasets)
    ]
    colors[-1] = "k"
    legends = ["initial scale"]
    if len(merging_stats) > 2:
        legends += [
            ordinal(i) + " rescale" for i in range(1, len(merging_stats) - 1)
        ] + ["final rescale"]
    elif len(merging_stats) == 2:
        legends += ["final rescale"]
    overall_ccs = [m["overall"]["cc_one_half"] for m in merging_stats]
    overall_rpim = [m["overall"]["r_pim"] for m in merging_stats]
    overall_ioversigma = [m["overall"]["i_over_sigma_mean"] for m in merging_stats]
    overall_completeness = [m["overall"]["completeness"] for m in merging_stats]
    # first make overall plots
    d = {
        "cc_one_half_vs_cycle": {
            "data": [
                {
                    "y": overall_ccs,
                    "x": list(range(1, n_datasets + 1)),
                    "type": "scatter",
                    "mode": "lines",
                }
            ],
            "layout": {
                "title": "CC<sub>½</sub> vs cycle",
                "xaxis": {"title": "Cycle number"},
                "yaxis": {"title": "CC<sub>½</sub>"},
            },
        },
        "r_pim_vs_cycle": {
            "data": [
                {
                    "y": overall_rpim,
                    "x": list(range(1, n_datasets + 1)),
                    "type": "scatter",
                    "mode": "lines",
                }
            ],
            "layout": {
                "title": "R-pim vs cycle",
                "xaxis": {"title": "Cycle number"},
                "yaxis": {"title": "R-pim"},
            },
        },
        "i_over_sigma_vs_cycle": {
            "data": [
                {
                    "y": overall_ioversigma,
                    "x": list(range(1, n_datasets + 1)),
                    "type": "scatter",
                    "mode": "lines",
                }
            ],
            "layout": {
                "title": "<I/σ(I)> vs cycle",
                "xaxis": {"title": "Cycle number"},
                "yaxis": {"title": "<I/σ(I)>"},
            },
        },
        "completeness_vs_cycle": {
            "data": [
                {
                    "y": overall_completeness,
                    "x": list(range(1, n_datasets + 1)),
                    "type": "scatter",
                    "mode": "lines",
                }
            ],
            "layout": {
                "title": "Completeness vs cycle",
                "xaxis": {"title": "Cycle number"},
                "yaxis": {"title": "Completeness"},
            },
        },
    }
    # Now make the resolution-binned plots, seeded with the first cycle's data;
    # later cycles are appended as extra traces in the loop below.
    cc_one_half_bins = merging_stats[0]["ccs"]
    r_pim_bins = merging_stats[0]["rpim"]
    r_merge_bins = merging_stats[0]["rmerge"]
    # Plot against d* squared (1/d^2) so that bins are evenly spread.
    resolution = [1.0 / x**2 for x in merging_stats[0]["d_min"]]
    vals, txt = d_star_sq_to_d_ticks(resolution, 5)
    d.update(
        {
            "cc_one_half_filter": {
                "data": [
                    {
                        "x": resolution,  # d_star_sq
                        "y": cc_one_half_bins,
                        "type": "scatter",
                        "name": legends[0],
                        "mode": "lines",
                        "line": {"color": colors[0]},
                    }
                ],
                "layout": {
                    "title": "CC<sub>½</sub> vs resolution",
                    "xaxis": {
                        "title": "Resolution (Å)",
                        "tickvals": vals,
                        "ticktext": txt,
                    },
                    "yaxis": {"title": "CC<sub>½</sub>", "range": [0, 1]},
                },
            }
        }
    )
    d.update(
        {
            "r_pim_filter": {
                "data": [
                    {
                        "x": resolution,  # d_star_sq
                        "y": r_pim_bins,
                        "type": "scatter",
                        "name": legends[0],
                        "mode": "lines",
                        "line": {"color": colors[0]},
                    }
                ],
                "layout": {
                    "title": "R-pim vs resolution",
                    "xaxis": {
                        "title": "Resolution (Å)",
                        "tickvals": vals,
                        "ticktext": txt,
                    },
                    "yaxis": {
                        "title": "R-pim",
                        "range": [0, min(1.5, max(r_pim_bins))],
                    },
                },
            }
        }
    )
    d.update(
        {
            "r_merge_filter": {
                "data": [
                    {
                        "x": resolution,  # d_star_sq
                        "y": r_merge_bins,
                        "type": "scatter",
                        "name": legends[0],
                        "mode": "lines",
                        "line": {"color": colors[0]},
                    }
                ],
                "layout": {
                    "title": "R-merge vs resolution",
                    "xaxis": {
                        "title": "Resolution (Å)",
                        "tickvals": vals,
                        "ticktext": txt,
                    },
                    "yaxis": {
                        "title": "R-merge",
                        "range": [0, min(1.5, max(r_merge_bins))],
                    },
                },
            }
        }
    )
    # Append one trace per remaining cycle to each resolution-binned plot.
    for c, stats in enumerate(merging_stats[1:]):
        cc_one_half_bins = stats["ccs"]
        r_pim_bins = stats["rpim"]
        r_merge_bins = stats["rmerge"]
        resolution = [1.0 / x**2 for x in stats["d_min"]]
        d["cc_one_half_filter"]["data"].append(
            {
                "x": resolution,  # d_star_sq
                "y": cc_one_half_bins,
                "type": "scatter",
                "name": legends[c + 1],
                "line": {"color": colors[c + 1]},
            }
        )
        d["r_pim_filter"]["data"].append(
            {
                "x": resolution,  # d_star_sq
                "y": r_pim_bins,
                "type": "scatter",
                "name": legends[c + 1],
                "line": {"color": colors[c + 1]},
            }
        )
        d["r_merge_filter"]["data"].append(
            {
                "x": resolution,  # d_star_sq
                "y": r_merge_bins,
                "type": "scatter",
                "name": legends[c + 1],
                "line": {"color": colors[c + 1]},
            }
        )
    return d
def make_histogram_plots(cycle_results):
    """Make the ΔCC½ histogram plots, one per filtering cycle.

    :param cycle_results: list of per-cycle results dicts (see
        log_cycle_results for the keys read here)
    :return: dict of plotly-style plot definitions
    """
    delta_cc_half_lists = [res["delta_cc_half_values"] for res in cycle_results]
    if not delta_cc_half_lists:
        return {}
    n = len(delta_cc_half_lists)
    overall_mean_ccs = [res["mean_cc_half"] for res in cycle_results]
    d = {
        "mean_cc_one_half_vs_cycle": {
            "data": [
                {
                    "y": overall_mean_ccs,
                    "x": list(range(1, n + 1)),
                    "type": "scatter",
                    "mode": "lines",
                }
            ],
            "layout": {
                "title": "Resolution-averaged CC<sub>½</sub> (σ-τ) vs cycle",
                "xaxis": {"title": "Cycle number"},
                "yaxis": {"title": "Resolution-averaged CC<sub>½</sub> (σ-τ)"},
            },
        }
    }
    colors = [(color_list * int(math.ceil(n / len(color_list))))[i] for i in range(n)]
    if n == 1:
        legends = ["ΔCC<sub>½</sub> analysis"]
    else:
        legends = [ordinal(i) + " ΔCC<sub>½</sub> analysis" for i in range(1, n + 1)]
    # Number of groups/datasets rejected each cycle, used to colour the
    # lowest-ΔCC½ histogram bars black.
    if "image_ranges_removed" in cycle_results[0]:
        n_rej = [len(res["image_ranges_removed"]) for res in cycle_results]
    else:
        n_rej = [len(res["removed_datasets"]) for res in cycle_results]

    def _color_bar_charts(counts, index):
        # Colour the leftmost bars (lowest ΔCC½) black until the cumulative
        # count reaches the number rejected; remaining bars use the cycle
        # colour.
        n = 0
        bar_colors = []
        n_rej_this = n_rej[index]
        for count in counts:
            if n >= n_rej_this:
                bar_colors.append(colors[index])
            else:
                bar_colors.append("k")
            n += count
        return bar_colors

    def _add_new_histogram(d, hist, index):
        # Insert one histogram plot dict for cycle `index` into d.
        d.update(
            {
                f"scale_filter_histograms_{index}": {
                    "data": [
                        {
                            "x": list(hist.slot_centers()),
                            "y": list(hist.slots()),
                            "type": "bar",
                            "name": legends[index],
                            "marker": {"color": _color_bar_charts(hist.slots(), index)},
                        }
                    ],
                    "layout": {
                        "title": f"{legends[index]}",
                        "xaxis": {"title": "ΔCC<sub>½</sub>"},
                        "yaxis": {
                            "title": "Number of datasets/groups",
                            "range": [0, min(max(hist.slots()), 50)],
                        },
                        "bargap": 0,
                    },
                }
            }
        )

    for c, deltas in enumerate(delta_cc_half_lists):
        flex_deltas = flex.double(deltas)  # convert list to flex array
        # Skip degenerate cycles where all ΔCC½ values are identical — a
        # histogram of a single value is meaningless.
        if flex_deltas.all_eq(flex_deltas[0]):
            continue
        # Histogram of ΔCC½ values expressed as percentages.
        hist = flex.histogram(
            flex_deltas * 100,
            flex.min(flex_deltas) * 100,
            flex.max(flex_deltas) * 100,
            n_slots=40,
        )
        _add_new_histogram(d, hist, c)
    return d
def make_per_dataset_plot(delta_cchalf_i):
    """Make a line plot of ΔCC½ (as a percentage) per group.

    :param delta_cchalf_i: mapping of group identifier -> ΔCC½ (fractional)
    :return: dict containing a single plotly-style plot definition
    """
    groups = list(delta_cchalf_i.keys())
    percentages = [value * 100 for value in delta_cchalf_i.values()]
    return {
        "per_dataset_plot": {
            "data": [
                {
                    "y": percentages,
                    "x": groups,
                    "type": "scatter",
                    "mode": "lines",
                }
            ],
            "layout": {
                "title": "ΔCC<sub>½</sub> vs group",
                "xaxis": {"title": "Group number"},
                "yaxis": {"title": "ΔCC<sub>½</sub>"},
            },
        }
    }
def make_reduction_plots(initial_expids_and_image_ranges, expids_and_image_ranges):
    """Make a stacked bar chart of retained vs removed image ranges.

    :param initial_expids_and_image_ranges: list of (experiment id,
        (first, last)) tuples giving each experiment's original image range
    :param expids_and_image_ranges: list of (experiment id,
        [(first, last), ...]) tuples giving the image ranges retained after
        filtering (an experiment may retain several disjoint ranges)
    :return: dict containing a single plotly-style stacked-bar plot, where
        the retained and removed counts sum to the initial image count
    """
    x = list(range(len(initial_expids_and_image_ranges)))
    initial_n_images = []
    initial_expids = []
    for expid, (first, last) in initial_expids_and_image_ranges:
        initial_n_images.append(last - first + 1)
        initial_expids.append(expid)
    final_n_images = [0] * len(x)
    for expid, valid in expids_and_image_ranges:
        loc = initial_expids.index(expid)
        for first, last in valid:
            # Accumulate over all retained ranges. Plain assignment here would
            # keep only the last range when filtering leaves several disjoint
            # ranges, over-reporting the number of removed images.
            final_n_images[loc] += last - first + 1
    n_removed = [i - j for (i, j) in zip(initial_n_images, final_n_images)]
    d = {
        "reduction_plot": {
            "data": [
                {
                    "x": x,
                    "y": final_n_images,
                    "type": "bar",
                    "name": "final image ranges",
                },
                {"x": x, "y": n_removed, "type": "bar", "name": "removed image ranges"},
            ],
            "layout": {
                "title": "Image range plot",
                "xaxis": {"title": "Experiment number"},
                "yaxis": {"title": "Image number"},
                "bargap": 0,
                "barmode": "stack",
            },
        }
    }
    return d
| bsd-3-clause | a971f491670eb3f2691d62390bafa03f | 33.961404 | 88 | 0.464723 | 3.877797 | false | false | false | false |
douban/dpark | dpark/utils/hotcounter.py | 1 | 1593 | from __future__ import absolute_import
from __future__ import print_function
import operator
import six
from six.moves import range
class HotCounter(object):
    """Approximate "heavy hitters" counter.

    Tracks the most frequent values seen without keeping exact counts of
    every distinct value: observations accumulate in ``self.updates`` and are
    periodically folded into ``self.total`` by ``_merge``, which discards
    values seen only once since the last merge and truncates the running
    totals to a bounded size.  Counts are therefore lower bounds, accurate
    for genuinely frequent values.
    """

    def __init__(self, vs=None, limit=20):
        # vs: optional iterable of initial observations.
        # limit: the number of top entries the counter is sized to report.
        if vs is None:
            vs = []
        self.limit = limit
        self.total = {}  # merged (approximate) counts
        self.updates = {}  # counts accumulated since the last merge
        self._max = 0  # largest count currently pending in self.updates
        for v in vs:
            self.add(v)

    def add(self, v):
        """Record one observation of value v."""
        c = self.updates.get(v, 0) + 1
        self.updates[v] = c
        if c > self._max:
            self._max = c
        # Merge only once the pending buffer is large and some value is
        # clearly repeating, to amortise the cost of _merge.
        if len(self.updates) > self.limit * 5 and self._max > 5:
            self._merge()

    def _merge(self):
        """Fold pending updates into the totals (lossy)."""
        for k, c in six.iteritems(self.updates):
            # Values seen only once since the last merge are dropped; this
            # bounds memory at the cost of undercounting rare values.
            if c > 1:
                self.total[k] = self.total.get(k, 0) + c
        self._max = 0
        self.updates = {}
        if len(self.total) > self.limit * 5:
            # Truncate the totals to the current top entries to bound memory.
            self.total = dict(self.top(self.limit * 3))

    def update(self, o):
        """Merge another counter's totals into this one.

        Accepts a HotCounter (whose pending updates are merged first) or,
        judging by the duck-typed access, any object with a ``total`` dict.
        """
        self._merge()
        if isinstance(o, HotCounter):
            o._merge()
        for k, c in six.iteritems(o.total):
            self.total[k] = self.total.get(k, 0) + c

    def top(self, limit):
        """Return the `limit` most frequent (value, count) pairs.

        NOTE(review): counts still pending in self.updates are not included
        until a merge happens — presumably acceptable for this approximate
        counter, but call update()/_merge() first if that matters.
        """
        return sorted(list(self.total.items()), key=operator.itemgetter(1), reverse=True)[:limit]
def test():
    """Ad-hoc smoke test: merge ten randomly-filled counters, print the top 20."""
    import math
    import random

    merged = HotCounter()
    for _ in range(10):
        partial = HotCounter()
        for _ in range(10000):
            partial.add(int(math.sqrt(random.randint(0, 1000000))))
        merged.update(partial)
    for value, count in merged.top(20):
        print(value, count)


if __name__ == '__main__':
    test()
| bsd-3-clause | 508b659df54d9596249061097ade8617 | 23.507692 | 97 | 0.511613 | 3.425806 | false | false | false | false |
dials/dials | src/dials/algorithms/refinement/parameterisation/crystal_parameters.py | 1 | 7778 | from __future__ import annotations
import logging
logger = logging.getLogger(__name__)
from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.model_parameters import (
ModelParameterisation,
Parameter,
)
from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose
class CrystalOrientationMixin:
    """Mix-in class defining some functionality unique to crystal orientation
    parameterisations that can be shared by static and scan-varying versions"""

    @staticmethod
    def _build_p_list(parameter_type=Parameter):
        """Build the list of orientation parameters.

        The ``parameter_type`` callback selects between versions of the
        Parameter class.  Returns one rotation angle (in mrad) about each
        axis of the phi-axis frame, in a fixed, maintained order.
        """
        axes = (matrix.col((1, 0, 0)), matrix.col((0, 1, 0)), matrix.col((0, 0, 1)))
        names = ("Phi1", "Phi2", "Phi3")
        return [
            parameter_type(0.0, axis, "angle (mrad)", name)
            for axis, name in zip(axes, names)
        ]
class CrystalOrientationParameterisation(
    ModelParameterisation, CrystalOrientationMixin
):
    """A parameterisation of the orientation of a Crystal model.

    The Crystal orientation matrix U is parameterised by three Tait-Bryan angles
    expressed in mrad"""

    def __init__(self, crystal, experiment_ids=None):
        """Initialise the CrystalOrientationParameterisation object

        Args:
            crystal: A dxtbx Crystal object to be parameterised.
            experiment_ids (list): The experiment IDs affected by this
                parameterisation. Defaults to None, which is replaced by [0].
        """
        # The state is an orientation matrix [U].  The initial state [U0] is
        # a snapshot of the crystal orientation at initialisation time, and
        # future states are composed by rotations about the axes of the
        # phi-axis frame by Tait-Bryan angles:
        #
        #     [U] = [Phi3][Phi2][Phi1][U0]
        if experiment_ids is None:
            experiment_ids = [0]
        initial_U = matrix.sqr(crystal.get_U())
        ModelParameterisation.__init__(
            self,
            crystal,
            initial_U,
            self._build_p_list(),
            experiment_ids=experiment_ids,
        )
        # Calculate the current state and all derivatives
        self.compose()

    def compose(self):
        """Compose [U] = [Phi3][Phi2][Phi1][U0] and store its derivatives."""
        phi1, phi2, phi3 = self._param
        helper = CrystalOrientationCompose(
            self._initial_state,
            phi1.value,
            phi1.axis,
            phi2.value,
            phi2.axis,
            phi3.value,
            phi3.axis,
        )
        # Set the new state on the model and keep dU/dphi_i for refinement
        self._model.set_U(helper.U())
        self._dstate_dp = [helper.dU_dphi1(), helper.dU_dphi2(), helper.dU_dphi3()]

    def get_state(self):
        """Return the current U matrix (single crystal only, so no
        multi_state_elt argument is allowed)."""
        return matrix.sqr(self._model.get_U())
class CrystalUnitCellMixin:
    """Mix-in class defining some functionality unique to crystal unit cell
    parameterisations that can be shared by static and scan-varying versions"""

    def _build_p_list(self, crystal, parameter_type=Parameter):
        """Build the list of parameters, using the parameter_type callback to
        select between versions of the Parameter class"""

        # Set up symmetrizing object
        S = symmetrize_reduce_enlarge(crystal.get_space_group())
        S.set_orientation(orientation=crystal.get_B())
        X = S.forward_independent_parameters()

        # Set up the independent parameters, with a change of scale (x 1e5,
        # so refinement shifts are on a scale comparable to other parameters)
        p_list = [
            parameter_type(e * 1.0e5, name="g_param_%d" % i) for i, e in enumerate(X)
        ]

        return p_list

    def _compose_core(self, raw_vals):
        """Compose a new B matrix and its gradients from parameter values.

        :param raw_vals: parameter values on the rescaled (x 1e5) scale
        :returns: tuple (newB, dB_dval) where dB_dval is the list of
            gradients of B with respect to each parameter, rescaled to match
        :raises RuntimeError: if the orientation cannot be composed from the
            supplied values (e.g. experiments do not match the reflections)
        """

        # obtain metrical matrix parameters on natural scale
        vals = [v * 1.0e-5 for v in raw_vals]

        # set parameter values in the symmetrizing object and obtain new B
        S = symmetrize_reduce_enlarge(self._model.get_space_group())
        S.set_orientation(orientation=self._model.get_B())
        S.forward_independent_parameters()  # Set Bconverter as side-effect
        try:
            newB = matrix.sqr(S.backward_orientation(vals).reciprocal_matrix())
        except RuntimeError as e:
            # write original error to debug log
            logger.debug("Unable to compose the crystal model")
            logger.debug("Original error message: %s", str(e))
            logger.debug("Failing now.")
            raise RuntimeError(
                "Unable to compose the crystal model. Please check that the "
                "experiments match the indexing of the reflections."
            )

        # returns the independent parameters given the set_orientation() B matrix
        # used here for side effects
        S.forward_independent_parameters()

        # get the derivatives of state wrt metrical matrix parameters on the
        # adjusted scale
        dB_dval = [matrix.sqr(g) * 1.0e-5 for g in S.forward_gradients()]

        return newB, dB_dval
class CrystalUnitCellParameterisation(ModelParameterisation, CrystalUnitCellMixin):
    """A parameterisation for the unit cell of a Crystal model.

    The Crystal reciprocal space orthogonalisation matrix B is parameterised
    using up to 6 metrical matrix elements, rescaled by a multiplicative factor.
    """

    def __init__(self, crystal, experiment_ids=None):
        """Initialise the CrystalUnitCellParameterisation object

        Args:
            crystal: A dxtbx Crystal object to be parameterised.
            experiment_ids (list): The experiment IDs affected by this
                parameterisation. Defaults to None, which is replaced by [0].
        """
        # The state is the reciprocal space orthogonalisation matrix 'B'.
        # A new B and its derivatives can be composed from the 6 unit cell
        # parameter values alone, so the initial state is irrelevant here
        # and is set to None; likewise the parameters carry no axes.
        if experiment_ids is None:
            experiment_ids = [0]
        ModelParameterisation.__init__(
            self,
            crystal,
            None,  # istate: unused by this parameterisation
            self._build_p_list(crystal),
            experiment_ids=experiment_ids,
        )
        # Calculate the current state and all derivatives
        self.compose()

    def compose(self):
        """Compose a new B matrix from the current parameter values and set
        it on the crystal model, storing the derivatives."""
        raw_vals = [p.value for p in self._param]
        new_B, self._dstate_dp = self._compose_core(raw_vals)
        self._model.set_B(new_B)

    def get_state(self):
        """Return the current B matrix (single crystal only, so no
        multi_state_elt argument is allowed)."""
        return matrix.sqr(self._model.get_B())

    def set_state_uncertainties(self, var_cov, multi_state_elt=None):
        """Pass the variance-covariance matrix of the state through to the
        crystal model."""
        self._model.set_B_covariance(var_cov)
| bsd-3-clause | 580b013adde02d67bc6f6329af531187 | 33.878924 | 85 | 0.647339 | 3.968367 | false | false | false | false |
dials/dials | tests/algorithms/image/connected_components/test_connected_components.py | 1 | 6312 | from __future__ import annotations
class Test2d:
    """Exercise LabelImageStack2d on a stack of random masked images.

    In the 2D variant, connected-component labels join pixels within each
    image slice only — there is no connectivity between successive images.
    """

    def setup_class(self):
        # NOTE(review): pytest invokes setup_class with the class object;
        # the parameter is named `self` here but receives the class.
        from dials.algorithms.image.connected_components import LabelImageStack2d

        self.size = (500, 500)
        self.label_images = LabelImageStack2d(self.size)

        from scitbx.array_family import flex

        # Build 10 random Gaussian images (mean 100, sigma 10) with a sparse
        # random mask (~10% of pixels set).
        self.data_list = []
        self.mask_list = []
        for i in range(10):
            data = flex.random_int_gaussian_distribution(
                self.size[0] * self.size[1], 100, 10
            )
            data.reshape(flex.grid(self.size))
            mask = flex.random_bool(self.size[0] * self.size[1], 0.1)
            mask.reshape(flex.grid(self.size))
            self.data_list.append(data)
            self.mask_list.append(mask)

        for i in range(10):
            self.label_images.add_image(self.data_list[i], self.mask_list[i])

        # Flat, parallel output arrays: one entry per masked pixel.
        self.labels = self.label_images.labels()
        self.coords = self.label_images.coords()
        self.values = list(self.label_images.values())

        assert len(self.labels) > 0
        assert len(self.labels) == len(self.coords)
        assert len(self.labels) == len(self.values)

    def test_coords_are_valid(self):
        """Coordinates must appear in (image, slow, fast) raster order."""
        # Ensure that the values are all ok and in the right order
        vi = 0
        for k in range(10):
            ind = 0
            for j in range(self.size[0]):
                for i in range(self.size[1]):
                    m = self.mask_list[k][ind]
                    if m:
                        c1 = (k, j, i)
                        c2 = self.coords[vi]
                        vi += 1
                        assert c1 == c2
                    ind += 1

    def test_values_are_valid(self):
        """Pixel values must match the input data, in raster order."""
        # Ensure that the values are all ok and in the right order
        vi = 0
        for k in range(10):
            for d, m in zip(self.data_list[k], self.mask_list[k]):
                if m:
                    v1 = d
                    v2 = self.values[vi]
                    vi += 1
                    assert v1 == v2

    def test_labels_are_valid(self):
        """Adjacent masked pixels within an image share a label; the same
        pixel position in consecutive images must NOT share a label (2D
        labelling has no inter-image connectivity)."""
        from scitbx.array_family import flex

        # Create a map of labels
        label_map = flex.int(flex.grid(10, self.size[0], self.size[1]))
        for c, l in zip(self.coords, self.labels):
            assert c[0] >= 0 and c[0] < 10
            assert c[1] >= 0 and c[1] < self.size[0]
            assert c[2] >= 0 and c[2] < self.size[1]
            label_map[c] = l

        # Ensure all labels are correct
        vi = 0
        for k in range(10):
            for j in range(self.size[0]):
                for i in range(self.size[1]):
                    if self.mask_list[k][j, i]:
                        l1 = self.labels[vi]
                        if k > 0 and self.mask_list[k - 1][j, i]:
                            # different images: labels must differ in 2D
                            l2 = label_map[k - 1, j, i]
                            assert l2 != l1
                        if j > 0 and self.mask_list[k][j - 1, i]:
                            l2 = label_map[k, j - 1, i]
                            assert l2 == l1
                        if i > 0 and self.mask_list[k][j, i - 1]:
                            l2 = label_map[k, j, i - 1]
                            assert l2 == l1
                        vi += 1
class Test3d:
    """Exercise LabelImageStack3d on a stack of random masked images.

    In the 3D variant, connected-component labels extend ACROSS the image
    stack: the same masked pixel position in consecutive images shares a
    label, unlike the 2D case.
    """

    def setup_class(self):
        # NOTE(review): pytest invokes setup_class with the class object;
        # the parameter is named `self` here but receives the class.
        from dials.algorithms.image.connected_components import LabelImageStack3d

        self.size = (500, 500)
        self.label_images = LabelImageStack3d(self.size)

        from scitbx.array_family import flex

        # Build 10 random Gaussian images (mean 100, sigma 10) with a sparse
        # random mask (~10% of pixels set).
        self.data_list = []
        self.mask_list = []
        for i in range(10):
            data = flex.random_int_gaussian_distribution(
                self.size[0] * self.size[1], 100, 10
            )
            data.reshape(flex.grid(self.size))
            mask = flex.random_bool(self.size[0] * self.size[1], 0.1)
            mask.reshape(flex.grid(self.size))
            self.data_list.append(data)
            self.mask_list.append(mask)

        for i in range(10):
            self.label_images.add_image(self.data_list[i], self.mask_list[i])

        # Flat, parallel output arrays: one entry per masked pixel.
        self.labels = self.label_images.labels()
        self.coords = self.label_images.coords()
        self.values = list(self.label_images.values())

        assert len(self.labels) > 0
        assert len(self.labels) == len(self.coords)
        assert len(self.labels) == len(self.values)

    def test_coords_are_valid(self):
        """Coordinates must appear in (image, slow, fast) raster order."""
        # Ensure that the values are all ok and in the right order
        vi = 0
        for k in range(10):
            ind = 0
            for j in range(self.size[0]):
                for i in range(self.size[1]):
                    m = self.mask_list[k][ind]
                    if m:
                        c1 = (k, j, i)
                        c2 = self.coords[vi]
                        vi += 1
                        assert c1 == c2
                    ind += 1

    def test_values_are_valid(self):
        """Pixel values must match the input data, in raster order."""
        # Ensure that the values are all ok and in the right order
        vi = 0
        for k in range(10):
            for d, m in zip(self.data_list[k], self.mask_list[k]):
                if m:
                    v1 = d
                    v2 = self.values[vi]
                    vi += 1
                    assert v1 == v2

    def test_labels_are_valid(self):
        """Adjacent masked pixels — including the same position in the
        previous image — must share a label (3D connectivity)."""
        from scitbx.array_family import flex

        # Create a map of labels
        label_map = flex.int(flex.grid(10, self.size[0], self.size[1]))
        for c, l in zip(self.coords, self.labels):
            label_map[c] = l

        # Ensure all labels are correct
        vi = 0
        for k in range(10):
            for j in range(self.size[0]):
                for i in range(self.size[1]):
                    if self.mask_list[k][j, i]:
                        l1 = self.labels[vi]
                        if k > 0 and self.mask_list[k - 1][j, i]:
                            # same position in the previous image: connected
                            l2 = label_map[k - 1, j, i]
                            assert l2 == l1
                        if j > 0 and self.mask_list[k][j - 1, i]:
                            l2 = label_map[k, j - 1, i]
                            assert l2 == l1
                        if i > 0 and self.mask_list[k][j, i - 1]:
                            l2 = label_map[k, j, i - 1]
                            assert l2 == l1
                        vi += 1
| bsd-3-clause | 1179d2d7a1bf33ffd0120ac8e9d71f1a | 34.863636 | 81 | 0.460551 | 3.752675 | false | false | false | false |
dials/dials | src/dials/command_line/slice_sequence.py | 1 | 8936 | from __future__ import annotations
from os.path import basename, splitext
from dxtbx.model.experiment_list import ExperimentList
import dials.util
from dials.algorithms.refinement.refinement_helpers import calculate_frame_numbers
from dials.array_family import flex
from dials.util import Sorry
from dials.util.multi_dataset_handling import generate_experiment_identifiers
from dials.util.slice import slice_experiments, slice_reflections
help_message = """
Slice a sequence to produce a smaller sequence within the bounds of the original. If
experiments or experiments are provided, modify the scan objects within these. If
reflections are provided, remove reflections outside the provided image ranges.
Each image_range parameter refers to a single experiment ID, counting up from
zero. Any reflections with experiment ID not matched by a image_range parameter
are removed.
Examples::
dials.slice_sequence models.expt observations.refl "image_range=1 20"
dials.slice_sequence models.expt "image_range=1 20"
# two experiments and reflections with IDs '0' and '1'
dials.slice_sequence models.expt observations.refl \
"image_range=1 20" "image_range=5 30"
"""
from libtbx.phil import parse
phil_scope = parse(
"""
output {
reflections_filename = None
.type = str
.help = "The filename for output reflections sliced to remove those"
"outside the reduced image range. By default generated"
"automatically from the input name"
experiments_filename = None
.type = str
.help = "The filename for the output experiments with sliced scans.
By default generated automatically from the input name"
}
image_range = None
.help = "Range in images to slice a sequence. The number of arguments"
"must be a factor of two. Each pair of arguments gives a range"
"that follows C conventions (e.g. j0 <= j < j1) when slicing the"
"reflections by observed centroid."
.type = ints(size=2)
.multiple = True
block_size = None
.type = float
.help = "Overrides image_range if present. This option splits each sequence"
"into the nearest integer number of equal size blocks close to"
"block_size degrees in width"
"""
)
def calculate_block_ranges(scan, block_size):
    """Split a scan into contiguous, roughly equal-sized image ranges.

    :param scan: a scan object providing get_num_images,
        get_oscillation_range and get_image_range
    :param block_size: target block width in degrees
    :returns: list of inclusive (first, last) image-number pairs
    """
    n_images = scan.get_num_images()
    osc_start, osc_end = scan.get_oscillation_range(deg=True)
    osc_width = abs(osc_end - osc_start)
    # At least one block, and never more blocks than images
    n_blocks = min(max(int(round(osc_width / block_size)), 1), n_images)

    # Equal-sized blocks, except the last which absorbs the remainder
    per_block = [n_images // n_blocks] * (n_blocks - 1)
    per_block.append(n_images // n_blocks + n_images % n_blocks)

    image_ranges = []
    first = scan.get_image_range()[0]
    for count in per_block:
        image_ranges.append((first, first + count - 1))
        first += count
    return image_ranges
class Script:
    """A class for running the script.

    Parses command-line input, slices experiments and/or reflections to the
    requested image ranges (or equal-sized blocks) and writes the results.
    """

    def __init__(self):
        """Initialise the script by constructing the argument parser."""
        from dials.util.options import ArgumentParser

        usage = (
            "usage: dials.slice_sequence [options] [param.phil] "
            "models.expt observations.refl"
        )

        # Create the parser
        self.parser = ArgumentParser(
            usage=usage,
            phil=phil_scope,
            read_reflections=True,
            read_experiments=True,
            check_format=False,
            epilog=help_message,
        )

    def run(self, args=None):
        """Execute the script.

        :param args: optional list of command-line arguments; None reads
            sys.argv
        """
        from dials.util.options import reflections_and_experiments_from_files

        # Parse the command line
        params, options = self.parser.parse_args(args, show_diff_phil=True)
        reflections, experiments = reflections_and_experiments_from_files(
            params.input.reflections, params.input.experiments
        )

        # Try to load the models and data
        slice_exps = len(experiments) > 0
        slice_refs = len(reflections) > 0

        # Catch case of nothing to do
        if not slice_exps and not slice_refs:
            print("No suitable input provided")
            self.parser.print_help()
            return

        if reflections:
            if len(reflections) > 1:
                raise Sorry("Only one reflections list can be imported at present")
            reflections = reflections[0]

            # calculate frame numbers if needed
            if experiments:
                reflections = calculate_frame_numbers(reflections, experiments)

            # if we still don't have the right column give up
            if "xyzobs.px.value" not in reflections:
                raise Sorry(
                    "These reflections do not have frame numbers set, and "
                    "there are no experiments provided to calculate these."
                )

        # set trivial case where no scan range is provided at all
        if not params.image_range:
            params.image_range = [None]

        # check if slicing into blocks
        if params.block_size is not None:
            if not slice_exps:
                raise Sorry(
                    "For slicing into blocks, an experiment file must be provided"
                )
            if len(experiments) > 1:
                raise Sorry("For slicing into blocks please provide a single scan only")
            scan = experiments[0].scan

            # Having extracted the scan, calculate the blocks
            params.image_range = calculate_block_ranges(scan, params.block_size)

            # Do the slicing then recombine: one sliced experiment per block
            sliced = [
                slice_experiments(experiments, [sr])[0] for sr in params.image_range
            ]
            generate_experiment_identifiers(sliced)
            sliced_experiments = ExperimentList(sliced)

            # slice reflections if present
            if slice_refs:
                sliced = [
                    slice_reflections(reflections, [sr]) for sr in params.image_range
                ]
                sliced_reflections = flex.reflection_table()
                identifiers = sliced_experiments.identifiers()
                # resetting experiment identifiers so each block's
                # reflections map onto the corresponding new experiment
                for i, rt in enumerate(sliced):
                    for k in rt.experiment_identifiers().keys():
                        del rt.experiment_identifiers()[k]
                    rt["id"] = flex.int(rt.size(), i)  # set id
                    rt.experiment_identifiers()[i] = identifiers[i]
                    sliced_reflections.extend(rt)

        else:
            # slice each dataset into the requested subset
            if slice_exps:
                sliced_experiments = slice_experiments(experiments, params.image_range)
            if slice_refs:
                sliced_reflections = slice_reflections(reflections, params.image_range)

        # Save sliced experiments
        if slice_exps:
            output_experiments_filename = params.output.experiments_filename
            if output_experiments_filename is None:
                # take first filename as template
                bname = basename(params.input.experiments[0].filename)
                bname = splitext(bname)[0]
                if not bname:
                    bname = "experiments"
                if len(params.image_range) == 1 and params.image_range[0] is not None:
                    ext = "_{}_{}.expt".format(*params.image_range[0])
                else:
                    ext = "_sliced.expt"
                output_experiments_filename = bname + ext

            print(f"Saving sliced experiments to {output_experiments_filename}")
            sliced_experiments.as_file(output_experiments_filename)

        # Save sliced reflections
        if slice_refs:
            output_reflections_filename = params.output.reflections_filename
            if output_reflections_filename is None:
                # take first filename as template
                bname = basename(params.input.reflections[0].filename)
                bname = splitext(bname)[0]
                if not bname:
                    bname = "reflections"
                if len(params.image_range) == 1 and params.image_range[0] is not None:
                    ext = "_{}_{}.refl".format(*params.image_range[0])
                else:
                    ext = "_sliced.refl"
                output_reflections_filename = bname + ext

            print(f"Saving sliced reflections to {output_reflections_filename}")
            sliced_reflections.as_file(output_reflections_filename)

        return
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Entry point for dials.slice_sequence."""
    Script().run(args)


if __name__ == "__main__":
    run()
| bsd-3-clause | 517252d1a51a026bcfef991487f9d1f6 | 34.744 | 88 | 0.606647 | 4.211122 | false | false | false | false |
dials/dials | src/dials/algorithms/profile_model/ellipsoid/indexer.py | 1 | 7256 | from __future__ import annotations
import logging
from math import floor, sqrt
import numpy as np
from numpy.linalg import inv, norm
from cctbx.array_family import flex
from dxtbx import flumpy
from scitbx import matrix
from dials.algorithms.profile_model.ellipsoid import chisq_quantile
from dials.algorithms.statistics.fast_mcd import FastMCD, maha_dist_sq
logger = logging.getLogger("dials")
def _index(reflection_table, experiment, fail_on_bad_index=False):
    """Index the strong spots against the experiment's crystal model.

    Each observed pixel centroid is mapped to reciprocal space and assigned
    the nearest integer Miller index.  Reflections that index as (0, 0, 0)
    or whose fractional index is further than 0.3 from the nearest lattice
    point are discarded.

    :param reflection_table: table with an 'xyzobs.px.value' column
    :param experiment: experiment supplying crystal, beam and detector models
    :param fail_on_bad_index: if True, raise RuntimeError when a spot has to
        be reindexed
    :returns: the indexed subset of reflection_table, flagged as indexed
    """
    # Get some stuff from experiment
    A = np.array(experiment.crystal.get_A(), dtype=np.float64).reshape(3, 3)
    s0 = np.array([experiment.beam.get_s0()], dtype=np.float64).reshape(3, 1)
    s0_length = norm(s0)
    detector = experiment.detector

    # Create array if necessary
    if "miller_index" not in reflection_table:
        reflection_table["miller_index"] = flex.miller_index(len(reflection_table))

    # Index all the reflections
    miller_index = reflection_table["miller_index"]
    selection = flex.size_t()
    num_reindexed = 0
    for i, xyz in enumerate(reflection_table["xyzobs.px.value"]):
        # Get the observed pixel coordinate
        x, y, _ = xyz

        # Get the lab coord, rescaled onto the Ewald sphere
        s1 = np.array(
            detector[0].get_pixel_lab_coord((x, y)), dtype=np.float64
        ).reshape(3, 1)
        s1_norm = norm(s1)
        s1 *= s0_length / s1_norm

        # Get the reciprocal lattice vector
        r = s1 - s0

        # Compute the fractional miller index
        hf = np.matmul(inv(A), r)

        # Compute the integer miller index.  Extract plain Python ints for
        # comparison/formatting: using size-1 numpy arrays in tuples breaks
        # %-formatting on numpy >= 1.25 and makes tuple comparison fragile.
        h = np.array([int(floor(j + 0.5)) for j in hf[:, 0]], dtype=int).reshape(3, 1)
        hkl = tuple(int(v) for v in h[:, 0])

        # Print warning if reindexing (logger.warn is a deprecated alias;
        # arguments are passed lazily rather than pre-formatted)
        if hkl != miller_index[i]:
            logger.warning(
                "Reindexing (% 3d, % 3d, % 3d) -> (% 3d, % 3d, % 3d)",
                *(miller_index[i] + hkl),
            )
            num_reindexed += 1
            miller_index[i] = matrix.col(flumpy.from_numpy(h))
            if fail_on_bad_index:
                raise RuntimeError("Bad index")

        # If its not indexed as 0, 0, 0 and is close to a lattice point, keep it
        if h.any() and norm(h - hf) < 0.3:
            selection.append(i)

    # Print some info
    logger.info(
        "Reindexed %d/%d input reflections", num_reindexed, len(reflection_table)
    )
    logger.info(
        "Selected %d/%d input reflections", len(selection), len(reflection_table)
    )

    # Select all the indexed reflections
    reflection_table.set_flags(selection, reflection_table.flags.indexed)
    reflection_table = reflection_table.select(selection)
    return reflection_table
def _predict(reflection_table, experiment):
    """
    Predict the position of the spots.

    For each indexed reflection, computes the reciprocal lattice vector and
    from it the (off-Ewald-sphere) scattering vector s2 plus its projection
    s1 onto the Ewald sphere, then intersects s1 with the detector to give
    calculated centroid positions in mm and pixels.

    :param reflection_table: table with a 'miller_index' column
    :param experiment: experiment supplying crystal, beam and detector models
    :returns: the same table with 's1', 's2', 'entering', 'xyzcal.mm' and
        'xyzcal.px' columns set
    """

    # Get some stuff from experiment
    A = np.array(experiment.crystal.get_A(), dtype=np.float64).reshape((3, 3))
    s0 = np.array([experiment.beam.get_s0()], dtype=np.float64).reshape(3, 1)
    s0_length = norm(s0)

    # Compute the vector to the reciprocal lattice point
    # since this is not on the ewald sphere, lets call it s2
    s1 = flex.vec3_double(reflection_table.size())
    s2 = flex.vec3_double(reflection_table.size())
    for i, h in enumerate(reflection_table["miller_index"]):
        r = np.matmul(A, np.array([h], dtype=np.float64).reshape(3, 1))
        s2_i = r + s0
        s2[i] = matrix.col(flumpy.from_numpy(s2_i))
        # s1 is s2 rescaled onto the Ewald sphere
        s1[i] = matrix.col(flumpy.from_numpy(s2_i * s0_length / norm(s2_i)))
    reflection_table["s1"] = s1
    reflection_table["s2"] = s2
    # 'entering' is always False for stills-type prediction here
    reflection_table["entering"] = flex.bool(reflection_table.size(), False)

    # Compute the ray intersections
    xyzpx = flex.vec3_double()
    xyzmm = flex.vec3_double()
    for ss in s1:
        mm = experiment.detector[0].get_ray_intersection(ss)
        px = experiment.detector[0].millimeter_to_pixel(mm)
        xyzpx.append(px + (0,))
        xyzmm.append(mm + (0,))
    reflection_table["xyzcal.mm"] = xyzmm
    reflection_table["xyzcal.px"] = xyzpx
    logger.info("Do prediction for %d reflections" % len(reflection_table))
    return reflection_table
def _filter_reflections_based_on_centroid_distance(
    reflection_table,
    experiment,
    outlier_probability=0.975,
    max_separation=2,
):
    """
    Filter reflections too far from predicted position.

    Uses the FastMCD robust location/scatter estimate on the (X, Y) pixel
    residuals and rejects reflections whose squared Mahalanobis distance
    exceeds the chi-squared quantile at ``outlier_probability``, or whose
    absolute centroid offset exceeds ``max_separation`` pixels.

    :param reflection_table: table with 'xyzobs.px.value', 'xyzcal.px' and
        's2' columns set (see _predict)
    :param experiment: experiment supplying the beam model
    :param outlier_probability: chi-squared probability used for the MCD
        Mahalanobis-distance cutoff
    :param max_separation: maximum obs-calc centroid distance in pixels
    :returns: the filtered reflection table
    :raises RuntimeError: if the FastMCD initialisation fails
    """

    # Compute the x and y residuals
    Xobs, Yobs, _ = reflection_table["xyzobs.px.value"].parts()
    Xcal, Ycal, _ = reflection_table["xyzcal.px"].parts()
    Xres = Xobs - Xcal
    Yres = Yobs - Ycal

    # Compute the epsilon residual (excitation error; only logged below)
    s0_length = 1.0 / experiment.beam.get_wavelength()
    s1x, s1y, s1z = reflection_table["s2"].parts()
    s1_length = flex.sqrt(s1x**2 + s1y**2 + s1z**2)
    Eres = s1_length - s0_length

    # Initialise the fast_mcd outlier algorithm
    # fast_mcd = FastMCD((Xres, Yres, Eres))
    try:
        fast_mcd = FastMCD((Xres, Yres))
    except AssertionError as e:
        raise RuntimeError(e)

    # get location and MCD scatter estimate
    T, S = fast_mcd.get_corrected_T_and_S()

    # get squared Mahalanobis distances
    # d2s = maha_dist_sq((Xres, Yres, Eres), T, S)
    d2s = maha_dist_sq((Xres, Yres), T, S)

    # Compute the cutoff (2 degrees of freedom: X and Y)
    mahasq_cutoff = chisq_quantile(2, outlier_probability)

    # compare to the threshold and select reflections
    selection1 = d2s < mahasq_cutoff
    selection2 = flex.sqrt(Xres**2 + Yres**2) < max_separation
    selection = selection1 & selection2
    reflection_table = reflection_table.select(selection)
    n_refl = reflection_table.size()

    # Print some stuff
    logger.info("-" * 80)
    logger.info("Centroid outlier rejection")
    logger.info(f"  Using MCD algorithm with probability = {outlier_probability}")
    logger.info("  Max X residual: %f" % flex.max(flex.abs(Xres)))
    logger.info("  Max Y residual: %f" % flex.max(flex.abs(Yres)))
    logger.info("  Max E residual: %f" % flex.max(flex.abs(Eres)))
    logger.info("  Mean X RMSD: %f" % (sqrt(flex.sum(Xres**2) / len(Xres))))
    logger.info("  Mean Y RMSD: %f" % (sqrt(flex.sum(Yres**2) / len(Yres))))
    logger.info("  Mean E RMSD: %f" % (sqrt(flex.sum(Eres**2) / len(Eres))))
    logger.info("  MCD location estimate: %.4f, %.4f" % tuple(T))
    logger.info(
        """  MCD scatter estimate:
    %.7f, %.7f,
    %.7f, %.7f"""
        % tuple(S)
    )
    logger.info("  Number of outliers: %d" % selection1.count(False))
    logger.info(
        "  Number of reflections with residual > %0.2f pixels: %d"
        % (max_separation, selection2.count(False))
    )
    logger.info(f"Number of reflections selection for refinement: {n_refl}")
    logger.info("-" * 80)

    return reflection_table
def reindex(
    reflection_table,
    experiment,
    outlier_probability=0.975,
    max_separation=2,
    fail_on_bad_index=False,
):
    """Reindex strong spots and perform filtering.

    Pipeline of three steps: assign Miller indices (_index), predict
    centroid positions (_predict), then reject centroid outliers
    (_filter_reflections_based_on_centroid_distance).

    :param reflection_table: strong spots with 'xyzobs.px.value' set
    :param experiment: experiment supplying crystal, beam and detector models
    :param outlier_probability: chi-squared probability for the MCD cutoff
    :param max_separation: maximum obs-calc centroid distance in pixels
    :param fail_on_bad_index: raise RuntimeError if a spot is reindexed
    :returns: the indexed, predicted and filtered reflection table
    """
    reflection_table = _index(reflection_table, experiment, fail_on_bad_index)
    reflection_table = _predict(reflection_table, experiment)
    reflection_table = _filter_reflections_based_on_centroid_distance(
        reflection_table,
        experiment,
        outlier_probability=outlier_probability,
        max_separation=max_separation,
    )
    return reflection_table
| bsd-3-clause | e0047b382072ee9e0a86a285f21fb547 | 33.226415 | 86 | 0.633958 | 3.26259 | false | false | false | false |
dials/dials | src/dials/util/image_viewer/slip_viewer/uc_frame.py | 1 | 14583 | from __future__ import annotations
import math
import wx
from wx.lib.agw.floatspin import EVT_FLOATSPIN, FloatSpin
import cctbx.miller
from cctbx.crystal import symmetry
from scitbx.matrix import col
class UCSettingsFrame(wx.MiniFrame):
    """Floating tool window hosting the unit-cell calibration settings panel."""

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        szr = wx.BoxSizer(wx.VERTICAL)
        # args[0] is the parent frame, which carries the phil parameters
        self.phil_params = args[0].params
        panel = UCSettingsPanel(self)
        self.SetSizer(szr)
        szr.Add(panel, 1, wx.EXPAND)
        szr.Fit(panel)
        self.panel = panel
        self.sizer = szr
        self.Fit()
        # Destroy (rather than just hide) the frame when it is closed
        self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy(), self)
class UCSettingsPanel(wx.Panel):
    def __init__(self, *args, **kwds):
        """Build the panel: unit-cell, space group, distance, wavelength,
        d_min and beam-center controls, then draw the initial rings.

        args[0] is the enclosing UCSettingsFrame, which carries phil_params.
        """
        super().__init__(*args, **kwds)

        self.phil_params = args[0].phil_params

        # Needed to draw and delete the rings.  XXX Applies to
        # calibration_frame as well?
        self._pyslip = self.GetParent().GetParent().pyslip

        sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sizer)

        # Number of decimal digits for distances.
        self.digits = 2

        # Wavelength control.
        beam = self._pyslip.tiles.raw_image.get_beam()
        self._wavelength = beam.get_wavelength()

        # Unit cell controls.  Defaults are used when no cell is supplied
        # via the phil parameters.
        if self.phil_params.calibrate_unit_cell.unit_cell is not None:
            self._cell = list(
                self.phil_params.calibrate_unit_cell.unit_cell.parameters()
            )
        else:
            self._cell = [4.18, 4.72, 58.38, 89.44, 89.63, 75.85]

        if self.phil_params.calibrate_unit_cell.space_group is not None:
            self._spacegroup = self.phil_params.calibrate_unit_cell.space_group
        else:
            self._spacegroup = "P1"

        self._show_hkl = self.phil_params.calibrate_unit_cell.show_hkl

        # Control names double as keys mapping each spinner to its index in
        # self._cell (see OnSpinCell).
        self._cell_control_names = [
            "uc_a_ctrl",
            "uc_b_ctrl",
            "uc_c_ctrl",
            "uc_alpha_ctrl",
            "uc_beta_ctrl",
            "uc_gamma_ctrl",
        ]

        # Row 1: a and alpha
        box = wx.BoxSizer(wx.HORIZONTAL)

        self.uc_a = FloatSpin(
            self,
            digits=self.digits,
            name=self._cell_control_names[0],
            value=self._cell[0],
        )
        box.Add(
            self.uc_a, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )
        box.Add(wx.StaticText(self, label="a"), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(EVT_FLOATSPIN, self.OnSpinCell, self.uc_a)

        self.uc_alpha = FloatSpin(
            self,
            digits=self.digits,
            name=self._cell_control_names[3],
            value=self._cell[3],
        )
        box.Add(
            self.uc_alpha,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="alpha"), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinCell, self.uc_alpha)

        sizer.Add(box)

        # Row 2: b and beta
        box = wx.BoxSizer(wx.HORIZONTAL)

        self.uc_b = FloatSpin(
            self,
            digits=self.digits,
            name=self._cell_control_names[1],
            value=self._cell[1],
        )
        box.Add(
            self.uc_b, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )
        box.Add(wx.StaticText(self, label="b"), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(EVT_FLOATSPIN, self.OnSpinCell, self.uc_b)

        self.uc_beta = FloatSpin(
            self,
            digits=self.digits,
            name=self._cell_control_names[4],
            value=self._cell[4],
        )
        box.Add(
            self.uc_beta, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )
        box.Add(
            wx.StaticText(self, label="beta"), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinCell, self.uc_beta)

        sizer.Add(box)

        # Row 3: c and gamma
        box = wx.BoxSizer(wx.HORIZONTAL)

        self.uc_c = FloatSpin(
            self,
            digits=self.digits,
            name=self._cell_control_names[2],
            value=self._cell[2],
        )
        box.Add(
            self.uc_c, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )
        box.Add(wx.StaticText(self, label="c"), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(EVT_FLOATSPIN, self.OnSpinCell, self.uc_c)

        self.uc_gamma = FloatSpin(
            self,
            digits=self.digits,
            name=self._cell_control_names[5],
            value=self._cell[5],
        )
        box.Add(
            self.uc_gamma,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="gamma"), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinCell, self.uc_gamma)

        sizer.Add(box)

        # Space group control
        box = wx.BoxSizer(wx.HORIZONTAL)
        self.space_group_ctrl = wx.TextCtrl(
            self, name="space group", value=self._spacegroup
        )
        box.Add(
            self.space_group_ctrl,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="Space group"),
            0,
            wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        self.Bind(wx.EVT_TEXT, self.OnSpaceGroup, self.space_group_ctrl)
        sizer.Add(box)

        # Distance control
        img = self.GetParent().GetParent()._img
        box = wx.BoxSizer(wx.HORIZONTAL)
        self.distance_ctrl = FloatSpin(
            self,
            digits=self.digits,
            name="Detector Distance",
            value=img.get_detector_distance(),
        )
        self.distance_ctrl.SetIncrement(0.5)
        box.Add(
            self.distance_ctrl,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        txtd = wx.StaticText(self, label="Detector Distance")
        box.Add(txtd, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(EVT_FLOATSPIN, self.OnSpin, self.distance_ctrl)
        sizer.Add(box)

        # Wavelength control
        img = self.GetParent().GetParent()._img
        box = wx.BoxSizer(wx.HORIZONTAL)
        self.wavelength_ctrl = FloatSpin(
            self, digits=4, name="Wavelength", value=img.get_wavelength()
        )
        self.wavelength_ctrl.SetIncrement(0.05)
        box.Add(
            self.wavelength_ctrl,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        txtw = wx.StaticText(self, label="Wavelength")
        box.Add(txtw, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(EVT_FLOATSPIN, self.OnSpin, self.wavelength_ctrl)
        sizer.Add(box)

        # d_min control: highest resolution for which rings are shown
        if self.phil_params.calibrate_unit_cell.d_min is not None:
            self.d_min = self.phil_params.calibrate_unit_cell.d_min
        else:
            self.d_min = 3.5
        box = wx.BoxSizer(wx.HORIZONTAL)
        self.d_min_ctrl = FloatSpin(
            self, digits=self.digits, name="d_min", value=self.d_min
        )
        box.Add(
            self.d_min_ctrl,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        txtd = wx.StaticText(self, label="Highest resolution for ring display")
        box.Add(txtd, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(EVT_FLOATSPIN, self.OnSpin, self.d_min_ctrl)
        sizer.Add(box)

        # Centering controls (beam-center offset in fast/slow directions).
        self._center = [0, 0]
        box = wx.BoxSizer(wx.HORIZONTAL)

        self.spinner_fast = FloatSpin(
            self, digits=self.digits, name="fast_ctrl", value=self._center[0]
        )
        box.Add(
            self.spinner_fast,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="Center fast"),
            0,
            wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinCenter, self.spinner_fast)

        self.spinner_slow = FloatSpin(
            self, digits=self.digits, name="slow_ctrl", value=self._center[1]
        )
        box.Add(
            self.spinner_slow,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="Center slow"),
            0,
            wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        sizer.Add(box)

        box = wx.BoxSizer(wx.HORIZONTAL)
        self.clear_button = wx.Button(self, -1, "Clear")
        box.Add(self.clear_button, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(wx.EVT_BUTTON, self.OnClear, self.clear_button)
        sizer.Add(box)

        origin_box = wx.BoxSizer(wx.HORIZONTAL)
        self.origin = wx.StaticText(self, label="")
        origin_box.Add(self.origin, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        # NOTE(review): the slow spinner's event binding happens here, in the
        # origin-box section, rather than next to the "Center slow" controls.
        self.Bind(EVT_FLOATSPIN, self.OnSpinCenter, self.spinner_slow)
        sizer.Add(origin_box)

        self.DrawRings()
def __del__(self):
    """Remove the ring overlay from the pyslip widget, if one exists."""
    layer = getattr(self, "_ring_layer", None)
    if layer is not None:
        self._pyslip.DeleteLayer(layer)
        self._ring_layer = None
def OnSpinCenter(self, event):
    """Update the stored centre offset from whichever spinner fired, then redraw."""
    ctrl = event.EventObject
    ctrl_name = ctrl.GetName()
    if ctrl_name == "fast_ctrl":
        self._center[0] = ctrl.GetValue()
    elif ctrl_name == "slow_ctrl":
        self._center[1] = ctrl.GetValue()
    self.DrawRings()
def OnSpinCell(self, event):
    """Update the unit-cell parameter matching the spinner's name, then redraw."""
    ctrl = event.EventObject
    idx = self._cell_control_names.index(ctrl.GetName())
    self._cell[idx] = ctrl.GetValue()
    self.DrawRings()
def OnSpin(self, event):
    """Generic spinner handler (wavelength, d_min): just redraw the rings."""
    self.DrawRings()
def OnSpaceGroup(self, event):
    """Record the newly entered space-group symbol and redraw."""
    self._spacegroup = event.EventObject.GetValue()
    self.DrawRings()
def OnClear(self, event):
    """Remove the ring overlay in response to the Clear button.

    Previously this invoked ``self.__del__()`` directly; calling a
    finalizer explicitly is error-prone (it will also run again at
    garbage collection), so the layer removal is performed inline.
    Behaviour is unchanged.
    """
    if hasattr(self, "_ring_layer") and self._ring_layer is not None:
        self._pyslip.DeleteLayer(self._ring_layer)
        self._ring_layer = None
def _draw_rings_layer(self, dc, data, map_rel):
    """Draw a points layer.

    dc       the device context to draw on
    data     an iterable of point tuples:
             (x, y, place, radius, colour, x_off, y_off, pdata)
    map_rel  points relative to map if True, MUST BE TRUE for lightweight

    Assumes all points are the same colour, saving 100's of ms:
    pen and brush are taken once from the first point only.
    """
    assert map_rel is True
    if len(data) == 0:
        return
    # Unpack the first point just to obtain the shared colour.
    (lon, lat, place, radius, colour, x_off, y_off, pdata) = data[0]
    # Radii are stored in pixels at zoom 0; scale by the current zoom level.
    scale = 2**self._pyslip.tiles.zoom_level
    # Draw points on map/view, using transparency if implemented.
    try:
        dc = wx.GCDC(dc)
    except NotImplementedError:
        pass
    dc.SetPen(wx.Pen(colour))
    dc.SetBrush(wx.Brush(colour, wx.TRANSPARENT))
    for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
        # Convert geo coordinates to view pixels before drawing each circle.
        (x, y) = self._pyslip.ConvertGeo2View((lon, lat))
        dc.DrawCircle(x, y, radius * scale)
def DrawRings(self):
    """Recompute resolution rings from the current cell / space group /
    geometry controls and replace the pyslip ring layer.

    Any symmetry/miller-set error is reported on the frame status bar
    instead of raising.
    """
    frame = self.GetParent().GetParent()
    try:
        uc = symmetry(
            unit_cell=self._cell, space_group_symbol=str(self._spacegroup)
        )
        hkl_list = cctbx.miller.build_set(
            uc, False, d_min=self.d_min_ctrl.GetValue()
        )
    except Exception as e:
        frame.update_statusbar(str(e))
        return
    # Optionally restrict the display to an explicit list of indices.
    if self._show_hkl:
        hkl_list = hkl_list.common_set(
            cctbx.miller.set(
                crystal_symmetry=uc, indices=self._show_hkl, anomalous_flag=False
            )
        )
    frame.update_statusbar(
        "%d %d %d %d %d %d, " % tuple(self._cell)
        + f"number of indices: {len(hkl_list.indices())}"
    )
    # Sort by d-spacing, largest first (lowest resolution ring first).
    spacings = sorted(hkl_list.d_spacings(), key=lambda s: s[1], reverse=True)
    print(f"Printing spacings, len: {len(spacings)}")
    for d in spacings:
        print(d)
    detector = self._pyslip.tiles.raw_image.get_detector()
    wavelength = float(self.wavelength_ctrl.GetValue())
    # NOTE(review): distance_ctrl is not created in the visible ctor code —
    # presumably built elsewhere in this panel; confirm.
    distance = float(self.distance_ctrl.GetValue())
    pixel_size = detector[0].get_pixel_size()[
        0
    ]  # FIXME assumes square pixels, and that all panels use same pixel size
    twotheta = hkl_list.two_theta(wavelength=wavelength)
    # Ring radius: L = distance * tan(2theta), converted from mm to pixels.
    L_mm = []
    L_pixels = []
    for tt in twotheta:
        L_mm.append(distance * math.tan(tt[1]))
    for lmm in L_mm:
        L_pixels.append(lmm / pixel_size)
    xrayframe = self.GetParent().GetParent()
    panel_id, beam_pixel_fast, beam_pixel_slow = xrayframe.get_beam_center_px()
    # Map the per-tile beam centre into whole-picture coordinates.
    (
        beam_pixel_slow,
        beam_pixel_fast,
    ) = xrayframe.pyslip.tiles.flex_image.tile_readout_to_picture(
        panel_id, beam_pixel_slow - 0.5, beam_pixel_fast - 0.5
    )
    center = self._pyslip.tiles.picture_fast_slow_to_map_relative(
        beam_pixel_fast + self._center[0], beam_pixel_slow + self._center[1]
    )
    # XXX Transparency?
    ring_data = [
        (center[0], center[1], {"colour": "red", "radius": pxl}) for pxl in L_pixels
    ]
    # Remove the old ring layer, and draw a new one.
    if hasattr(self, "_ring_layer") and self._ring_layer is not None:
        self._pyslip.DeleteLayer(self._ring_layer)
        self._ring_layer = None
    self._ring_layer = self._pyslip.AddPointLayer(
        ring_data,
        map_rel=True,
        visible=True,
        show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
        renderer=self._draw_rings_layer,
        name="<ring_layer>",
    )
    # Report the effective panel-0 origin implied by the user's centre
    # offset and distance adjustments.
    panel = detector[0]
    fast = col(panel.get_fast_axis())
    slow = col(panel.get_slow_axis())
    norm = col(panel.get_normal())
    x = -panel.pixel_to_millimeter(self._center)[0]
    y = -panel.pixel_to_millimeter(self._center)[1]
    z = -(panel.get_distance() - distance)
    origin = (fast * x + slow * y + norm * z) + col(panel.get_origin())
    self.origin.SetLabel("Panel 0 origin: %f, %f, %f" % origin.elems)
| bsd-3-clause | 2cfba83399d8fda8687408520438cd98 | 30.980263 | 88 | 0.539807 | 3.486254 | false | false | false | false |
dials/dials | tests/algorithms/shoebox/test_mask_overlapping.py | 1 | 5935 | from __future__ import annotations
import numpy as np
from dxtbx.model.experiment_list import Experiment, ExperimentList
from dxtbx.serialize import load
from scitbx import matrix
from dials.algorithms import shoebox
from dials.algorithms.profile_model.gaussian_rs import Model
from dials.array_family import flex
def predict_reflections(sequence, crystal):
    """Predict reflections for ``sequence``/``crystal`` and find overlaps.

    Returns a pair (predicted reflection table, adjacency list linking
    reflections whose bounding boxes overlap).
    """
    # Get models from the sequence
    beam = sequence.get_beam()
    detector = sequence.get_detector()
    gonio = sequence.get_goniometer()
    scan = sequence.get_scan()
    sigma_b = beam.get_sigma_divergence(deg=True)
    sigma_m = crystal.get_mosaicity(deg=True)
    exlist = ExperimentList()
    exlist.append(
        Experiment(
            imageset=sequence,
            beam=beam,
            detector=detector,
            goniometer=gonio,
            scan=scan,
            crystal=crystal,
            # n_sigma=3; divergence/mosaicity are passed in degrees.
            profile=Model(None, 3, sigma_b, sigma_m, deg=True),
        )
    )
    predicted = flex.reflection_table.from_predictions(exlist[0])
    predicted["id"] = flex.int(len(predicted), 0)
    # Bounding boxes are required before overlaps can be computed.
    predicted.compute_bbox(exlist)
    # Find overlapping reflections
    overlaps = shoebox.find_overlapping(predicted["bbox"])
    # Return the reflections and overlaps
    return predicted, overlaps
def test(dials_data):
    """End-to-end check of shoebox.MaskOverlapping on real test data."""
    # Load the sequence and crystal
    sequence = load.imageset(
        dials_data("centroid_test_data", pathlib=True) / "sweep.json"
    )
    crystal = load.crystal(
        str(dials_data("centroid_test_data", pathlib=True) / "crystal.json")
    )
    # Get models from the sequence
    detector = sequence.get_detector()
    # Get the reflections and overlaps
    reflections, adjacency_list = predict_reflections(sequence, crystal)
    reflections["shoebox"] = flex.shoebox(reflections["panel"], reflections["bbox"])
    # Start from an all-Valid mask; MaskOverlapping will zero contested voxels.
    reflections["shoebox"].allocate_with_value(shoebox.MaskCode.Valid)
    # If the adjacency list is given, then create the reflection mask
    assert len(detector) == 1
    image_size = detector[0].get_image_size()
    shoeboxes = reflections["shoebox"]
    coords = reflections["xyzcal.px"]
    shoebox_masker = shoebox.MaskOverlapping()
    shoebox_masker(shoeboxes, coords, adjacency_list)
    # Loop through all edges
    overlapping = []
    for e in adjacency_list.edges():
        v1, v2 = adjacency_list.source(e), adjacency_list.target(e)
        overlapping.append(v1)
        overlapping.append(v2)
    # Ensure elements are unique
    overlapping = set(overlapping)
    # Ensure we have some overlaps
    assert len(overlapping) > 0
    # Get all non-overlapping reflections
    all_r = set(range(len(reflections)))
    non_overlapping = all_r.difference(overlapping)
    # Run the tests
    tst_non_overlapping(reflections, non_overlapping, detector[0].get_image_size())
    tst_overlapping(reflections, overlapping, adjacency_list, image_size)
def tst_non_overlapping(reflections, non_overlapping, image_size):
    """Ensure non-overlapping reflections have all their values 1.

    ``image_size`` is accepted for signature parity with tst_overlapping
    but is not used here.
    """
    masks = reflections["shoebox"]
    for idx in non_overlapping:
        # Every voxel of a non-overlapping shoebox must still be Valid.
        assert masks[idx].mask.all_eq(shoebox.MaskCode.Valid)
def tst_overlapping(reflections, overlapping, adjacency_list, image_size):
    """Ensure masks for overlapping reflections are set properly.

    For each overlapping reflection, rebuild the expected mask by zeroing
    every voxel in each pairwise intersection that is closer to the
    neighbour's centroid than to this reflection's, and compare with the
    mask produced by MaskOverlapping.  ``image_size`` is unused.
    """
    # Loop through all overlaps
    shoeboxes = reflections["shoebox"]
    coord = reflections["xyzcal.px"]
    for i in overlapping:
        r1 = shoeboxes[i]
        bbox_1 = r1.bbox
        r1_coord = matrix.col(coord[i])
        # Create a mask that we expect
        r1_size = (bbox_1[5] - bbox_1[4], bbox_1[3] - bbox_1[2], bbox_1[1] - bbox_1[0])
        expected_mask = np.zeros(shape=r1_size, dtype=np.int32)
        expected_mask[:, :, :] = shoebox.MaskCode.Valid
        # Loop through all reflections which this reflection overlaps
        for j in adjacency_list.adjacent_vertices(i):
            r2 = shoeboxes[j]
            bbox_2 = r2.bbox
            r2_coord = matrix.col(coord[j])
            # Get bounding box of intersection
            bbox_3 = (
                max(bbox_1[0], bbox_2[0]),
                min(bbox_1[1], bbox_2[1]),
                max(bbox_1[2], bbox_2[2]),
                min(bbox_1[3], bbox_2[3]),
                max(bbox_1[4], bbox_2[4]),
                min(bbox_1[5], bbox_2[5]),
            )
            # Check intersection is valid
            assert bbox_3[0] < bbox_3[1]
            assert bbox_3[2] < bbox_3[3]
            assert bbox_3[4] < bbox_3[5]
            # Get the coordinates are all mask values
            # NOTE(review): the inner loop variables i and j below shadow the
            # outer loop variables of the same names.  Harmless here (both
            # outer loops reassign on the next iteration, and nothing reads
            # them after this point), but fragile — consider renaming.
            mask_coord = []
            for k in range(bbox_3[4], bbox_3[5]):
                for j in range(bbox_3[2], bbox_3[3]):
                    for i in range(bbox_3[0], bbox_3[1]):
                        # Voxel centres, hence the +0.5 offsets.
                        mask_coord.append(matrix.col((i + 0.5, j + 0.5, k + 0.5)))

            # Distances from point a to every voxel centre in m.
            # (Re-defined for each neighbour; could be hoisted out of the loop.)
            def dist(a, m):
                return np.array([(a - b).length() for b in m])

            # Find the indices in the intersection area where r2 is closer to
            # the point than r1
            ind = np.where(dist(r1_coord, mask_coord) > dist(r2_coord, mask_coord))[0]
            # Set the mask values for r1 where r2 is closer to 0
            k0, k1 = bbox_3[4] - bbox_1[4], bbox_3[5] - bbox_1[4]
            j0, j1 = bbox_3[2] - bbox_1[2], bbox_3[3] - bbox_1[2]
            i0, i1 = bbox_3[0] - bbox_1[0], bbox_3[1] - bbox_1[0]
            intersect_mask = expected_mask[k0:k1, j0:j1, i0:i1]
            intersect_mask_1d = intersect_mask.reshape(-1)
            intersect_mask_1d[ind] = 0
            intersect_mask[:, :] = intersect_mask_1d.reshape(intersect_mask.shape)
            expected_mask[k0:k1, j0:j1, i0:i1] = intersect_mask
        # Check the masks are the same
        calculated_mask = r1.mask.as_numpy_array()
        assert np.all(calculated_mask == expected_mask)
| bsd-3-clause | 38498a53652ebc9721ed979f91f37c86 | 34.753012 | 87 | 0.613311 | 3.428654 | false | false | false | false |
douban/dpark | dpark/schedule.py | 1 | 53619 | from __future__ import absolute_import
import marshal
import multiprocessing
import os
import socket
import sys
import time
import six
from six.moves import map, range, urllib, queue, cPickle
import weakref
import threading
import json
from collections import Counter
import zmq
from addict import Dict
from pymesos import MesosSchedulerDriver, encode_data, decode_data
import dpark.conf as conf
from dpark.accumulator import Accumulator
from dpark.dependency import ShuffleDependency
from dpark.env import env
from dpark.taskset import TaskSet, TaskCounter
from dpark.mutable_dict import MutableDict
from dpark.task import ResultTask, ShuffleMapTask, TTID, TaskState, TaskEndReason
from dpark.hostatus import TaskHostManager
from dpark.shuffle import MapOutputTracker
from dpark.utils import (
compress, decompress, spawn, getuser,
sec2nanosec)
from dpark.utils.log import get_logger
from dpark.utils.frame import Scope
from dpark.utils import dag
logger = get_logger(__name__)
EXECUTOR_CPUS = 0.01     # cpu share reserved for the executor process itself
EXECUTOR_MEMORY = 128    # cache  (presumably MB reserved per executor -- confirm)
POLL_TIMEOUT = 0.1       # seconds; presumably a polling interval -- not used in this chunk
RESUBMIT_TIMEOUT = 60    # seconds to wait after a fetch failure before resubmitting stages
MAX_IDLE_TIME = 60 * 30  # idle seconds after which the mesos scheduler shuts itself down
class Stage(object):
def __init__(self, rdd, shuffleDep, parents, pipelines, pipeline_edges, rdd_pipelines):
    """
    :param rdd: output rdd of this stage
    :param shuffleDep: for mapOutputStage, determine how computing result will be aggregated, partitioned
    :param parents: list of parent Stage objects this stage depends on
    :param pipelines: pipeline_id -> list of rdds in that pipeline (for UI)
    :param pipeline_edges: see comment on the attribute below
    :param rdd_pipelines: rdd_id -> pipeline_id mapping (for UI)
    """
    self.id = self.new_id()
    self.num_try = 0
    self.rdd = rdd
    self.shuffleDep = shuffleDep
    # A final (result) stage has no shuffle dependency to produce.
    self.is_final = (shuffleDep is None)
    self.parents = parents
    self.numPartitions = len(rdd)
    self.num_finished = 0  # for final stage
    # One host list per partition: where that partition's map output lives.
    self.outputLocs = [[] for _ in range(self.numPartitions)]
    # Per-partition list of TaskStats, one entry per (re)run of the task.
    self.task_stats = [[] for _ in range(self.numPartitions)]
    self.taskcounters = []  # a TaskCounter object for each run/retry
    self.submit_time = 0
    self.finish_time = 0
    self.pipelines = pipelines  # each pipeline is a list of rdds
    self.pipeline_edges = pipeline_edges  # ((src_stage_id, src_pipeline_id), (dst_stage_id, dst_pipeline_id)): N
    self.rdd_pipelines = rdd_pipelines  # rdd_id: pipeline_id
def __str__(self):
return '<Stage(%d) for %s>' % (self.id, self.rdd)
def __getstate__(self):
raise Exception('should not pickle stage')
def __len__(self):
return self.numPartitions
nextId = 0  # class-wide stage id counter

@classmethod
def new_id(cls):
    """Allocate the next stage id (1-based, shared by all Stage instances)."""
    cls.nextId += 1
    return cls.nextId
@property
def try_id(self):
    """Taskset id for the *next* attempt of this stage."""
    return TTID.make_taskset_id(self.id, self.num_try + 1)  # incr num_try After create TaskSet
@property
def isAvailable(self):
if not self.parents and self.shuffleDep is None:
return True
return all(self.outputLocs)
@property
def num_task_finished(self):
if self.is_final:
return self.num_finished
else:
return len([i for i in self.outputLocs if i])
@property
def num_task_running(self):
if self.taskcounters:
return self.taskcounters[-1].running
else:
return 0
def addOutputLoc(self, partition, host):
    """Record that ``partition``'s map output is now available on ``host``."""
    self.outputLocs[partition].append(host)
# def removeOutput(self, partition, host):
# prev = self.outputLocs[partition]
# self.outputLocs[partition] = [h for h in prev if h != host]
def removeHost(self, host):
becameUnavailable = False
for ls in self.outputLocs:
if host in ls:
ls.remove(host)
becameUnavailable = True
if becameUnavailable:
msg = ("%s is now unavailable on host %s, "
"postpone resubmit until %d secs later "
"to wait for futher fetch failure")
logger.info(msg, self, host, RESUBMIT_TIMEOUT)
def finish(self):
if not self.finish_time:
self.finish_time = time.time()
def _summary_stats(self):
    """Aggregate per-partition task stats into {attr: {max, min, sum}}.

    Only the *last* run of each partition is considered; partitions that
    never ran are skipped.
    """
    stats = [x[-1] for x in self.task_stats if x]
    d = {}

    def _summary(lst):
        lst.sort()
        r = {'max': max(lst),
             'min': min(lst),
             'sum': sum(lst)
             }
        return r

    if stats:
        # Summarise every public attribute of the stats objects.
        for attr in dir(stats[0]):
            if not attr.startswith('_'):
                d[attr] = _summary(list([getattr(s, attr) for s in stats]))
    return d
def get_node_id(self, stage_id, pipeline_id):
if stage_id == -1:
stage_id = self.id
return "PIPELINE_{}.{}".format(stage_id, pipeline_id)
def _fmt_node(self, stage_id, pipeline_id):
    """Format one pipeline as a UI graph node; stage_id == -1 means this stage."""
    if stage_id == -1:
        stage_id = self.id
    # One entry per rdd in the pipeline, with its display metadata.
    rdds = [{"rdd_name": rdd.ui_label,
             "rdd_id": rdd.id,
             "api_callsite_id": rdd.scope.api_callsite_id,
             "params": rdd.params}
            for rdd in self.pipelines[pipeline_id]]
    n = {
        dag.KW_TYPE: "stage",
        dag.KW_ID: self.get_node_id(stage_id, pipeline_id),
        dag.KW_LABEL: str(stage_id),
        "rdds": rdds
    }
    return n
def _fmt_edge(self, e):
    """Format one pipeline-edge item ((src, dst) node pair, rdd count) for the UI."""
    (src_key, dst_key), nrdd = e
    src = self.get_node_id(*src_key)
    dst = self.get_node_id(*dst_key)
    # Only annotate the edge when more than one rdd flows across it.
    info = {'#rdd': nrdd} if nrdd > 1 else {}
    return {
        dag.KW_SRC: src,
        dag.KW_DST: dst,
        "info": info
    }
def get_pipeline_graph(self):
    """Nodes and edges of this stage's pipeline tree, in the UI dict format."""
    nodes = [self._fmt_node(self.id, pid) for pid in self.pipelines]
    edges = [self._fmt_edge(item) for item in six.iteritems(self.pipeline_edges)]
    return {dag.KW_NODES: nodes, dag.KW_EDGES: edges}
def fmt_stats(self):
    """Render the stage's summary stats as a "min/avg/max" log line.

    Metrics whose attribute name starts with "bytes" are shown in MB;
    metrics whose sum is zero are omitted.
    """
    n = self.numPartitions
    stats = self._summary_stats()
    msg = "[metric = min/avg/max]: "
    for k, d in six.iteritems(stats):
        sm = d['sum']
        # First name component encodes the unit ("num_...", "bytes_...").
        unit = k.split('_')[0]
        if unit == 'num':
            fmt = " = %d/%d/%d"
        else:
            fmt = " = %.2f/%.2f/%.2f"
        if sm > 0:
            msg += k
            # avg is approximated as sum / numPartitions.
            vs = d['min'], sm / n, d['max']
            vs = tuple(map(int, vs))
            unit_s = " "
            if unit == "bytes":
                vs = tuple([v >> 20 for v in vs])
                fmt = " = %.2f/%.2f/%.2f"
                unit_s = " MB "
            msg += (fmt % vs)
            msg += unit_s
    return msg
def _summary_counters(self):
    """Summarise task and failure counters across every run/retry."""

    def _sum(attr):
        # Total of one counter attribute over all tries.
        return sum([getattr(counter, attr) for counter in self.taskcounters])

    counters = {
        "task": {
            "all": len(self),
            "running": self.num_task_running,
            "finished": self.num_task_finished,
        },
        # Strip the "fail_" prefix from each failure-type attribute name.
        "fail": dict([(attr[5:], _sum(attr)) for attr in TaskCounter(0).get_fail_types()])
    }
    return counters
def get_prof(self):
    """Full profile dict for this stage: info, stats, counters and the
    pipeline graph (consumed by the web UI / loghub dump)."""
    stats = self._summary_stats()
    counters = self._summary_counters()
    graph = self.get_pipeline_graph()
    info = {
        'id': self.id,
        'parents': [p.id for p in self.parents],
        'output_rdd': self.rdd.__class__.__name__,
        'output_pipeline': self.get_node_id(self.id, self.rdd.id),
        'api_callsite': self.rdd.scope.api_callsite,
        'start_time': self.submit_time,
        'finish_time': self.finish_time,
        'num_partition': self.numPartitions,
        'mem': self.rdd.mem,
    }
    res = {
        "info": info,
        "stats": stats,
        "counters": counters,
        'graph': graph
    }
    return res
def get_tree_stages(self):
stages = []
to_visit = [self]
seen = set()
while to_visit:
s = to_visit.pop()
stages.append(s)
for ss in s.parents:
if ss.id not in seen:
to_visit.append(ss)
seen.add(ss.id)
return stages
class Scheduler:
    """Base scheduler interface.

    Subclasses override the lifecycle hooks below; the defaults do nothing.
    """

    def start(self):
        """Start background machinery (no-op by default)."""

    def runJob(self, rdd, func, partitions, allowLocal):
        """Run ``func`` over ``partitions`` of ``rdd`` (no-op by default)."""

    def clear(self):
        """Drop cached scheduling state (no-op by default)."""

    def stop(self):
        """Shut down (no-op by default)."""

    def defaultParallelism(self):
        """Fallback parallelism used when nothing better is known."""
        return 2
class CompletionEvent:
    """Outcome of one task, queued for the DAG scheduler's event loop."""

    def __init__(self, task, reason, result, accumUpdates, stats):
        # Store every field verbatim; consumers read them as attributes.
        self.task = task
        self.reason = reason
        self.stats = stats
        self.result = result
        self.accumUpdates = accumUpdates
def walk_dependencies(rdd, edge_func=lambda r, d: True, node_func=lambda r: True):
    """Breadth-first walk over an RDD dependency graph.

    ``node_func`` decides whether a node's outgoing deps are examined at
    all; ``edge_func`` decides, per dependency, whether to descend into
    ``dep.rdd``.  Each rdd (keyed by ``id``) is visited at most once.
    """
    seen = set()
    pending = [rdd]
    while pending:
        node = pending.pop(0)
        if node.id in seen:
            continue
        seen.add(node.id)
        if not node_func(node):
            continue
        for dep in node.dependencies:
            if edge_func(node, dep):
                pending.append(dep.rdd)
class DAGScheduler(Scheduler):
def __init__(self):
    self.id = self.new_id()
    # CompletionEvent queue fed by taskEnded() / abort() from backend threads.
    self.completionEvents = queue.Queue()
    self.idToStage = weakref.WeakValueDictionary()
    self.shuffleToMapStage = {}  # shuffleId -> map-output Stage
    self.cacheLocs = {}          # rdd id -> per-partition cache host lists
    self.idToRunJob = {}         # stage id -> StageInfo for the web UI
    self.runJobTimes = 0
    self.frameworkId = None
    self.loghub_dir = None       # when set, job profiles are dumped as json here
    self.jobstats = []           # marshalled profile per finished job
    self.is_dstream = False
    self.current_scope = None
    # final_lock guards final_stage/final_rdd, which another thread may read
    # through get_running_prof() while runJob() is executing.
    self.final_lock = threading.RLock()
    self.final_stage = None
    self.final_rdd = None
nextId = 0  # class-wide scheduler id counter

@classmethod
def new_id(cls):
    """Allocate the next scheduler id (1-based, class-wide)."""
    cls.nextId += 1
    return cls.nextId
def clear(self):
    """Forget all cached stage / shuffle / cache-location state."""
    for mapping in (self.idToStage, self.shuffleToMapStage, self.cacheLocs):
        mapping.clear()
def submitTasks(self, tasks):
    """Hand a batch of tasks to the execution backend; subclasses implement."""
    raise NotImplementedError
def taskEnded(self, task, reason, result, accumUpdates, stats=None):
    """Called by the backend: queue a completion event for runJob's loop."""
    event = CompletionEvent(task, reason, result, accumUpdates, stats)
    self.completionEvents.put(event)
def abort(self):
    """Queue the None sentinel, which makes runJob raise and clean up."""
    self.completionEvents.put(None)
def getCacheLocs(self, rdd):
    """Per-partition cache-host lists for ``rdd`` (empty lists if unknown)."""
    default = [[] for _ in range(len(rdd))]
    return self.cacheLocs.get(rdd.id, default)
def updateCacheLocs(self):
    """Refresh the local cache-location snapshot from the cache tracker."""
    self.cacheLocs = env.cacheTrackerServer.getLocationsSnapshot()
def newStage(self, output_rdd, shuffleDep):
    """ A stage may contain multi data pipeline, which form a tree with one final output pipline as root.
    Zip, CartesianRDD, and Union may commine diff data sources, so lead to a split of the tree.
    The leaves of the tree may be one of:
        1. a pipeline start from a source RDD (TextFileRDD, Collection)
        2. a root pipeline of a parent stage .
    Unioned rdds with same lineage keep only one by add it to dep_rdds and assign a pipeline_id.
    ---
    Be careful:
        - On one hand, logic for ui should not risk mixing newStage, the latter is much more important.
        - On the other hand, input pipeline need to link to parent stages.
    """
    parent_stages = set()
    # Pipeline bookkeeping (UI only); the output rdd starts its own pipeline.
    pipelines = {output_rdd.id: [output_rdd]}
    pipeline_edges = Counter()
    rdd_pipelines = {output_rdd.id: output_rdd.id}

    # Breadth-first walk of the lineage, stopping at shuffle boundaries.
    to_visit = [output_rdd]
    visited = set()
    dep_filter = set()
    while to_visit:
        r = to_visit.pop(0)
        if r.id in visited:
            continue
        visited.add(r.id)
        my_pipeline_id = rdd_pipelines.get(r.id)
        # NOTE(review): my_pipeline is only (re)assigned when my_pipeline_id
        # is not None, so the "if my_pipeline is None" test further down can
        # see a stale value from a previous iteration — confirm intended.
        if my_pipeline_id is not None:  # not all rdd have my_pipeline_id
            my_pipeline = pipelines.get(my_pipeline_id)
            if my_pipeline is None:
                logger.warning("miss pipeline: {} ".format(r.scope.key))
        if r.shouldCache:
            env.cacheTrackerServer.registerRDD(r.id, len(r))
        dep_rdds = []
        dep_stages = []
        for dep in r.dependencies:
            if isinstance(dep, ShuffleDependency):
                # Shuffle boundary: the producer becomes a parent stage.
                stage = self.getShuffleMapStage(dep)
                parent_stages.add(stage)
                dep_stages.append(stage)
                if my_pipeline_id is not None:
                    pipeline_edges[(stage.id, stage.rdd.id), (-1, my_pipeline_id)] += 1  # -1 : current_stage
                else:
                    logger.warning("miss pipeline: {} {}".format(r.scope.key, dep.rdd.scope.key))
            else:
                # Narrow dependency: stays inside this stage.
                to_visit.append(dep.rdd)
                if r.id not in dep_filter and dep.rdd.id in r.dep_lineage_counts:
                    dep_rdds.append(dep.rdd)
                else:
                    dep_filter.add(dep.rdd.id)
        if my_pipeline is None:
            continue
        ns, nr = len(dep_stages), len(dep_rdds)
        if ns + nr <= 1:
            # Single upstream: extend the current pipeline in place.
            if nr == 1:
                dep_rdd = dep_rdds[0]
                my_pipeline.append(dep_rdd)
                rdd_pipelines[dep_rdd.id] = my_pipeline_id
        else:
            # Multiple upstreams (zip/union/cartesian): fork new pipelines.
            for dep_rdd in dep_rdds:
                did = dep_rdd.id
                nrdd = r.dep_lineage_counts[did]
                pipelines[did] = [dep_rdd]  # create a new pipeline/branch
                rdd_pipelines[did] = did
                pipeline_edges[((-1, did), (-1, my_pipeline_id))] = nrdd  # -1 : current_stage
    stage = Stage(output_rdd, shuffleDep, list(parent_stages), pipelines, dict(pipeline_edges), rdd_pipelines)
    self.idToStage[stage.id] = stage
    logger.debug('new stage: %s', stage)
    return stage
def getShuffleMapStage(self, dep):
    """Stage producing the map output for ``dep``, created on first use."""
    existing = self.shuffleToMapStage.get(dep.shuffleId)
    if existing is not None:
        return existing
    stage = self.newStage(dep.rdd, dep)
    self.shuffleToMapStage[dep.shuffleId] = stage
    return stage
def getMissingParentStages(self, stage):
    """Parent stages whose shuffle output is not yet fully available."""
    missing = set()

    def _(r, dep):
        # Fully cached rdds need no recomputation: stop descending here.
        if r.shouldCache and all(self.getCacheLocs(r)):
            return False
        if isinstance(dep, ShuffleDependency):
            stage = self.getShuffleMapStage(dep)
            if not stage.isAvailable:
                missing.add(stage)
            # Never walk through a shuffle boundary.
            return False
        return True

    walk_dependencies(stage.rdd, _)
    return list(missing)
def get_call_graph(self, final_rdd):
    """Build the api-callsite graph of a job, for profiling/UI.

    Returns (sorted node ids, {(parent_id, child_id): count}); edges link
    distinct api callsites along rdd dependencies, plus a final edge from
    the output rdd's callsite to the current run scope.
    """
    edges = Counter()  # <parent, child > : count
    visited = set()
    to_visit = [final_rdd]
    while to_visit:
        r = to_visit.pop(0)
        if r.id in visited:
            continue
        visited.add(r.id)
        for dep in r.dependencies:
            to_visit.append(dep.rdd)
            # Only callsite transitions become edges; rdds created at the
            # same callsite are merged into one node.
            if dep.rdd.scope.api_callsite_id != r.scope.api_callsite_id:
                edges[(dep.rdd.scope.api_callsite_id, r.scope.api_callsite_id)] += 1
    nodes = set()
    run_scope = self.current_scope
    edges[(final_rdd.scope.api_callsite_id, run_scope.api_callsite_id)] = 1
    for s, d in edges.keys():
        nodes.add(s)
        nodes.add(d)
    return sorted(list(nodes)), dict(edges)
@classmethod
def fmt_call_graph(cls, g0):
    """Convert get_call_graph()'s (nodes, edges) into the UI dict format."""
    nodes0, edges0 = g0
    nodes = []
    edges = [{dag.KW_ID: "{}_{}".format(parent, child), dag.KW_SRC: parent, dag.KW_DST: child, "count": count}
             for ((parent, child), count) in edges0.items()]
    for n in nodes0:
        # Any scope registered under this callsite id carries the display info.
        scope = Scope.scopes_by_api_callsite_id[n][0]
        nodes.append({dag.KW_ID: n,
                      dag.KW_LABEL: scope.api,
                      dag.KW_DETAIL: [scope.api_callsite, scope.stack_above_api]})
    return {dag.KW_NODES: nodes, dag.KW_EDGES: edges}
def get_profs(self):
    """All finished job profiles, plus a snapshot of a running job if any."""
    profiles = [marshal.loads(blob) for blob in self.jobstats]
    current = self.get_running_prof()
    if current:
        # Round-trip through marshal to detach the snapshot from live state.
        profiles.append(marshal.loads(marshal.dumps(current)))
    return profiles
def get_running_prof(self):
    """Return live stats of the currently running job, or None.

    The check is performed *inside* ``final_lock``: runJob() clears
    final_stage/final_rdd under the same lock, so checking before
    acquiring it (as the previous code did) could race with job
    completion and call _get_stats on cleared state.
    """
    with self.final_lock:
        if self.final_stage:
            return self._get_stats(self.final_rdd, self.final_stage)
def runJob(self, finalRdd, func, partitions, allowLocal):
    """Run ``func`` over the given partitions of ``finalRdd``.

    This is a generator: partition results are yielded in partition
    order as soon as all earlier partitions have finished.  It builds
    the stage DAG, submits tasksets bottom-up, and processes
    CompletionEvents until every output partition is done.  Raises
    RuntimeError when abort() was called, or re-raises task failures.
    """
    self.runJobTimes += 1
    self.current_scope = Scope.get("Job %d:{api}" % (self.runJobTimes, ))
    outputParts = list(partitions)
    numOutputParts = len(partitions)
    finalStage = self.newStage(finalRdd, None)
    # Publish the running job so get_running_prof() can see it.
    with self.final_lock:
        self.final_rdd = finalRdd
        self.final_stage = finalStage

    # Best-effort UI bookkeeping; skipped when the web UI isn't installed.
    try:
        from dpark.web.ui.views.rddopgraph import StageInfo
        stage_info = StageInfo()
        stage_info.create_stage_info(finalStage)

        def create_stage_info_recur(cur_stage, is_final=False):
            if not cur_stage or cur_stage.id in self.idToRunJob:
                return
            for par_stage in cur_stage.parents:
                create_stage_info_recur(par_stage)
            if cur_stage.id not in self.idToRunJob:
                self.idToRunJob[cur_stage.id] = StageInfo.idToStageInfo[cur_stage.id]
                self.idToRunJob[cur_stage.id].is_final = is_final
        create_stage_info_recur(finalStage, is_final=True)
    except ImportError:
        pass

    results = [None] * numOutputParts
    finished = [None] * numOutputParts
    last_finished = 0          # next partition index to yield, in order
    finalStage.num_finished = 0

    waiting = set()            # stages blocked on missing parents
    running = set()            # stages whose tasks are submitted
    failed = set()             # map stages to resubmit after fetch failures
    pendingTasks = {}  # stage -> set([task_id..])
    lastFetchFailureTime = 0

    self.updateCacheLocs()

    logger.debug('Final stage: %s, %d', finalStage, numOutputParts)
    logger.debug('Parents of final stage: %s', finalStage.parents)
    logger.debug(
        'Missing parents: %s',
        self.getMissingParentStages(finalStage))

    def onStageFinished(stage):
        # Flush checkpoints/mutable dicts once a stage completes.
        def _(r, dep):
            return r._do_checkpoint()

        MutableDict.merge()
        walk_dependencies(stage.rdd, _)
        logger.info("stage %d finish %s", stage.id, stage.fmt_stats())

    # Fast path: single-partition job with no missing parents runs inline.
    if (allowLocal and
            (
                not finalStage.parents or
                not self.getMissingParentStages(finalStage)
            ) and numOutputParts == 1):
        split = finalRdd.splits[outputParts[0]]
        yield func(finalRdd.iterator(split))
        onStageFinished(finalStage)
        return

    def submitStage(stage):
        # Submit `stage`, first recursively submitting any missing parents.
        if not stage.submit_time:
            stage.submit_time = time.time()
        logger.debug('submit stage %s', stage)
        if stage not in waiting and stage not in running:
            missing = self.getMissingParentStages(stage)
            if not missing:
                submitMissingTasks(stage)
                running.add(stage)
            else:
                for parent in missing:
                    submitStage(parent)
                waiting.add(stage)

    def submitMissingTasks(stage):
        # Build and submit one task per unfinished partition of `stage`.
        myPending = pendingTasks.setdefault(stage, set())
        tasks = []
        # Once one partition has no preferred hosts, stop looking them up.
        have_prefer = True
        if stage == finalStage:
            for i in range(numOutputParts):
                if not finished[i]:
                    part = outputParts[i]
                    if have_prefer:
                        locs = self.getPreferredLocs(finalRdd, part)
                        if not locs:
                            have_prefer = False
                    else:
                        locs = []
                    tasks.append(ResultTask(finalStage.id, finalStage.try_id, part, finalRdd,
                                            func, locs, i))
        else:
            for part in range(stage.numPartitions):
                if not stage.outputLocs[part]:
                    if have_prefer:
                        locs = self.getPreferredLocs(stage.rdd, part)
                        if not locs:
                            have_prefer = False
                    else:
                        locs = []
                    tasks.append(ShuffleMapTask(stage.id, stage.try_id, part, stage.rdd,
                                                stage.shuffleDep, locs))
        logger.debug('add to pending %s tasks', len(tasks))
        myPending |= set(t.id for t in tasks)
        self.submitTasks(tasks)

    submitStage(finalStage)

    # Event loop: consume completion events until all outputs are done.
    while finalStage.num_finished != numOutputParts:
        try:
            evt = self.completionEvents.get(False)
        except queue.Empty:
            # No event: maybe resubmit stages hit by fetch failures.
            if (failed and
                    time.time() > lastFetchFailureTime + RESUBMIT_TIMEOUT):
                self.updateCacheLocs()
                for stage in failed:
                    logger.info('Resubmitting failed stages: %s', stage)
                    submitStage(stage)
                failed.clear()
            else:
                time.sleep(0.1)
            continue

        if evt is None:  # aborted
            # NOTE(review): active_tasksets/tasksetFinished are defined by
            # the mesos subclass; abort on other schedulers would fail here.
            for taskset in self.active_tasksets.values():
                self.tasksetFinished(taskset)
            if not self.is_dstream:
                self._keep_stats(finalRdd, finalStage)
            raise RuntimeError('TaskSet aborted!')

        task, reason = evt.task, evt.reason
        stage = self.idToStage[task.stage_id]
        if stage not in pendingTasks:  # stage from other taskset
            continue
        logger.debug('remove from pending %s from %s', task, stage)
        pendingTasks[stage].remove(task.id)
        if reason == TaskEndReason.success:
            Accumulator.merge(evt.accumUpdates)
            stage.task_stats[task.partition].append(evt.stats)
            if isinstance(task, ResultTask):
                finished[task.outputId] = True
                finalStage.num_finished += 1
                results[task.outputId] = evt.result
                # Yield every result that is now contiguous from the front.
                while last_finished < numOutputParts and finished[last_finished]:
                    yield results[last_finished]
                    results[last_finished] = None
                    last_finished += 1

                stage.finish()

            elif isinstance(task, ShuffleMapTask):
                stage = self.idToStage[task.stage_id]
                stage.addOutputLoc(task.partition, evt.result)
                if all(stage.outputLocs):
                    # Whole map stage done: publish outputs, wake dependents.
                    stage.finish()
                    logger.debug(
                        '%s finished; looking for newly runnable stages',
                        stage
                    )
                    if pendingTasks[stage]:
                        logger.warn('dirty stage %d with %d tasks'
                                    '(select at most 10 tasks:%s) not clean',
                                    stage.id, len(pendingTasks[stage]),
                                    str(list(pendingTasks[stage])[:10]))
                    del pendingTasks[stage]
                    onStageFinished(stage)
                    running.remove(stage)
                    if stage.shuffleDep is not None:
                        MapOutputTracker.set_locs(
                            stage.shuffleDep.shuffleId,
                            [l[-1] for l in stage.outputLocs])
                    self.updateCacheLocs()
                    newlyRunnable = set(
                        stage for stage in waiting
                        if not self.getMissingParentStages(stage)
                    )
                    waiting -= newlyRunnable
                    running |= newlyRunnable
                    logger.debug(
                        'newly runnable: %s, %s', waiting, newlyRunnable)
                    for stage in newlyRunnable:
                        submitMissingTasks(stage)
        elif reason == TaskEndReason.fetch_failed:
            # Shuffle fetch failed: demote this stage back to waiting and
            # mark the producing map stage for (delayed) resubmission.
            exception = evt.result
            if stage in running:
                waiting.add(stage)
                running.remove(stage)
            mapStage = self.shuffleToMapStage[exception.shuffleId]
            mapStage.removeHost(exception.serverUri)
            failed.add(mapStage)
            lastFetchFailureTime = time.time()
        else:
            logger.error(
                'task %s failed: %s %s %s',
                task,
                reason,
                type(reason),
                reason.message)
            raise Exception(reason.message)

    onStageFinished(finalStage)
    if not self.is_dstream:
        self._keep_stats(finalRdd, finalStage)
    assert all(finished)
    # Job done: clear the "running job" publication.
    with self.final_lock:
        self.final_stage = None
        self.final_rdd = None
    return
def getPreferredLocs(self, rdd, partition):
    """Preferred hosts for computing the given partition of ``rdd``."""
    split = rdd.splits[partition]
    return rdd.preferredLocations(split)
def _keep_stats(self, final_rdd, final_stage):
    """Snapshot job stats; never let profiling errors kill the job."""
    try:
        stats = self._get_stats(final_rdd, final_stage)
        # marshal keeps the snapshot immutable and cheap to store.
        self.jobstats.append(marshal.dumps(stats))
        if self.loghub_dir:
            self._dump_stats(stats)
    except Exception as e:
        logger.exception("Fail to dump job stats: %s.", e)
def _dump_stats(self, stats):
    """Write one job's profile into loghub_dir as pretty-printed JSON."""
    parts = ['sched', self.id, "job", self.runJobTimes]
    name = "_".join(map(str, parts)) + ".json"
    path = os.path.join(self.loghub_dir, name)
    logger.info("writing profile to %s", path)
    with open(path, 'w') as f:
        json.dump(stats, f, indent=4)
def _get_stats(self, final_rdd, final_stage):
    """Assemble the full profile dict for one job run (script info, call
    graph, per-stage profiles, and a synthetic sink node for the UI)."""
    call_graph = self.fmt_call_graph(self.get_call_graph(final_rdd))
    cmd = '[dpark] ' + \
          os.path.abspath(sys.argv[0]) + ' ' + ' '.join(sys.argv[1:])
    # Profiles of every stage in the tree, ordered by submit time.
    stages = sorted([s.get_prof() for s in final_stage.get_tree_stages()],
                    key=lambda x: x['info']['start_time'])
    sink_scope = self.current_scope
    # The "sink" represents the job's output endpoint in the graph.
    sink_id = "SINK_{}_{}".format(self.id, self.runJobTimes)
    sink_node = {
        dag.KW_TYPE: "sink",
        dag.KW_ID: sink_id,
        dag.KW_LABEL: sink_scope.name,
        "call_id": sink_scope.api_callsite_id
    }
    sink_edge = {
        "source": final_stage.get_node_id(final_stage.id, final_stage.rdd_pipelines[final_rdd.id]),
        "target": sink_id,
    }
    run = {'framework': self.frameworkId,
           'scheduler': self.id,
           "run": self.runJobTimes,
           'sink': {
               "call_site": sink_scope.api_callsite,
               "node": sink_node,
               "edges": sink_edge,
           },
           'stages': stages,
           "call_graph": call_graph,
           }
    ret = {
        'script': {
            'cmd': cmd,
            'env': {'PWD': os.getcwd()}
        },
        'run': run
    }
    return ret
return ret
def run_task(task, aid):
    """Execute ``task`` (attempt ``aid``) and collect its side results.

    Returns (task_id, result, accumulator_updates).  On failure the
    exception is re-raised with a ``task_id`` attribute attached so the
    caller can map it back to the task.
    """
    logger.debug('Running task %r', task)
    try:
        Accumulator.clear()
        result = task.run(aid)
        accumUpdates = Accumulator.values()
        MutableDict.flush()
        return task.id, result, accumUpdates
    except Exception as e:
        logger.error('error in task %s', task)
        import traceback
        traceback.print_exc()
        e.task_id = task.id
        # Bare raise re-raises the active exception and, unlike the previous
        # `raise e`, preserves the original traceback on Python 2 as well
        # (this module still supports py2 via six).
        raise
class LocalScheduler(DAGScheduler):
    """Scheduler that runs every task synchronously in the current process."""

    attemptId = 0  # monotonically increasing attempt counter

    def nextAttempId(self):
        """Return the next attempt id (1-based)."""
        self.attemptId += 1
        return self.attemptId

    def submitTasks(self, tasks):
        """Run each task immediately; report the outcome via taskEnded.

        Tasks are round-tripped through pickle so local runs exercise the
        same serialization path as cluster runs.
        """
        logger.debug('submit tasks %s in LocalScheduler', tasks)
        for task in tasks:
            task_copy = cPickle.loads(cPickle.dumps(task, -1))
            try:
                _, result, update = run_task(task_copy, self.nextAttempId())
                self.taskEnded(task, TaskEndReason.success, result, update)
            except Exception:
                # Log the failure instead of swallowing it silently; the
                # scheduler is still notified via other_failure as before.
                logger.exception('task %s failed in LocalScheduler', task)
                self.taskEnded(task, TaskEndReason.other_failure, None, None)
def run_task_in_process(task, tid, environ):
    """Pool worker entry point: run one task in a subprocess.

    Returns a (reason, payload) pair instead of raising, so the parent
    process can tell success from failure inside the pool callback.
    """
    try:
        return TaskEndReason.success, run_task(task, tid)
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception as exc:
        return TaskEndReason.other_failure, exc
class MultiProcessScheduler(LocalScheduler):
    """Run tasks in a local multiprocessing.Pool instead of inline."""

    def __init__(self, threads):
        LocalScheduler.__init__(self)
        self.threads = threads
        self.tasks = {}   # task id -> task, for tasks currently in the pool
        self.pool = None  # created lazily on first submit

    def submitTasks(self, tasks):
        """Dispatch ``tasks`` asynchronously to the worker pool.

        Outcomes come back through ``callback`` (run in a pool helper
        thread of the parent process), which forwards them to taskEnded.
        """
        if not tasks:
            return
        logger.info('Got a taskset with %d tasks: %s', len(tasks), tasks[0].rdd)
        total, self.finished, start = len(tasks), 0, time.time()

        def initializer():
            # when called on subprocess of multiprocessing's Pool,
            # default sighandler of SIGTERM will be called to quit gracefully,
            # and we ignore other signals to prevent dead lock.
            import signal
            from .context import _signals
            for sig in _signals:
                if sig == signal.SIGTERM:
                    signal.signal(sig, signal.SIG_DFL)
                else:
                    signal.signal(sig, signal.SIG_IGN)

        def callback(args):
            state, data = args
            logger.debug('task end: %s', state)
            if state == TaskEndReason.other_failure:
                logger.warning('task failed: %s', data)
                # BUGFIX: look the task object up (and remove it from the
                # pending map) before reporting.  Previously the raw task id
                # was passed to taskEnded -- which the DAG event loop cannot
                # handle, since it reads task.stage_id -- and the entry
                # leaked from self.tasks.
                failed_task = self.tasks.pop(data.task_id, data.task_id)
                self.taskEnded(failed_task, TaskEndReason.other_failure,
                               result=None, accumUpdates=None)
                return
            tid, result, update = data
            task = self.tasks.pop(tid)
            self.finished += 1
            logger.info('Task %s finished (%d/%d) \x1b[1A',
                        tid, self.finished, total)
            if self.finished == total:
                logger.info(
                    'TaskSet finished in %.1f seconds' + ' ' * 20,
                    time.time() - start)
            self.taskEnded(task, TaskEndReason.success, result, update)

        for task in tasks:
            logger.debug('put task async: %s', task)
            self.tasks[task.id] = task
            if not self.pool:
                # daemonic processes are not allowed to have children
                from dpark.broadcast import start_download_manager
                start_download_manager()
                self.pool = multiprocessing.Pool(
                    self.threads or 2,
                    initializer=initializer
                )
            self.pool.apply_async(run_task_in_process,
                                  [task, self.nextAttempId(), env.environ],
                                  callback=callback)

    def stop(self):
        """Terminate and join the worker pool, if one was ever created."""
        if self.pool:
            self.pool.terminate()
            self.pool.join()
            logger.debug('process pool stopped')
logger.debug('process pool stopped')
def safe(f):
    """Decorator: run ``f`` while holding ``self.lock`` of the instance."""
    def locked(self, *args, **kwargs):
        with self.lock:
            return f(self, *args, **kwargs)
    return locked
class LogReceiver(object):
    """Collects log lines pushed over a zmq PULL socket and writes them to a
    local file-like object (used to stream executor stdout/stderr back)."""

    def __init__(self, output):
        self.output = output   # file-like sink, e.g. sys.stdout
        self._started = False
        self.addr = None       # tcp address remote writers should push to

    def start(self):
        """Bind a PULL socket on a random port and spawn the collector thread."""
        ctx = zmq.Context()
        sock = ctx.socket(zmq.PULL)
        port = sock.bind_to_random_port('tcp://0.0.0.0')
        self._started = True

        def collect_log():
            # Poll with a 1s timeout so the thread notices stop() quickly.
            while self._started:
                if sock.poll(1000, zmq.POLLIN):
                    line = sock.recv()
                    self.output.write(line)
            sock.close()
            ctx.destroy()

        spawn(collect_log)
        host = socket.gethostname()
        self.addr = 'tcp://%s:%d' % (host, port)
        logger.debug('log collecter start at %s', self.addr)

    def stop(self):
        """Ask the collector thread to exit on its next poll timeout."""
        self._started = False
class MesosScheduler(DAGScheduler):
    """DAG scheduler that launches dpark tasks on a Mesos cluster.

    Implements the pymesos scheduler callback interface (registered,
    resourceOffers, statusUpdate, ...) guarded by the @safe lock decorator.
    """

    def __init__(self, master, options, webui_url=None):
        DAGScheduler.__init__(self)
        self.master = master                      # mesos master address
        self.cpus = options.cpus                  # default cpus per task
        self.mem = options.mem                    # default memory per task (MB)
        self.task_per_node = options.parallel or 8  # concurrent task cap per agent
        self.group = options.group                # allowed agent groups
        self.logLevel = options.logLevel
        self.options = options
        self.role = options.role                  # mesos framework role, optional
        self.color = options.color                # enable ANSI progress output
        self.webui_url = webui_url
        self.started = False                      # driver running?
        self.last_finish_time = 0                 # for the idle watchdog
        self.last_task_launch_time = None         # for offer suppression
        self.is_suppressed = False                # offers currently suppressed?
        self.isRegistered = False                 # framework registered with master?
        self.executor = None                      # ExecutorInfo, built on registration
        self.driver = None                        # MesosSchedulerDriver, lazy
        self.out_logger = LogReceiver(sys.stdout)
        self.err_logger = LogReceiver(sys.stderr)
        self.lock = threading.RLock()             # guards all @safe callbacks
        self.task_host_manager = TaskHostManager()
        self.init_tasksets()
    def init_tasksets(self):
        """Reset all bookkeeping mapping tasksets/task tries to mesos agents."""
        self.active_tasksets = {}    # taskset id -> TaskSet
        self.ttid_to_agent_id = {}   # task try id -> agent id it runs on
        self.agent_id_to_ttids = {}  # agent id -> count of running task tries

    def clear(self):
        """Clear inherited DAG state and the taskset bookkeeping."""
        DAGScheduler.clear(self)
        self.init_tasksets()

    def processHeartBeat(self):
        # no need in dpark now, just for compatibility with pymesos
        pass

    def start(self):
        """Start stdout/stderr log collectors; the driver starts lazily on
        first submitTasks()."""
        self.out_logger.start()
        self.err_logger.start()
    def start_driver(self):
        """Register this framework with mesos and spawn the idle watchdog.

        Raises if run as root (executors would then also run as root).
        """
        # Framework name shown in the mesos UI: entry script plus args.
        name = '[dpark] ' + \
               os.path.abspath(sys.argv[0]) + ' ' + ' '.join(sys.argv[1:])
        if len(name) > 256:
            name = name[:256] + '...'

        framework = Dict()
        framework.user = getuser()
        if framework.user == 'root':
            raise Exception('dpark is not allowed to run as \'root\'')
        framework.name = name
        if self.role:
            framework.role = self.role
        framework.hostname = socket.gethostname()
        if self.webui_url:
            framework.webui_url = self.webui_url

        self.driver = MesosSchedulerDriver(
            self, framework, self.master, use_addict=True
        )
        self.driver.start()
        logger.debug('Mesos Scheudler driver started')

        self.started = True
        self.last_finish_time = time.time()

        def check():
            # Watchdog thread: stop the scheduler once idle for longer than
            # MAX_IDLE_TIME, and revive offers whenever a taskset reports
            # timed-out tasks needing re-launch.
            while self.started:
                with self.lock:
                    now = time.time()
                    if (not self.active_tasksets and
                            now - self.last_finish_time > MAX_IDLE_TIME):
                        logger.info('stop mesos scheduler after %d seconds idle',
                                    now - self.last_finish_time)
                        self.stop()
                        break

                    for taskset in self.active_tasksets.values():
                        if taskset.check_task_timeout():
                            self.requestMoreResources()
                time.sleep(1)

        spawn(check)
    @safe
    def registered(self, driver, frameworkId, masterInfo):
        """Mesos callback: framework successfully registered with the master.

        Builds the ExecutorInfo (needs the framework id) and sets up loghub.
        """
        self.isRegistered = True
        self.frameworkId = frameworkId.value
        logger.debug('connect to master %s:%s, registered as %s',
                     masterInfo.hostname, masterInfo.port, frameworkId.value)
        self.executor = self.getExecutorInfo(str(frameworkId.value))
        from dpark.utils.log import add_loghub
        _, self.loghub_dir = add_loghub(self.frameworkId)

    @safe
    def reregistered(self, driver, masterInfo):
        # Mesos callback: reconnected after a master failover.
        logger.warning('re-connect to mesos master %s:%s',
                       masterInfo.hostname, masterInfo.port)

    def disconnected(self, driver):
        # Mesos callback: lost connection to the master.
        logger.debug('framework is disconnected')

    def _get_container_image(self):
        # Docker image to run executors in; falsy value disables containers.
        return self.options.image
@safe
def getExecutorInfo(self, framework_id):
info = Dict()
info.framework_id.value = framework_id
info.command.value = '%s %s' % (
sys.executable,
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'executor.py'))
)
info.executor_id.value = env.get('DPARK_ID', 'default')
info.command.environment.variables = variables = []
info.shutdown_grace_period.nanoseconds = 5 * (10 ** 9)
v = Dict()
variables.append(v)
v.name = 'UID'
v.value = str(os.getuid())
v = Dict()
variables.append(v)
v.name = 'GID'
v.value = str(os.getgid())
container_image = self._get_container_image()
if container_image:
info.container.type = 'DOCKER'
info.container.docker.image = container_image
info.container.docker.parameters = parameters = []
p = Dict()
p.key = 'memory-swap'
p.value = '-1'
parameters.append(p)
info.container.volumes = volumes = []
for path in ['/etc/passwd', '/etc/group']:
v = Dict()
volumes.append(v)
v.host_path = v.container_path = path
v.mode = 'RO'
for path in conf.MOOSEFS_MOUNT_POINTS:
v = Dict()
volumes.append(v)
v.host_path = v.container_path = path
v.mode = 'RW'
for path in conf.DPARK_WORK_DIR.split(','):
v = Dict()
volumes.append(v)
v.host_path = v.container_path = path
v.mode = 'RW'
def _mount_volume(_volumes, _host_path, _container_path, _mode):
_v = Dict()
_volumes.append(_v)
_v.container_path = _container_path
_v.mode = _mode
if _host_path:
_v.host_path = _host_path
if self.options.volumes:
for volume in self.options.volumes.split(','):
fields = volume.split(':')
if len(fields) == 3:
host_path, container_path, mode = fields
mode = mode.upper()
assert mode in ('RO', 'RW')
elif len(fields) == 2:
host_path, container_path = fields
mode = 'RW'
elif len(fields) == 1:
container_path, = fields
host_path = ''
mode = 'RW'
else:
raise Exception('cannot parse volume %s', volume)
_mount_volume(volumes, host_path,
container_path, mode)
info.resources = resources = []
mem = Dict()
resources.append(mem)
mem.name = 'mem'
mem.type = 'SCALAR'
mem.scalar.value = EXECUTOR_MEMORY
cpus = Dict()
resources.append(cpus)
cpus.name = 'cpus'
cpus.type = 'SCALAR'
cpus.scalar.value = EXECUTOR_CPUS
Script = os.path.realpath(sys.argv[0])
info.name = Script
info.data = encode_data(marshal.dumps(
(
Script, os.getcwd(), sys.path, dict(os.environ),
self.task_per_node, self.out_logger.addr, self.err_logger.addr,
self.logLevel, self.color, env.environ
)
))
assert len(info.data) < (50 << 20), \
'Info data too large: %s' % (len(info.data),)
return info
@safe
def submitTasks(self, tasks):
if not tasks:
return
rdd = tasks[0].rdd
assert all(t.rdd is rdd for t in tasks)
taskset = TaskSet(self, tasks, rdd.cpus or self.cpus, rdd.mem or self.mem,
rdd.gpus, self.task_host_manager)
self.active_tasksets[taskset.id] = taskset
stage_scope = ''
try:
from dpark.web.ui.views.rddopgraph import StageInfo
stage_scope = StageInfo.idToRDDNode[tasks[0].rdd.id].scope.api_callsite
except:
pass
stage = self.idToStage[tasks[0].stage_id]
stage.num_try += 1
stage.taskcounters.append(taskset.counter)
logger.info(
'Got taskset %s with %d tasks for stage: %d '
'at scope[%s] and rdd:%s',
taskset.id,
len(tasks),
tasks[0].stage_id,
stage_scope,
tasks[0].rdd)
need_revive = self.started
if not self.started:
self.start_driver()
while not self.isRegistered:
self.lock.release()
time.sleep(0.01)
self.lock.acquire()
if need_revive:
self.requestMoreResources()
    def requestMoreResources(self):
        """Ask mesos to resume sending resource offers (undo a suppress)."""
        logger.debug('reviveOffers')
        self.driver.reviveOffers()
        self.is_suppressed = False
    @safe
    def resourceOffers(self, driver, offers):
        """Mesos callback: match offered resources against active tasksets.

        Suppresses offers when there is no demand, filters out unusable
        agents (banned, wrong group, pending maintenance, unhealthy), then
        greedily assigns tasks from each taskset to the remaining offers and
        launches them.  Unused offers are declined.
        """
        rf = Dict()
        now = time.time()
        # No demand: everything launched (or nothing active) and no recent
        # launches -> tell mesos to stop sending offers for a while.
        if not self.active_tasksets or (all(taskset.counter.launched == taskset.counter.n
                                            for taskset in self.active_tasksets.values())
                                        and self.last_task_launch_time is not None
                                        and self.last_task_launch_time + conf.TIME_TO_SUPPRESS < now):
            logger.debug('suppressOffers')
            driver.suppressOffers()
            self.is_suppressed = True
            rf.refuse_seconds = 60 * 5
            for o in offers:
                driver.declineOffer(o.id, rf)
            return

        start = time.time()
        filter_offer = []
        for o in offers:
            try:
                if conf.ban(o.hostname):
                    logger.debug("skip offer on banned node: %s", o.hostname)
                    driver.declineOffer(o.id, filters=Dict(refuse_seconds=0xFFFFFFFF))
                    continue
            except:
                logger.exception("bad ban() func in dpark.conf")

            # group-based placement: only accept agents in our groups
            group = (
                self.getAttribute(
                    o.attributes,
                    'group') or 'None')
            if (self.group or group.startswith(
                    '_')) and group not in self.group:
                driver.declineOffer(o.id, filters=Dict(refuse_seconds=0xFFFFFFFF))
                continue

            # skip agents that will enter a maintenance window before a
            # typical task could finish
            unavailability = o.get('unavailability')
            if (unavailability is not None and
                    sec2nanosec(time.time() + conf.DEFAULT_TASK_TIME) >= unavailability['start']['nanoseconds']):
                logger.debug('the host %s plan to maintain, so skip it', o.hostname)
                driver.declineOffer(o.id, filters=Dict(refuse_seconds=600))
                continue

            if self.task_host_manager.is_unhealthy_host(o.hostname):
                logger.warning('the host %s is unhealthy so skip it', o.hostname)
                driver.declineOffer(o.id, filters=Dict(refuse_seconds=1800))
                continue
            self.task_host_manager.register_host(o.hostname)
            filter_offer.append(o)
        offers = filter_offer

        cpus = [self.getResource(o.resources, 'cpus') for o in offers]
        gpus = [self.getResource(o.resources, 'gpus') for o in offers]
        # reserve EXECUTOR_MEMORY on agents not yet running our executor
        mems = [self.getResource(o.resources, 'mem')
                - (o.agent_id.value not in self.agent_id_to_ttids
                   and EXECUTOR_MEMORY or 0)
                for o in offers]

        # logger.debug('get %d offers (%s cpus, %s mem, %s gpus), %d tasksets',
        #              len(offers), sum(cpus), sum(mems), sum(gpus), len(self.active_tasksets))

        mesos_tasks = {}  # offer id -> list of mesos TaskInfo
        tasks = {}        # offer id -> list of dpark tasks
        max_create_time = 0
        for taskset in self.active_tasksets.values():
            # keep offering hosts to the taskset until it assigns nothing
            while True:
                host_offers = {}
                for i, o in enumerate(offers):
                    if self.agent_id_to_ttids.get(o.agent_id.value, 0) >= self.task_per_node:
                        logger.debug('the task limit exceeded at host %s',
                                     o.hostname)
                        continue
                    if (mems[i] < self.mem + EXECUTOR_MEMORY
                            or cpus[i] < self.cpus + EXECUTOR_CPUS):
                        continue
                    host_offers[o.hostname] = (i, o)

                assigned_list = taskset.taskOffer(host_offers, cpus, mems, gpus)
                if not assigned_list:
                    break

                for i, o, t in assigned_list:
                    t0 = time.time()
                    mesos_task = self.createTask(o, t)
                    max_create_time = max(max_create_time, time.time() - t0)
                    mesos_tasks.setdefault(o.id.value, []).append(mesos_task)
                    tasks.setdefault(o.id.value, []).append(t)

                    logger.debug('dispatch %s into %s', t, o.hostname)

                    ttid = mesos_task.task_id.value
                    agent_id = o.agent_id.value
                    taskset.ttids.add(ttid)
                    self.ttid_to_agent_id[ttid] = agent_id
                    self.agent_id_to_ttids[agent_id] = self.agent_id_to_ttids.get(agent_id, 0) + 1

                    # deduct consumed resources from the offer's remainder
                    cpus[i] -= min(cpus[i], t.cpus)
                    mems[i] -= t.mem
                    gpus[i] -= t.gpus

        used = time.time() - start
        if used > 10:
            logger.warning('use too much time in resourceOffers: %.2fs, %d offers,'
                           'assigned %d tasks, max_create_time = %ds',
                           used,
                           len(offers),
                           len(mesos_tasks),
                           max_create_time)

        for o in offers:
            oid = o.id.value
            if oid in mesos_tasks:
                driver.launchTasks(o.id, mesos_tasks[oid])
                for task in tasks[oid]:
                    task.stage_time = time.time()
            else:
                driver.declineOffer(o.id)

        if tasks:
            self.last_task_launch_time = time.time()

        # logger.debug('reply with %d tasks, %s cpus %s mem %s gpus left',
        #              sum(len(ts) for ts in tasks.values()),
        #              sum(cpus), sum(mems), sum(gpus))
    @safe
    def inverseOffers(self, driver, offers):
        # Accept all inverse offers (planned maintenance) unconditionally.
        for o in offers:
            driver.acceptInverseOffers(o.id)

    @safe
    def offerRescinded(self, driver, offer_id):
        logger.debug('rescinded offer: %s', offer_id)
        if self.active_tasksets:
            # Still have work: ask for replacement offers right away.
            self.requestMoreResources()

    def getResource(self, res, name):
        """Return the scalar value of resource *name* in *res*, or 0.0."""
        for r in res:
            if r.name == name:
                return r.scalar.value
        return 0.0

    def getAttribute(self, attrs, name):
        """Return the text value of agent attribute *name*, or None."""
        for r in attrs:
            if r.name == name:
                return r.text.value
def createTask(self, o, t):
task = Dict()
tid = t.try_id
task.name = 'task %s' % tid
task.task_id.value = tid
task.agent_id.value = o.agent_id.value
task.data = encode_data(
compress(cPickle.dumps((t, tid), -1))
)
task.executor = self.executor
if len(task.data) > 1000 * 1024:
logger.warning('task too large: %s %d',
t, len(task.data))
assert len(task.data) < (50 << 20), \
'Task data too large: %s' % (len(task.data),)
resources = task.resources = []
cpu = Dict()
resources.append(cpu)
cpu.name = 'cpus'
cpu.type = 'SCALAR'
cpu.scalar.value = t.cpus
mem = Dict()
resources.append(mem)
mem.name = 'mem'
mem.type = 'SCALAR'
mem.scalar.value = t.mem
gpu = Dict()
resources.append(gpu)
gpu.name = 'gpus'
gpu.type = 'SCALAR'
gpu.scalar.value = t.gpus
return task
    @safe
    def statusUpdate(self, driver, status):
        """Mesos callback: route a task status update to its taskset.

        Kills orphaned tasks whose taskset has gone, ignores duplicates,
        decodes results of finished tasks (fetching via HTTP when the
        executor spilled a large result to a URL), and forwards failures.
        """

        def plot_progresses():
            # redraw one progress line per active taskset using ANSI escapes
            if self.color:
                total = len(self.active_tasksets)
                logger.info('\x1b[2K\x1b[J\x1b[1A')
                for i, taskset_id in enumerate(self.active_tasksets):
                    if i == total - 1:
                        ending = '\x1b[%sA' % total
                    else:
                        ending = ''

                    tasksets = self.active_tasksets[taskset_id]
                    tasksets.progress(ending)

        mesos_task_id = status.task_id.value
        state = status.state
        source = status.source
        reason = status.get('reason')
        msg = status.get('message')  # type: str
        if source == 'SOURCE_EXECUTOR' and msg:
            # the dpark executor encodes "reason:message" in one field
            reason, msg = msg.split(':', 1)
        data = status.get('data')
        if data is not None:
            data = cPickle.loads(decode_data(data))

        logger.debug('status update: %s %s %s %s', mesos_task_id, state, reason, msg)

        ttid = TTID(mesos_task_id)
        taskset = self.active_tasksets.get(ttid.taskset_id)  # type: TaskSet
        if taskset is None:
            # taskset already finished/aborted: kill a running straggler
            if state == TaskState.running:
                logger.debug('kill task %s as its taskset has gone', mesos_task_id)
                self.driver.killTask(Dict(value=mesos_task_id))
            else:
                logger.debug('ignore task %s as its taskset has gone', mesos_task_id)
            return

        if mesos_task_id not in taskset.ttids:
            logger.debug('ignore task %s as it has finished or failed, new msg: %s', mesos_task_id, (state, reason))
            return

        if state == TaskState.running:
            taskset.statusUpdate(ttid.task_id, ttid.task_try, state)
            if taskset.counter.finished == 0:
                plot_progresses()
            return

        # terminal state: drop the try from all bookkeeping
        taskset.ttids.discard(mesos_task_id)
        if mesos_task_id in self.ttid_to_agent_id:
            agent_id = self.ttid_to_agent_id[mesos_task_id]
            if agent_id in self.agent_id_to_ttids:
                self.agent_id_to_ttids[agent_id] -= 1
            del self.ttid_to_agent_id[mesos_task_id]

        if state == TaskState.finished:
            try:
                result, accUpdate, task_stats = data
                flag, data = result
                if flag >= 2:
                    # flag bit 2: result was spilled to a URL by the executor
                    try:
                        data = urllib.request.urlopen(data).read()
                    except IOError:
                        # try again
                        data = urllib.request.urlopen(data).read()
                    flag -= 2

                data = decompress(data)
                if flag == 0:
                    # flag 0: marshal-serialised, otherwise pickled
                    result = marshal.loads(data)
                else:
                    result = cPickle.loads(data)
                taskset.statusUpdate(ttid.task_id, ttid.task_try, state,
                                     result=result, update=accUpdate, stats=task_stats)
                plot_progresses()
            except Exception as e:
                # treat an undecodable result as a task failure
                logger.warning('error when cPickle.loads(): %s, data:%s', e, len(data))
                state = TaskState.failed
                taskset.statusUpdate(ttid.task_id, ttid.task_try, state,
                                     reason=TaskEndReason.load_failed, message='load failed: %s' % e)
        else:
            # failed/killed/lost: forward the executor's exception, if any
            exception = data if source == 'SOURCE_EXECUTOR' else None  # type: Optional[Exception]
            taskset.statusUpdate(ttid.task_id, ttid.task_try, state, reason, msg, result=exception)
    @safe
    def tasksetFinished(self, taskset):
        """Tear down a completed taskset: kill stragglers, drop bookkeeping."""
        logger.debug('taskset %s finished', taskset.id)
        if taskset.id in self.active_tasksets:
            self.last_finish_time = time.time()
            for mesos_task_id in taskset.ttids:
                self.driver.killTask(Dict(value=mesos_task_id))
            del self.active_tasksets[taskset.id]
            if not self.active_tasksets:
                self.agent_id_to_ttids.clear()

    @safe
    def error(self, driver, message):
        # Mesos callback: unrecoverable framework error.
        logger.error('Mesos error message: %s', message)
        raise RuntimeError(message)

    # @safe
    def stop(self):
        # NOTE(review): deliberately not @safe -- called from the watchdog
        # thread while it already holds the lock; presumably avoids blocking
        # in driver.stop()/join() under the lock.  TODO confirm.
        if not self.started:
            return
        logger.debug('stop scheduler')
        self.started = False
        self.isRegistered = False
        self.driver.stop(False)
        self.driver.join()
        self.driver = None
        self.out_logger.stop()
        self.err_logger.stop()

    def defaultParallelism(self):
        # Fallback parallelism when an RDD does not specify one.
        return 16

    def frameworkMessage(self, driver, executor_id, agent_id, data):
        # Free-form message from an executor; surfaced as a warning.
        logger.warning('[agent %s] %s', agent_id.value, data)

    def executorLost(self, driver, executor_id, agent_id, status):
        logger.warning(
            'executor at %s %s lost: %s',
            agent_id.value,
            executor_id.value,
            status)
        self.agent_id_to_ttids.pop(agent_id.value, None)

    def slaveLost(self, driver, agent_id):
        logger.warning('agent %s lost', agent_id.value)
        self.agent_id_to_ttids.pop(agent_id.value, None)

    def killTask(self, task_id, num_try):
        """Ask mesos to kill one specific attempt of a task."""
        tid = Dict()
        tid.value = TTID.make_ttid(task_id, num_try)
        self.driver.killTask(tid)
| bsd-3-clause | fed4a412a9cddbc2fb7686b148d07adb | 33.930945 | 117 | 0.516254 | 4.057435 | false | false | false | false |
dials/dials | tests/algorithms/indexing/lattice_search/test_lattice_search.py | 1 | 4073 | from __future__ import annotations
import py.path
import pytest
from cctbx import sgtbx, uctbx
from dxtbx.imageset import ImageSet
from dxtbx.serialize import load
from dials.algorithms.indexing import lattice_search, stills_indexer
from dials.array_family import flex
from dials.command_line.index import phil_scope
from dials.command_line.slice_sequence import slice_experiments, slice_reflections
@pytest.fixture
def i04_weak_data(dials_regression):
    """Load the i04 weak-data indexing test set, sliced to images 1-20."""
    data_dir = py.path.local(dials_regression).join(
        "indexing_test_data", "i04_weak_data"
    )
    reflections = flex.reflection_table.from_file(data_dir.join("full.pickle").strpath)
    experiments = load.experiment_list(
        data_dir.join("experiments_import.json").strpath, check_format=False
    )
    image_range = [(1, 20)]
    return {
        "reflections": slice_reflections(reflections, image_range),
        "experiments": slice_experiments(experiments, image_range),
    }
@pytest.mark.parametrize(
    "indexing_method,space_group,unit_cell",
    (
        ("fft1d", None, None),
        ("fft3d", None, None),
        # real_space_grid_search requires known symmetry
        ("real_space_grid_search", "P422", (57.8, 57.8, 150.0, 90, 90, 90)),
    ),
)
def test_BasisVectorSearch_i04_weak_data(
    i04_weak_data, indexing_method, space_group, unit_cell
):
    """Index the i04 weak data with each basis-vector-search method and check
    a single experiment with the expected unit cell is obtained."""
    reflections = i04_weak_data["reflections"]
    experiments = i04_weak_data["experiments"]
    params = phil_scope.fetch().extract()
    params.indexing.refinement_protocol.n_macro_cycles = 2
    # limit refined candidate basis sets to keep the test fast
    params.indexing.basis_vector_combinations.max_refine = 5
    params.indexing.method = indexing_method
    if unit_cell is not None:
        params.indexing.known_symmetry.unit_cell = uctbx.unit_cell(unit_cell)
    if space_group is not None:
        params.indexing.known_symmetry.space_group = sgtbx.space_group_info(space_group)
    idxr = lattice_search.BasisVectorSearch(reflections, experiments, params)
    idxr.index()
    indexed_experiments = idxr.refined_experiments
    assert len(indexed_experiments) == 1
    assert indexed_experiments[0].crystal.get_unit_cell().parameters() == pytest.approx(
        (57.752, 57.776, 150.013, 90.0101, 89.976, 90.008), rel=1e-3
    )
@pytest.mark.xfel
@pytest.mark.parametrize(
    "indexing_method,space_group,unit_cell",
    (
        ("fft3d", "P422", (57.8, 57.8, 150.0, 90, 90, 90)),
        ("fft1d", "P422", (57.8, 57.8, 150.0, 90, 90, 90)),
        ("real_space_grid_search", "P422", (57.8, 57.8, 150.0, 90, 90, 90)),
        ("low_res_spot_match", "P422", (57.8, 57.8, 150.0, 90, 90, 90)),
    ),
)
def test_stills_indexer_methods_i04_weak_data(
    i04_weak_data, indexing_method, space_group, unit_cell
):
    """Convert a single rotation image to a still and index it with each
    stills-indexer method, checking the resulting unit cell."""
    reflections = slice_reflections(i04_weak_data["reflections"], [(1, 2)])
    experiments = slice_experiments(i04_weak_data["experiments"], [(1, 2)])
    for experiment in experiments:
        # strip scan/goniometer so the experiment is treated as a still
        experiment.imageset = ImageSet(
            experiment.imageset.data(), experiment.imageset.indices()
        )
        experiment.imageset.set_scan(None)
        experiment.imageset.set_goniometer(None)
        experiment.scan = None
        experiment.goniometer = None

    params = phil_scope.fetch().extract()
    params.indexing.method = indexing_method
    params.indexing.basis_vector_combinations.max_refine = 5
    if unit_cell is not None:
        params.indexing.known_symmetry.unit_cell = uctbx.unit_cell(unit_cell)
    if space_group is not None:
        params.indexing.known_symmetry.space_group = sgtbx.space_group_info(space_group)
    # low_res_spot_match is a lattice search, not a basis-vector search;
    # fall back when the basis-vector constructor rejects the method
    try:
        idxr = stills_indexer.StillsIndexerBasisVectorSearch(
            reflections, experiments, params
        )
    except RuntimeError:
        idxr = stills_indexer.StillsIndexerLatticeSearch(
            reflections, experiments, params
        )
    idxr.index()
    indexed_experiments = idxr.refined_experiments
    assert len(indexed_experiments) == 1
    assert indexed_experiments[0].crystal.get_unit_cell().parameters() == pytest.approx(
        (57.752, 57.776, 150.013, 90.0101, 89.976, 90.008), rel=1e-2
    )
| bsd-3-clause | 9e8d5f72f075f7e7386043ea208406b1 | 36.712963 | 88 | 0.67616 | 3.21722 | false | true | false | false |
dials/dials | src/dials/util/image_viewer/slip_viewer/ring_frame.py | 1 | 11177 | from __future__ import annotations
import math
import wx
class RingSettingsFrame(wx.MiniFrame):
    """Mini-frame hosting a single RingSettingsPanel; destroys itself on close."""

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel = RingSettingsPanel(self)
        self.SetSizer(sizer)
        sizer.Add(panel, 1, wx.EXPAND)
        sizer.Fit(panel)
        # keep references for the parent viewer
        self.panel = panel
        self.sizer = sizer
        self.Fit()
        self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy(), self)
class RingSettingsPanel(wx.Panel):
    def __init__(self, *args, **kwds):
        """Build the slider/spinner controls for an adjustable resolution ring."""
        # XXX Support several rings.  Plot radial distribution somewhere
        # (not here), but maybe distribution along ring.  Drop-down menu
        # for ring center, and button to reset to beam center.
        super().__init__(*args, **kwds)

        # Needed to draw and delete the rings.  XXX Applies to
        # calibration_frame as well?
        self._pyslip = self.GetParent().GetParent().pyslip

        sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sizer)

        # Number of decimal digits for distances.
        self.digits = 2

        # Distance control XXX probably does not belong here
        # print "DISTANCE",self.GetParent().GetParent().viewer._img
        # box = wx.BoxSizer(wx.HORIZONTAL)
        # from wxtbx.phil_controls.floatctrl import FloatCtrl
        # from wxtbx.phil_controls import EVT_PHIL_CONTROL
        # self.distance_ctrl = FloatCtrl(self, pos=(300,180), size=(80,-1),
        #   value=80.00,
        #   name="Detector Distance")
        # self.distance_ctrl.SetMax(1000)
        # self.distance_ctrl.SetMin(5)
        # self.distance_ctrl.SetOptional(False)
        # box.Add(self.distance_ctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        # txtd = wx.StaticText(self, label="Detector Distance")
        # box.Add(txtd, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        # s.Add(box)

        from wx.lib.agw.floatspin import EVT_FLOATSPIN, FloatSpin

        # XXX Should really make value be in Aangstroem resolution, and
        # have a non-linear slider.
        self._radius = 100    # ring radius, in picture pixels
        self._center = [0, 0]  # ring centre offset from beam centre (fast, slow)
        radius_max = 2000
        radius_min = 10

        # Radius controls: slider and spinner kept in sync by the handlers.
        box = wx.BoxSizer(wx.HORIZONTAL)

        self.slider = wx.Slider(
            self,
            maxValue=radius_max,
            minValue=radius_min,
            size=(250, -1),
            style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL,
            value=self._radius,
        )
        box.Add(
            self.slider, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )
        self.Bind(wx.EVT_SLIDER, self.OnSlide, self.slider)

        self.spinner = FloatSpin(
            self,
            digits=self.digits,
            max_val=radius_max,
            min_val=radius_min,
            value=self._radius,
        )
        box.Add(
            self.spinner, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpin, self.spinner)

        self.auto = wx.Button(self, label="Auto fit")
        self.Bind(wx.EVT_BUTTON, self.OnAutoFit, self.auto)
        box.Add(
            self.auto, 0, wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL, 5
        )

        sizer.Add(box)

        # Centering controls: fast/slow offsets of the ring centre.
        box = wx.BoxSizer(wx.HORIZONTAL)
        self.spinner_fast = FloatSpin(
            self, digits=self.digits, name="fast_ctrl", value=self._center[0]
        )
        box.Add(
            self.spinner_fast,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="Center fast"),
            0,
            wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinCenter, self.spinner_fast)

        self.spinner_slow = FloatSpin(
            self, digits=self.digits, name="slow_ctrl", value=self._center[1]
        )
        box.Add(
            self.spinner_slow,
            0,
            wx.RIGHT | wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        box.Add(
            wx.StaticText(self, label="Center slow"),
            0,
            wx.ALL | wx.ALIGN_CENTER_VERTICAL,
            5,
        )
        self.Bind(EVT_FLOATSPIN, self.OnSpinCenter, self.spinner_slow)

        self.clear_button = wx.Button(self, -1, "Clear")
        box.Add(self.clear_button, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.Bind(wx.EVT_BUTTON, self.OnClear, self.clear_button)

        sizer.Add(box)

        self.DrawRing()
def __del__(self):
if hasattr(self, "_ring_layer") and self._ring_layer is not None:
self._pyslip.DeleteLayer(self._ring_layer)
self._ring_layer = None
def OnSlide(self, event):
# Keep slider and spinner synchronized.
obj = event.EventObject # XXX Is this construct sane? See below
# and in calibration_frame, too!
self._radius = obj.GetValue()
self.spinner.SetValue(self._radius)
self.DrawRing()
    def OnAutoFit(self, event):
        """Grid-search the neighbourhood of the current centre and radius
        (+/- 3 px in 0.5 px steps) for the ring maximising the summed pixel
        intensity along its circumference, then update controls and redraw."""
        jitter = 6  # search half-width, in half-pixel steps

        detector = self._pyslip.tiles.raw_image.get_detector()
        beam = self._pyslip.tiles.raw_image.get_beam()

        # FIXME assumes all detector elements use the same millimeter-to-pixel convention
        if detector[0].get_distance() > 0:

            h = detector.hierarchy()
            if len(h) > 0:
                beam_pixel_fast, beam_pixel_slow = detector[0].millimeter_to_pixel(
                    detector.hierarchy().get_beam_centre(beam.get_s0())
                )
            else:
                beam_pixel_fast, beam_pixel_slow = detector[0].millimeter_to_pixel(
                    detector[0].get_beam_centre(beam.get_s0())
                )

        # NOTE(review): sign convention of panel distances here is assumed to
        # make avg_distance positive for intersection tests -- confirm.
        avg_distance = -sum(p.get_distance() for p in detector) / len(detector)

        beam_pixel_fast += self._center[0]
        beam_pixel_slow += self._center[1]

        def PointsOnCircle(center, radius, count):
            # yield `count` evenly spaced points on the circle
            for r in range(count):
                t = (r / count) * 2 * math.pi
                yield (
                    center[0] + (radius * math.cos(t)),
                    center[1] + (radius * math.sin(t)),
                )

        best = float("-inf")
        bestc = [self._center[0], self._center[1]]
        bestr = self._radius

        image_data = self._pyslip.tiles.raw_image.get_image_data()
        if not isinstance(image_data, tuple):
            image_data = (image_data,)

        # j, i: slow/fast centre jitter; r: radius jitter (all half-pixel steps)
        for j in range(-jitter, jitter, 1):
            j /= 2
            for i in range(-jitter, jitter, 1):
                i /= 2
                for r in range(-jitter, jitter, 1):
                    r /= 2
                    total = 0.0
                    for point in PointsOnCircle(
                        (beam_pixel_fast + i, beam_pixel_slow + j),
                        self._radius + r,
                        360,
                    ):
                        # map the picture-space point onto whichever panel
                        # it intersects, then sample that panel's data
                        mm = detector[0].pixel_to_millimeter(point)
                        mm = (mm[0], mm[1], avg_distance)
                        pid = detector.get_panel_intersection(mm)
                        if pid >= 0:
                            px = detector[pid].get_ray_intersection_px(mm)
                            px = [int(round(px[0])), int(round(px[1]))]
                            data = image_data[pid]
                            if (
                                px[0] >= 0
                                and px[0] < data.focus()[1]
                                and px[1] >= 0
                                and px[1] < data.focus()[0]
                            ):
                                total += data[px[1], px[0]]
                    if total > best:
                        best = total
                        bestc = [self._center[0] + i, self._center[1] + j]
                        bestr = self._radius + r
                print(f"r: {r: 3.1f}, i: {i: 3.1f}, j: {j: 3.1f}, best: {best:f}")

        print("DONE", bestc, bestr)
        self._radius = bestr
        self._center = bestc
        # reflect the fitted values back into the UI controls
        self.spinner.SetValue(bestr)
        self.spinner_fast.SetValue(bestc[0])
        self.spinner_slow.SetValue(bestc[1])
        self.DrawRing()
    def OnSpin(self, event):
        """Radius spinner changed: mirror into the slider and redraw."""
        # Keep slider and spinner synchronized.  XXX OnSpinRadius()?
        obj = event.EventObject
        self._radius = obj.GetValue()
        self.slider.SetValue(self._radius)
        self.DrawRing()

    def OnSpinCenter(self, event):
        """Centre spinner changed: update the fast/slow offset and redraw."""
        obj = event.EventObject
        name = obj.GetName()

        if name == "fast_ctrl":
            self._center[0] = obj.GetValue()
        elif name == "slow_ctrl":
            self._center[1] = obj.GetValue()

        self.DrawRing()

    def OnClear(self, event):
        # NOTE(review): explicit __del__ call is unusual -- it removes the
        # ring layer immediately instead of waiting for garbage collection.
        self.__del__()
    def _draw_ring_layer(self, dc, data, map_rel):
        """Draw a points layer.

        dc       the device context to draw on
        data     an iterable of point tuples:
                 (x, y, place, radius, colour, x_off, y_off, pdata)
        map_rel  points relative to map if True, MUST BE TRUE for lightweight

        Assumes all points are the same colour, saving 100's of ms.
        """
        assert map_rel is True
        if len(data) == 0:
            return
        (lon, lat, place, radius, colour, x_off, y_off, pdata) = data[0]

        # radius is stored in picture pixels; scale to view pixels at zoom
        scale = 2**self._pyslip.tiles.zoom_level

        # Draw points on map/view, using transparency if implemented.
        try:
            dc = wx.GCDC(dc)
        except NotImplementedError:
            pass
        dc.SetPen(wx.Pen(colour))
        dc.SetBrush(wx.Brush(colour, wx.TRANSPARENT))
        for (lon, lat, place, radius, colour, x_off, y_off, pdata) in data:
            (x, y) = self._pyslip.ConvertGeo2View((lon, lat))
            dc.DrawCircle(x, y, radius * scale)
    def DrawRing(self):
        """(Re)draw the ring at the current centre offset and radius."""
        xrayframe = self.GetParent().GetParent()
        panel_id, beam_pixel_fast, beam_pixel_slow = xrayframe.get_beam_center_px()

        # convert panel-local readout coordinates to whole-picture coordinates
        (
            beam_pixel_slow,
            beam_pixel_fast,
        ) = xrayframe.pyslip.tiles.flex_image.tile_readout_to_picture(
            panel_id, beam_pixel_slow - 0.5, beam_pixel_fast - 0.5
        )

        center = self._pyslip.tiles.picture_fast_slow_to_map_relative(
            beam_pixel_fast + self._center[0], beam_pixel_slow + self._center[1]
        )

        # XXX Transparency?
        ring_data = [(center[0], center[1], {"colour": "red", "radius": self._radius})]

        # Remove the old ring layer, and draw a new one.  XXX Why
        # disappears at highest levels?
        if hasattr(self, "_ring_layer") and self._ring_layer is not None:
            self._pyslip.DeleteLayer(self._ring_layer)
            self._ring_layer = None
        self._ring_layer = self._pyslip.AddPointLayer(
            ring_data,
            map_rel=True,
            visible=True,
            show_levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5],
            renderer=self._draw_ring_layer,
            name="<ring_layer>",
        )
| bsd-3-clause | ca2456609d5c38ed931b835473404f17 | 33.928125 | 89 | 0.521607 | 3.709592 | false | false | false | false |
dials/dials | tests/algorithms/profile_model/test_calculator.py | 1 | 1715 | from __future__ import annotations
from dials.algorithms.profile_model.gaussian_rs.calculator import (
_select_reflections_for_sigma_calc,
)
from dials.array_family import flex
def test_select_reflections_for_sigma_calc():
    """Test the reflection selection helper function."""
    # first select all if below threshold
    reflections = flex.reflection_table()
    reflections["id"] = flex.int(range(0, 1000))
    reflections.set_flags(flex.bool(1000, True), reflections.flags.used_in_refinement)
    reflections = _select_reflections_for_sigma_calc(reflections, 10000)
    assert reflections.size() == 1000

    # select used_in_refinement if not all used in refinement and above threshold
    # (here: every other reflection is flagged, 500 >= 200 requested)
    reflections = flex.reflection_table()
    reflections["id"] = flex.int(range(0, 1000))
    good = flex.bool(1000, False)
    sel = flex.size_t(i for i in range(0, 1000, 2))
    good.set_selected(sel, True)
    reflections.set_flags(good, reflections.flags.used_in_refinement)
    reflections = _select_reflections_for_sigma_calc(
        reflections, min_number_of_refl=200
    )
    assert reflections.size() == 500
    assert list(reflections["id"])[0:50] == list(range(0, 100, 2))

    # top up if not enough used in refinement compared to threshold
    # (only 500 flagged but 700 requested -> extra unflagged ones are added)
    reflections = flex.reflection_table()
    reflections["id"] = flex.int(range(0, 1000))
    good = flex.bool(1000, False)
    sel = flex.size_t(i for i in range(0, 1000, 2))
    good.set_selected(sel, True)
    reflections.set_flags(good, reflections.flags.used_in_refinement)
    reflections = _select_reflections_for_sigma_calc(
        reflections, min_number_of_refl=700
    )
    assert reflections.size() > 700
    assert reflections.size() < 1000
| bsd-3-clause | 74fd6274194088eff914b1c6ff57baff | 39.833333 | 86 | 0.698542 | 3.375984 | false | false | false | false |
dials/dials | src/dials/algorithms/refinement/two_theta_refiner.py | 1 | 9766 | """Versions of refinement classes for two theta refinement of the unit cell"""
from __future__ import annotations
import logging
from math import pi, sqrt
from scitbx import matrix
from scitbx.math import five_number_summary
from dials.algorithms.refinement.parameterisation.prediction_parameters import (
PredictionParameterisation,
)
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictor,
)
from dials.algorithms.refinement.reflection_manager import ReflectionManager
from dials.algorithms.refinement.target import Target
from dials.array_family import flex
from dials.util import tabulate
logger = logging.getLogger(__name__)

# constants: degree/radian conversion factors
RAD2DEG = 180.0 / pi
DEG2RAD = pi / 180.0
class ConstantTwoThetaWeightingStrategy:
    """Weighting strategy that gives every 2theta observation unit weight."""

    def calculate_weights(self, reflections):
        n = len(reflections)
        reflections["2theta.weights"] = flex.double(n, 1)
        return reflections
def calc_2theta(reflections, experiments):
    """Calculate and return 2theta angles in radians.

    For each reflection, the scattered beam vector s1 is computed from the
    observed millimetre position on its panel, normalised to the Ewald
    sphere radius |s0|, and the angle to the incident beam s0 taken.
    """
    twotheta = flex.double(len(reflections), 0.0)
    for iexp, exp in enumerate(experiments):
        isel = (reflections["id"] == iexp).iselection()
        sub_ref = reflections.select(isel)
        s0 = matrix.col(exp.beam.get_s0())
        # process panel by panel so lab coordinates use the right panel model
        for ipanel in range(len(exp.detector)):
            sel = sub_ref["panel"] == ipanel
            panel_ref = sub_ref.select(sel)
            x, y, phi = panel_ref["xyzobs.mm.value"].parts()
            s1 = exp.detector[ipanel].get_lab_coord(flex.vec2_double(x, y))
            # normalise s1 to the Ewald sphere radius |s0|
            s1 = s1 / s1.norms() * s0.length()

            sub_isel = isel.select(sel)
            twotheta.set_selected(sub_isel, s1.angle(s0))

    return twotheta
class TwoThetaReflectionManager(ReflectionManager):
    """ReflectionManager specialised for 2theta (unit cell) refinement.

    Adds observed and calculated 2theta columns to the reflection table and
    reports residual statistics in degrees.
    """

    _weighting_strategy = ConstantTwoThetaWeightingStrategy()

    def __init__(self, *args, **kwargs):
        # call base __init__
        super().__init__(*args, **kwargs)

        # set observed 2theta angles
        self._reflections["2theta_obs.rad"] = calc_2theta(
            self._reflections, self._experiments
        )

        # placeholder for calculated 2theta angles
        self._reflections["2theta_cal.rad"] = flex.double(len(self._reflections), 0.0)

        return

    def print_stats_on_matches(self):
        """Log a five-number summary of 2theta residuals and weights."""
        # renamed from 'l': single-letter 'l' is ambiguous (PEP 8 E741)
        matches = self.get_matches()
        nref = len(matches)
        if nref == 0:
            logger.warning(
                "Unable to calculate summary statistics for zero observations"
            )
            return

        twotheta_resid = matches["2theta_resid"]
        w_2theta = matches["2theta.weights"]

        msg = (
            f"\nSummary statistics for {nref} observations" + " matched to predictions:"
        )
        header = ["", "Min", "Q1", "Med", "Q3", "Max"]
        rows = []
        row_data = five_number_summary(twotheta_resid)
        rows.append(
            ["2theta_c - 2theta_o (deg)"] + [f"{e * RAD2DEG:.4g}" for e in row_data]
        )
        row_data = five_number_summary(w_2theta)
        rows.append(["2theta weights"] + [f"{e * DEG2RAD ** 2:.4g}" for e in row_data])
        logger.info(msg)
        logger.info(tabulate(rows, header) + "\n")
class TwoThetaExperimentsPredictor(ExperimentsPredictor):
    """Predictor computing 2theta directly from the Bragg condition."""

    def _predict_one_experiment(self, experiment, reflections):
        # |r0| = |B.h|: reciprocal lattice vector length for each reflection
        B = flex.mat3_double(len(reflections), experiment.crystal.get_B())
        r0 = B * reflections["miller_index"].as_vec3_double()
        r0len = r0.norms()
        wl = experiment.beam.get_wavelength()

        # 2theta = 2 * arcsin( |r0| / (2 * |s0| ) )
        reflections["2theta_cal.rad"] = 2.0 * flex.asin(0.5 * r0len * wl)
        # NOTE(review): flex.size_t(len(reflections)) constructs an array of
        # zeros, not an index range -- as written this appears to flag only
        # element 0; confirm whether flex.size_t_range(len(...)) was intended.
        reflections.set_flags(
            flex.size_t(len(reflections)), reflections.flags.predicted
        )
class TwoThetaTarget(Target):
    """Least-squares target whose residuals are 2theta_calc - 2theta_obs (rad)."""

    _grad_names = ["d2theta_dp"]
    rmsd_names = ["RMSD_2theta"]
    rmsd_units = ["rad"]

    def __init__(
        self, experiments, predictor, reflection_manager, prediction_parameterisation
    ):
        """Set up the target, make an initial prediction and finalise the
        reflection manager's working set."""
        super().__init__(
            experiments,
            predictor,
            reflection_manager,
            prediction_parameterisation,
        )
        # A single RMSD bin with an essentially-zero cutoff: the 'achieved'
        # criterion is met only when the residual effectively vanishes.
        self._binsize_cutoffs = [1.0e-6]
        # Predict reflections and freeze the reflection manager
        self.predict()
        self._reflection_manager.finalise()

    def predict(self):
        """Predict 2theta for the working reflections and refresh the matches."""
        obs = self._reflection_manager.get_obs()
        # Start from a clean slate: re-accept all observations.
        self._reflection_manager.reset_accepted_reflections()
        # Fill in '2theta_cal.rad' in place.
        self._reflection_predictor(obs)
        # Residuals and their squares.
        obs["2theta_resid"] = obs["2theta_cal.rad"] - obs["2theta_obs.rad"]
        obs["2theta_resid2"] = flex.pow2(obs["2theta_resid"])
        # Everything that received a prediction takes part in refinement.
        predicted = obs.get_flags(obs.flags.predicted)
        obs.set_flags(predicted, obs.flags.used_in_refinement)
        # Rebuild the matched subset.
        self.update_matches(force=True)

    @staticmethod
    def _extract_residuals_and_weights(matches):
        """Return the residual and weight columns as 1d flex.double vectors."""
        return matches["2theta_resid"], matches["2theta.weights"]

    @staticmethod
    def _extract_squared_residuals(matches):
        """Return the squared-residual column as a 1d flex.double vector."""
        return matches["2theta_resid2"]

    def _rmsds_core(self, reflections):
        """Calculate the unweighted RMSD for the specified reflections."""
        sum_sq = flex.sum(reflections["2theta_resid2"])
        return (sqrt(sum_sq / len(reflections)),)

    def achieved(self):
        """Return True once the 2theta RMSD drops below the (tiny) cutoff."""
        rmsds = self._rmsds if self._rmsds else self.rmsds()
        # Reset cached rmsds to avoid getting out of step with later updates.
        self._rmsds = None
        return rmsds[0] < self._binsize_cutoffs[0]
class TwoThetaPredictionParameterisation(PredictionParameterisation):
    """Prediction parameterisation for 2theta refinement.

    Supplies d(2theta)/dp gradients. Only crystal unit cell parameters may be
    free; the constructor asserts that no other model parameterisations are
    present.
    """

    _grad_names = ("d2theta_dp",)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # check that only the unit cell is parameterised
        assert not self._detector_parameterisations
        assert not self._beam_parameterisations
        assert not self._xl_orientation_parameterisations
        assert not self._goniometer_parameterisations
        return
    def _local_setup(self, reflections):
        # we want the wavelength (|s0| = 1/lambda, so lambda = 1/|s0|)
        self._wavelength = 1.0 / self._s0.norms()
        return
    def _xl_unit_cell_derivatives(self, isel, parameterisation=None, reflections=None):
        """Return d(2theta)/dp for each free parameter of one unit cell
        parameterisation, evaluated over the reflections selected by isel.
        Entries are None for parameters with a null gradient."""
        # Get required data
        h = self._h.select(isel)
        B = self._B.select(isel)
        wl = self._wavelength.select(isel)
        # get derivatives of the B matrix wrt the parameters
        dB_dxluc_p = [
            None if der is None else flex.mat3_double(len(isel), der.elems)
            for der in parameterisation.get_ds_dp(use_none_as_null=True)
        ]
        d2theta_dp = []
        # loop through the parameters
        for der in dB_dxluc_p:
            if der is None:
                d2theta_dp.append(None)
                continue
            r0 = B * h
            dr0 = der * h
            r0len = r0.norms()
            # chain rule on |r0| = sqrt(r0.r0): d|r0|/dp = (r0 . dr0) / |r0|
            dr0len = dr0.dot(r0) / r0len
            # 2theta = 2 * arcsin( |r0| / (2 * |s0| ) )
            sintheta = 0.5 * r0len * wl
            # derivative of arcsin(s) is 1/sqrt(1 - s^2); the factor of 2 in
            # 2theta cancels the 1/2 in sintheta, leaving fac * wl * d|r0|/dp
            fac = 1.0 / flex.sqrt(flex.double(len(wl), 1.0) - flex.pow2(sintheta))
            val = fac * wl * dr0len
            d2theta_dp.append(val)
        return d2theta_dp
    def _grads_xl_unit_cell_loop(self, reflections, results, callback=None):
        """Loop over all crystal unit cell parameterisations, calculate gradients
        and extend the results"""
        # loop over the crystal unit cell parameterisations
        for xlucp in self._xl_unit_cell_parameterisations:
            # Determine (sub)set of reflections affected by this parameterisation
            isel = flex.size_t()
            for exp_id in xlucp.get_experiment_ids():
                isel.extend(self._experiment_to_idx[exp_id])
            # Extend derivative vectors for this crystal unit cell parameterisation
            results = self._extend_gradient_vectors(
                results, self._nref, xlucp.num_free(), keys=self._grad_names
            )
            if len(isel) == 0:
                # if no reflections are in this experiment, skip calculation of
                # gradients, but must still process null gradients by a callback
                # (self._iparam must advance by num_free in either case)
                if callback is not None:
                    for iparam in range(xlucp.num_free()):
                        results[self._iparam] = callback(results[self._iparam])
                        self._iparam += 1
                else:
                    self._iparam += xlucp.num_free()
                continue
            d2theta_dp = self._xl_unit_cell_derivatives(
                isel, parameterisation=xlucp, reflections=reflections
            )
            for d2theta in d2theta_dp:
                if d2theta is not None:
                    # scatter this parameter's gradients into the full-length vector
                    results[self._iparam][self._grad_names[0]].set_selected(
                        isel, d2theta
                    )
                # increment the parameter index pointer
                self._iparam += 1
        return results
| bsd-3-clause | 11472e60790228a1c6103527c6b9e595 | 30.811075 | 88 | 0.601577 | 3.767747 | false | false | false | false |
dials/dials | src/dials/command_line/stills_process.py | 1 | 75177 | from __future__ import annotations
import collections
import copy
import glob
import logging
import os
import pickle
import sys
import tarfile
import time
from io import BytesIO
from dxtbx.model.experiment_list import (
Experiment,
ExperimentList,
ExperimentListFactory,
)
from libtbx.phil import parse
from libtbx.utils import Abort, Sorry
import dials.util
from dials.array_family import flex
from dials.util import log
logger = logging.getLogger("dials.command_line.stills_process")
# Epilog text shown by the argument parser's help output.
help_message = """
DIALS script for processing still images. Import, index, refine, and integrate are all done for each image
separately.
"""
def _control_phil_str():
    """Return the phil string for the dispatcher: input selection, dispatch
    control, output file naming and multiprocessing options."""
    return """
  input {
    file_list = None
      .type = path
      .help = Path to a text file with a list of images
    glob = None
      .type = str
      .help = For large, multi-file datasets, specify the paths using wildcards (e.g. *.cbf)
      .multiple = True
    image_tag = None
      .type = str
      .multiple = True
      .help = Only process images with these tag(s). For single-image files (like CBFs or SMVs), the image \
              tag for each file is the file name. For multi-image files like HDF5, the image tag is \
              filename_imagenumber (including leading zeros). Use show_image_tags=True to see the list \
              of image tags that will be used for a dataset.
    show_image_tags = False
      .type = bool
      .help = Show the set of image tags that would be used during processing. To process subsets of image \
              files, use these tags with the image_tag parameter.
    max_images = None
      .type = int
      .help = Limit total number of processed images to max_images
    ignore_gain_mismatch = False
      .type = bool
      .expert_level = 3
      .help = Detector gain should be set on the detector models loaded from the images or in the \
              processing parameters, not both. Override the check that this is true with this flag. \
  }
  dispatch {
    pre_import = False
      .type = bool
      .expert_level = 2
      .help = If True, before processing import all the data. Needed only if processing \
              multiple multi-image files at once (not a recommended use case)
    process_percent = None
      .type = int(value_min=1, value_max=100)
      .help = Percent of events to process
    refine = False
      .expert_level = 2
      .type = bool
      .help = If True, after indexing, refine the experimental models
    squash_errors = True
      .expert_level = 2
      .type = bool
      .help = If True, if an image fails to process, continue to the next image. \
              otherwise, halt processing and show the error.
    find_spots = True
      .expert_level = 2
      .type = bool
      .help = Whether to do spotfinding. Needed for indexing/integration
    index = True
      .expert_level = 2
      .type = bool
      .help = Attempt to index images. find_spots also needs to be True for this to work
    integrate = True
      .expert_level = 2
      .type = bool
      .help = Integrate indexed images. Ignored if index=False or find_spots=False
    coset = False
      .expert_level = 2
      .type = bool
      .help = Within the integrate dispatcher, integrate a sublattice coset intended to represent \
              negative control spots with no Bragg diffraction.
    hit_finder{
      enable = True
        .type = bool
        .help = Whether to do hitfinding. hit_finder=False: process all images
      minimum_number_of_reflections = 16
        .type = int
        .help = If the number of strong reflections on an image is less than this, and \
                the hitfinder is enabled, discard this image.
      maximum_number_of_reflections = None
        .type = int
        .help = If specified, ignores images with more than this many number of reflections
    }
  }
  output {
    output_dir = .
      .type = str
      .help = Directory output files will be placed
    composite_output = True
      .type = bool
      .help = If True, save one set of experiment/reflection files per process, where each is a \
              concatenated list of all the successful events examined by that process. \
              If False, output a separate experiment/reflection file per image (generates a \
              lot of files).
    logging_dir = None
      .type = str
      .help = Directory output log files will be placed
    experiments_filename = None
      .type = str
      .help = The filename for output experiments. For example, %s_imported.expt
    strong_filename = None
      .type = str
      .help = The filename for strong reflections from spot finder output. For example: \
              %s_strong.refl
    indexed_filename = %s_indexed.refl
      .type = str
      .help = The filename for indexed reflections.
    refined_experiments_filename = %s_refined.expt
      .type = str
      .help = The filename for saving refined experimental models
    integrated_filename = %s_integrated.refl
      .type = str
      .help = The filename for final integrated reflections.
    integrated_experiments_filename = %s_integrated.expt
      .type = str
      .help = The filename for saving final experimental models.
    coset_filename = %s_coset%d.refl
      .type = str
      .help = The filename for final coset reflections.
    coset_experiments_filename = %s_coset%d.expt
      .type = str
      .help = The filename for saving final coset experimental models.
    profile_filename = None
      .type = str
      .help = The filename for output reflection profile parameters
    integration_pickle = int-%d-%s.pickle
      .type = str
      .help = Filename for cctbx.xfel-style integration pickle files
  }
  mp {
    method = *multiprocessing sge lsf pbs mpi
      .type = choice
      .help = "The multiprocessing method to use"
    nproc = 1
      .type = int(value_min=1)
      .help = "The number of processes to use."
    composite_stride = None
      .type = int
      .help = For MPI, if using composite mode, specify how many ranks to \
              aggregate data from. For example, if you have 100 processes, \
              composite mode will output N*100 files, where N is the number \
              of file types (expt, refl, etc). If you specify stride = 25, \
              then each group of 25 process will send their results to 4 \
              processes and only N*4 files will be created. Ideally, match \
              stride to the number of processors per node.
    debug
      .expert_level = 2
    {
      cProfile = False
        .type = bool
        .help = Enable code profiling. Profiling file will be available in \
                the debug folder. Use (for example) runsnake to visualize \
                processing performance
      output_debug_logs = True
        .type = bool
        .help = Whether to write debugging information for every image \
                processed
    }
  }
"""
def _dials_phil_str():
    """Return the phil string pulling in the DIALS algorithm phil scopes
    (spotfinding, indexing, refinement, integration) plus stills-specific
    additions."""
    return """
  input {
    reference_geometry = None
      .type = str
      .help = Provide an models.expt file with exactly one detector model. Data processing will use \
              that geometry instead of the geometry found in the image headers.
    sync_reference_geom = True
      .type = bool
      .help = ensures the reference hierarchy agrees with the image format
  }
  output {
    shoeboxes = True
      .type = bool
      .help = Save the raw pixel values inside the reflection shoeboxes during spotfinding.
  }
  include scope dials.util.options.geometry_phil_scope
  include scope dials.algorithms.spot_finding.factory.phil_scope
  include scope dials.algorithms.indexing.indexer.phil_scope
  indexing {
    include scope dials.algorithms.indexing.lattice_search.basis_vector_search_phil_scope
  }
  include scope dials.algorithms.refinement.refiner.phil_scope
  include scope dials.algorithms.integration.integrator.phil_scope
  include scope dials.algorithms.profile_model.factory.phil_scope
  include scope dials.algorithms.spot_prediction.reflection_predictor.phil_scope
  include scope dials.algorithms.integration.stills_significance_filter.phil_scope
  indexing {
    stills {
      method_list = None
        .type = strings
        .help = List of indexing methods. If indexing fails with first method, indexing will be \
                attempted with the next, and so forth
      known_orientations = None
        .type = path
        .multiple = True
        .expert_level = 2
        .help = Paths to previous processing results including crystal orientations. \
                If specified, images will not be re-indexed, but instead the known \
                orientations will be used. Provide paths to experiment list files, using \
                wildcards as needed.
      require_known_orientation = False
        .type = bool
        .expert_level = 2
        .help = If known_orientations are provided, and an orientation for an image is not \
                found, this is whether or not to attempt to index the image from scratch \
                using indexing.method
    }
  }
  integration {
    include scope dials.algorithms.integration.kapton_correction.absorption_phil_scope
    coset {
      transformation = 6
        .type = int(value_min=0, value_max=6)
        .multiple = False
        .help = The index number(s) of the modulus=2 sublattice transformation(s) used to produce distince coset results. \
                0=Double a, 1=Double b, 2=Double c, 3=C-face centering, 4=B-face centering, 5=A-face centering, 6=Body centering \
                See Sauter and Zwart, Acta D (2009) 65:553
    }
    integration_only_overrides {
      trusted_range = None
        .type = floats(size=2)
        .help = "Override the panel trusted range [min_trusted_value, max_trusted_value] during integration."
        .short_caption = "Panel trusted range"
    }
  }
  profile {
    gaussian_rs {
      parameters {
        sigma_b_cutoff = 0.1
          .type = float
          .help = Maximum sigma_b before the image is rejected
      }
    }
  }
"""
def _program_defaults_phil_str():
    """Return the phil string overriding DIALS package defaults with values
    appropriate for still-shot processing (stills integrator, fixed beam and
    detector, no outlier rejection)."""
    return """
indexing {
  method = fft1d
}
refinement {
  parameterisation {
    auto_reduction {
      min_nref_per_parameter = 1
      action = fix
    }
    beam.fix = all
    detector.fix = all
  }
  reflections {
    weighting_strategy.override = stills
    outlier.algorithm = null
  }
}
integration {
  integrator = stills
  profile.fitting = False
  background {
    algorithm = simple
    simple {
      outlier.algorithm = plane
      model.algorithm = linear2d
    }
  }
}
profile.gaussian_rs.min_spots.overall = 0
"""
# Materialise the master phil scope: dispatcher control plus the DIALS
# algorithm scopes, with program-level defaults overriding package defaults.
control_phil_str = _control_phil_str()
dials_phil_str = _dials_phil_str()
program_defaults_phil_str = _program_defaults_phil_str()
phil_scope = parse(control_phil_str + dials_phil_str, process_includes=True).fetch(
    parse(program_defaults_phil_str)
)
def do_import(filename, load_models=True):
    """Import one file as an ExperimentList of single-image experiments.

    The file may be an image file or a previously saved experiment list.
    Multi-image sequences are split so that each returned Experiment holds
    exactly one image. When load_models is False, the dxtbx models are left
    unloaded for speed.

    Raises Abort if the file yields no experiments.
    """
    logger.info("Loading %s", os.path.basename(filename))
    experiments = ExperimentListFactory.from_filenames([filename], load_models=False)
    if len(experiments) == 0:
        # Not an image file; fall back to reading it as an experiment list.
        try:
            experiments = ExperimentListFactory.from_json_file(filename)
        except ValueError:
            # Include the offending filename in the error (previously the
            # message was a placeholder-free f-string that dropped it).
            raise Abort(f"Could not load {filename}")
    if len(experiments) == 0:
        raise Abort(f"Could not load {filename}")

    from dxtbx.imageset import ImageSetFactory

    all_experiments = ExperimentList()
    for experiment in experiments:
        # Convert from ImageSequence to ImageSet, if needed
        imageset = ImageSetFactory.imageset_from_anyset(experiment.imageset)
        for i in range(len(imageset)):
            # Preserve original models if they were available (in the case of an image file
            # they will not be, but in the case of a previously processed experiment list,
            # then they may be available
            expt = Experiment(
                imageset=imageset[i : i + 1],
                detector=experiment.detector,
                beam=experiment.beam,
                scan=experiment.scan,
                goniometer=experiment.goniometer,
                crystal=experiment.crystal,
            )
            if load_models:
                expt.load_models()
            all_experiments.append(expt)
    return all_experiments
def sync_geometry(src, dest):
    """Recursively copy the local frame of src onto dest.

    Both arguments are detector hierarchy nodes (panels or panel groups);
    the two hierarchies are paired up child-by-child, so they are expected
    to have congruent structure.
    """
    fast = src.get_local_fast_axis()
    slow = src.get_local_slow_axis()
    origin = src.get_local_origin()
    dest.set_local_frame(fast, slow, origin)
    # Panel groups have children; recurse pairwise through both trees.
    if not src.is_panel():
        for child_src, child_dest in zip(src, dest):
            sync_geometry(child_src, child_dest)
class Script:
    """A class for running the script."""

    def __init__(self):
        """Initialise the script."""
        from dials.util.options import ArgumentParser

        # The script usage
        usage = "usage: dials.stills_process [options] [param.phil] filenames"

        self.tag = None
        self.reference_detector = None

        # Create the parser
        self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=help_message)

    def load_reference_geometry(self):
        """Load the detector model named by input.reference_geometry, if any.

        Accepts either an experiment list file or an image file; the loaded
        detector is stored on self.reference_detector.
        """
        if self.params.input.reference_geometry is None:
            return
        try:
            ref_experiments = ExperimentListFactory.from_json_file(
                self.params.input.reference_geometry, check_format=False
            )
        except Exception:
            # Not an experiment list; try loading it as an image instead.
            try:
                import dxtbx

                img = dxtbx.load(self.params.input.reference_geometry)
            except Exception:
                raise Sorry(
                    "Couldn't load geometry file %s"
                    % self.params.input.reference_geometry
                )
            else:
                self.reference_detector = img.get_detector()
        else:
            assert len(ref_experiments.detectors()) == 1
            self.reference_detector = ref_experiments.detectors()[0]

    def run(self, args=None):
        """Execute the script.

        Parses arguments (on rank 0, broadcasting under MPI), builds the list
        of (tag, item) work units, then dispatches each image through
        Processor.process_experiments using the configured multiprocessing
        method.
        """
        from libtbx import easy_mp

        try:
            from mpi4py import MPI
        except ImportError:
            rank = 0
            size = 1
        else:
            comm = MPI.COMM_WORLD
            rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
            size = comm.Get_size()  # size: number of processes running in this job

        if rank == 0:
            # Parse the command line
            params, options, all_paths = self.parser.parse_args(
                args, show_diff_phil=False, return_unhandled=True, quick_parse=True
            )

            if params.input.glob:
                all_paths.extend(params.input.glob)
            globbed = []
            for p in all_paths:
                g = glob.glob(p)
                if not g:
                    sys.exit(f"Error: Unhandled path or option: {p}")
                globbed.extend(g)
            all_paths = globbed

            if not all_paths and params.input.file_list is not None:
                all_paths.extend(
                    [path.strip() for path in open(params.input.file_list).readlines()]
                )

            if params.indexing.stills.known_orientations:
                # Resolve the known-orientation files into a lookup keyed by
                # (image file basename, image index) -> list of crystal models.
                known_orientations = {}
                for path in params.indexing.stills.known_orientations:
                    for g in glob.glob(path):
                        ko_expts = ExperimentList.from_file(g, check_format=False)
                        for expt in ko_expts:
                            assert (
                                len(expt.imageset.indices()) == 1
                                and len(expt.imageset.paths()) == 1
                            )
                            key = (
                                os.path.basename(expt.imageset.paths()[0]),
                                expt.imageset.indices()[0],
                            )
                            if key not in known_orientations:
                                known_orientations[key] = []
                            known_orientations[key].append(expt.crystal)
                if not known_orientations:
                    raise Sorry(
                        "No known_orientations found at the locations specified: %s"
                        % ", ".join(params.indexing.stills.known_orientations)
                    )
                params.indexing.stills.known_orientations = known_orientations

        if size > 1:
            # Share the parsed parameters and file list with all MPI ranks.
            if rank == 0:
                transmitted_info = params, options, all_paths
            else:
                transmitted_info = None
            params, options, all_paths = comm.bcast(transmitted_info, root=0)

        # Check we have some filenames
        if not all_paths:
            self.parser.print_help()
            return

        if params.mp.debug.cProfile:
            import cProfile

            self.pr = cProfile.Profile()
            self.pr.enable()

        print(f"Have {len(all_paths)} files")

        # Mask validation
        for mask_path in params.spotfinder.lookup.mask, params.integration.lookup.mask:
            if mask_path is not None and not os.path.isfile(mask_path):
                raise Sorry(f"Mask {mask_path} not found")

        # Save the options
        self.options = options
        self.params = params

        st = time.time()

        if params.mp.method == "mpi":
            # Configure the logging
            if params.output.logging_dir is None:
                logfile = None
            else:
                # Per-rank stdout/stderr/info files so MPI output doesn't interleave.
                log_path = os.path.join(
                    params.output.logging_dir, "log_rank%04d.out" % rank
                )
                error_path = os.path.join(
                    params.output.logging_dir, "error_rank%04d.out" % rank
                )
                print(f"Redirecting stdout to {log_path}")
                print(f"Redirecting stderr to {error_path}")
                sys.stdout = open(log_path, "a")
                sys.stderr = open(error_path, "a")
                print("Should be redirected now")

                logfile = os.path.join(
                    params.output.logging_dir, "info_rank%04d.out" % rank
                )

            log.config(verbosity=options.verbose, logfile=logfile)
        else:
            # Configure logging
            log.config(verbosity=options.verbose, logfile="dials.process.log")

        bad_phils = [f for f in all_paths if os.path.splitext(f)[1] == ".phil"]
        if len(bad_phils) > 0:
            self.parser.print_help()
            logger.error(
                "Error: the following phil files were not understood: %s",
                ", ".join(bad_phils),
            )
            return

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        for abs_params in self.params.integration.absorption_correction:
            if abs_params.apply:
                if not (
                    self.params.integration.debug.output
                    and not self.params.integration.debug.separate_files
                ):
                    raise Sorry(
                        "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                        + "Set integration.debug.output=True, integration.debug.separate_files=False and "
                        + "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                    )

        self.load_reference_geometry()
        from dials.command_line.dials_import import ManualGeometryUpdater

        update_geometry = ManualGeometryUpdater(params)

        # Import stuff
        logger.info("Loading files...")
        pre_import = params.dispatch.pre_import or len(all_paths) == 1
        if pre_import:
            # Handle still imagesets by breaking them apart into multiple experiments
            # Further handle single file still imagesets (like HDF5) by tagging each
            # frame using its index
            experiments = ExperimentList()
            for path in sorted(all_paths):
                experiments.extend(do_import(path, load_models=False))

            indices = []
            basenames = []
            basename_counts = {}
            split_experiments = []
            for i, imageset in enumerate(experiments.imagesets()):
                assert len(imageset) == 1
                paths = imageset.paths()
                indices.append(i)
                basename = os.path.splitext(os.path.basename(paths[0]))[0]
                basenames.append(basename)
                if basename in basename_counts:
                    basename_counts[basename] += 1
                else:
                    basename_counts[basename] = 1
                split_experiments.append(experiments[i : i + 1])
            tags = []
            split_experiments2 = []
            for i, basename in zip(indices, basenames):
                # Disambiguate repeated basenames (multi-image files) with the index.
                if basename_counts[basename] > 1:
                    tag = "%s_%05d" % (basename, i)
                else:
                    tag = basename
                if (
                    not self.params.input.image_tag
                    or tag in self.params.input.image_tag
                ):
                    tags.append(tag)
                    split_experiments2.append(split_experiments[i])
            split_experiments = split_experiments2

            # Wrapper function
            def do_work(i, item_list, processor=None, finalize=True):
                """Process the given (tag, index) items with one Processor."""
                if not processor:
                    processor = Processor(
                        copy.deepcopy(params), composite_tag="%04d" % i, rank=i
                    )

                for item in item_list:
                    tag = item[0]
                    experiments = split_experiments[item[1]]
                    try:
                        assert len(experiments) == 1
                        experiment = experiments[0]
                        experiment.load_models()
                        imageset = experiment.imageset
                        update_geometry(imageset)
                        experiment.beam = imageset.get_beam()
                        experiment.detector = imageset.get_detector()
                    except RuntimeError as e:
                        logger.warning("Error updating geometry on item %s, %s", tag, e)
                        continue

                    if self.reference_detector is not None:
                        experiment = experiments[0]
                        if self.params.input.sync_reference_geom:
                            imageset = experiment.imageset
                            sync_geometry(
                                self.reference_detector.hierarchy(),
                                imageset.get_detector().hierarchy(),
                            )
                            experiment.detector = imageset.get_detector()
                        else:
                            experiment.detector = copy.deepcopy(self.reference_detector)

                    processor.process_experiments(tag, experiments)
                    imageset.clear_cache()
                if finalize:
                    processor.finalize()
                return processor

            iterable = list(zip(tags, range(len(split_experiments))))

        else:
            basenames = collections.defaultdict(int)
            sorted_paths = sorted(all_paths)
            for filename in sorted_paths:
                basename = os.path.splitext(os.path.basename(filename))[0]
                basenames[basename] += 1
            tags = []
            all_paths2 = []
            for i, (basename, count) in enumerate(basenames.items()):
                if count > 1:
                    tag = "%s_%05d" % (basename, i)
                else:
                    tag = basename
                if (
                    not self.params.input.image_tag
                    or tag in self.params.input.image_tag
                ):
                    tags.append(tag)
                    all_paths2.append(sorted_paths[i])
            all_paths = all_paths2

            # Wrapper function
            def do_work(i, item_list, processor=None, finalize=True):
                """Process the given (tag, filename) items with one Processor."""
                if not processor:
                    processor = Processor(
                        copy.deepcopy(params), composite_tag="%04d" % i, rank=i
                    )
                for item in item_list:
                    tag, filename = item

                    experiments = do_import(filename, load_models=True)
                    imagesets = experiments.imagesets()
                    if len(imagesets) == 0 or len(imagesets[0]) == 0:
                        logger.info("Zero length imageset in file: %s", filename)
                        # NOTE(review): this returns (abandoning the remaining
                        # items for this worker) rather than 'continue' - confirm
                        # this is the intended behaviour.
                        return
                    if len(imagesets) > 1:
                        # Include the offending filename in the error (previously
                        # the message was a placeholder-free f-string).
                        raise Abort(f"Found more than one imageset in file: {filename}")
                    if len(imagesets[0]) > 1:
                        raise Abort(
                            "Found a multi-image file. Run again with pre_import=True"
                        )

                    try:
                        update_geometry(imagesets[0])
                        experiment = experiments[0]
                        experiment.beam = imagesets[0].get_beam()
                        experiment.detector = imagesets[0].get_detector()
                    except RuntimeError as e:
                        logger.warning("Error updating geometry on item %s, %s", tag, e)
                        continue

                    if self.reference_detector is not None:
                        if self.params.input.sync_reference_geom:
                            imageset = experiments[0].imageset
                            sync_geometry(
                                self.reference_detector.hierarchy(),
                                imageset.get_detector().hierarchy(),
                            )
                            experiments[0].detector = imageset.get_detector()
                        else:
                            experiments[0].detector = copy.deepcopy(
                                self.reference_detector
                            )

                    processor.process_experiments(tag, experiments)
                if finalize:
                    processor.finalize()
                return processor

            iterable = list(zip(tags, all_paths))

        if params.input.max_images:
            iterable = iterable[: params.input.max_images]

        if params.input.show_image_tags:
            print("Showing image tags for this dataset and exiting")
            for tag, item in iterable:
                print(tag)
            return

        # prepare fractions of process_percent, if given
        process_fractions = None
        if params.dispatch.process_percent:
            import fractions

            percent = params.dispatch.process_percent / 100
            process_fractions = fractions.Fraction(percent).limit_denominator(100)

            def process_this_event(nevent):
                # nevent modulo the denominator gives us which fraction we're in
                n_mod_denom = nevent % process_fractions.denominator
                # compare the 0-indexed modulo against the 1-indexed numerator (intentionally not <=)
                n_accept = n_mod_denom < process_fractions.numerator
                return n_accept

        # Process the data
        if params.mp.method == "mpi":
            if size <= 2:  # client/server only makes sense for n>2
                # Round-robin the work units over the available ranks.
                subset = [
                    item for i, item in enumerate(iterable) if (i + rank) % size == 0
                ]
                do_work(rank, subset)
            else:
                processor = Processor(
                    copy.deepcopy(params), composite_tag="%04d" % rank, rank=rank
                )

                if rank == 0:
                    # server process: hand out one item at a time to whichever
                    # client asks for work, then send a stop token to each.
                    for item_num, item in enumerate(iterable):
                        if process_fractions and not process_this_event(item_num):
                            continue
                        print("Getting next available process")
                        rankreq = comm.recv(source=MPI.ANY_SOURCE)
                        print(f"Process {rankreq} is ready, sending {item[0]}\n")
                        comm.send(item, dest=rankreq)
                    # send a stop command to each process
                    print("MPI DONE, sending stops\n")
                    for rankreq in range(size - 1):
                        rankreq = comm.recv(source=MPI.ANY_SOURCE)
                        print("Sending stop to %d\n" % rankreq)
                        comm.send("endrun", dest=rankreq)
                    print("All stops sent.")
                else:
                    # client process
                    while True:
                        # inform the server this process is ready for an event
                        print("Rank %d getting next task" % rank)
                        comm.send(rank, dest=0)
                        print("Rank %d waiting for response" % rank)
                        item = comm.recv(source=0)
                        if item == "endrun":
                            print("Rank %d received endrun" % rank)
                            break
                        print("Rank %d beginning processing" % rank)
                        try:
                            processor = do_work(rank, [item], processor, finalize=False)
                        except Exception as e:
                            print(
                                "Rank %d unhandled exception processing event" % rank,
                                str(e),
                            )
                        print("Rank %d event processed" % rank)
                processor.finalize()
        else:
            from dxtbx.command_line.image_average import splitit

            if params.mp.nproc == 1:
                do_work(0, iterable)
            else:
                result = list(
                    easy_mp.multi_core_run(
                        myfunction=do_work,
                        argstuples=list(enumerate(splitit(iterable, params.mp.nproc))),
                        nproc=params.mp.nproc,
                    )
                )
                error_list = [r[2] for r in result]
                if error_list.count(None) != len(error_list):
                    print(
                        "Some processes failed execution. Not all images may have processed. Error messages:"
                    )
                    for error in error_list:
                        if error is None:
                            continue
                        print(error)

        # Total Time
        logger.info("")
        logger.info("Total Time Taken = %f seconds", time.time() - st)

        if params.mp.debug.cProfile:
            self.pr.disable()
            self.pr.dump_stats(
                os.path.join(
                    self.params.output.output_dir, "debug", "cpu_%d.prof" % comm.rank
                )
            )
class Processor:
def __init__(self, params, composite_tag=None, rank=0):
self.params = params
self.composite_tag = composite_tag
# The convention is to put %s in the phil parameter to add a tag to
# each output datafile. Save the initial templates here.
self.experiments_filename_template = params.output.experiments_filename
self.strong_filename_template = params.output.strong_filename
self.indexed_filename_template = params.output.indexed_filename
self.refined_experiments_filename_template = (
params.output.refined_experiments_filename
)
self.integrated_filename_template = params.output.integrated_filename
self.integrated_experiments_filename_template = (
params.output.integrated_experiments_filename
)
if params.dispatch.coset:
self.coset_filename_template = params.output.coset_filename
self.coset_experiments_filename_template = (
params.output.coset_experiments_filename
)
debug_dir = os.path.join(params.output.output_dir, "debug")
if not os.path.exists(debug_dir):
try:
os.makedirs(debug_dir)
except OSError:
pass # due to multiprocessing, makedirs can sometimes fail
assert os.path.exists(debug_dir)
self.debug_file_path = os.path.join(debug_dir, "debug_%d.txt" % rank)
write_newline = os.path.exists(self.debug_file_path)
if write_newline: # needed if the there was a crash
self.debug_write("")
if params.output.composite_output:
assert composite_tag is not None
self.all_imported_experiments = ExperimentList()
self.all_strong_reflections = flex.reflection_table()
self.all_indexed_experiments = ExperimentList()
self.all_indexed_reflections = flex.reflection_table()
self.all_integrated_experiments = ExperimentList()
self.all_integrated_reflections = flex.reflection_table()
self.all_int_pickle_filenames = []
self.all_int_pickles = []
self.all_coset_experiments = ExperimentList()
self.all_coset_reflections = flex.reflection_table()
self.setup_filenames(composite_tag)
def setup_filenames(self, tag):
# before processing, set output paths according to the templates
if (
self.experiments_filename_template is not None
and "%s" in self.experiments_filename_template
):
self.params.output.experiments_filename = os.path.join(
self.params.output.output_dir,
self.experiments_filename_template % ("idx-" + tag),
)
if (
self.strong_filename_template is not None
and "%s" in self.strong_filename_template
):
self.params.output.strong_filename = os.path.join(
self.params.output.output_dir,
self.strong_filename_template % ("idx-" + tag),
)
if (
self.indexed_filename_template is not None
and "%s" in self.indexed_filename_template
):
self.params.output.indexed_filename = os.path.join(
self.params.output.output_dir,
self.indexed_filename_template % ("idx-" + tag),
)
if (
self.refined_experiments_filename_template is not None
and "%s" in self.refined_experiments_filename_template
):
self.params.output.refined_experiments_filename = os.path.join(
self.params.output.output_dir,
self.refined_experiments_filename_template % ("idx-" + tag),
)
if (
self.integrated_filename_template is not None
and "%s" in self.integrated_filename_template
):
self.params.output.integrated_filename = os.path.join(
self.params.output.output_dir,
self.integrated_filename_template % ("idx-" + tag),
)
if (
self.integrated_experiments_filename_template is not None
and "%s" in self.integrated_experiments_filename_template
):
self.params.output.integrated_experiments_filename = os.path.join(
self.params.output.output_dir,
self.integrated_experiments_filename_template % ("idx-" + tag),
)
if (
self.params.dispatch.coset
and self.coset_filename_template is not None
and "%s" in self.coset_filename_template
):
self.params.output.coset_filename = os.path.join(
self.params.output.output_dir,
self.coset_filename_template
% ("idx-" + tag, self.params.integration.coset.transformation),
)
if (
self.params.dispatch.coset
and self.coset_experiments_filename_template is not None
and "%s" in self.coset_experiments_filename_template
):
self.params.output.coset_experiments_filename = os.path.join(
self.params.output.output_dir,
self.coset_experiments_filename_template
% ("idx-" + tag, self.params.integration.coset.transformation),
)
def debug_start(self, tag):
    """Open a debug-log entry for this image.

    Builds ``self.debug_str`` as ``"<hostname>,<tag>,%s,%s,%s\\n"`` (the three
    placeholders are later filled by :meth:`debug_write` with timestamp, state
    and message) and records a "start" event.  No-op when debug logging is
    disabled.
    """
    if not self.params.mp.debug.output_debug_logs:
        return

    import socket

    self.debug_str = f"{socket.gethostname()},{tag}" + ",%s,%s,%s\n"
    self.debug_write("start")
def debug_write(self, string, state=None):
    """Append one event record to the per-process debug log.

    Args:
        string (str): Event message; an empty string writes a bare newline
            (used as a record separator).
        state (str): Optional state tag (e.g. "fail", "done", "stop");
            defaults to a blank placeholder.

    No-op when debug logging is disabled.
    """
    if not self.params.mp.debug.output_debug_logs:
        return
    from xfel.cxi.cspad_ana import cspad_tbx  # XXX move to common timestamp format

    ts = cspad_tbx.evt_timestamp()  # Now
    # Use a context manager so the handle is closed even if the write raises
    # (the original open()/close() pair leaked the handle on error).
    with open(self.debug_file_path, "a") as debug_file_handle:
        if string == "":
            debug_file_handle.write("\n")
        else:
            if state is None:
                state = " "
            debug_file_handle.write(self.debug_str % (ts, state, string))
def process_experiments(self, tag, experiments):
    """Run the spotfind -> index -> refine -> integrate pipeline on one image.

    Args:
        tag (str): Identifier for this image, used to build output filenames
            and debug-log entries.
        experiments: ExperimentList of imported experiments to process.

    Each stage is wrapped in its own try/except: on failure a debug record is
    written and, if dispatch.squash_errors is set, the error is swallowed so
    that batch processing can continue with the next image; otherwise it is
    re-raised.
    """
    if not self.params.output.composite_output:
        self.setup_filenames(tag)
    self.tag = tag
    self.debug_start(tag)

    # Record the imported experiments: accumulate for composite output, or
    # dump straight to disk otherwise.
    if self.params.output.experiments_filename:
        if self.params.output.composite_output:
            self.all_imported_experiments.extend(experiments)
        else:
            experiments.as_json(self.params.output.experiments_filename)

    # Do the processing
    try:
        self.pre_process(experiments)
    except Exception as e:
        print("Error in pre-process", tag, str(e))
        self.debug_write("preprocess_exception", "fail")
        if not self.params.dispatch.squash_errors:
            raise
        return
    try:
        if self.params.dispatch.find_spots:
            self.debug_write("spotfind_start")
            observed = self.find_spots(experiments)
        else:
            print("Spot Finding turned off. Exiting")
            self.debug_write("data_loaded", "done")
            return
    except Exception as e:
        print("Error spotfinding", tag, str(e))
        self.debug_write("spotfinding_exception", "fail")
        if not self.params.dispatch.squash_errors:
            raise
        return
    try:
        if self.params.dispatch.index:
            # Hit finder: reject images with too few (or, optionally, too
            # many) strong spots before attempting to index them.
            if (
                self.params.dispatch.hit_finder.enable
                and len(observed)
                < self.params.dispatch.hit_finder.minimum_number_of_reflections
            ):
                print("Not enough spots to index", tag)
                self.debug_write(f"not_enough_spots_{len(observed)}", "stop")
                return
            if (
                self.params.dispatch.hit_finder.maximum_number_of_reflections
                is not None
            ):
                if (
                    self.params.dispatch.hit_finder.enable
                    and len(observed)
                    > self.params.dispatch.hit_finder.maximum_number_of_reflections
                ):
                    print("Too many spots to index - Possibly junk", tag)
                    self.debug_write(f"too_many_spots_{len(observed)}", "stop")
                    return
            self.debug_write("index_start")
            experiments, indexed = self.index(experiments, observed)
        else:
            print("Indexing turned off. Exiting")
            self.debug_write(f"spotfinding_ok_{len(observed)}", "done")
            return
    except Exception as e:
        print("Couldn't index", tag, str(e))
        if not self.params.dispatch.squash_errors:
            raise
        self.debug_write(f"indexing_failed_{len(observed)}", "stop")
        return
    self.debug_write("refine_start")
    try:
        experiments, indexed = self.refine(experiments, indexed)
    except Exception as e:
        print("Error refining", tag, str(e))
        self.debug_write(f"refine_failed_{len(indexed)}", "fail")
        if not self.params.dispatch.squash_errors:
            raise
        return
    try:
        if self.params.dispatch.integrate:
            self.debug_write("integrate_start")
            integrated = self.integrate(experiments, indexed)
        else:
            print("Integration turned off. Exiting")
            self.debug_write(f"index_ok_{len(indexed)}", "done")
            return
    except Exception as e:
        print("Error integrating", tag, str(e))
        self.debug_write(f"integrate_failed_{len(indexed)}", "fail")
        if not self.params.dispatch.squash_errors:
            raise
        return
    self.debug_write(f"integrate_ok_{len(integrated)}", "done")
def pre_process(self, experiments):
    """Add any pre-processing steps here.

    Currently this
      * verifies every image appears in the table of known orientations when
        a known orientation is required, and
      * refuses to run with a user-supplied gain that would be applied on top
        of a non-unity detector gain (unless input.ignore_gain_mismatch).
    """
    if (
        self.params.indexing.stills.known_orientations
        and self.params.indexing.stills.require_known_orientation
    ):
        for expt in experiments:
            # Stills are expected to be single-image imagesets.
            assert (
                len(expt.imageset.indices()) == 1
                and len(expt.imageset.paths()) == 1
            )
            # Known orientations are keyed by (file basename, image index).
            key = (
                os.path.basename(expt.imageset.paths()[0]),
                expt.imageset.indices()[0],
            )
            if key not in self.params.indexing.stills.known_orientations:
                raise RuntimeError("Image not found in set of known orientations")

    if not self.params.input.ignore_gain_mismatch:
        # Prefer the spotfinder gain; fall back to the integration gain.
        g1 = self.params.spotfinder.threshold.dispersion.gain
        g2 = self.params.integration.summation.detector_gain
        gain = g1 if g1 is not None else g2
        if gain is not None and gain != 1.0:
            for detector in experiments.detectors():
                for panel in detector:
                    if panel.get_gain() != 1.0 and panel.get_gain() != gain:
                        raise RuntimeError(
                            """
The detector is reporting a gain of %f but you have also supplied a gain of %f. Since the detector gain is not 1.0, your supplied gain will be multiplicatively applied in addition to the detector's gain, which is unlikely to be correct. Please re-run, removing spotfinder.dispersion.gain and integration.summation.detector_gain from your parameters. You can override this exception by setting input.ignore_gain_mismatch=True."""
                            % (panel.get_gain(), gain)
                        )
def find_spots(self, experiments):
    """Run spotfinding and return the table of strong reflections.

    Z-coordinates and shoebox z-extents are reset so the stills display
    correctly in dials.image_viewer.  Results are either accumulated into the
    composite strong-reflections table (with unique ids/identifiers) or saved
    to the per-image strong_filename.
    """
    st = time.time()

    logger.info("*" * 80)
    logger.info("Finding Strong Spots")
    logger.info("*" * 80)

    # Find the strong spots
    observed = flex.reflection_table.from_observations(
        experiments, self.params, is_stills=True
    )

    # Reset z coordinates for dials.image_viewer; see Issues #226 for details
    xyzobs = observed["xyzobs.px.value"]
    for i in range(len(xyzobs)):
        xyzobs[i] = (xyzobs[i][0], xyzobs[i][1], 0)
    bbox = observed["bbox"]
    for i in range(len(bbox)):
        bbox[i] = (bbox[i][0], bbox[i][1], bbox[i][2], bbox[i][3], 0, 1)

    if self.params.output.composite_output:
        # Renumber ids and experiment identifiers so reflections from every
        # processed image remain unique in the composite table.
        n = len(self.all_strong_reflections.experiment_identifiers())
        for i, experiment in enumerate(experiments):
            refls = observed.select(observed["id"] == i)
            refls["id"] = flex.int(len(refls), n)
            del refls.experiment_identifiers()[i]
            refls.experiment_identifiers()[n] = experiment.identifier
            self.all_strong_reflections.extend(refls)
            n += 1
    else:
        # Save the reflections to file
        logger.info("\n" + "-" * 80)
        if self.params.output.strong_filename:
            self.save_reflections(observed, self.params.output.strong_filename)

    logger.info("")
    logger.info("Time Taken = %f seconds", time.time() - st)
    return observed
def index(self, experiments, reflections):
    """Index the strong spots.

    Args:
        experiments: ExperimentList for the image being processed.
        reflections: Strong-spot reflection table from find_spots.

    Returns:
        (experiments, indexed): refined experiments and the indexed
        reflection table.

    Indexing strategy, in order:
      1. If known crystal models/orientations are available, try indexing
         against them first.
      2. Otherwise (or if that failed and is not required), index with the
         configured method, or walk params.indexing.stills.method_list until
         one method succeeds.
    """
    from dials.algorithms.indexing.indexer import Indexer

    st = time.time()

    logger.info("*" * 80)
    logger.info("Indexing Strong Spots")
    logger.info("*" * 80)

    params = copy.deepcopy(self.params)
    # don't do scan-varying refinement during indexing
    params.refinement.parameterisation.scan_varying = False

    if hasattr(self, "known_crystal_models"):
        known_crystal_models = self.known_crystal_models
    elif self.params.indexing.stills.known_orientations:
        # Build a flat list of candidate orientations parallel to an
        # experiment list in which each experiment is duplicated once per
        # candidate orientation for its image.
        known_crystal_models = []
        extended_experiments = ExperimentList()
        for expt in experiments:
            # Stills are expected to be single-image imagesets.
            assert (
                len(expt.imageset.indices()) == 1
                and len(expt.imageset.paths()) == 1
            )
            key = (
                os.path.basename(expt.imageset.paths()[0]),
                expt.imageset.indices()[0],
            )
            if key not in self.params.indexing.stills.known_orientations:
                if self.params.indexing.stills.require_known_orientation:
                    raise RuntimeError(
                        "Image not found in set of known orientations"
                    )
                else:
                    oris = [None]
            else:
                oris = self.params.indexing.stills.known_orientations[key]
            known_crystal_models.extend(oris)
            extended_experiments.extend(ExperimentList([expt] * len(oris)))
        experiments = extended_experiments
    else:
        known_crystal_models = None

    indexing_succeeded = False
    if known_crystal_models:
        # First attempt: index using the supplied crystal models.
        try:
            idxr = Indexer.from_parameters(
                reflections,
                experiments,
                known_crystal_models=known_crystal_models,
                params=params,
            )
            idxr.index()
            logger.info("indexed from known orientation")
            indexing_succeeded = True
        except Exception:
            # Fall through to ab-initio indexing unless a known orientation
            # is strictly required.
            if self.params.indexing.stills.require_known_orientation:
                raise

    if params.indexing.stills.method_list is None and not indexing_succeeded:
        # Single configured indexing method.
        idxr = Indexer.from_parameters(
            reflections,
            experiments,
            params=params,
        )
        idxr.index()
    elif not indexing_succeeded:
        # Try each method in turn; if all fail, re-raise the first error.
        indexing_error = None
        for method in params.indexing.stills.method_list:
            params.indexing.method = method
            try:
                idxr = Indexer.from_parameters(
                    reflections, experiments, params=params
                )
                idxr.index()
            except Exception as e:
                logger.info("Couldn't index using method %s", method)
                if indexing_error is None:
                    if e is None:
                        e = Exception(f"Couldn't index using method {method}")
                    indexing_error = e
            else:
                indexing_error = None
                break
        if indexing_error is not None:
            raise indexing_error

    indexed = idxr.refined_reflections
    experiments = idxr.refined_experiments

    if known_crystal_models is not None:
        # Indexing against duplicated experiments can assign the same hkl to
        # more than one reflection of an experiment; drop all such duplicates.
        filtered_sel = flex.bool(len(indexed), True)
        for expt_id in range(len(experiments)):
            for idx in set(
                indexed["miller_index"].select(indexed["id"] == expt_id)
            ):
                sel = (indexed["miller_index"] == idx) & (indexed["id"] == expt_id)
                if sel.count(True) > 1:
                    filtered_sel = filtered_sel & ~sel
        filtered = indexed.select(filtered_sel)
        logger.info(
            "Filtered duplicate reflections, %d out of %d remaining",
            len(filtered),
            len(indexed),
        )
        print(
            "Filtered duplicate reflections, %d out of %d remaining"
            % (len(filtered), len(indexed))
        )
        indexed = filtered

    logger.info("")
    logger.info("Time Taken = %f seconds", time.time() - st)
    return experiments, indexed
def refine(self, experiments, centroids):
    """Refine the experimental models against the indexed centroids.

    Returns the (possibly refined) experiments and the surviving centroids.
    When dispatch.refine is off this only records/saves the indexed results.
    """
    if self.params.dispatch.refine:
        from dials.algorithms.refinement import RefinerFactory

        st = time.time()

        logger.info("*" * 80)
        logger.info("Refining Model")
        logger.info("*" * 80)

        refiner = RefinerFactory.from_parameters_data_experiments(
            self.params, centroids, experiments
        )

        refiner.run()
        experiments = refiner.get_experiments()
        predicted = refiner.predict_for_indexed()
        centroids["xyzcal.mm"] = predicted["xyzcal.mm"]
        centroids["entering"] = predicted["entering"]
        # Keep only the reflections that were actually used in refinement.
        centroids = centroids.select(refiner.selection_used_for_refinement())

        # Re-estimate mosaic estimates
        from dials.algorithms.indexing.nave_parameters import NaveParameters

        nv = NaveParameters(
            params=self.params,
            experiments=experiments,
            reflections=centroids,
            refinery=refiner,
            graph_verbose=False,
        )
        nv()
        acceptance_flags_nv = nv.nv_acceptance_flags
        centroids = centroids.select(acceptance_flags_nv)

    if self.params.output.composite_output:
        if (
            self.params.output.refined_experiments_filename
            or self.params.output.indexed_filename
        ):
            assert (
                self.params.output.refined_experiments_filename is not None
                and self.params.output.indexed_filename is not None
            )
            # Accumulate with ids/identifiers renumbered to stay unique in
            # the composite tables.
            n = len(self.all_indexed_experiments)
            self.all_indexed_experiments.extend(experiments)
            for i, experiment in enumerate(experiments):
                refls = centroids.select(centroids["id"] == i)
                refls["id"] = flex.int(len(refls), n)
                del refls.experiment_identifiers()[i]
                refls.experiment_identifiers()[n] = experiment.identifier
                self.all_indexed_reflections.extend(refls)
                n += 1
    else:
        # Dump experiments to disk
        if self.params.output.refined_experiments_filename:
            experiments.as_json(self.params.output.refined_experiments_filename)

        if self.params.output.indexed_filename:
            self.save_reflections(centroids, self.params.output.indexed_filename)

    if self.params.dispatch.refine:
        logger.info("")
        logger.info("Time Taken = %f seconds", time.time() - st)

    return experiments, centroids
def integrate(self, experiments, indexed):
    """Integrate the indexed reflections and return the integrated table.

    Pipeline: clean the reference reflections, apply integration-only
    overrides, optional coset integration, profile modelling (with sigma_b
    filtering), prediction, integration, optional absorption correction and
    significance filtering, then save/accumulate results and log statistics.
    """
    st = time.time()

    logger.info("*" * 80)
    logger.info("Integrating Reflections")
    logger.info("*" * 80)

    indexed, _ = self.process_reference(indexed)

    if self.params.integration.integration_only_overrides.trusted_range:
        for detector in experiments.detectors():
            for panel in detector:
                panel.set_trusted_range(
                    self.params.integration.integration_only_overrides.trusted_range
                )

    if self.params.dispatch.coset:
        from xfel.util.sublattice_helper import integrate_coset

        integrate_coset(self, experiments, indexed)

    # Get the integrator from the input parameters
    logger.info("Configuring integrator from input parameters")
    from dials.algorithms.integration.integrator import create_integrator
    from dials.algorithms.profile_model.factory import ProfileModelFactory

    # Compute the profile model
    # Predict the reflections
    # Match the predictions with the reference
    # Create the integrator
    experiments = ProfileModelFactory.create(self.params, experiments, indexed)
    new_experiments = ExperimentList()
    new_reflections = flex.reflection_table()
    # Drop experiments whose profile sigma_b exceeds the configured cutoff,
    # renumbering the ids of the surviving reflections as we go.
    for expt_id, expt in enumerate(experiments):
        if (
            self.params.profile.gaussian_rs.parameters.sigma_b_cutoff is None
            or expt.profile.sigma_b()
            < self.params.profile.gaussian_rs.parameters.sigma_b_cutoff
        ):
            refls = indexed.select(indexed["id"] == expt_id)
            refls["id"] = flex.int(len(refls), len(new_experiments))
            # refls.reset_ids()
            del refls.experiment_identifiers()[expt_id]
            refls.experiment_identifiers()[len(new_experiments)] = expt.identifier
            new_reflections.extend(refls)
            new_experiments.append(expt)
        else:
            logger.info(
                "Rejected expt %d with sigma_b %f"
                % (expt_id, expt.profile.sigma_b())
            )
    experiments = new_experiments
    indexed = new_reflections
    if len(experiments) == 0:
        raise RuntimeError("No experiments after filtering by sigma_b")

    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info("Predicting reflections")
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
        experiments,
        dmin=self.params.prediction.d_min,
        dmax=self.params.prediction.d_max,
        margin=self.params.prediction.margin,
        force_static=self.params.prediction.force_static,
    )
    predicted.match_with_reference(indexed)
    logger.info("")
    integrator = create_integrator(self.params, experiments, predicted)

    # Integrate the reflections
    integrated = integrator.integrate()

    # correct integrated intensities for absorption correction, if necessary
    for abs_params in self.params.integration.absorption_correction:
        if abs_params.apply:
            if abs_params.algorithm == "fuller_kapton":
                from dials.algorithms.integration.kapton_correction import (
                    multi_kapton_correction,
                )
            elif abs_params.algorithm == "kapton_2019":
                from dials.algorithms.integration.kapton_2019_correction import (
                    multi_kapton_correction,
                )
            # NOTE(review): abs_params.fuller_kapton is passed for both
            # algorithms - confirm kapton_2019 reads its options from the
            # same scope.  If algorithm matched neither branch above,
            # multi_kapton_correction would be unbound here - verify the
            # phil definition restricts algorithm to these two values.
            experiments, integrated = multi_kapton_correction(
                experiments, integrated, abs_params.fuller_kapton, logger=logger
            )()

    if self.params.significance_filter.enable:
        from dials.algorithms.integration.stills_significance_filter import (
            SignificanceFilter,
        )

        sig_filter = SignificanceFilter(self.params)
        filtered_refls = sig_filter(experiments, integrated)
        accepted_expts = ExperimentList()
        accepted_refls = flex.reflection_table()
        logger.info(
            "Removed %d reflections out of %d when applying significance filter",
            len(integrated) - len(filtered_refls),
            len(integrated),
        )
        # Keep only experiments that retain at least one reflection,
        # renumbering ids to stay consecutive.
        for expt_id, expt in enumerate(experiments):
            refls = filtered_refls.select(filtered_refls["id"] == expt_id)
            if len(refls) > 0:
                accepted_expts.append(expt)
                refls["id"] = flex.int(len(refls), len(accepted_expts) - 1)
                accepted_refls.extend(refls)
            else:
                logger.info(
                    "Removed experiment %d which has no reflections left after applying significance filter",
                    expt_id,
                )

        if len(accepted_refls) == 0:
            raise Sorry("No reflections left after applying significance filter")
        experiments = accepted_expts
        integrated = accepted_refls

    # Delete the shoeboxes used for intermediate calculations, if requested
    if self.params.integration.debug.delete_shoeboxes and "shoebox" in integrated:
        del integrated["shoebox"]

    if self.params.output.composite_output:
        if (
            self.params.output.integrated_experiments_filename
            or self.params.output.integrated_filename
        ):
            assert (
                self.params.output.integrated_experiments_filename is not None
                and self.params.output.integrated_filename is not None
            )
            # Accumulate with unique ids/identifiers, as in refine().
            n = len(self.all_integrated_experiments)
            self.all_integrated_experiments.extend(experiments)
            for i, experiment in enumerate(experiments):
                refls = integrated.select(integrated["id"] == i)
                refls["id"] = flex.int(len(refls), n)
                del refls.experiment_identifiers()[i]
                refls.experiment_identifiers()[n] = experiment.identifier
                self.all_integrated_reflections.extend(refls)
                n += 1
    else:
        # Dump experiments to disk
        if self.params.output.integrated_experiments_filename:
            experiments.as_json(self.params.output.integrated_experiments_filename)

        if self.params.output.integrated_filename:
            # Save the reflections
            self.save_reflections(
                integrated, self.params.output.integrated_filename
            )

    self.write_integration_pickles(integrated, experiments)

    # Log RMSD statistics at increasing I/sigI cutoffs, plus the final
    # mosaicity model for each crystal.
    from dials.algorithms.indexing.stills_indexer import (
        calc_2D_rmsd_and_displacements,
    )

    rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed)
    log_str = f"RMSD indexed (px): {rmsd_indexed:f}\n"
    for i in range(6):
        bright_integrated = integrated.select(
            (
                integrated["intensity.sum.value"]
                / flex.sqrt(integrated["intensity.sum.variance"])
            )
            >= i
        )
        if len(bright_integrated) > 0:
            rmsd_integrated, _ = calc_2D_rmsd_and_displacements(bright_integrated)
        else:
            rmsd_integrated = 0
        log_str += (
            "N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n"
            % (i, len(bright_integrated), rmsd_integrated)
        )

    for crystal_model in experiments.crystals():
        if hasattr(crystal_model, "get_domain_size_ang"):
            log_str += ". Final ML model: domain size angstroms: {:f}, half mosaicity degrees: {:f}".format(
                crystal_model.get_domain_size_ang(),
                crystal_model.get_half_mosaicity_deg(),
            )

    logger.info(log_str)

    logger.info("")
    logger.info("Time Taken = %f seconds", time.time() - st)
    return integrated
def write_integration_pickles(self, integrated, experiments, callback=None):
    """
    Write a serialized python dictionary with integrated intensities and other information
    suitable for use by cxi.merge or prime.postrefine.

    @param integrated Reflection table with integrated intensities
    @param experiments Experiment list. One integration pickle for each experiment will be created.
    @param callback Deriving classes can use callback to make further modifications to the dictionary
    before it is serialized. Callback should be a function with this signature:
    def functionname(params, outfile, frame), where params is the phil scope, outfile is the path
    to the pickle that will be saved, and frame is the python dictionary to be serialized.
    """
    if not hasattr(self.params.output, "integration_pickle"):
        return

    if self.params.output.integration_pickle is not None:
        from xfel.command_line.frame_extractor import ConstructFrame

        # Split everything into separate experiments for pickling
        for e_number, experiment in enumerate(experiments):
            e_selection = integrated["id"] == e_number
            reflections = integrated.select(e_selection)

            frame = ConstructFrame(reflections, experiment).make_frame()
            frame["pixel_size"] = experiment.detector[0].get_pixel_size()[0]

            # Derive a per-frame timestamp string for the output filename:
            # prefer self.tag; otherwise fall back to the image path or the
            # format object's timestamp.
            if not hasattr(self, "tag") or self.tag is None:
                try:
                    # if the data was a file on disc, get the path
                    event_timestamp = os.path.splitext(
                        experiments[0].imageset.paths()[0]
                    )[0]
                except NotImplementedError:
                    # if the data is in memory only, check if the reader set a timestamp on the format object
                    event_timestamp = (
                        experiment.imageset.reader().get_format(0).timestamp
                    )
                event_timestamp = os.path.basename(event_timestamp)
                if event_timestamp.find("shot-") == 0:
                    event_timestamp = os.path.splitext(event_timestamp)[
                        0
                    ]  # micromanage the file name
            else:
                event_timestamp = self.tag
            if hasattr(self.params.output, "output_dir"):
                outfile = os.path.join(
                    self.params.output.output_dir,
                    self.params.output.integration_pickle
                    % (e_number, event_timestamp),
                )
            else:
                outfile = os.path.join(
                    os.path.dirname(self.params.output.integration_pickle),
                    self.params.output.integration_pickle
                    % (e_number, event_timestamp),
                )

            if callback is not None:
                callback(self.params, outfile, frame)

            if self.params.output.composite_output:
                # Held in memory; finalize() bundles these into a tar archive.
                self.all_int_pickle_filenames.append(os.path.basename(outfile))
                self.all_int_pickles.append(frame)
            else:
                with open(outfile, "wb") as fh:
                    pickle.dump(frame, fh, protocol=pickle.HIGHEST_PROTOCOL)
def process_reference(self, reference):
    """Load the reference spots.

    Removes unindexed reflections, reflections with hkl (0,0,0), and fails on
    reflections with an invalid experiment id.

    Args:
        reference: Reflection table of indexed reference spots, or None.

    Returns:
        (reference, rubbish): the cleaned reference table and the removed
        reflections, or (None, None) when no reference was supplied.

    Raises:
        Sorry: if no indexed spots remain or any spot has an invalid id.
    """
    if reference is None:
        return None, None
    st = time.time()
    assert "miller_index" in reference
    assert "id" in reference
    logger.info("Processing reference reflections")
    logger.info(" read %d strong spots", len(reference))
    mask = reference.get_flags(reference.flags.indexed)
    rubbish = reference.select(~mask)
    if mask.count(False) > 0:
        reference.del_selected(~mask)
        # Report the number actually removed, i.e. the unindexed (False)
        # count - the original logged mask.count(True), which is the number
        # of reflections *kept*.
        logger.info(" removing %d unindexed reflections", mask.count(False))
    if len(reference) == 0:
        raise Sorry(
            """
Invalid input for reference reflections.
Expected > %d indexed spots, got %d
"""
            % (0, len(reference))
        )
    mask = reference["miller_index"] == (0, 0, 0)
    if mask.count(True) > 0:
        rubbish.extend(reference.select(mask))
        reference.del_selected(mask)
        logger.info(" removing %d reflections with hkl (0,0,0)", mask.count(True))
    mask = reference["id"] < 0
    if mask.count(True) > 0:
        raise Sorry(
            """
Invalid input for reference reflections.
%d reference spots have an invalid experiment id
"""
            % mask.count(True)
        )
    logger.info(" using %d indexed reflections", len(reference))
    logger.info(" found %d junk reflections", len(rubbish))
    logger.info(" time taken: %g", time.time() - st)
    return reference, rubbish
def save_reflections(self, reflections, filename):
    """Save the reflections to file.

    Logs the reflection count, destination path and elapsed time.
    """
    start = time.time()
    logger.info("Saving %d reflections to %s", len(reflections), filename)
    reflections.as_file(filename)
    elapsed = time.time() - start
    logger.info(" time taken: %g", elapsed)
def finalize(self):
    """Perform any final operations.

    For composite output with MPI striding, every stride-th rank gathers the
    accumulated results of its subranks; sender ranks clear their
    accumulators so they write nothing.  The composite experiment lists and
    reflection tables are then dumped to disk and the integration pickles
    bundled into a single tar archive.
    """
    if self.params.output.composite_output:

        # Merge one rank's tables into the composite ones, offsetting
        # reflection ids and experiment identifiers to avoid collisions.
        # (Hoisted out of the receive loop; it captures nothing from it.)
        def extend_with_bookkeeping(src_expts, src_refls, dest_expts, dest_refls):
            n = len(dest_refls.experiment_identifiers())
            src_refls["id"] += n
            idents = src_refls.experiment_identifiers()
            keys = idents.keys()
            values = idents.values()
            for key in keys:
                del idents[key]
            for i, key in enumerate(keys):
                idents[key + n] = values[i]
            dest_expts.extend(src_expts)
            dest_refls.extend(src_refls)

        if self.params.mp.composite_stride is not None:
            assert self.params.mp.method == "mpi"
            stride = self.params.mp.composite_stride

            from mpi4py import MPI

            comm = MPI.COMM_WORLD
            rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
            size = comm.Get_size()  # size: number of processes running in this job
            comm.barrier()

            if rank % stride == 0:
                # Aggregator rank: receive and merge results from subranks.
                subranks = [rank + i for i in range(1, stride) if rank + i < size]
                for i in range(len(subranks)):
                    logger.info("Rank %d waiting for sender", rank)
                    (
                        sender,
                        imported_experiments,
                        strong_reflections,
                        indexed_experiments,
                        indexed_reflections,
                        integrated_experiments,
                        integrated_reflections,
                        coset_experiments,
                        coset_reflections,
                        int_pickles,
                        int_pickle_filenames,
                    ) = comm.recv(source=MPI.ANY_SOURCE)
                    logger.info("Rank %d received data from rank %d", rank, sender)

                    if len(imported_experiments) > 0:
                        extend_with_bookkeeping(
                            imported_experiments,
                            strong_reflections,
                            self.all_imported_experiments,
                            self.all_strong_reflections,
                        )

                    if len(indexed_experiments) > 0:
                        extend_with_bookkeeping(
                            indexed_experiments,
                            indexed_reflections,
                            self.all_indexed_experiments,
                            self.all_indexed_reflections,
                        )

                    if len(integrated_experiments) > 0:
                        extend_with_bookkeeping(
                            integrated_experiments,
                            integrated_reflections,
                            self.all_integrated_experiments,
                            self.all_integrated_reflections,
                        )

                    if len(coset_experiments) > 0:
                        extend_with_bookkeeping(
                            coset_experiments,
                            coset_reflections,
                            self.all_coset_experiments,
                            self.all_coset_reflections,
                        )

                    self.all_int_pickles.extend(int_pickles)
                    self.all_int_pickle_filenames.extend(int_pickle_filenames)
            else:
                # Sender rank: ship everything to the aggregator, then clear
                # the local accumulators so the dump section below is a no-op
                # on this rank.
                destrank = (rank // stride) * stride
                logger.info(
                    "Rank %d sending results to rank %d",
                    rank,
                    destrank,
                )
                comm.send(
                    (
                        rank,
                        self.all_imported_experiments,
                        self.all_strong_reflections,
                        self.all_indexed_experiments,
                        self.all_indexed_reflections,
                        self.all_integrated_experiments,
                        self.all_integrated_reflections,
                        self.all_coset_experiments,
                        self.all_coset_reflections,
                        self.all_int_pickles,
                        self.all_int_pickle_filenames,
                    ),
                    dest=destrank,
                )

                # Reset each accumulator to its own fresh list.  The previous
                # chained assignment aliased one shared list object across all
                # attributes, reset all_integrated_reflections twice and never
                # cleared all_int_pickle_filenames.
                for attr in (
                    "all_imported_experiments",
                    "all_strong_reflections",
                    "all_indexed_experiments",
                    "all_indexed_reflections",
                    "all_integrated_experiments",
                    "all_integrated_reflections",
                    "all_coset_experiments",
                    "all_coset_reflections",
                    "all_int_pickles",
                    "all_int_pickle_filenames",
                ):
                    setattr(self, attr, [])

        # Dump composite files to disk.  On MPI sender ranks the accumulators
        # were just emptied, so every length check below is False there.
        if (
            len(self.all_imported_experiments) > 0
            and self.params.output.experiments_filename
        ):
            self.all_imported_experiments.as_json(
                self.params.output.experiments_filename
            )
        if (
            len(self.all_strong_reflections) > 0
            and self.params.output.strong_filename
        ):
            self.save_reflections(
                self.all_strong_reflections, self.params.output.strong_filename
            )
        if (
            len(self.all_indexed_experiments) > 0
            and self.params.output.refined_experiments_filename
        ):
            self.all_indexed_experiments.as_json(
                self.params.output.refined_experiments_filename
            )
        if (
            len(self.all_indexed_reflections) > 0
            and self.params.output.indexed_filename
        ):
            self.save_reflections(
                self.all_indexed_reflections, self.params.output.indexed_filename
            )
        if (
            len(self.all_integrated_experiments) > 0
            and self.params.output.integrated_experiments_filename
        ):
            self.all_integrated_experiments.as_json(
                self.params.output.integrated_experiments_filename
            )
        if (
            len(self.all_integrated_reflections) > 0
            and self.params.output.integrated_filename
        ):
            self.save_reflections(
                self.all_integrated_reflections,
                self.params.output.integrated_filename,
            )
        if self.params.dispatch.coset:
            if (
                len(self.all_coset_experiments) > 0
                and self.params.output.coset_experiments_filename
            ):
                self.all_coset_experiments.as_json(
                    self.params.output.coset_experiments_filename
                )
            if (
                len(self.all_coset_reflections) > 0
                and self.params.output.coset_filename
            ):
                self.save_reflections(
                    self.all_coset_reflections, self.params.output.coset_filename
                )

        # Create a tar archive of the integration dictionary pickles
        if len(self.all_int_pickles) > 0 and self.params.output.integration_pickle:
            tar_template_integration_pickle = (
                self.params.output.integration_pickle.replace("%d", "%s")
            )
            outfile = (
                os.path.join(
                    self.params.output.output_dir,
                    tar_template_integration_pickle % ("x", self.composite_tag),
                )
                + ".tar"
            )
            tar = tarfile.TarFile(outfile, "w")
            for i, (fname, d) in enumerate(
                zip(self.all_int_pickle_filenames, self.all_int_pickles)
            ):
                string = BytesIO(pickle.dumps(d, protocol=2))
                info = tarfile.TarInfo(name=fname)
                info.size = string.getbuffer().nbytes
                info.mtime = time.time()
                tar.addfile(tarinfo=info, fileobj=string)
            tar.close()
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Entry point: build a Script and execute it with the given CLI args."""
    Script().run(args)


if __name__ == "__main__":
    run()
| bsd-3-clause | 386c9e8ae801952b4b2639f854db7290 | 39.636216 | 428 | 0.534166 | 4.530372 | false | false | false | false |
"""
Classes that define a component of a scaling model.
Each class holds the parameters and relevant data, as a list of
arrays, from which to calculate inverse scale factors and
derivatives.
The components are initialised without any data, which is added
by setting the data dict. To update the internal data
lists used to calculate the scales and derivatives, the
update_reflection_data method should be called, which can optionally
be provided with selection arrays to split the data for blockwise/parallel
calculations.
The scaling algorithm makes use of the components in the following way.
First, the data for all 'suitable' reflections are added to the components.
Then, at different stages of the algorithm, selection lists are provided
to select a subset of this data (e.g. a small subset to prepare the
component for minimisation calculation, or a large subset for calculating
the scales for all reflections). The selection lists typically come from
the Ih_table datastructures so that the data in the components is split in
the same way as the data in the Ih_table datastructure.
"""
from __future__ import annotations
from scitbx import sparse
from dials.array_family import flex
from dials_scaling_ext import calculate_harmonic_tables_from_selections
class ScaleComponentBase:
    """
    Base scale component class.

    This defines an interface to access the parameters, the component
    of the inverse scale factor and its derivatives with respect to
    the parameters. Scale components derived from the base class are
    designed to be instantiated by a ScalingModel class, by supplying
    an initial array of parameters and optionally the current estimated
    standard deviations. The relevant data from a reflection table is
    added later by a Scaler using the update_reflection_data method.

    This behaviour allows data to easily be added/changed after selecting
    subsets of the data.
    """

    def __init__(self, initial_values, parameter_esds=None):
        """Set the initial parameter values, parameter esds and n_params."""
        self._parameters = initial_values
        self._parameter_esds = parameter_esds
        self._n_params = len(self._parameters)
        # Variance-covariance matrix of the parameters; set after minimisation.
        self._var_cov = None
        self._n_refl = []  # store as a list, to allow holding of data in blocks
        self._parameter_restraints = None
        self._data = {}

    @property
    def data(self):
        """
        Return a dictionary of reflection data relevant to the particular component.

        This is designed to be a dict of arrays which can be selected from when
        updating the component (i.e. selecting subsets).
        """
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

    @data.deleter
    def data(self):
        self._data = {}

    @property
    def parameter_restraints(self):
        """Restraint weights for the component parameters."""
        return self._parameter_restraints

    @parameter_restraints.setter
    def parameter_restraints(self, restraints):
        # One restraint weight per parameter.
        assert restraints.size() == self.parameters.size()
        self._parameter_restraints = restraints

    @property
    def n_params(self):
        """Get the number of parameters of the component (read-only)."""
        return self._n_params

    @property
    def parameters(self):
        """Parameters of the component."""
        return self._parameters

    @parameters.setter
    def parameters(self, new_parameters):
        assert len(new_parameters) == len(
            self._parameters
        ), f"""
attempting to set a new set of parameters of different length than previous
assignment: was {len(self._parameters)}, attempting {len(new_parameters)}"""
        self._parameters = new_parameters

    @property
    def free_parameters(self):
        # In the base class all parameters are free; subclasses may restrict
        # which parameters are exposed to the minimiser.
        return self._parameters

    @free_parameters.setter
    def free_parameters(self, parameters):
        self._parameters = parameters

    @property
    def parameter_esds(self):
        """Return the estimated standard deviations of the parameters."""
        return self._parameter_esds

    @parameter_esds.setter
    def parameter_esds(self, esds):
        assert len(esds) == len(self.parameters)
        self._parameter_esds = esds

    @property
    def free_parameter_esds(self):
        """Return the estimated standard deviations of the parameters."""
        return self._parameter_esds

    @free_parameter_esds.setter
    def free_parameter_esds(self, esds):
        assert len(esds) == len(self.free_parameters)
        self._parameter_esds = esds

    def calculate_restraints(self):
        """Calculate residual and gradient restraints for the component."""
        # Default: component is unrestrained.
        return None

    def calculate_jacobian_restraints(self):
        """Calculate residual and jacobian restraints for the component."""
        # Default: component is unrestrained.
        return None

    @property
    def var_cov_matrix(self):
        """Return the variance-covariance matrix of the parameters."""
        return self._var_cov

    @var_cov_matrix.setter
    def var_cov_matrix(self, var_cov):
        self._var_cov = var_cov

    @property
    def n_refl(self):
        """Return a list of the number of reflections in each block."""
        return self._n_refl

    def update_reflection_data(self, selection=None, block_selections=None):
        """
        Update the internal data arrays.

        Use the data stored in self.data, optionally with a selection array
        or list of selections, to populate a list of internal arrays e.g n_refl,
        normalised_values etc. to allow scale and derivative calculations. If no
        selection arrays are provided, the internal arrays will be lists
        containing one array/value, depending on the data type needed for
        derivative and scale calculation.

        Args:
            selection: A flex.bool selection array to select a subset of the
                internal data.
            block_selections (list): A list of flex.size_t arrays to select
                subsets of the internal data.
        """
        raise NotImplementedError()

    def calculate_scales_and_derivatives(self, block_id=0):
        """Calculate and return inverse scales and derivatives for a given block."""
        raise NotImplementedError()

    def calculate_scales(self, block_id=0):
        """Calculate and return inverse scales for a given block."""
        raise NotImplementedError()
class SingleScaleFactor(ScaleComponentBase):
    """
    A model component consisting of a single global scale parameter.

    The inverse scale factor for every reflection is the parameter
    value itself and the derivatives are therefore all 1.0.
    """

    null_parameter_value = 1.0

    def __init__(self, initial_values, parameter_esds=None):
        """Verify a single parameter was supplied, then delegate to the base."""
        assert (
            len(initial_values) == 1
        ), """
This model component can only hold a single parameter."""
        super().__init__(initial_values, parameter_esds)

    @ScaleComponentBase.data.setter
    def data(self, data):
        """Set the data dict in the parent class."""
        # Only the per-reflection experiment id array is needed here.
        assert set(data.keys()) == {"id"}, set(data.keys())
        self._data = data

    def update_reflection_data(self, selection=None, block_selections=None):
        """
        Update the internal n_refl list.

        Derive the per-block reflection counts from self.data["id"], using
        either a single boolean selection, a list of flex.size_t index
        selections, or (if neither is given) the full data array.

        Args:
            selection: Optional, a flex.bool selection array to select a subset
                of the internal data.
            block_selections (list): Optional, a list of flex.size_t arrays to
                select subsets of the internal data.
        """
        ids = self.data["id"]
        if selection:
            counts = [ids.select(selection).size()]
        elif block_selections:
            counts = [ids.select(indices).size() for indices in block_selections]
        else:
            counts = [ids.size()]
        self._n_refl = counts

    def calculate_scales_and_derivatives(self, block_id=0):
        """Calculate and return inverse scales and derivatives for a given block."""
        n_block = self.n_refl[block_id]
        scales = flex.double(n_block, self._parameters[0])
        derivatives = sparse.matrix(n_block, 1)
        # d(scale)/d(parameter) is identically 1 for every reflection.
        for row in range(n_block):
            derivatives[row, 0] = 1.0
        return scales, derivatives

    def calculate_scales(self, block_id=0):
        """Calculate and return inverse scales for a given block."""
        return flex.double(self.n_refl[block_id], self._parameters[0])
class SingleBScaleFactor(ScaleComponentBase):
    """
    A model component for a single global B-factor parameter.

    The inverse scale factor for each reflection is S = exp(B/(2 * d^2)),
    so the derivative with respect to B is S/(2 * d^2).
    """

    null_parameter_value = 0.0

    def __init__(self, initial_values, parameter_esds=None):
        """Set the initial parameter values, parameter esds and n_params."""
        super().__init__(initial_values, parameter_esds)
        self._d_values = []  # per-block arrays of d-spacings

    @property
    def d_values(self):
        """A list of arrays of d-values associated with this component."""
        return self._d_values

    @ScaleComponentBase.data.setter
    def data(self, data):
        """Set the data dict in the parent class."""
        assert set(data.keys()) == {"d"}, set(data.keys())
        self._data = data

    def update_reflection_data(self, selection=None, block_selections=None):
        """
        Update the internal n_refl and d_values lists.

        The "d" array in self.data may be subset either with a flex.bool
        selection or with a list of flex.size_t index selections, giving
        lists of length 1 or len(block_selections) to allow scale and
        derivative calculations.

        Args:
            selection: Optional, a flex.bool selection array to select a
                subset of the internal data.
            block_selections (list): Optional, a list of flex.size_t arrays
                to select subsets of the internal data.
        """
        d = self.data["d"]
        if selection:
            self._d_values = [d.select(selection)]
        elif block_selections:
            self._d_values = [d.select(indices) for indices in block_selections]
        else:
            self._d_values = [d]
        self._n_refl = [block.size() for block in self._d_values]

    def calculate_scales_and_derivatives(self, block_id=0):
        """Calculate and return inverse scales and derivatives for a given block."""
        d_sq = self._d_values[block_id] * self._d_values[block_id]
        n = self._n_refl[block_id]
        scales = flex.exp(flex.double(n, self._parameters[0]) / (2.0 * d_sq))
        derivatives = sparse.matrix(n, 1)
        for row in range(n):
            # dS/dB = S / (2 d^2)
            derivatives[row, 0] = scales[row] / (2.0 * d_sq[row])
        return scales, derivatives

    def calculate_scales(self, block_id=0):
        """Calculate and return inverse scales for a given block."""
        d_vals = self._d_values[block_id]
        return flex.exp(
            flex.double(self._n_refl[block_id], self._parameters[0])
            / (2.0 * (d_vals * d_vals))
        )
class LinearDoseDecay(ScaleComponentBase):
    """
    A model component for a decay that depends linearly on dose
    (see Holton Acta D 2019 D75 113-122).

    For the dose dependence the form is I = I0 exp(-ln(2) D / Hd).
    This is parameterised as a linear function of rotation with a single
    overall factor to refine, T(r) = exp(Cr/d) - i.e. a one parameter
    model with the overall 'dose' proportionality factor C.
    """

    null_parameter_value = 0.0

    def __init__(self, initial_values, parameter_esds=None):
        """Set the initial parameter values, parameter esds and n_params."""
        super().__init__(initial_values, parameter_esds)
        self._d_values = []  # per-block arrays of d-spacings
        self._x = []  # rotation/time

    @property
    def d_values(self):
        """A list of arrays of d-values associated with this component."""
        return self._d_values

    @ScaleComponentBase.data.setter
    def data(self, data):
        """Set the data dict in the parent class."""
        assert set(data.keys()) == {"d", "x"}, set(data.keys())
        self._data = data

    def update_reflection_data(self, selection=None, block_selections=None):
        """
        Update the internal n_refl, d_values and x lists.

        The "d" and "x" arrays in self.data may be subset either with a
        flex.bool selection or with a list of flex.size_t index selections,
        giving lists of length 1 or len(block_selections) to allow scale
        and derivative calculations.

        Args:
            selection: Optional, a flex.bool selection array to select a
                subset of the internal data.
            block_selections (list): Optional, a list of flex.size_t arrays
                to select subsets of the internal data.
        """
        d = self.data["d"]
        x = self.data["x"]
        if selection:
            self._d_values = [d.select(selection)]
            self._x = [x.select(selection)]
        elif block_selections:
            self._d_values = [d.select(indices) for indices in block_selections]
            self._x = [x.select(indices) for indices in block_selections]
        else:
            self._d_values = [d]
            self._x = [x]
        self._n_refl = [block.size() for block in self._d_values]

    def calculate_scales_and_derivatives(self, block_id=0):
        """Calculate and return inverse scales and derivatives for a given block."""
        scales = flex.exp(
            self._parameters[0] * self._x[block_id] / self._d_values[block_id]
        )
        derivatives = sparse.matrix(self._n_refl[block_id], 1)
        for row in range(self._n_refl[block_id]):
            # dS/dC = S * (x / d)
            derivatives[row, 0] = scales[row] * (
                self._x[block_id][row] / self._d_values[block_id][row]
            )
        return scales, derivatives

    def calculate_scales(self, block_id=0):
        """Calculate and return inverse scales for a given block."""
        return flex.exp(
            self._parameters[0] * self._x[block_id] / self._d_values[block_id]
        )
class QuadraticDoseDecay(LinearDoseDecay):
    """
    A model component for a decay that depends quadratically on dose.

    For the dose dependence the form is I = I0 exp(-C D / d^2). This is
    parameterised as a linear function of rotation with a single overall
    factor to refine, T(r) = exp(Cr/d^2) - i.e. a one parameter model
    with the overall 'dose' proportionality factor C.
    """

    def calculate_scales_and_derivatives(self, block_id=0):
        """Calculate and return inverse scales and derivatives for a given block."""
        scales = flex.exp(
            self._parameters[0] * self._x[block_id] / (self._d_values[block_id] ** 2)
        )
        derivatives = sparse.matrix(self._n_refl[block_id], 1)
        for row in range(self._n_refl[block_id]):
            # dS/dC = S * (x / d^2)
            derivatives[row, 0] = scales[row] * (
                self._x[block_id][row] / (self._d_values[block_id][row] ** 2)
            )
        return scales, derivatives

    def calculate_scales(self, block_id=0):
        """Calculate and return inverse scales for a given block."""
        return flex.exp(
            self._parameters[0] * self._x[block_id] / (self._d_values[block_id] ** 2)
        )
class SHScaleComponent(ScaleComponentBase):
    """
    A model component for a spherical harmonic absorption correction.
    This component uses a set of spherical harmonic functions to define
    an absorption surface for the crystal. A matrix of spherical harmonic
    coefficients for the data is stored in self._harmonic_values and is
    used to calculate the scales and derivatives.
    The scale is given by S = 1 + (sum_l sum_m Clm * Ylm) where Clm are
    the model parameters and Ylm are the spherical harmonic coefficients,
    the derivatives are then simply the coefficients Ylm.
    """

    null_parameter_value = 0.0
    # NOTE(review): this class attribute is shared across instances; the
    # memory-mode update below rebinds it on the instance (shadowing the
    # class attribute) when truncating - confirm the sharing is intended.
    coefficients_list = None  # shared class variable to reduce memory load

    def __init__(self, initial_values, parameter_esds=None):
        """Set the initial parameter values, parameter esds and n_params."""
        super().__init__(initial_values, parameter_esds)
        self._harmonic_values = []  # per-block harmonic coefficient tables
        self._matrices = []  # per-block sparse matrices ("memory" mode only)

    @property
    def harmonic_values(self):
        """Return the matrix of harmonic coefficients for the internal data."""
        return self._harmonic_values

    @property
    def sph_harm_table(self):
        """Return the matrix of the full harmonic coefficient for a reflection table."""
        return self._data["sph_harm_table"]

    @sph_harm_table.setter
    def sph_harm_table(self, sht):
        # Store the full spherical-harmonic coefficient table ("speed" mode data).
        self._data["sph_harm_table"] = sht

    @ScaleComponentBase.data.setter
    def data(self, data):
        """Set the data dict in the parent class."""
        # The keys present select the operating mode: lookup-index arrays
        # imply the low-memory mode, a full table implies "speed" mode.
        try:
            assert set(data.keys()) == {"s1_lookup", "s0_lookup"}, set(data.keys())
            self._mode = "memory"
        except AssertionError:
            assert set(data.keys()) == {"sph_harm_table"}, set(data.keys())
            self._mode = "speed"  # Note: only speedier for small datasets
        self._data = data

    def calculate_restraints(self):
        """Calculate residual and gradient restraints for the component."""
        # Quadratic (ridge) restraint pulling each coefficient towards zero.
        residual = self.parameter_restraints * self._parameters * self._parameters
        gradient = 2.0 * self.parameter_restraints * self._parameters
        return residual, gradient

    def calculate_jacobian_restraints(self):
        """Calculate residual and jacobian restraints for the component."""
        # The restraint Jacobian is the identity matrix: each restraint
        # residual depends only on its own parameter.
        jacobian = sparse.matrix(self.n_params, self.n_params)
        for i in range(self.n_params):
            jacobian[i, i] = 1.0
        return self._parameters, jacobian, self._parameter_restraints

    def update_reflection_data(self, selection=None, block_selections=None):
        """
        Update the internal n_refl and harmonic_values lists.
        Use the harmonic values matrix stored in self.data, optionally with a
        boolean selection array or list of flex.size_t index selections, to make
        lists of n_refl and harmonic_value arrays (of length 1 or
        len(block_selections)), in order to allow scale and derivative calculations.
        Args:
            selection: Optional, a flex.bool selection array to select a subset of
                the internal data.
            block_selections (list): Optional, a list of flex.size_t arrays to
                select subsets of the internal data.
        """
        # Dispatch on the mode chosen when the data dict was assigned.
        if self._mode == "speed":
            self._update_reflection_data_speedmode(selection, block_selections)
        elif self._mode == "memory":
            self._update_reflection_data_memorymode(selection, block_selections)
        else:
            raise ValueError

    def _update_reflection_data_memorymode(self, selection=None, block_selections=None):
        # Truncate the shared coefficients to this component's parameter count.
        # NOTE(review): assumes coefficients_list has at least n_params entries.
        if len(self.coefficients_list) != self.n_params:
            self.coefficients_list = self.coefficients_list[0 : self.n_params]
            # modify only for this instance, only needs to be done once per instance.
        # NOTE(review): `if selection:` relies on flex-array truthiness rather
        # than `selection is not None` - confirm behaviour for empty selections.
        if selection:
            n0 = self.data["s0_lookup"].select(selection)
            n1 = self.data["s1_lookup"].select(selection)
            values, matrix = calculate_harmonic_tables_from_selections(
                n0, n1, self.coefficients_list
            )
            self._harmonic_values = [values]
            self._matrices = [matrix]
        elif block_selections:
            self._harmonic_values = []
            self._matrices = []
            for sel in block_selections:
                n0 = self.data["s0_lookup"].select(sel)
                n1 = self.data["s1_lookup"].select(sel)
                values, matrix = calculate_harmonic_tables_from_selections(
                    n0, n1, self.coefficients_list
                )
                self._harmonic_values.append(values)
                self._matrices.append(matrix)
        else:
            n0 = self.data["s0_lookup"]
            n1 = self.data["s1_lookup"]
            values, matrix = calculate_harmonic_tables_from_selections(
                n0, n1, self.coefficients_list
            )
            self._harmonic_values = [values]
            self._matrices = [matrix]
        # Block sizes are taken from the first harmonic array of each block.
        self._n_refl = [val[0].size() for val in self._harmonic_values]

    def _update_reflection_data_speedmode(self, selection=None, block_selections=None):
        # In speed mode the full table is column-selected and transposed so
        # that rows correspond to reflections.
        if selection:
            sel_sph_harm_table = self.data["sph_harm_table"].select_columns(
                selection.iselection()
            )
            self._harmonic_values = [sel_sph_harm_table.transpose()]
        elif block_selections:
            self._harmonic_values = []
            for sel in block_selections:
                block_sph_harm_table = self.data["sph_harm_table"].select_columns(sel)
                self._harmonic_values.append(block_sph_harm_table.transpose())
        else:
            self._harmonic_values = [self.data["sph_harm_table"].transpose()]
        self._n_refl = [val.n_rows for val in self._harmonic_values]

    def calculate_scales(self, block_id=0):
        """Calculate and return inverse scales for a given block."""
        if self._mode == "speed":
            return self._calculate_scales_and_derivatives_speedmode(
                block_id, derivatives=False
            )
        elif self._mode == "memory":
            return self._calculate_scales_and_derivatives_memorymode(
                block_id, derivatives=False
            )

    def calculate_scales_and_derivatives(self, block_id=0):
        """Calculate and return inverse scales and derivatives for a given block."""
        if self._mode == "speed":
            return self._calculate_scales_and_derivatives_speedmode(block_id)
        elif self._mode == "memory":
            return self._calculate_scales_and_derivatives_memorymode(block_id)

    def _calculate_scales_and_derivatives_speedmode(self, block_id, derivatives=True):
        # S = 1 + sum_i C_i * Y_i ; the derivatives are the Y coefficients.
        abs_scale = flex.double(
            self._harmonic_values[block_id].n_rows, 1.0
        )  # Unity term
        for i, col in enumerate(self._harmonic_values[block_id].cols()):
            abs_scale += flex.double(col.as_dense_vector() * self._parameters[i])
        if derivatives:
            return abs_scale, self._harmonic_values[block_id]
        return abs_scale

    def _calculate_scales_and_derivatives_memorymode(self, block_id, derivatives=True):
        # Same formula as speed mode, but the harmonic values are a list of
        # per-parameter arrays and the derivative matrix is precomputed.
        abs_scale = flex.double(
            self._harmonic_values[block_id][0].size(), 1.0
        )  # Unity term
        for i, arr in enumerate(
            self._harmonic_values[block_id]
        ):  # iterate over a list of arrays
            abs_scale += arr * self._parameters[i]
        if derivatives:
            return abs_scale, self._matrices[block_id]
        return abs_scale
| bsd-3-clause | 342874cb9ccda9895592da2d584d2c75 | 38.855442 | 88 | 0.632174 | 4.069283 | false | false | false | false |
douban/dpark | dpark/utils/profile.py | 1 | 1268 | from __future__ import absolute_import
from __future__ import print_function
import socket
import getpass
import sys
from cProfile import Profile
from pstats import Stats
from tempfile import NamedTemporaryFile
from functools import wraps
from datetime import datetime
def profile(hostname=None, to_stdout=False):
    """Decorator factory that profiles the wrapped function with cProfile.

    Args:
        hostname: if given, only enable profiling when running on that host;
            on any other host the returned decorator is a no-op.
        to_stdout: if True, print the profiling stats to stdout; otherwise
            append them to a uniquely named temporary file (kept on disk).

    Returns:
        A decorator. The wrapped function's return value is passed through
        unchanged; stats are emitted after every call.
    """

    def print_stats(stats):
        # Show the hot spots twice: by own time and by cumulative time.
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(20)
        stats.sort_stats('cumulative')
        stats.print_stats(20)

    def decorator(f):
        @wraps(f)
        def _(*args, **kwargs):
            prof = Profile()
            try:
                return prof.runcall(f, *args, **kwargs)
            finally:
                if to_stdout:
                    stats = Stats(prof)
                    print_stats(stats)
                else:
                    # BUG FIX: NamedTemporaryFile defaults to binary mode
                    # ('w+b'), but print() and pstats.Stats write str, which
                    # raised TypeError on Python 3. Open in text mode.
                    with NamedTemporaryFile(
                        mode='w', prefix='dpark_profile_', delete=False
                    ) as fd:
                        print('===\n', datetime.today(), getpass.getuser(), sys.argv[0], file=fd)
                        stats = Stats(prof, stream=fd)
                        print_stats(stats)
        return _

    if hostname is None or socket.gethostname() == hostname:
        return decorator
    else:
        return lambda f: f
| bsd-3-clause | 7629686e7d14f4b70124c4ac29ec4419 | 28.488372 | 97 | 0.566246 | 4.464789 | false | false | false | false |
dials/dials | src/dials/command_line/find_spots_client.py | 1 | 7823 | from __future__ import annotations
import http.client
import json
import os
import select
import socket as pysocket
import sys
import urllib.error
import urllib.parse
import urllib.request
from multiprocessing.pool import ThreadPool
import libtbx.phil
from dxtbx.model.crystal import CrystalFactory
from dxtbx.util import get_url_scheme
from libtbx.introspection import number_of_processors
from scitbx.array_family import flex
import dials.util
def work(host, port, filename, params):
    """Send one spot-finding request for *filename* to the server.

    Each extra parameter is appended to the request path, separated by
    semicolons. Returns the raw (bytes) response body.
    """
    connection = http.client.HTTPConnection(host, port)
    request_path = filename + "".join(f";{param}" for param in params)
    connection.request("GET", request_path)
    return connection.getresponse().read()
def _nproc():
    """Best-effort count of available processors; -1 if it cannot be determined."""
    return number_of_processors(return_value_if_unknown=-1)
def response_to_xml(d):
    """Format a spot-finding result dict as a simple XML string.

    A dict without "n_spots_total" is treated as an error response and must
    carry an "error" key. Otherwise the per-image statistics are emitted,
    optionally followed by indexing (lattice) and integration results.
    """
    if "n_spots_total" not in d:
        # Error responses carry only a message.
        assert "error" in d
        return f"<response>\n{d['error']}\n</response>"

    fragments = [
        f"""<image>{d['image']}</image>
<spot_count>{d['n_spots_total']}</spot_count>
<spot_count_no_ice>{d['n_spots_no_ice']}</spot_count_no_ice>
<d_min>{d['estimated_d_min']:.2f}</d_min>
<d_min_method_1>{d['d_min_distl_method_1']:.2f}</d_min_method_1>
<d_min_method_2>{d['d_min_distl_method_2']:.2f}</d_min_method_2>
<total_intensity>{d['total_intensity']:.0f}</total_intensity>"""
    ]
    if "lattices" in d:
        # One unit-cell element per indexing solution.
        for lattice in d["lattices"]:
            crystal = CrystalFactory.from_dict(lattice["crystal"])
            fragments.append(
                "<unit_cell>%.6g %.6g %.6g %.6g %.6g %.6g</unit_cell>"
                % (crystal.get_unit_cell().parameters())
            )
        fragments.append("<n_indexed>%i</n_indexed>" % d["n_indexed"])
        fragments.append(
            "<fraction_indexed>%.2f</fraction_indexed>" % d["fraction_indexed"]
        )
    if "integrated_intensity" in d:
        fragments.append(
            "<integrated_intensity>%.0f</integrated_intensity>"
            % d["integrated_intensity"]
        )
    body = "\n".join(fragments)
    return f"<response>\n{body}\n</response>"
def work_all(
    host,
    port,
    filenames,
    params,
    plot=False,
    table=False,
    json_file=None,
    grid=None,
    nproc=None,
):
    """Spot-find many images in parallel via the server and report results.

    One request per filename is submitted to a thread pool (see work());
    each JSON response is decoded and echoed as XML. Optionally the raw
    result dicts are written to *json_file*, summary statistics are
    plotted/tabulated, and spot counts are rendered as a grid heat-map.

    Args:
        host, port: location of the running find-spots server.
        filenames: image paths/URLs to process.
        params: extra parameters appended to each request path.
        plot: if True, plot per-image statistics.
        table: if True, print per-image statistics.
        json_file: optional path to write the list of result dicts as JSON.
        grid: optional (nx, ny) shape for a spot-count heat-map
            ("spot_count.png").
        nproc: thread count; None means use the local processor count.
    """
    if nproc is None:
        nproc = _nproc()
    with ThreadPool(processes=nproc) as pool:
        # Submit all requests first so they overlap, then collect in order.
        threads = {}
        for filename in filenames:
            threads[filename] = pool.apply_async(work, (host, port, filename, params))
        results = []
        for filename in filenames:
            response = threads[filename].get()
            d = json.loads(response)
            results.append(d)
            print(response_to_xml(d))
    if json_file is not None:
        # BUG FIX: json.dump() writes str, so the file must be opened in
        # text mode; the previous "wb" mode raised TypeError on Python 3.
        with open(json_file, "w") as f:
            json.dump(results, f)
    if plot or table:
        from dials.algorithms.spot_finding.per_image_analysis import (
            StatsMultiImage,
            plot_stats,
        )

        estimated_d_min = flex.double()
        d_min_distl_method_1 = flex.double()
        d_min_distl_method_2 = flex.double()
        n_spots_total = flex.int()
        n_spots_no_ice = flex.int()
        total_intensity = flex.double()
        for d in results:
            estimated_d_min.append(d["estimated_d_min"])
            d_min_distl_method_1.append(d["d_min_distl_method_1"])
            d_min_distl_method_2.append(d["d_min_distl_method_2"])
            n_spots_total.append(d["n_spots_total"])
            n_spots_no_ice.append(d["n_spots_no_ice"])
            total_intensity.append(d["total_intensity"])
        stats = StatsMultiImage(
            n_spots_total=n_spots_total,
            n_spots_no_ice=n_spots_no_ice,
            n_spots_4A=None,
            total_intensity=total_intensity,
            estimated_d_min=estimated_d_min,
            d_min_distl_method_1=d_min_distl_method_1,
            d_min_distl_method_2=d_min_distl_method_2,
            noisiness_method_1=None,
            noisiness_method_2=None,
        )
        if plot:
            plot_stats(stats)
        if table:
            print(stats)
    if grid is not None:
        from matplotlib import pyplot

        # BUG FIX: previously this reused n_spots_no_ice, which was only
        # defined inside the plot/table branch, so grid=... alone raised
        # NameError. Build the counts array independently here.
        counts = flex.int([d["n_spots_no_ice"] for d in results])
        counts.reshape(flex.grid(grid))
        print(counts.size())
        pyplot.figure()
        pyplot.pcolormesh(counts.as_numpy_array(), cmap=pyplot.cm.Reds)
        pyplot.savefig("spot_count.png")
def stop(host, port, nproc):
    """Ask each of *nproc* find-spots server processes to shut down.

    Sends one HTTP request to /Ctrl-C per expected process and counts how
    many processes appear to have stopped (a 200 reply, a socket error
    consistent with the server exiting mid-reply, or a truncated status
    line all count as stopped).

    Returns:
        The number of processes believed to have stopped.
    """
    stopped = 0
    for j in range(nproc):
        try:
            url_request = urllib.request.Request(f"http://{host}:{port}/Ctrl-C")
            socket = urllib.request.urlopen(url_request, None, 3)
            # BUG FIX: HTTPResponse.getcode() returns an int, so the old
            # comparison against the string "200" could never be true and
            # every clean shutdown was miscounted. Compare with int 200.
            if socket.getcode() == 200:
                stopped = stopped + 1
            else:
                print("socket returned code", socket.getcode())
        except (pysocket.timeout, urllib.error.HTTPError) as e:
            print("error on stopping server:", e)
        except urllib.error.URLError as e:
            # errno 111 (connection refused) just means no server there.
            if e.reason.errno != 111:
                print("error on stopping server:", e)
        except pysocket.error:
            # Assuming this means the server killed itself before the reply left the send buffer.
            stopped = stopped + 1
        except http.client.BadStatusLine:
            # Regular occurrence. Probably means the server stopped anyway.
            stopped = stopped + 1
    return stopped
# Command-line parameter definitions (PHIL) for the find-spots client.
# `nproc = Auto` lets the client pick a process count at run time; host and
# port locate the already-running find-spots server.
phil_scope = libtbx.phil.parse(
    """\
nproc = Auto
.type = int(value_min=1)
host = localhost
.type = str
port = 1701
.type = int(value_min=1)
plot = False
.type = bool
table = False
.type = bool
json = None
.type = path
grid = None
.type = ints(size=2, value_min=1)
"""
)
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Command-line entry point for the find-spots client.

    Accepts filenames/URLs and PHIL parameters on the command line (and, on
    non-Windows platforms, additionally from stdin). The special first
    unhandled arguments "stop" and "ping" control the server; otherwise
    every filename is submitted for spot finding.
    """
    mixed_args = args or sys.argv[1:]
    if os.name != "nt":
        # Non-blocking check for piped input; any lines on stdin are treated
        # as additional command-line arguments.
        # NOTE(review): when args is a caller-supplied list this extend()
        # mutates it in place - confirm that is acceptable.
        r, w, x = select.select([sys.stdin], [], [], 0)
        if len(r) > 0:
            mixed_args.extend([l.strip() for rr in r for l in rr.readlines()])
    # Split arguments into filenames (paths or URLs) and everything else.
    filenames = []
    args = []
    for arg in mixed_args:
        if get_url_scheme(arg):
            # Make this look like a path. If you squint. And are looking away.
            filenames.append("/" + urllib.parse.quote(arg))
        else:
            if os.path.isfile(arg):
                filenames.append(arg)
            else:
                args.append(arg)
    # Interpret the remaining arguments as PHIL assignments; anything the
    # interpreter cannot consume is kept in `unhandled`.
    interp = phil_scope.command_line_argument_interpreter()
    params, unhandled = interp.process_and_fetch(
        args, custom_processor="collect_remaining"
    )
    params = params.extract()
    if params.nproc is libtbx.Auto:
        # Auto: let work_all() determine the local processor count, but ask
        # the pool for a generous number of request slots.
        nproc = None
        params.nproc = 1024
    else:
        nproc = params.nproc
    if len(unhandled) and unhandled[0] == "stop":
        # Shut down the server processes and report how many stopped.
        stopped = stop(params.host, params.port, params.nproc)
        print("Stopped %d findspots processes" % stopped)
    elif len(unhandled) and unhandled[0] == "ping":
        # Liveness check: exit status reflects whether the server answered.
        url = "http://%s:%i" % (params.host, params.port)
        try:
            _ = urllib.request.urlopen(url).read()
            print("Success")
            sys.exit(0)
        except Exception:
            print("Failure")
            sys.exit(1)
    else:
        if len(filenames) == 1:
            # Single image: synchronous request.
            response = work(params.host, params.port, filenames[0], unhandled)
            print(response_to_xml(json.loads(response)))
        else:
            work_all(
                params.host,
                params.port,
                filenames,
                unhandled,
                plot=params.plot,
                table=params.table,
                json_file=params.json,
                grid=params.grid,
                nproc=nproc,
            )


if __name__ == "__main__":
    run()
| bsd-3-clause | 90f2bed13999919cf55880ab04c1caaa | 28.299625 | 97 | 0.555797 | 3.547846 | false | false | false | false |
dials/dials | src/dials/command_line/rl_png.py | 1 | 6628 | # LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# DIALS_ENABLE_COMMAND_LINE_COMPLETION
from __future__ import annotations
import logging
import math
import libtbx.phil
from scitbx import matrix
from scitbx.array_family import flex
import dials.util
from dials.algorithms.indexing.indexer import find_max_cell
from dials.command_line.search_beam_position import run_dps
from dials.util.reciprocal_lattice import Render3d
# NOTE(review): exit() here aborts any importer if matplotlib is missing;
# the comment below suggests this is deliberate for graphics-free builds -
# confirm it is intended outside that environment.
try:
    import matplotlib
except ImportError:
    exit()  # To pass through the "make" step, for graphics-free HPC build

# Offline backend
matplotlib.use("Agg")

logger = logging.getLogger("dials.command_line.rl_png")

# User-facing help text shown by the argument parser.
help_message = """
Generate a png of the strong spots from spotfinding in reciprocal space.
Examples::
dials.rl_png imported.expt strong.refl
dials.rl_png indexed.expt indexed.refl
"""
# Command-line parameter definitions (PHIL), extending the shared
# reciprocal-lattice viewer options with png-specific plot settings.
phil_scope = libtbx.phil.parse(
    """
include scope dials.util.reciprocal_lattice.phil_scope
marker_size = 5
.type = int(value_min=1)
basis_vector_search {
n_solutions = 3
.type = int
}
plot {
size_inches = 10,10
.type = floats(size=2, value_min=0)
}
""",
    process_includes=True,
)
class ReciprocalLatticePng(Render3d):
    """Render3d specialisation whose viewer writes PNG files (PngScene)."""

    def __init__(self, settings=None):
        super().__init__(settings=settings)
        self.viewer = PngScene(settings=self.settings)
class PngScene:
    """Minimal stand-in for a 3D viewer that renders 2D projections to PNG.

    Implements the same setter interface as the interactive viewer so that
    Render3d can populate it, then projects the collected reciprocal-space
    points onto a plane and scatter-plots them with matplotlib.
    """

    def __init__(self, settings):
        self.settings = settings
        self.rotation_axis = None
        self.beam_vector = None
        self.points = None
        self.colors = None
        self.palette = None

    def set_rotation_axis(self, axis):
        self.rotation_axis = axis

    def set_beam_vector(self, beam):
        self.beam_vector = beam

    def set_points(self, points):
        self.points = points

    def set_points_data(self, reflections):
        # we do not label reciprocal lattice points here
        pass

    def set_colors(self, colors):
        # The plot background is white, so recolour pure-white points black.
        # NOTE(review): white is detected via an exact float comparison of
        # the RGB vector norm with sqrt(3) - confirm this is always exact.
        colors.set_selected((colors.norms() == math.sqrt(3)), (0, 0, 0))
        self.colors = colors

    def set_palette(self, palette):
        self.palette = palette

    def set_reciprocal_lattice_vectors(self, *args, **kwargs):
        # we do not draw reciprocal lattice vectors at this time
        pass

    def set_reciprocal_crystal_vectors(self, *args, **kwargs):
        # we do not draw reciprocal crystal vectors at this time either
        pass

    def project_2d(self, n):
        """Project self.points onto the plane through the origin normal to *n*.

        Returns the two in-plane coordinate arrays of the projected points.
        """
        distances = self.points.dot(n.elems)
        normal_components = distances * flex.vec3_double(len(distances), n.elems)
        in_plane = self.points - normal_components
        # Build an orthonormal basis (u, v) spanning the projection plane,
        # taking care not to start from a vector parallel to n.
        u = matrix.col((1, 0, 0))
        if u.angle(n) == 0 or u.angle(-n) == 0:
            u = matrix.col((0, 1, 0))
        u = (u - u.dot(n) * n).normalize()
        v = u.cross(n)
        return in_plane.dot(u), in_plane.dot(v)

    def plot(self, filename, n=(1, 0, 0)):
        """Scatter-plot the projection along plane normal *n* to *filename*."""
        from matplotlib import pyplot

        n = matrix.col(n).normalize()
        x, y = self.project_2d(n)
        fig = pyplot.figure(figsize=self.settings.plot.size_inches)
        pyplot.scatter(
            x.as_numpy_array(),
            y.as_numpy_array(),
            marker="+",
            s=self.settings.marker_size,
            c=list(self.colors),
        )
        pyplot.title("Plane normal: (%.2g, %.2g, %.2g)" % (n.elems))
        fig.savefig(filename)
        pyplot.close()
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Command-line entry point for dials.rl_png.

    Loads experiments and reflections, maps the strong spots into
    reciprocal space and writes a series of PNG projections: along the
    rotation axis, the beam vector and their cross product, plus either
    the crystal basis vectors (if indexed) or candidate basis-vector
    solutions from a direct search.
    """
    from dials.util import log
    from dials.util.options import (
        ArgumentParser,
        reflections_and_experiments_from_files,
    )

    usage = "dials.rl_png [options] experiments.json observations.refl"
    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
        epilog=help_message,
    )
    params, options = parser.parse_args(args)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )
    if len(experiments) == 0 or len(reflections) == 0:
        parser.print_help()
        exit(0)

    # Configure the logging
    log.config(logfile="dials.rl_png.log")

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    reflections = reflections[0]

    f = ReciprocalLatticePng(settings=params)
    f.load_models(experiments, reflections)

    # Orthonormal reference frame from the experimental geometry.
    rotation_axis = matrix.col(experiments[0].goniometer.get_rotation_axis())
    s0 = matrix.col(experiments[0].beam.get_s0())

    e1 = rotation_axis.normalize()
    e2 = s0.normalize()
    e3 = e1.cross(e2).normalize()

    f.viewer.plot("rl_rotation_axis.png", n=e1.elems)
    # BUG FIX: the beam-vector plot was previously written without a ".png"
    # extension ("rl_beam_vector"), inconsistent with every other output.
    f.viewer.plot("rl_beam_vector.png", n=e2.elems)
    f.viewer.plot("rl_e3.png", n=e3.elems)

    n_solutions = params.basis_vector_search.n_solutions

    if experiments.crystals().count(None) < len(experiments):
        # At least one experiment is indexed: plot along each crystal's
        # direct-space basis vectors.
        for i, c in enumerate(experiments.crystals()):
            A = matrix.sqr(c.get_A())
            direct_matrix = A.inverse()
            # Rows of the inverse A matrix are the real-space basis vectors
            # (note: `c` is rebound from the crystal to the c-axis below).
            a = direct_matrix[:3]
            b = direct_matrix[3:6]
            c = direct_matrix[6:9]
            prefix = ""
            if len(experiments.crystals()) > 1:
                prefix = "%i_" % (i + 1)
            f.viewer.plot(f"rl_{prefix}a.png", n=a)
            f.viewer.plot(f"rl_{prefix}b.png", n=b)
            f.viewer.plot(f"rl_{prefix}c.png", n=c)
    elif n_solutions:
        # Not indexed: search for candidate basis vectors and plot the
        # projections along the best few solutions.
        if "imageset_id" not in reflections:
            reflections["imageset_id"] = reflections["id"]
        reflections.centroid_px_to_mm(experiments)
        reflections.map_centroids_to_reciprocal_space(experiments)
        if params.d_min is not None:
            d_spacings = 1 / reflections["rlp"].norms()
            sel = d_spacings > params.d_min
            reflections = reflections.select(sel)
        # derive a max_cell from mm spots
        max_cell = find_max_cell(
            reflections, max_cell_multiplier=1.3, step_size=45
        ).max_cell
        result = run_dps(experiments[0], reflections, max_cell)
        if result:
            solutions = [matrix.col(v) for v in result["solutions"]]
            for i in range(min(n_solutions, len(solutions))):
                v = solutions[i]
                f.viewer.plot(f"rl_solution_{i + 1}.png", n=v.elems)


if __name__ == "__main__":
    run()
| bsd-3-clause | 945a1be18c35e59dcc89810c93978eae | 26.616667 | 77 | 0.610893 | 3.323972 | false | false | false | false |
dials/dials | src/dials/algorithms/indexing/assign_indices.py | 1 | 6043 | from __future__ import annotations
from cctbx.array_family import flex
import dials_algorithms_indexing_ext as ext
from dials.algorithms.indexing import DialsIndexError
class AssignIndicesStrategy:
    """Abstract interface for Miller-index assignment strategies."""

    def __init__(self, d_min=None):
        # Optional high-resolution cutoff honoured by subclasses.
        self._d_min = d_min

    def __call__(self, reciprocal_lattice_vectors):
        raise NotImplementedError()
class AssignIndicesGlobal(AssignIndicesStrategy):
    """Assign Miller indices by global matching against candidate lattices.

    Delegates to the C++ extension ext.AssignIndices, which compares each
    reciprocal-lattice point against every candidate crystal's UB matrix.
    """

    def __init__(self, tolerance=0.3):
        super().__init__()
        # Maximum fractional deviation from an integer hkl for acceptance.
        self._tolerance = tolerance

    def __call__(self, reflections, experiments, d_min=None):
        """Assign "miller_index" and "id" columns of *reflections* in place."""
        reciprocal_lattice_points = reflections["rlp"]
        # Start from a clean slate: every reflection unindexed (0,0,0).
        reflections["miller_index"] = flex.miller_index(len(reflections), (0, 0, 0))
        if d_min is not None:
            d_spacings = 1 / reciprocal_lattice_points.norms()
            inside_resolution_limit = d_spacings > d_min
        else:
            inside_resolution_limit = flex.bool(reciprocal_lattice_points.size(), True)
        # Only consider reflections inside the resolution limit that are not
        # already assigned to an experiment (id == -1).
        sel = inside_resolution_limit & (reflections["id"] == -1)
        isel = sel.iselection()
        rlps = reciprocal_lattice_points.select(isel)
        refs = reflections.select(isel)
        # Rotation angle of each observation (third component of xyzobs).
        phi = refs["xyzobs.mm.value"].parts()[2]
        UB_matrices = flex.mat3_double([cm.get_A() for cm in experiments.crystals()])
        imgset_ids = reflections["imageset_id"].select(sel)
        # Index each imageset's reflections separately against all crystals.
        for i_imgset, imgset in enumerate(experiments.imagesets()):
            sel_imgset = imgset_ids == i_imgset
            result = ext.AssignIndices(
                rlps.select(sel_imgset),
                phi.select(sel_imgset),
                UB_matrices,
                tolerance=self._tolerance,
            )
            miller_indices = result.miller_indices()
            crystal_ids = result.crystal_ids()
            # Translate crystal ids into experiment ids for this imageset,
            # recording experiment identifiers where available.
            expt_ids = flex.int(crystal_ids.size(), -1)
            for i_cryst, cryst in enumerate(experiments.crystals()):
                sel_cryst = crystal_ids == i_cryst
                for i_expt in experiments.where(crystal=cryst, imageset=imgset):
                    expt_ids.set_selected(sel_cryst, i_expt)
                    if experiments[i_expt].identifier:
                        reflections.experiment_identifiers()[i_expt] = experiments[
                            i_expt
                        ].identifier
            # Copy the per-imageset assignments back into the full table.
            reflections["miller_index"].set_selected(
                isel.select(sel_imgset), miller_indices
            )
            reflections["id"].set_selected(isel.select(sel_imgset), expt_ids)
        # Flag successfully indexed reflections; reset ids for the rest.
        reflections.set_flags(
            reflections["miller_index"] != (0, 0, 0), reflections.flags.indexed
        )
        reflections["id"].set_selected(reflections["miller_index"] == (0, 0, 0), -1)
class AssignIndicesLocal(AssignIndicesStrategy):
    """Assign Miller indices using local differences between neighbouring spots.

    Delegates to the C++ extension ext.AssignIndicesLocal, which works on a
    nearest-neighbour graph of reciprocal-lattice points; the resulting
    indices are only determined up to a constant offset per lattice, which
    is resolved afterwards using the lowest-resolution spot.
    """

    def __init__(
        self, d_min=None, epsilon=0.05, delta=8, l_min=0.8, nearest_neighbours=20
    ):
        super().__init__()
        # Tuning parameters of the local-indexing algorithm; passed straight
        # through to ext.AssignIndicesLocal.
        self._epsilon = epsilon
        self._delta = delta
        self._l_min = l_min
        self._nearest_neighbours = nearest_neighbours

    def __call__(self, reflections, experiments, d_min=None):
        """Assign "miller_index" and "id" columns of *reflections* in place."""
        from libtbx.math_utils import nearest_integer as nint
        from scitbx import matrix

        reciprocal_lattice_points = reflections["rlp"]
        if "miller_index" not in reflections:
            reflections["miller_index"] = flex.miller_index(len(reflections))
        if d_min is not None:
            d_spacings = 1 / reciprocal_lattice_points.norms()
            inside_resolution_limit = d_spacings > d_min
        else:
            inside_resolution_limit = flex.bool(reciprocal_lattice_points.size(), True)
        # Only consider in-limit reflections not yet assigned (id == -1).
        sel = inside_resolution_limit & (reflections["id"] == -1)
        isel = sel.iselection()
        rlps = reciprocal_lattice_points.select(isel)
        refs = reflections.select(isel)
        # Rotation angle of each observation (third component of xyzobs).
        phi = refs["xyzobs.mm.value"].parts()[2]
        if len(rlps) <= self._nearest_neighbours:
            raise DialsIndexError(
                "index_assignment.local.nearest_neighbour must be smaller than the number of accepted reflections (%d)"
                % len(rlps)
            )
        UB_matrices = flex.mat3_double([cm.get_A() for cm in experiments.crystals()])
        result = ext.AssignIndicesLocal(
            rlps,
            phi,
            UB_matrices,
            epsilon=self._epsilon,
            delta=self._delta,
            l_min=self._l_min,
            nearest_neighbours=self._nearest_neighbours,
        )
        miller_indices = result.miller_indices()
        crystal_ids = result.crystal_ids()
        hkl = miller_indices.as_vec3_double().iround()
        # Unassigned reflections must come back with index (0,0,0).
        assert miller_indices.select(crystal_ids < 0).all_eq((0, 0, 0))
        # Resolve the per-lattice index offset: anchor each lattice via its
        # lowest-resolution (largest d) spot, whose hkl is predicted directly
        # from the crystal's A matrix.
        for i_cryst in set(crystal_ids):
            if i_cryst < 0:
                continue
            A = matrix.sqr(experiments[i_cryst].crystal.get_A())
            A_inv = A.inverse()
            cryst_sel = crystal_ids == i_cryst
            rlp_sel = rlps.select(cryst_sel)
            hkl_sel = hkl.select(cryst_sel).as_vec3_double()
            d_sel = 1 / rlp_sel.norms()
            d_perm = flex.sort_permutation(d_sel, reverse=True)
            # Fractional hkl of the anchor spot, rounded to the nearest int.
            hf_0 = A_inv * rlp_sel[d_perm[0]]
            h_0 = matrix.col([nint(j) for j in hf_0.elems])
            offset = h_0 - matrix.col(hkl_sel[d_perm[0]])
            # print "offset:", offset.elems
            h = hkl_sel + flex.vec3_double(hkl_sel.size(), offset.elems)
            refs["miller_index"].set_selected(
                cryst_sel, flex.miller_index(list(h.iround()))
            )
            refs["id"].set_selected(cryst_sel, i_cryst)
        crystal_ids.set_selected(crystal_ids < 0, -1)
        refs["id"] = crystal_ids
        refs["miller_index"].set_selected(crystal_ids < 0, (0, 0, 0))
        # Copy the per-subset assignments back into the full table and flag
        # successfully indexed reflections.
        reflections["miller_index"].set_selected(isel, refs["miller_index"])
        reflections["id"].set_selected(isel, refs["id"])
        reflections.set_flags(
            reflections["miller_index"] != (0, 0, 0), reflections.flags.indexed
        )
| bsd-3-clause | c31ae4af13079c5fb87df0ce3e172651 | 37.490446 | 119 | 0.575542 | 3.525671 | false | false | false | false |
dials/dials | src/dials/algorithms/integration/image_integrator.py | 1 | 11125 | from __future__ import annotations
import logging
import platform
from time import time
import dials.algorithms.integration
from dials.algorithms.integration.processor import job
from dials.model.data import ImageVolume, MultiPanelImageVolume, make_image
from dials.util import log
from dials.util.log import rehandle_cached_records
from dials.util.mp import multi_node_parallel_map
from dials_algorithms_integration_integrator_ext import ReflectionManagerPerImage
logger = logging.getLogger(__name__)
class ProcessorImage:
    """Top level processor for per image processing.

    Wraps a ManagerImage and executes its tasks either serially or via
    multi-node parallel map, accumulating results back into the manager.
    """

    def __init__(self, experiments, reflections, params):
        """
        Initialise the manager and the processor.
        The processor requires a manager class implementing the Manager interface.
        This class executes all the workers in separate threads and accumulates the
        results to expose to the user.

        :param experiments: The experiment list to process
        :param reflections: The reflection table to process
        :param params: The phil parameters
        """
        # Create the processing manager
        self.manager = ManagerImage(experiments, reflections, params)

    @property
    def executor(self):
        """
        Get the executor
        :return: The executor
        """
        return self.manager.executor

    @executor.setter
    def executor(self, function):
        """
        Set the executor
        :param function: The executor
        """
        self.manager.executor = function

    def process(self):
        """
        Do all the processing tasks.
        :return: The processing results
        """
        start_time = time()
        self.manager.initialize()
        mp_method = self.manager.params.integration.mp.method
        # Never spawn more processes than there are tasks
        mp_nproc = min(len(self.manager), self.manager.params.integration.mp.nproc)
        mp_njobs = self.manager.params.integration.mp.njobs
        if (
            mp_nproc > 1 and platform.system() == "Windows"
        ):  # platform.system() may fork a subprocess (bad under MPI), so only call it when nproc > 1
            logger.warning(
                "Multiprocessing is not available on windows. Setting nproc = 1\n"
            )
            mp_nproc = 1
        assert mp_nproc > 0, "Invalid number of processors"
        logger.info(self.manager.summary())
        logger.info(" Using %s with %d parallel job(s)\n", mp_method, mp_nproc)
        if mp_nproc > 1:

            def process_output(result):
                # Re-emit log records cached in the worker, accumulate the
                # result, then drop large payloads to free memory.
                rehandle_cached_records(result[1])
                self.manager.accumulate(result[0])
                result[0].reflections = None
                result[0].data = None

            def execute_task(task):
                # Run in the worker process: cache log records so they can be
                # replayed in the parent via process_output.
                log.config_simple_cached()
                result = task()
                handlers = logging.getLogger("dials").handlers
                assert len(handlers) == 1, "Invalid number of logging handlers"
                return result, handlers[0].records

            multi_node_parallel_map(
                func=execute_task,
                iterable=list(self.manager.tasks()),
                njobs=mp_njobs,
                nproc=mp_nproc,
                callback=process_output,
                cluster_method=mp_method,
                preserve_order=True,
            )
        else:
            # Serial path: run each task in-process
            for task in self.manager.tasks():
                self.manager.accumulate(task())
        self.manager.finalize()
        end_time = time()
        self.manager.time.user_time = end_time - start_time
        result = self.manager.result()
        return result, self.manager.time
class Task:
    """
    Integrate a contiguous block of frames from a single imageset.

    The task slices the imageset to its frame range, reads the (corrected)
    image data and masks into an image volume, hands the volume to the
    executor and returns a Result with timing information.
    """

    def __init__(self, index, frames, reflections, experiments, params, executor):
        """
        Initialise the task

        :param index: The index of the processing job
        :param frames: The frames to process
        :param experiments: The list of experiments
        :param reflections: The list of reflections
        :param params: The processing parameters
        :param executor: The executor class
        """
        self.index = index
        self.frames = frames
        self.experiments = experiments
        self.reflections = reflections
        self.params = params
        self.executor = executor

    def __call__(self):
        """
        Do the processing.

        :return: A dials.algorithms.integration.Result for this task
        :raises RuntimeError: if the requested frame range does not lie inside
            the imageset's array range
        """
        # Set the job index
        job.index = self.index
        # Get the start time
        start_time = time()
        # Check all reflections have same imageset and get it
        exp_id = list(set(self.reflections["id"]))
        imageset = self.experiments[exp_id[0]].imageset
        for i in exp_id[1:]:
            assert (
                self.experiments[i].imageset == imageset
            ), "Task can only handle 1 imageset"
        # Get the sub imageset
        frame00, frame01 = self.frames
        try:
            frame10, frame11 = imageset.get_array_range()
        except Exception:
            # Imagesets without a scan have no array range; fall back to 0-based
            frame10, frame11 = (0, len(imageset))
        try:
            assert frame00 < frame01
            assert frame10 < frame11
            assert frame00 >= frame10
            assert frame01 <= frame11
            index0 = frame00 - frame10
            index1 = index0 + (frame01 - frame00)
            assert index0 < index1
            assert index0 >= 0
            assert index1 <= len(imageset)
            imageset = imageset[index0:index1]
        except Exception:
            raise RuntimeError("Programmer Error: bad array range")
        try:
            frame0, frame1 = imageset.get_array_range()
        except Exception:
            frame0, frame1 = (0, len(imageset))
        # Initialise the image volume, one slab per detector panel
        image_volume = MultiPanelImageVolume()
        for panel in self.experiments[0].detector:
            image_volume.add(
                ImageVolume(
                    frame0, frame1, panel.get_image_size()[1], panel.get_image_size()[0]
                )
            )
        # Read all the images into a block of data
        read_time = 0.0
        for i in range(len(imageset)):
            st = time()
            image = imageset.get_corrected_data(i)
            mask = imageset.get_mask(i)
            if self.params.integration.lookup.mask is not None:
                # BUGFIX: this previously read self.params.lookup.mask, which
                # raised an AttributeError whenever a lookup mask was supplied;
                # the mask lives under params.integration.lookup (as used in
                # the guard above and the error message below).
                assert len(mask) == len(
                    self.params.integration.lookup.mask
                ), "Mask/Image are incorrect size %d %d" % (
                    len(mask),
                    len(self.params.integration.lookup.mask),
                )
                # Combine the static lookup mask with the per-image mask
                mask = tuple(
                    m1 & m2 for m1, m2 in zip(self.params.integration.lookup.mask, mask)
                )
            image_volume.set_image(frame0 + i, make_image(image, mask))
            read_time += time() - st
            # Free the per-image buffers as soon as they are copied in
            del image
            del mask
        # Process the data
        st = time()
        data = self.executor.process(image_volume, self.experiments, self.reflections)
        process_time = time() - st
        # Set the result values
        return dials.algorithms.integration.Result(
            index=self.index,
            reflections=self.reflections,
            read_time=read_time,
            process_time=process_time,
            total_time=time() - start_time,
            extract_time=0,
            data=data,
        )
class ManagerImage:
    """
    A class to manage processing book-keeping

    Splits reflections into partials, partitions them per image block via
    ReflectionManagerPerImage, hands out Task objects and accumulates their
    results and timings.
    """

    def __init__(self, experiments, reflections, params):
        """
        Initialise the manager.
        :param experiments: The list of experiments
        :param reflections: The list of reflections
        :param params: The phil parameters
        """
        # Initialise the callbacks
        self.executor = None
        # Save some data
        self.experiments = experiments
        self.reflections = reflections
        # Save some parameters
        self.params = params
        # Set the finalized flag to False
        self.finalized = False
        # Initialise the timing information
        self.time = dials.algorithms.integration.TimingInfo()

    def initialize(self):
        """
        Initialise the processing
        """
        # Get the start time
        start_time = time()
        # Ensure the reflections contain bounding boxes
        assert "bbox" in self.reflections, "Reflections have no bbox"
        # Split the reflections into partials
        self._split_reflections()
        # Create the reflection manager, which partitions reflections by frame
        frames = self.experiments[0].scan.get_array_range()
        self.manager = ReflectionManagerPerImage(frames, self.reflections)
        # Set the initialization time
        self.time.initialize = time() - start_time

    def task(self, index):
        """
        Get a task.

        :param index: The task index
        :return: A Task covering the frames assigned to this index
        """
        return Task(
            index=index,
            frames=self.manager.frames(index),
            reflections=self.manager.split(index),
            experiments=self.experiments,
            params=self.params,
            executor=self.executor,
        )

    def tasks(self):
        """
        Iterate through the tasks.
        """
        for i in range(len(self)):
            yield self.task(i)

    def accumulate(self, result):
        """
        Accumulate the results.

        :param result: A Result returned by a completed Task
        """
        self.manager.accumulate(result.index, result.reflections)
        if result.data is not None:
            self.executor.accumulate(result.index, result.data)
        # Keep running totals of the per-task timings
        self.time.read += result.read_time
        self.time.process += result.process_time
        self.time.total += result.total_time

    def finalize(self):
        """
        Finalize the processing and finish.
        """
        # Get the start time
        start_time = time()
        # Check manager is finished
        assert self.manager.finished(), "Manager is not finished"
        # Update the time and finalized flag
        self.time.finalize = time() - start_time
        self.finalized = True

    def result(self):
        """
        Return the result.
        :return: The result
        """
        assert self.finalized, "Manager is not finalized"
        return self.reflections

    def finished(self):
        """
        Return if all tasks have finished.
        :return: True/False all tasks have finished
        """
        return self.finalized and self.manager.finished()

    def __len__(self):
        """
        Return the number of tasks.
        :return: the number of tasks
        """
        return len(self.manager)

    def summary(self):
        # No textual summary is produced for per-image processing
        return ""

    def _split_reflections(self):
        """
        Split the reflections into partials or over job boundaries
        """
        # Optionally split the reflection table into partials, otherwise,
        # split over job boundaries
        num_full = len(self.reflections)
        self.reflections.split_partials()
        num_partial = len(self.reflections)
        assert num_partial >= num_full, "Invalid number of partials"
        if num_partial > num_full:
            logger.info(
                " Split %d reflections into %d partial reflections\n",
                num_full,
                num_partial,
            )
| bsd-3-clause | 0a29c5e8f681a8ca77ba4f002a001e62 | 29.902778 | 92 | 0.571506 | 4.457131 | false | false | false | false |
dials/dials | src/dials/algorithms/indexing/model_evaluation.py | 1 | 12078 | from __future__ import annotations
import collections
import copy
import logging
import math
import libtbx
from dxtbx.model import Crystal
from scitbx import matrix
from scitbx.array_family import flex
import dials.util
from dials.algorithms.indexing.compare_orientation_matrices import (
difference_rotation_matrix_axis_angle,
)
from dials.algorithms.refinement import RefinerFactory
from dials.util.log import LoggingContext
logger = logging.getLogger(__name__)
# Outcome of evaluating one candidate indexing solution: a likelihood score,
# the refined crystal model, refinement RMSDs and indexing statistics.
Result = collections.namedtuple(
    "Result",
    (
        "model_likelihood",
        "crystal",
        "rmsds",
        "n_indexed",
        "fraction_indexed",
        "hkl_offset",
    ),
)
def filter_doubled_cell(solutions):
    """Reject solutions whose unit cell is a doubling of another solution's cell.

    For each candidate, every doubling of its real-space cell along one or
    more axes is compared against the other solutions; if a doubled cell
    matches another solution (similar cell, <1 degree misorientation) that
    indexes at least ~90% as many reflections, the candidate is dropped.

    :param solutions: iterable of Result-like objects with ``crystal`` and
        ``n_indexed`` attributes
    :return: list of accepted solutions, in input order
    """
    accepted_solutions = []
    for s1 in solutions:
        doubled_cell = False
        # Real-space vectors of this candidate; invariant over the
        # multiplier loop, so compute them once (previously recomputed
        # for every multiplier combination).
        a, b, c = (matrix.col(v) for v in s1.crystal.get_real_space_vectors())
        for m1, m2, m3 in (
            (2, 1, 1),
            (1, 2, 1),
            (1, 1, 2),
            (2, 2, 1),
            (2, 1, 2),
            (1, 2, 2),
            (2, 2, 2),
        ):
            # Candidate sub-cell obtained by halving the doubled axes
            new_cryst = Crystal(
                real_space_a=1 / m1 * a,
                real_space_b=1 / m2 * b,
                real_space_c=1 / m3 * c,
                space_group=s1.crystal.get_space_group(),
            )
            new_unit_cell = new_cryst.get_unit_cell()
            for s2 in solutions:
                if s2 is s1:
                    continue
                if new_unit_cell.is_similar_to(
                    s2.crystal.get_unit_cell(), relative_length_tolerance=0.05
                ):
                    R, axis, angle, cb = difference_rotation_matrix_axis_angle(
                        new_cryst, s2.crystal
                    )
                    # Same orientation and s2 indexes nearly as many (or more)
                    # reflections: s1 is a doubled version of s2
                    if (angle < 1) and (s1.n_indexed < (1.1 * s2.n_indexed)):
                        doubled_cell = True
                        break
            if doubled_cell:
                break
        if not doubled_cell:
            accepted_solutions.append(s1)
    return accepted_solutions
class ModelRank:
    """Abstract base for ranking candidate indexing solutions.

    Subclasses collect candidates via ``append``/``extend`` and must
    implement ``best_model`` and ``__str__``.
    """

    def __init__(self):
        # Every candidate solution recorded so far, in insertion order.
        self.all_solutions = []

    def append(self, item):
        """Record a single candidate solution."""
        self.all_solutions.append(item)

    def extend(self, items):
        """Record several candidate solutions at once."""
        for item in items:
            self.all_solutions.append(item)

    def best_model(self):
        """Return the highest-ranked solution (subclass responsibility)."""
        raise NotImplementedError()

    def __str__(self):
        raise NotImplementedError()
# Tracker for solutions based on code in rstbx/dps_core/basis_choice.py
class ModelRankFilter(ModelRank):
    """Rank solutions by successive filtering.

    Candidates are filtered (on every append/extend) by fraction indexed,
    doubled-cell detection, likelihood and cell volume; ``best_model``
    returns the surviving solution with the highest likelihood.
    """

    def __init__(
        self,
        check_doubled_cell=True,
        likelihood_cutoff=0.8,
        volume_cutoff=1.25,
        n_indexed_cutoff=0.9,
    ):
        """
        :param check_doubled_cell: reject solutions that are a cell doubling
            of another solution
        :param likelihood_cutoff: keep solutions within this fraction of the
            best likelihood
        :param volume_cutoff: keep solutions within this factor of the
            smallest cell volume
        :param n_indexed_cutoff: keep solutions indexing at least this
            fraction of the best solution's indexed count
        """
        super().__init__()
        self.check_doubled_cell = check_doubled_cell
        self.likelihood_cutoff = likelihood_cutoff
        self.volume_cutoff = volume_cutoff
        self.n_indexed_cutoff = n_indexed_cutoff
        self.filtered_solutions = []

    def append(self, item):
        # Re-run the full filter chain whenever a solution is added
        super().append(item)
        self.update_analysis()

    def extend(self, items):
        super().extend(items)
        self.update_analysis()

    def __len__(self):
        # Length reflects the *filtered* solutions, not all solutions seen
        return len(self.filtered_solutions)

    def filter_by_likelihood(self, solutions):
        best_likelihood = max(s.model_likelihood for s in solutions)
        # Shift by an integer offset so the ratio test below is well-defined
        # even when the best likelihood is zero or negative
        offset = 0
        while (best_likelihood + offset) <= 0:
            offset += 1
        return [
            s
            for s in solutions
            if (s.model_likelihood + offset)
            >= (self.likelihood_cutoff * (best_likelihood + offset))
        ]

    def filter_by_volume(self, solutions):
        # filter by volume - prefer solutions with a smaller unit cell
        min_volume = min(s.crystal.get_unit_cell().volume() for s in solutions)
        return [
            s
            for s in solutions
            if s.crystal.get_unit_cell().volume() < (self.volume_cutoff * min_volume)
        ]

    def filter_by_n_indexed(self, solutions, n_indexed_cutoff=None):
        if n_indexed_cutoff is None:
            n_indexed_cutoff = self.n_indexed_cutoff
        # filter by number of indexed reflections - prefer solutions that
        # account for more of the diffracted spots
        max_n_indexed = max(s.n_indexed for s in solutions)
        return [s for s in solutions if s.n_indexed >= n_indexed_cutoff * max_n_indexed]

    def update_analysis(self):
        """Recompute ``filtered_solutions`` from ``all_solutions``."""
        # pre-filter out solutions that only account for a very small
        # percentage of the indexed spots relative to the best one
        self.filtered_solutions = self.filter_by_n_indexed(
            self.all_solutions, n_indexed_cutoff=0.05
        )  # 5 percent
        if self.check_doubled_cell:
            self.filtered_solutions = filter_doubled_cell(self.filtered_solutions)
        self.filtered_solutions = self.filter_by_likelihood(self.filtered_solutions)
        self.filtered_solutions = self.filter_by_volume(self.filtered_solutions)
        self.filtered_solutions = self.filter_by_n_indexed(self.filtered_solutions)
        return

    def best_model(self):
        """Return the filtered solution with the highest likelihood.

        Also records it as ``self.best_filtered_liklihood`` (attribute name
        kept, typo and all, for backwards compatibility).
        """
        self.best_filtered_liklihood = max(
            s.model_likelihood for s in self.filtered_solutions
        )
        solutions = [
            s
            for s in self.filtered_solutions
            if s.model_likelihood == self.best_filtered_liklihood
        ]
        return solutions[0]

    def __str__(self):
        """Tabulate all (unfiltered) solutions."""
        rows = []
        rows.append(
            ["unit_cell", "volume", "n_indexed", "fraction_indexed", "likelihood"]
        )
        for i, s in enumerate(self.all_solutions):
            # NOTE(review): this rebinding is redundant; enumerate already
            # bound s to self.all_solutions[i]
            s = self.all_solutions[i]
            rows.append(
                [
                    format(
                        s.crystal.get_unit_cell(),
                        "{:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {:.1f}",
                    ),
                    f"{s.crystal.get_unit_cell().volume():.0f}",
                    str(s.n_indexed),
                    f"{s.fraction_indexed * 100:.0f}",
                    f"{s.model_likelihood:.2f}",
                ]
            )
        return dials.util.tabulate(rows, headers="firstrow")
class ModelRankWeighted(ModelRank):
    """Rank solutions by a weighted sum of log2-scaled scores for unit-cell
    volume, fraction of spots indexed and positional RMSD (lower is better).
    """

    def __init__(self, power=2, volume_weight=1, n_indexed_weight=1, rmsd_weight=1):
        """
        :param power: each individual score is raised to this power before
            the scores are summed
        :param volume_weight: weight for the cell-volume score
        :param n_indexed_weight: weight for the fraction-indexed score
        :param rmsd_weight: weight for the xy-RMSD score
        """
        super().__init__()
        self.volume_weight = volume_weight
        self.n_indexed_weight = n_indexed_weight
        self.rmsd_weight = rmsd_weight
        self.power = power

    def __len__(self):
        return len(self.all_solutions)

    def score_by_volume(self, reverse=False):
        # smaller volume = better
        # NOTE(review): the ``reverse`` argument is currently unused
        volumes = flex.double(
            s.crystal.get_unit_cell().volume() for s in self.all_solutions
        )
        # log2 scaling, shifted so the best (smallest) volume scores 0
        score = flex.log(volumes) / math.log(2)
        return self.volume_weight * (score - flex.min(score))

    def score_by_rmsd_xy(self, reverse=False):
        # smaller rmsds = better
        # NOTE(review): the ``reverse`` argument is currently unused
        rmsd_x, rmsd_y, rmsd_z = flex.vec3_double(
            s.rmsds for s in self.all_solutions
        ).parts()
        rmsd_xy = flex.sqrt(flex.pow2(rmsd_x) + flex.pow2(rmsd_y))
        score = flex.log(rmsd_xy) / math.log(2)
        return self.rmsd_weight * (score - flex.min(score))

    def score_by_fraction_indexed(self, reverse=False):
        # more indexed reflections = better
        # NOTE(review): the ``reverse`` argument is currently unused
        fraction_indexed = flex.double(s.fraction_indexed for s in self.all_solutions)
        score = flex.log(fraction_indexed) / math.log(2)
        # negated so the largest fraction indexed scores 0 (lower = better)
        return self.n_indexed_weight * (-score + flex.max(score))

    def best_model(self):
        """Return the solution with the lowest combined score."""
        scores = self.combined_scores()
        perm = flex.sort_permutation(scores)
        return self.all_solutions[perm[0]]

    def combined_scores(self):
        """Per-solution sum of the individual scores, each raised to
        ``self.power``; lower is better."""
        scores = sum(
            flex.pow(score.as_double(), self.power)
            for score in (
                self.score_by_fraction_indexed(),
                self.score_by_volume(),
                self.score_by_rmsd_xy(),
            )
        )
        return scores

    def __str__(self):
        """Tabulate all solutions, best (lowest combined score) first."""
        rows = []
        rows.append(
            [
                "unit_cell",
                "volume",
                "volume score",
                "#indexed",
                "% indexed",
                "% indexed score",
                "rmsd_xy",
                "rmsd_xy score",
                "overall score",
            ]
        )
        score_by_fraction_indexed = self.score_by_fraction_indexed()
        score_by_volume = self.score_by_volume()
        score_by_rmsd_xy = self.score_by_rmsd_xy()
        combined_scores = self.combined_scores()
        # Sort rows so the best-scoring solution comes first
        perm = flex.sort_permutation(combined_scores)
        rmsd_x, rmsd_y, rmsd_z = flex.vec3_double(
            s.rmsds for s in self.all_solutions
        ).parts()
        rmsd_xy = flex.sqrt(flex.pow2(rmsd_x) + flex.pow2(rmsd_y))
        for i in perm:
            s = self.all_solutions[i]
            rows.append(
                [
                    format(
                        s.crystal.get_unit_cell(),
                        "{:.2f} {:.2f} {:.2f} {:.1f} {:.1f} {:.1f}",
                    ),
                    f"{s.crystal.get_unit_cell().volume():.0f}",
                    f"{score_by_volume[i]:.2f}",
                    str(s.n_indexed),
                    f"{s.fraction_indexed * 100:.0f}",
                    f"{score_by_fraction_indexed[i]:.2f}",
                    f"{rmsd_xy[i]:.2f}",
                    f"{score_by_rmsd_xy[i]:.2f}",
                    f"{combined_scores[i]:.2f}",
                ]
            )
        return dials.util.tabulate(rows, headers="firstrow")
class Strategy:
    """Interface for strategies that score a candidate indexing model."""

    def evaluate(self, experiments, reflections):
        """Score the given experiments/reflections; subclasses must override."""
        raise NotImplementedError
class ModelEvaluation(Strategy):
    """Evaluate a candidate indexing solution by running a fast, cut-down
    macrocycle of refinement and scoring it on the resulting xy RMSD."""

    def __init__(self, refinement_params):
        """
        :param refinement_params: refinement phil parameters; a deep copy is
            taken and tweaked for speed, the caller's object is not modified
        """
        self._params = copy.deepcopy(refinement_params)
        # override several parameters, mainly for speed
        self._params.refinement.parameterisation.auto_reduction.action = "fix"
        self._params.refinement.parameterisation.scan_varying = False
        self._params.refinement.refinery.max_iterations = 4
        # Cap the number of reflections used per degree at 20
        if self._params.refinement.reflections.reflections_per_degree is libtbx.Auto:
            self._params.refinement.reflections.reflections_per_degree = 20
        else:
            self._params.refinement.reflections.reflections_per_degree = min(
                self._params.refinement.reflections.reflections_per_degree, 20
            )
        if self._params.refinement.reflections.outlier.block_width is libtbx.Auto:
            # auto block_width determination is potentially too expensive to do at
            # this stage: instead set separate_blocks=False and increase value
            # of tukey.iqr_multiplier to be more tolerant of outliers
            self._params.refinement.reflections.outlier.separate_blocks = False
            self._params.refinement.reflections.outlier.tukey.iqr_multiplier = (
                2 * self._params.refinement.reflections.outlier.tukey.iqr_multiplier
            )

    def evaluate(self, experiments, reflections):
        """Refine the model against the indexed reflections.

        :return: a Result with model_likelihood = 1 - rmsd_xy, or None if
            refinement raised RuntimeError/ValueError
        """
        # Silence refinement logging; failures here are expected and handled
        with LoggingContext("dials.algorithms.refinement", level=logging.ERROR):
            indexed_reflections = reflections.select(reflections["id"] > -1)
            try:
                refiner = RefinerFactory.from_parameters_data_experiments(
                    self._params, indexed_reflections, experiments
                )
                refiner.run()
            except (RuntimeError, ValueError):
                # Refinement failed: no score can be assigned to this model
                return
            else:
                rmsds = refiner.rmsds()
                xy_rmsds = math.sqrt(rmsds[0] ** 2 + rmsds[1] ** 2)
                model_likelihood = 1.0 - xy_rmsds
                result = Result(
                    model_likelihood=model_likelihood,
                    crystal=experiments.crystals()[0],
                    rmsds=rmsds,
                    n_indexed=len(indexed_reflections),
                    fraction_indexed=float(len(indexed_reflections)) / len(reflections),
                    hkl_offset=(0, 0, 0),
                )
                return result
| bsd-3-clause | b6135dcaf113c6df2ac3641994879213 | 33.215297 | 88 | 0.548435 | 3.828209 | false | false | false | false |
dials/dials | src/dials/command_line/model_background.py | 1 | 9235 | """Calculate a global background model and diagnostic images from integrated
experiments. This background model may improve subsequent integration in some
cases, such as in the presence of ice rings. The method is described in the
publication https://doi.org/10.1107/S2052252517010259.
Usage:
dials.model_background integrated.expt
dials.integrate integrated.expt refined.refl background.algorithm=gmodel gmodel.robust.algorithm=True gmodel.model=background.pickle
"""
from __future__ import annotations
import logging
import pickle
import sys
from libtbx.phil import parse
import dials.util
import dials.util.log
logger = logging.getLogger("dials.command_line.model_background")
# Phil parameter scope: output filenames, background modeller options, plus
# the standard integration and reflection-prediction scopes via the includes.
phil_scope = parse(
"""
output {
model = 'background.pickle'
.type = str
.help = "The output filename"
log = 'dials.model_background.log'
.type = str
.help = "The log filename"
mean_image_prefix = 'mean'
.type = str
.help = "The mean background image"
variance_image_prefix = 'variance'
.type = str
.help = "The variance background image"
dispersion_image_prefix = 'dispersion'
.type = str
.help = "The dispersion background image"
mask_image_prefix = 'mask'
.type = str
.help = "The mask background image"
min_image_prefix = 'min'
.type = str
.help = "The min background image"
max_image_prefix = 'max'
.type = str
.help = "The max background image"
model_image_prefix = 'model'
.type = str
.help = "The model background image"
polar_model_image_prefix = 'polar'
.type = str
.help = "The polar model background image"
}
modeller {
min_images = 10
.type = int(value_min=1)
.help = "The minimum number of images per pixel"
filter_type = *median mean
.type = choice
.help = "The filter to use on the polar transformed image"
kernel_size = 50
.type = int(value_min=0)
.help = "The kernel size for the median filter"
niter = 100
.type = int(value_min=1)
.help = "The number of iterations for filling holes"
image_type = min *mean
.type = choice
.help = "Which image to use"
}
include scope dials.algorithms.integration.integrator.phil_scope
include scope dials.algorithms.spot_prediction.reflection_predictor.phil_scope
""",
process_includes=True,
)
class ImageGenerator:
    """
    Generate diagnostic images (one PNG per detector panel) from a list of
    per-panel background models.
    """

    def __init__(self, model):
        """
        Init the model

        :param model: sequence of per-panel background model objects
        """
        import matplotlib

        # Non-interactive backend so images can be written headless
        matplotlib.use("Agg")
        self.model = model

    def _save_plot(
        self, name, filename, extractor_fn, bounded=True, colorbar=True, vmax=None
    ):
        """
        Save the image

        :param name: human-readable image name, used for logging only
        :param filename: output prefix; "_<panel>.png" is appended
        :param extractor_fn: callable extracting the image array from a model
        :param bounded: clip the colour scale (99th percentile unless vmax given)
        :param colorbar: whether to draw a colour bar
        :param vmax: explicit upper bound for the colour scale
        """
        from matplotlib import pylab

        for i, model in enumerate(self.model):
            image = extractor_fn(model)
            pylab.figure(figsize=(6, 4))
            if bounded and vmax is None:
                # Clip at the 99th percentile so hot pixels don't dominate
                boundaries = {
                    "vmin": 0,
                    "vmax": sorted(image)[int(0.99 * len(image))],
                }
            elif bounded:
                boundaries = {"vmin": 0, "vmax": vmax}
            else:
                boundaries = {}
            pylab.imshow(image.as_numpy_array(), interpolation="none", **boundaries)
            ax1 = pylab.gca()
            ax1.get_xaxis().set_visible(False)
            ax1.get_yaxis().set_visible(False)
            if colorbar:
                cb = pylab.colorbar()
                cb.ax.tick_params(labelsize=8)
            logger.info(
                "Saving %s image for panel %d to %s_%d.png", name, i, filename, i
            )
            pylab.savefig("%s_%d.png" % (filename, i), dpi=600, bbox_inches="tight")

    def save_min(self, filename):
        """
        Save the min image
        """
        self._save_plot("min", filename, lambda m: m.min_image)

    def save_max(self, filename):
        """
        Save the max image
        """
        self._save_plot("max", filename, lambda m: m.max_image)

    def save_mean(self, filename):
        """
        Save the mean image
        """
        self._save_plot("mean", filename, lambda m: m.mean)

    def save_variance(self, filename):
        """
        Save the variance image
        """
        self._save_plot("variance", filename, lambda m: m.variance)

    def save_dispersion(self, filename):
        """
        Save the dispersion image
        """
        self._save_plot("dispersion", filename, lambda m: m.dispersion, vmax=2)

    def save_mask(self, filename):
        """
        Save the mask image
        """
        self._save_plot(
            "mask", filename, lambda m: m.mask, bounded=False, colorbar=False
        )

    def save_model(self, filename):
        """
        Save the model image
        """
        self._save_plot("model", filename, lambda m: m.model)

    def save_polar_model(self, filename):
        """
        Save the polar model image
        """
        self._save_plot("polar model", filename, lambda m: m.polar_model, bounded=False)
class Script:
    """A class for running the script."""

    def __init__(self):
        """Initialise the script."""
        from dials.util.options import ArgumentParser

        usage = "dials.model_background [options] [param.phil] models.expt"
        # Initialise the base class
        self.parser = ArgumentParser(
            usage=usage, phil=phil_scope, epilog=__doc__, read_experiments=True
        )

    def run(self, args=None):
        """Execute the script.

        Parses the command line, predicts reflections, computes the global
        background model, pickles it to params.output.model and writes
        diagnostic images.

        :param args: command-line argument list (defaults to sys.argv)
        """
        from dials.algorithms.background.modeller import BackgroundModeller
        from dials.array_family import flex
        from dials.util.command_line import heading
        from dials.util.options import flatten_experiments

        # Parse the command line
        params, options = self.parser.parse_args(args, show_diff_phil=False)
        # Configure the logging
        dials.util.log.config(verbosity=options.verbose, logfile=params.output.log)
        if params.integration.mp.nproc != 1 or params.integration.mp.njobs != 1:
            # Multiprocessing is force-disabled here, see
            # https://github.com/dials/dials/issues/1083
            logger.warning(
                "Multiprocessing is currently disabled. " "Setting nproc = njobs = 1"
            )
            params.integration.mp.nproc = 1
            params.integration.mp.njobs = 1
        from dials.util.version import dials_version

        logger.info(dials_version())
        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)
        # Ensure we have a data block
        experiments = flatten_experiments(params.input.experiments)
        if len(experiments) == 0:
            self.parser.print_help()
            return
        # A profile model is required, so the input must be post-integration
        if any((e.profile is None for e in experiments)):
            sys.exit(
                "Experiments must contain a profile model (for example, after integration)"
            )
        # Only handle a single imageset at once
        imagesets = {expr.imageset for expr in experiments}
        if len(imagesets) != 1:
            sys.exit("Can only process a single imageset at a time")
        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
        )
        # Create the modeller
        modeller = BackgroundModeller(experiments, predicted, params)
        model = modeller.compute()
        # Save the background model
        logger.info("Saving background model to %s", params.output.model)
        from dials.algorithms.background.gmodel import StaticBackgroundModel

        static_model = StaticBackgroundModel()
        for m in model:
            static_model.add(m.model)
        with open(params.output.model, "wb") as outfile:
            pickle.dump(static_model, outfile, protocol=pickle.HIGHEST_PROTOCOL)
        # Output some diagnostic images
        image_generator = ImageGenerator(model)
        image_generator.save_mean(params.output.mean_image_prefix)
        image_generator.save_variance(params.output.variance_image_prefix)
        image_generator.save_dispersion(params.output.dispersion_image_prefix)
        image_generator.save_mask(params.output.mask_image_prefix)
        image_generator.save_min(params.output.min_image_prefix)
        image_generator.save_max(params.output.max_image_prefix)
        image_generator.save_model(params.output.model_image_prefix)
        # image_generator.save_polar_model(params.output.polar_model_image_prefix)
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Entry point: construct the Script and run it with *args*."""
    script = Script()
    script.run(args)


if __name__ == "__main__":
    run()
| bsd-3-clause | 9c5cad30e9d6c0a50ece0d2178d8d4cf | 29.081433 | 136 | 0.60314 | 3.949957 | false | false | false | false |
dials/dials | src/dials/util/slice.py | 1 | 3419 | from __future__ import annotations
import copy
from scitbx.array_family import flex
def slice_experiments(experiments, image_ranges):
    """
    Return a deep copy of *experiments* with each scan (and its imageset and
    any scan-varying crystal) sliced to the requested image range.

    :param experiments
    :type experiments: dxtbx.model.experiment_list.ExperimentList
    :param image_range:
    :type image_range: list of 2-tuples defining scan range for each experiment
        (a None entry leaves that experiment unsliced)
    :raises ValueError: if the two lists differ in length
    :raises IndexError: if a requested slice lies outside the current scan
    """
    # copy the experiments
    experiments = copy.deepcopy(experiments)
    if len(experiments) != len(image_ranges):
        raise ValueError(
            "Input experiment list and image_ranges are not of the same length"
        )
    for exp, sr in zip(experiments, image_ranges):
        if sr is None:
            # No slicing requested for this experiment
            continue
        im_range = exp.scan.get_image_range()
        if sr[0] < im_range[0] or sr[1] > im_range[1]:
            raise IndexError("requested slice outside current scan range")
        # slicing uses the array range, not the image range
        arr_start = exp.scan.get_array_range()[0]
        beg = sr[0] - 1 - arr_start
        end = sr[1] - arr_start
        exp.scan.swap(exp.scan[beg:end])
        if exp.imageset is not None:
            # Gorilla of temporary workarounds for inconsistent scan and imageset slicing
            # https://github.com/cctbx/dxtbx/issues/213
            offset = exp.scan.get_batch_offset()
            exp.imageset = exp.imageset[beg + offset : end + offset]
        # account for scan-varying crystal
        if exp.crystal and exp.crystal.num_scan_points > 0:
            exp.crystal = slice_crystal(exp.crystal, (beg, end))
    return experiments
def slice_reflections(reflections, image_ranges):
    """Return a copy of *reflections* restricted to the given image ranges.

    :param reflections: reflection table of input reflections
    :type reflections: dials.array_family.flex.reflection_table
    :param image_ranges: one 2-tuple (first, last image) per experiment id
        contained within the reflections; a None entry drops that experiment
    :type image_ranges: list
    """
    # Work on a copy so the caller's table is left untouched
    reflections = copy.deepcopy(reflections)
    keep = flex.size_t()
    for exp_id, image_range in enumerate(image_ranges):
        if image_range is None:
            continue
        sel = (reflections["id"] == exp_id).iselection()
        z_obs = reflections["xyzobs.px.value"].parts()[2].select(sel)
        # Reflections on image n have frame centroids in [n - 1, n)
        within = (z_obs >= image_range[0] - 1) & (z_obs < image_range[1])
        keep.extend(sel.select(within))
    # Reflections whose id lies outside the range of image_ranges are
    # implicitly removed as well
    return reflections.select(keep)
def slice_crystal(crystal, array_range):
    """Return a copy of a scan-varying crystal restricted to *array_range*.

    A crystal with no scan points is returned unchanged (not copied).
    """
    if crystal.num_scan_points == 0:
        return crystal
    sliced = copy.deepcopy(crystal)
    first, last = array_range
    # There is one more scan point than there are images in the range
    stop = last + 1
    A_matrices = [
        sliced.get_A_at_scan_point(i) for i in range(sliced.num_scan_points)
    ][first:stop]
    covariance = sliced.get_B_covariance_at_scan_points()
    sliced.reset_scan_points()
    sliced.set_A_at_scan_points(A_matrices)
    if len(covariance) > 0:
        sliced.set_B_covariance_at_scan_points(covariance[first:stop, 0:9, 0:9])
    return sliced
| bsd-3-clause | d62929153722235eb122cedaba756df0 | 33.19 | 89 | 0.638491 | 3.488776 | false | false | false | false |
dials/dials | tests/algorithms/integration/profile/test_empirical_modeller.py | 1 | 5054 | from __future__ import annotations
import math
from dials.algorithms.profile_model.modeller import EmpiricalProfileModeller
def evaluate_gaussian(x, a, x0, sx):
    """Evaluate an N-dimensional Gaussian of amplitude *a*, centre *x0* and
    per-axis widths *sx* at the point *x*."""
    assert len(x) == len(x0)
    assert len(x) == len(sx)
    exponent = sum(
        (xi - mu) ** 2 / (2.0 * sigma**2) for xi, mu, sigma in zip(x, x0, sx)
    )
    return a * math.exp(-exponent)
def gaussian(size, a, x0, sx):
    """Fill a flex array of the given grid *size* with an N-dimensional
    Gaussian of amplitude *a*, centre *x0* and widths *sx* evaluated at
    every grid point.
    """
    from scitbx.array_family import flex

    result = flex.double(flex.grid(size))
    # Odometer-style iteration over every multi-dimensional grid index
    index = [0] * len(size)
    while True:
        result[index] = evaluate_gaussian(index, a, x0, sx)
        for j in range(len(size)):
            index[j] += 1
            if index[j] < size[j]:
                break
            index[j] = 0
            # All digits have wrapped around: every grid point is visited
            if j == len(size) - 1:
                return result
class Modeller(EmpiricalProfileModeller):
    """Trivial test modeller: every profile is added to every reference
    position with unit weight."""

    def model(self, reflections, profiles):
        from dials.array_family import flex

        # One entry per reference profile position held by the modeller
        indices = flex.size_t(range(len(self)))
        weights = flex.double([1.0] * len(self))
        for profile in profiles:
            self.add(indices, weights, profile)
class Test:
    """Tests for EmpiricalProfileModeller via the trivial Modeller above."""

    def setup_class(self):
        # Number of reference profiles, profile grid dimensions and the
        # normalization threshold shared by all tests
        self.n = 9
        self.grid_size = (9, 9, 9)
        self.threshold = 0.0

    def test_with_identical_non_negative_profiles(self):
        """All references learned from identical profiles must equal the
        normalized input profile."""
        from scitbx.array_family import flex

        # Generate identical non-negative profiles
        reflections, profiles, profile = self.generate_identical_non_negative_profiles()
        # Create the reference learner
        modeller = Modeller(self.n, self.grid_size, self.threshold)
        # Do the modelling
        modeller.model(reflections, profiles)
        modeller.finalize()
        # Normalize the profile
        profile = self.normalize_profile(profile)
        # Check that all the reference profiles are the same
        eps = 1e-10
        for index in range(len(modeller)):
            reference = modeller.data(index)
            for k in range(self.grid_size[2]):
                for j in range(self.grid_size[1]):
                    for i in range(self.grid_size[0]):
                        assert abs(reference[k, j, i] - profile[k, j, i]) <= eps
            assert abs(flex.sum(reference) - 1.0) <= eps

    def test_with_systematically_offset_profiles(self):
        """With a symmetric distribution of offsets, each learned reference
        must come out the same (and normalized)."""
        from scitbx.array_family import flex

        # Generate systematically offset profiles
        reflections, profiles = self.generate_systematically_offset_profiles()
        # Create the reference learner
        modeller = Modeller(self.n, self.grid_size, self.threshold)
        # Do the modelling
        modeller.model(reflections, profiles)
        modeller.finalize()
        # Check that all the reference profiles are the same
        eps = 1e-10
        profile = None
        for index in range(len(modeller)):
            reference = modeller.data(index)
            if profile is not None:
                for k in range(self.grid_size[2]):
                    for j in range(self.grid_size[1]):
                        for i in range(self.grid_size[0]):
                            assert abs(reference[k, j, i] - profile[k, j, i]) <= eps
            else:
                # First reference becomes the comparison baseline
                profile = reference
            assert abs(flex.sum(reference) - 1.0) <= eps

    def normalize_profile(self, profile):
        """Zero values at or below the threshold and scale the remainder so
        the profile sums to 1 (mirrors the modeller's normalization)."""
        from scitbx.array_family import flex

        max_profile = flex.max(profile)
        threshold = self.threshold * max_profile
        sum_profile = 0.0
        for i in range(len(profile)):
            if profile[i] > threshold:
                sum_profile += profile[i]
            else:
                profile[i] = 0.0
        result = flex.double(flex.grid(profile.all()))
        for i in range(len(profile)):
            result[i] = profile[i] / sum_profile
        return result

    def generate_identical_non_negative_profiles(self):
        """Return 1000 reflections at random positions that all share one
        Gaussian profile (plus the shared profile itself)."""
        from random import uniform

        from dials.array_family import flex

        rlist = flex.reflection_table(1000)
        profile = gaussian(self.grid_size, 1000, (4, 4, 4), (1.5, 1.5, 1.5))
        xyz = flex.vec3_double(1000)
        profiles = []
        for i in range(1000):
            x = uniform(0, 1000)
            y = uniform(0, 1000)
            z = uniform(0, 10)
            xyz[i] = (x, y, z)
            profiles.append(profile.deep_copy())
        rlist["xyzcal.px"] = xyz
        return rlist, profiles, profile

    def generate_systematically_offset_profiles(self):
        """Return 1000 reflections whose Gaussian profile centre drifts
        linearly with the reflection's x position."""
        from random import uniform

        from dials.array_family import flex

        rlist = flex.reflection_table(1000)
        xyz = flex.vec3_double(1000)
        profiles = []
        for i in range(1000):
            x = uniform(0, 1000)
            y = uniform(0, 1000)
            z = uniform(0, 10)
            # Centre offset varies linearly from -4.5 to +4.5 across x
            offset = -4.5 + 9 * x / 1000.0
            profile = gaussian(
                self.grid_size, 1000, (4 + offset, 4, 4), (1.5, 1.5, 1.5)
            )
            xyz[i] = (x, y, z)
            profiles.append(profile)
        rlist["xyzcal.px"] = xyz
        return rlist, profiles
| bsd-3-clause | 13f2d8c0e6f4b06db28924a61ed1f5f7 | 29.263473 | 88 | 0.557578 | 3.782934 | false | false | false | false |
feincms/feincms-elephantblog | tests/testapp/test_contents.py | 1 | 3083 | from django.template.loader import render_to_string
from django.test import TestCase
from elephantblog.contents import BlogCategoryListContent, BlogEntryListContent
from .factories import EntryFactory, create_category, create_entries
class Request:
    # Minimal stand-in for an HTTP request: the content types under test only
    # read request.GET.  "page" selects the pagination page; the test suite
    # mutates this class-level dict between render passes.
    GET = {"page": 1}
class ContentsTestCase(TestCase):
    """End-to-end checks of the blog content types' process()/render() cycle."""

    def test_contents(self):
        # Fixture: several entries; the first gets rich text and is featured,
        # the second is filed under "Category 1", plus one empty category.
        entries = create_entries(EntryFactory)
        entries[0].richtextcontent_set.create(
            region="main", ordering=1, text="Hello world"
        )
        entries[0].is_featured = True
        entries[0].save()
        category = create_category(title="Category 1")
        entries[1].categories.add(category)
        create_category(title="Empty category")

        BlogEntryListContent._meta.abstract = False  # Hack to allow instantiation
        content = BlogEntryListContent()
        content.process(Request)
        html = render_to_string(*content.render())
        # Unfiltered list shows both dated entries.
        self.assertIn(
            'h2 class="entry-title"><a href="/multilang/2012/10/12/eintrag-1/',
            html,
        )
        self.assertIn(
            'h2 class="entry-title"><a href="/multilang/2012/08/12/entry-1/"',
            html,
        )

        # featured_only leaves just the single featured entry.
        content.featured_only = True
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            1,
        )

        # featured + category: no entry matches both filters.
        content.category = category
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            0,
        )

        # Category filter alone matches one entry.
        content.featured_only = False
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            1,
        )

        # Pagination with one entry per page: valid, out-of-range and junk
        # page values each still render exactly one entry (presumably the
        # paginator falls back to a valid page -- behavior under test).
        content = BlogEntryListContent()
        content.paginate_by = 1
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            1,
        )
        Request.GET["page"] = 2
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            1,
        )
        Request.GET["page"] = 3
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            1,
        )
        Request.GET["page"] = "abc"
        content.process(Request)
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<h2"),
            1,
        )

        # Category list: hides empty categories unless asked not to.
        BlogCategoryListContent._meta.abstract = False  # Hack to allow instantiation
        content = BlogCategoryListContent()
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<li>"),
            1,
        )
        content.show_empty_categories = True
        html = render_to_string(*content.render())
        self.assertEqual(
            html.count("<li>"),
            2,
        )
| bsd-3-clause | 08590c79343193c5674d17609560a08b | 27.027273 | 85 | 0.557249 | 4.264177 | false | false | false | false |
chromium/gyp | test/make_global_settings/ar/gyptest-make_global_settings_ar.py | 9 | 3838 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies 'AR' in make_global_settings.
"""
import os
import sys
import TestGyp
def resolve_path(test, path):
    """Translate *path* into the form the given generator writes into its
    build files.

    Returns None unchanged; make wraps the path in $(abspath ...); the ninja
    generators prefix it with '../..'.  Any other generator fails the test.
    """
    if path is None:
        return None
    fmt = test.format
    if fmt == 'make':
        return '$(abspath %s)' % path
    if fmt in ('ninja', 'xcode-ninja'):
        return os.path.join('..', '..', path)
    test.fail_test()
def verify_ar_target(test, ar=None, rel_path=False):
    """Check that the generated target-arch build files name the expected
    archiver (AR).  When *ar* is None, the generator's default is expected."""
    expected = resolve_path(test, ar) if rel_path else ar

    # Resolve default values.
    if expected is None:
        if test.format == 'make':
            # Make generator hasn't set the default value for AR.
            # You can remove the following assertion as long as it doesn't
            # break existing projects.
            test.must_not_contain('Makefile', 'AR ?= ')
            return
        if test.format in ('ninja', 'xcode-ninja'):
            expected = 'lib.exe' if sys.platform == 'win32' else 'ar'

    if test.format == 'make':
        test.must_contain('Makefile', 'AR ?= %s' % expected)
    elif test.format in ('ninja', 'xcode-ninja'):
        test.must_contain('out/Default/build.ninja', 'ar = %s' % expected)
    else:
        test.fail_test()
def verify_ar_host(test, ar=None, rel_path=False):
    """Check that the generated host-arch build files name the expected
    archiver (AR.host / ar_host)."""
    expected = resolve_path(test, ar) if rel_path else ar

    # Default host archiver per platform.
    if expected is None:
        expected = 'lib.exe' if sys.platform == 'win32' else 'ar'

    if test.format == 'make':
        test.must_contain('Makefile', 'AR.host ?= %s' % expected)
    elif test.format in ('ninja', 'xcode-ninja'):
        test.must_contain('out/Default/build.ninja', 'ar_host = %s' % expected)
    else:
        test.fail_test()
# Drive the gyp generators and verify AR resolution under several
# configurations: defaults, make_global_settings overrides, and environment
# variables, with and without GYP_CROSSCOMPILE.
test_format = ['ninja']
if sys.platform.startswith('linux') or sys.platform == 'darwin':
    test_format += ['make']

test = TestGyp.TestGyp(formats=test_format)

# Check default values
test.run_gyp('make_global_settings_ar.gyp')
verify_ar_target(test)

# Check default values with GYP_CROSSCOMPILE enabled.
with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('make_global_settings_ar.gyp')
verify_ar_target(test)
verify_ar_host(test)

# Test 'AR' in 'make_global_settings'.
with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('make_global_settings_ar.gyp', '-Dcustom_ar_target=my_ar')
verify_ar_target(test, ar='my_ar', rel_path=True)

# Test 'AR'/'AR.host' in 'make_global_settings'.
with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('make_global_settings_ar.gyp',
                 '-Dcustom_ar_target=my_ar_target1',
                 '-Dcustom_ar_host=my_ar_host1')
verify_ar_target(test, ar='my_ar_target1', rel_path=True)
verify_ar_host(test, ar='my_ar_host1', rel_path=True)

# Test $AR and $AR_host environment variables.
with TestGyp.LocalEnv({'AR': 'my_ar_target2',
                       'AR_host': 'my_ar_host2'}):
    test.run_gyp('make_global_settings_ar.gyp')

# Ninja generator resolves $AR in gyp phase. Make generator doesn't.
if test.format == 'ninja':
    if sys.platform == 'win32':
        # TODO(yukawa): Make sure if this is an expected result or not.
        verify_ar_target(test, ar='lib.exe', rel_path=False)
    else:
        verify_ar_target(test, ar='my_ar_target2', rel_path=False)
verify_ar_host(test, ar='my_ar_host2', rel_path=False)

# Test 'AR' in 'make_global_settings' with $AR_host environment variable.
with TestGyp.LocalEnv({'AR_host': 'my_ar_host3'}):
    test.run_gyp('make_global_settings_ar.gyp',
                 '-Dcustom_ar_target=my_ar_target3')
verify_ar_target(test, ar='my_ar_target3', rel_path=True)
verify_ar_host(test, ar='my_ar_host3', rel_path=False)

test.pass_test()
| bsd-3-clause | 50f68689e9ac3da224f839e9d4de0faa | 29.460317 | 78 | 0.652944 | 3.055732 | false | true | false | false |
chromium/gyp | pylib/gyp/MSVSUserFile.py | 24 | 5086 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
    """Visual Studio XML user user file writer."""

    def __init__(self, user_file_path, version, name):
        """Initializes the user file.

        Args:
          user_file_path: Path to the user file.
          version: Version info.
          name: Name of the user file.
        """
        self.user_file_path = user_file_path
        self.version = version
        self.name = name
        self.configurations = {}

    def AddConfig(self, name):
        """Adds a configuration to the project.

        Args:
          name: Configuration name.
        """
        self.configurations[name] = ['Configuration', {'Name': name}]

    # FIX: 'environment' previously defaulted to a mutable {} literal (the
    # classic shared-mutable-default pitfall).  None has identical behavior
    # here, since both None and {} are falsy and serialize to ''.
    def AddDebugSettings(self, config_name, command, environment=None,
                         working_directory=""):
        """Adds a DebugSettings node to the user file for a particular config.

        Args:
          config_name: Name of the configuration the settings apply to.
          command: command line to run. First element in the list is the
              executable. All elements of the command will be quoted if
              necessary.
          environment: optional dict of NAME -> value environment overrides.
          working_directory: working directory for the command. (optional)
        """
        command = _QuoteWin32CommandLineArgs(command)

        abs_command = _FindCommandInPath(command[0])

        if environment and isinstance(environment, dict):
            env_list = ['%s="%s"' % (key, val)
                        for (key, val) in environment.items()]
            environment = ' '.join(env_list)
        else:
            environment = ''

        n_cmd = ['DebugSettings',
                 {'Command': abs_command,
                  'WorkingDirectory': working_directory,
                  'CommandArguments': " ".join(command[1:]),
                  'RemoteMachine': socket.gethostname(),
                  'Environment': environment,
                  'EnvironmentMerge': 'true',
                  # Currently these are all "dummy" values that we're just setting
                  # in the default manner that MSVS does it. We could use some of
                  # these to add additional capabilities, I suppose, but they might
                  # not have parity with other platforms then.
                  'Attach': 'false',
                  'DebuggerType': '3',  # 'auto' debugger
                  'Remote': '1',
                  'RemoteCommand': '',
                  'HttpUrl': '',
                  'PDBPath': '',
                  'SQLDebugging': '',
                  'DebuggerFlavor': '0',
                  'MPIRunCommand': '',
                  'MPIRunArguments': '',
                  'MPIRunWorkingDirectory': '',
                  'ApplicationCommand': '',
                  'ApplicationArguments': '',
                  'ShimCommand': '',
                  'MPIAcceptMode': '',
                  'MPIAcceptFilter': ''
                  }]

        # Find the config, and add it if it doesn't exist.
        if config_name not in self.configurations:
            self.AddConfig(config_name)

        # Add the DebugSettings onto the appropriate config.
        self.configurations[config_name].append(n_cmd)

    def WriteIfChanged(self):
        """Writes the user file."""
        configs = ['Configurations']
        for config, spec in sorted(self.configurations.items()):
            configs.append(spec)

        content = ['VisualStudioUserFile',
                   {'Version': self.version.ProjectVersion(),
                    'Name': self.name
                    },
                   configs]
        easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                                   encoding="Windows-1252")
| bsd-3-clause | 932b976a2117c04b3def4b3d94bf950b | 33.598639 | 79 | 0.593394 | 4.328511 | false | true | false | false |
ipython/ipython | IPython/utils/path.py | 1 | 11937 | # encoding: utf-8
"""
Utilities for path handling.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import errno
import shutil
import random
import glob
from IPython.utils.process import system
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
fs_encoding = sys.getfilesystemencoding()
def _writable_dir(path):
"""Whether `path` is a directory, to which the user has write access."""
return os.path.isdir(path) and os.access(path, os.W_OK)
# Platform-specific implementation selected at import time: on Windows the
# Win32 GetLongPathNameW API expands 8.3 short names; elsewhere it's a no-op.
if sys.platform == 'win32':
    def _get_long_path_name(path):
        """Get a long path name (expand ~) on Windows using ctypes.

        Examples
        --------
        >>> get_long_path_name('c:\\\\docume~1')
        'c:\\\\Documents and Settings'

        """
        try:
            import ctypes
        except ImportError as e:
            raise ImportError('you need to have ctypes installed for this to work') from e
        _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
        _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
                                     ctypes.c_uint ]

        buf = ctypes.create_unicode_buffer(260)
        rv = _GetLongPathName(path, buf, 260)
        # rv == 0 means the API call failed; rv > 260 means the long name
        # did not fit the MAX_PATH-sized buffer.  Fall back to the input.
        if rv == 0 or rv > 260:
            return path
        else:
            return buf.value
else:
    def _get_long_path_name(path):
        """Dummy no-op."""
        return path
def get_long_path_name(path):
    """Expand a path into its long form.

    On Windows this expands any ~ in the paths. On other platforms, it is
    a null operation.

    Thin public wrapper over the platform-selected ``_get_long_path_name``.
    """
    return _get_long_path_name(path)
def compress_user(path):
    """Reverse of :func:`os.path.expanduser`.

    Replace the user's home-directory prefix of *path* with ``~``.

    Parameters
    ----------
    path : str
        A filesystem path.

    Returns
    -------
    str
        *path* with the home prefix replaced by ``~``, or unchanged when it
        does not live under the home directory.
    """
    home = os.path.expanduser('~')
    # FIX: only compress a true path-component match.  A bare startswith()
    # mangled siblings such as "/home/userX" into "~X" because it ignores
    # the directory boundary.
    if path == home:
        return '~'
    if path.startswith(home + os.sep):
        return "~" + path[len(home):]
    return path
def get_py_filename(name):
    """Return a valid python filename in the current directory.

    If the given name is not a file, it adds '.py' and searches again.
    Raises IOError with an informative message if the file isn't found.
    """
    name = os.path.expanduser(name)
    # Try the name as given; if it lacks a .py suffix, also try with one.
    candidates = [name] if name.endswith('.py') else [name, name + '.py']
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise IOError("File `%r` not found." % name)
def filefind(filename: str, path_dirs=None) -> str:
    """Find a file by looking through a sequence of paths.

    Returns the full, absolute path of the first occurrence of *filename*
    among *path_dirs*.  Each candidate is run through :func:`expand_path`
    (``~`` and ``$VAR`` expansion) before testing for existence.

    Parameters
    ----------
    filename : str
        The filename to look for.
    path_dirs : str, None or sequence of str
        Directories to search.  None means the filename must be absolute or
        relative to the cwd; a single string is treated as a one-element
        sequence; ``'.'`` is replaced by the current working directory.

    Returns
    -------
    str
        Absolute path to the file.

    Raises
    ------
    IOError
        If the file is not found in any of the search paths.
    """
    # Quotes around the path confuse abspath; drop them first.
    filename = filename.strip('"').strip("'")

    # An absolute path that exists needs no search.
    if os.path.isabs(filename) and os.path.isfile(filename):
        return filename

    if path_dirs is None:
        path_dirs = ("",)
    elif isinstance(path_dirs, str):
        path_dirs = (path_dirs,)

    for directory in path_dirs:
        if directory == '.':
            directory = os.getcwd()
        candidate = expand_path(os.path.join(directory, filename))
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)

    raise IOError("File %r does not exist in any of the search paths: %r" %
                  (filename, path_dirs))
class HomeDirError(Exception):
    """Raised when a usable (writable) home directory cannot be determined."""
    pass
def get_home_dir(require_writable=False) -> str:
    """Return the 'home' directory, as a unicode string.

    Uses os.path.expanduser('~'), and checks for writability.

    See stdlib docs for how this is determined.
    For Python <3.8, $HOME is first priority on *ALL* platforms.
    For Python >=3.8 on Windows, %HOME% is no longer considered.

    Parameters
    ----------
    require_writable : bool [default: False]
        if True:
            guarantees the return value is a writable directory, otherwise
            raises HomeDirError
        if False:
            The path is resolved, but it is not guaranteed to exist or be writable.
    """
    homedir = os.path.expanduser('~')
    # Next line will make things work even when /home/ is a symlink to
    # /usr/home as it is on FreeBSD, for example
    homedir = os.path.realpath(homedir)

    if not _writable_dir(homedir) and os.name == 'nt':
        # expanduser failed, use the registry to get the 'My Documents' folder.
        try:
            import winreg as wreg
            with wreg.OpenKey(
                wreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
            ) as key:
                homedir = wreg.QueryValueEx(key, 'Personal')[0]
        # FIX: was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit.  Keep the best-effort fallback, but only for real
        # errors.
        except Exception:
            pass

    if (not require_writable) or _writable_dir(homedir):
        assert isinstance(homedir, str), "Homedir should be unicode not bytes"
        return homedir
    else:
        raise HomeDirError('%s is not a writable dir, '
                           'set $HOME environment variable to override' % homedir)
def get_xdg_dir():
    """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.

    This is only for non-OS X posix (Linux,Unix,etc.) systems.
    """
    if os.name != "posix":
        return None
    # Fall back to ~/.config when the variable is unset *or* empty.
    xdg = os.environ.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
    if xdg and _writable_dir(xdg):
        assert isinstance(xdg, str)
        return xdg
    return None
def get_xdg_cache_dir():
    """Return the XDG_CACHE_HOME, if it is defined and exists, else None.

    This is only for non-OS X posix (Linux,Unix,etc.) systems.
    """
    if os.name != "posix":
        return None
    # Fall back to ~/.cache when the variable is unset *or* empty.
    xdg = os.environ.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
    if xdg and _writable_dir(xdg):
        assert isinstance(xdg, str)
        return xdg
    return None
def expand_path(s):
    """Expand $VARS and ~names in a string, like a shell

    :Examples:

       In [2]: os.environ['FOO']='test'

       In [3]: expand_path('variable FOO is $FOO')
       Out[3]: 'variable FOO is test'
    """
    # On Windows, expandvars treats the lone '$' of a UNC hidden share
    # (\\server\share$\%username%) as an empty variable and strips it.
    # Stash the '$\' sequence behind a placeholder around the expansion so
    # the hidden-share marker survives.
    on_windows = os.name == 'nt'
    if on_windows:
        s = s.replace('$\\', 'IPYTHON_TEMP')
    s = os.path.expandvars(os.path.expanduser(s))
    if on_windows:
        s = s.replace('IPYTHON_TEMP', '$\\')
    return s
def unescape_glob(string):
    """Unescape glob pattern in `string`.

    Backslash-escaped glob metacharacters (``*[]!?``) become literal, and
    each escaped backslash (``\\\\``) collapses to a single backslash.
    """
    def _unescape_one(segment):
        for ch in '*[]!?':
            segment = segment.replace('\\' + ch, ch)
        return segment

    # Split on escaped backslashes first so they are preserved as literals.
    return '\\'.join(_unescape_one(part) for part in string.split('\\\\'))
def shellglob(args):
    """
    Do glob expansion for each element in `args` and return a flattened list.

    Unmatched glob pattern will remain as-is in the returned list.
    """
    # On Windows the backslash is the path separator, so leave it alone;
    # elsewhere it escapes glob metacharacters and must be unescaped in
    # patterns that matched nothing.
    if sys.platform == 'win32':
        unescape = lambda x: x
    else:
        unescape = unescape_glob
    matches = []
    for pattern in args:
        matches.extend(glob.glob(pattern) or [unescape(pattern)])
    return matches
def target_outdated(target, deps):
    """Determine whether a target is out of date.

    target_outdated(target,deps) -> 1/0

    deps: list of filenames which MUST exist.
    target: single filename which may or may not exist.

    If target doesn't exist or is older than any file listed in deps, return
    true, otherwise return false.
    """
    try:
        target_mtime = os.path.getmtime(target)
    except os.error:
        # Missing target is always outdated.
        return 1
    return 1 if any(os.path.getmtime(dep) > target_mtime for dep in deps) else 0
def target_update(target, deps, cmd):
    """Update a target with a given command given a list of dependencies.

    target_update(target,deps,cmd) -> runs cmd if target is outdated.

    This is just a wrapper around target_outdated() which calls the given
    command if target is outdated."""
    # ``cmd`` is a shell command string, executed via IPython's system().
    if target_outdated(target, deps):
        system(cmd)
# Sentinel errno meaning "os.link is unavailable on this platform".
ENOLINK = 1998

def link(src, dst):
    """Hard links ``src`` to ``dst``, returning 0 or errno.

    Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
    supported by the operating system.
    """
    if not hasattr(os, "link"):
        return ENOLINK
    try:
        os.link(src, dst)
    except OSError as e:
        return e.errno
    return 0
def link_or_copy(src, dst):
    """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.

    Attempts to maintain the semantics of ``shutil.copy``.

    Because ``os.link`` does not overwrite files, a unique temporary file
    will be used if the target already exists, then that file will be moved
    into place.
    """
    if os.path.isdir(dst):
        # Mirror shutil.copy semantics: a directory target means copy into it.
        dst = os.path.join(dst, os.path.basename(src))

    link_errno = link(src, dst)
    if link_errno == errno.EEXIST:
        if os.stat(src).st_ino == os.stat(dst).st_ino:
            # dst is already a hard link to the correct file, so we don't need
            # to do anything else. If we try to link and rename the file
            # anyway, we get duplicate files - see http://bugs.python.org/issue21876
            return

        # Link (or copy) to a random temp name next to dst, then rename it
        # over the existing file; rename overwrites, unlike os.link.
        new_dst = dst + "-temp-%04X" % (random.randint(1, 16**4), )
        try:
            link_or_copy(src, new_dst)
        except:
            # Clean up the partial temp file before re-raising.
            try:
                os.remove(new_dst)
            except OSError:
                pass
            raise
        os.rename(new_dst, dst)
    elif link_errno != 0:
        # Either link isn't supported, or the filesystem doesn't support
        # linking, or 'src' and 'dst' are on different filesystems.
        shutil.copy(src, dst)
def ensure_dir_exists(path, mode=0o755):
    """ensure that a directory exists

    If it doesn't exist, try to create it and protect against a race condition
    if another process is doing the same.

    The default permissions are 755, which differ from os.makedirs default of 777.
    """
    if os.path.exists(path):
        if not os.path.isdir(path):
            raise IOError("%r exists but is not a directory" % path)
        return
    try:
        os.makedirs(path, mode=mode)
    except OSError as e:
        # Another process may have created it between the check and now.
        if e.errno != errno.EEXIST:
            raise
| bsd-3-clause | 51ccb99d6424ad0592e4f10e8d2e680d | 29.529412 | 90 | 0.604758 | 3.890808 | false | false | false | false |
ipython/ipython | IPython/utils/frame.py | 1 | 3048 | # encoding: utf-8
"""
Utilities for working with stack frames.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def extract_vars(*names, **kw):
    """Extract a set of variables by name from another frame.

    Parameters
    ----------
    *names : str
        One or more variable names which will be extracted from the caller's
        frame.
    **kw : integer, optional
        How many frames in the stack to walk when looking for your variables.
        The default is 0, which will use the frame where the call was made.

    Examples
    --------
    ::

        In [2]: def func(x):
           ...:     y = 1
           ...:     print(sorted(extract_vars('x','y').items()))
           ...:

        In [3]: func('hello')
        [('x', 'hello'), ('y', 1)]
    """
    depth = kw.get('depth', 0)
    # +1 skips this function's own frame.
    caller_locals = sys._getframe(depth + 1).f_locals
    return {name: caller_locals[name] for name in names}
def extract_vars_above(*names):
    """Extract a set of variables by name from another frame.

    Similar to extractVars(), but with a specified depth of 1, so that names
    are extracted exactly from above the caller.

    This is simply a convenience function so that the very common case (for us)
    of skipping exactly 1 frame doesn't have to construct a special dict for
    keyword passing."""
    # Frame 0 is this function, 1 the caller, 2 the caller's caller.
    above_locals = sys._getframe(2).f_locals
    return {name: above_locals[name] for name in names}
def debugx(expr, pre_msg=''):
    """Print the value of an expression from the caller's frame.

    Takes an expression, evaluates it in the caller's frame and prints both
    the given expression and the resulting value (as well as a debug mark
    indicating the name of the calling function. The input must be of a form
    suitable for eval().

    An optional message can be passed, which will be prepended to the printed
    expr->value pair."""
    caller = sys._getframe(1)
    value = eval(expr, caller.f_globals, caller.f_locals)
    print('[DBG:%s] %s%s -> %r' % (caller.f_code.co_name, pre_msg, expr, value))

# deactivate it by uncommenting the following line, which makes it a no-op
#def debugx(expr,pre_msg=''): pass
def extract_module_locals(depth=0):
    """Returns (module, locals) of the function `depth` frames away from the caller"""
    frame = sys._getframe(depth + 1)
    # Look the module object up by the frame's __name__; the module must be
    # importable (present in sys.modules).
    module = sys.modules[frame.f_globals['__name__']]
    return (module, frame.f_locals)
| bsd-3-clause | 140496a59121839d756072620fd9e345 | 32.130435 | 86 | 0.536417 | 4.508876 | false | false | false | false |
ipython/ipython | IPython/utils/tz.py | 44 | 1352 | # encoding: utf-8
"""
Timezone utilities
Just UTC-awareness right now
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from datetime import tzinfo, timedelta, datetime
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# constant for zero offset
ZERO = timedelta(0)

class tzUTC(tzinfo):
    """tzinfo object for UTC (zero offset)"""

    def utcoffset(self, d):
        # UTC is always zero offset from itself.
        return ZERO

    def dst(self, d):
        # UTC never observes daylight saving time.
        return ZERO

# Shared singleton instance used by the utc_aware wrappers below.
UTC = tzUTC()
def utc_aware(unaware):
    """decorator for adding UTC tzinfo to datetime's utcfoo methods"""
    def wrapper(*args, **kwargs):
        naive = unaware(*args, **kwargs)
        # Stamp the naive result as UTC without shifting the wall time.
        return naive.replace(tzinfo=UTC)
    return wrapper
utcnow = utc_aware(datetime.utcnow)
| bsd-3-clause | cb720515b7bc78b31161bc40b7bf8001 | 28.391304 | 78 | 0.448225 | 5.473684 | false | false | false | false |
ipython/ipython | tools/github_stats.py | 1 | 8425 | #!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
To generate a report for IPython 2.0, run:
python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from subprocess import check_output
from gh_api import (
get_paged_request, make_auth_header, get_pull_request, is_pull_request,
get_milestone_id, get_issues_list, get_authors,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def round_hour(dt):
    """Truncate *dt* down to the start of its hour."""
    return dt.replace(minute=0, second=0, microsecond=0)
def _parse_datetime(s):
    """Parse dates in the format returned by the Github API."""
    if not s:
        # Missing timestamps (e.g. an issue never closed) map to the epoch.
        return datetime.fromtimestamp(0)
    return datetime.strptime(s, ISO8601)
def issues2dict(issues):
    """Convert a list of issues to a dict, keyed by issue number."""
    return {issue['number']: issue for issue in issues}
def split_pulls(all_issues, project="ipython/ipython"):
    """split a list of closed issues into non-PR Issues and Pull Requests"""
    pulls = []
    issues = []
    for issue in all_issues:
        if is_pull_request(issue):
            # Fetch the full PR record (merge status etc.) from the API.
            pulls.append(get_pull_request(project, issue['number'], auth=True))
        else:
            issues.append(issue)
    return issues, pulls
def issues_closed_since(period=timedelta(days=365), project="ipython/ipython", pulls=False):
    """Get all issues closed since a particular point in time. period
    can either be a datetime object, or a timedelta object. In the
    latter case, it is used as a time before the present.
    """

    which = 'pulls' if pulls else 'issues'

    if isinstance(period, timedelta):
        since = round_hour(datetime.utcnow() - period)
    else:
        since = period
    # The API's `since` filters on *update* time; the real close/merge-date
    # filtering happens client-side below.
    url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
    allclosed = get_paged_request(url, headers=make_auth_header())

    filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
    if pulls:
        # Only PRs actually merged in the window count.
        filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
        # filter out PRs not against main (backports)
        filtered = [i for i in filtered if i["base"]["ref"] == "main"]
    else:
        filtered = [ i for i in filtered if not is_pull_request(i) ]

    return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
    """Return a list of issues sorted by closing date date."""
    return sorted(issues, key=lambda issue: issue[field], reverse=reverse)
def report(issues, show_urls=False):
    """Summary report about a list of issues, printing number and title."""
    for i in issues:
        # Double any backticks so titles render safely in reST.
        title = i['title'].replace(u'`', u'``')
        if show_urls:
            role = 'ghpull' if 'merged_at' in i else 'ghissue'
            print(u'* :%s:`%d`: %s' % (role, i['number'], title))
        else:
            print(u'* %d: %s' % (i['number'], title))
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
if __name__ == "__main__":
print("DEPRECATE: backport_pr.py is deprecated and it is now recommended"
"to install `ghpro` from PyPI.", file=sys.stderr)
# Whether to add reST urls for all issues in printout.
show_urls = True
parser = ArgumentParser()
parser.add_argument('--since-tag', type=str,
help="The git tag to use for the starting point (typically the last major release)."
)
parser.add_argument('--milestone', type=str,
help="The GitHub milestone to use for filtering issues [optional]."
)
parser.add_argument('--days', type=int,
help="The number of days of data to summarize (use this or --since-tag)."
)
parser.add_argument('--project', type=str, default="ipython/ipython",
help="The project to summarize."
)
parser.add_argument('--links', action='store_true', default=False,
help="Include links to all closed Issues and PRs in the output."
)
opts = parser.parse_args()
tag = opts.since_tag
# set `since` from days or git tag
if opts.days:
since = datetime.utcnow() - timedelta(days=opts.days)
else:
if not tag:
tag = check_output(['git', 'describe', '--abbrev=0']).strip().decode('utf8')
cmd = ['git', 'log', '-1', '--format=%ai', tag]
tagday, tz = check_output(cmd).strip().decode('utf8').rsplit(' ', 1)
since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
h = int(tz[1:3])
m = int(tz[3:])
td = timedelta(hours=h, minutes=m)
if tz[0] == '-':
since += td
else:
since -= td
since = round_hour(since)
milestone = opts.milestone
project = opts.project
print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
if milestone:
milestone_id = get_milestone_id(project=project, milestone=milestone,
auth=True)
issues_and_pulls = get_issues_list(project=project,
milestone=milestone_id,
state='closed',
auth=True,
)
issues, pulls = split_pulls(issues_and_pulls, project=project)
else:
issues = issues_closed_since(since, project=project, pulls=False)
pulls = issues_closed_since(since, project=project, pulls=True)
# For regular reports, it's nice to show them in reverse chronological order
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
# Print summary report we can directly include into release notes.
print()
since_day = since.strftime("%Y/%m/%d")
today = datetime.today().strftime("%Y/%m/%d")
print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
print()
print("These lists are automatically generated, and may be incomplete or contain duplicates.")
print()
ncommits = 0
all_authors = []
if tag:
# print git info, in addition to GitHub info:
since_tag = tag+'..'
cmd = ['git', 'log', '--oneline', since_tag]
ncommits += len(check_output(cmd).splitlines())
author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
pr_authors = []
for pr in pulls:
pr_authors.extend(get_authors(pr))
ncommits = len(pr_authors) + ncommits - len(pulls)
author_cmd = ['git', 'check-mailmap'] + pr_authors
with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
print("We closed %d issues and merged %d pull requests." % (n_issues, n_pulls))
if milestone:
print("The full list can be seen `on GitHub <https://github.com/{project}/issues?q=milestone%3A{milestone}>`__".format(project=project,milestone=milestone)
)
print()
print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
print()
print('\n'.join(unique_authors))
if opts.links:
print()
print("GitHub issues and pull requests:")
print()
print('Pull Requests (%d):\n' % n_pulls)
report(pulls, show_urls)
print()
print('Issues (%d):\n' % n_issues)
report(issues, show_urls)
| bsd-3-clause | 16b4ab2229458e7a947e14201d6f5eae | 35.630435 | 163 | 0.56095 | 3.850548 | false | false | false | false |
ipython/ipython | IPython/core/inputtransformer2.py | 1 | 28374 | """Input transformer machinery to support IPython special syntax.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
deprecated in 7.0.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
from codeop import CommandCompiler, Compile
import re
import tokenize
from typing import List, Tuple, Optional, Any
import warnings
# Matches the run of leading spaces/tabs at the start of a line (used by
# leading_indent and find_last_indent).
_indent_re = re.compile(r'^[ \t]+')
def leading_empty_lines(lines):
    """Remove leading empty lines

    If the leading lines are empty or contain only whitespace, they will be
    removed.
    """
    if not lines:
        return lines
    # Index of the first line with real content, or None if there is none.
    first_code = next(
        (i for i, line in enumerate(lines) if line and not line.isspace()), None
    )
    if first_code is None:
        # Every line is blank: hand the input back untouched.
        return lines
    return lines[first_code:]
def leading_indent(lines):
    """Remove leading indentation.

    If the first line starts with a spaces or tabs, the same whitespace will be
    removed from each following line in the cell.
    """
    if not lines:
        return lines
    match = re.match(r"^[ \t]+", lines[0])
    if match is None:
        return lines
    prefix = match.group(0)
    width = len(prefix)
    # Only strip lines that carry the exact same indentation prefix.
    return [line[width:] if line.startswith(prefix) else line for line in lines]
class PromptStripper:
    """Remove matching input prompts from a block of input.

    Parameters
    ----------
    prompt_re : regular expression
        A regular expression matching any input prompt (including continuation,
        e.g. ``...``)
    initial_re : regular expression, optional
        A regular expression matching only the initial prompt, but not continuation.
        If no initial expression is given, prompt_re will be used everywhere.
        Used mainly for plain Python prompts (``>>>``), where the continuation prompt
        ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.

    Notes
    -----
    If initial_re and prompt_re differ,
    only initial_re will be tested against the first line.
    If any prompt is found on the first two lines,
    prompts will be stripped from the rest of the block.
    """
    def __init__(self, prompt_re, initial_re=None):
        self.prompt_re = prompt_re
        self.initial_re = initial_re if initial_re is not None else prompt_re

    def _strip(self, lines):
        # Remove at most one prompt occurrence per line.
        stripped = []
        for line in lines:
            stripped.append(self.prompt_re.sub('', line, count=1))
        return stripped

    def __call__(self, lines):
        if not lines:
            return lines
        first_has_prompt = bool(self.initial_re.match(lines[0]))
        second_has_prompt = len(lines) > 1 and bool(self.prompt_re.match(lines[1]))
        if first_has_prompt or second_has_prompt:
            return self._strip(lines)
        return lines
classic_prompt = PromptStripper(
prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
initial_re=re.compile(r'^>>>( |$)')
)
ipython_prompt = PromptStripper(
re.compile(
r"""
^( # Match from the beginning of a line, either:
# 1. First-line prompt:
((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
In\ # The 'In' of the prompt, with a space
\[\d+\]: # Command index, as displayed in the prompt
\ # With a mandatory trailing space
| # ... or ...
# 2. The three dots of the multiline prompt
\s* # All leading whitespace characters
\.{3,}: # The three (or more) dots
\ ? # With an optional trailing space
)
""",
re.VERBOSE,
)
)
def cell_magic(lines):
    """Turn a ``%%magic`` cell into a single ``run_cell_magic`` call.

    Lines that do not start a cell magic are returned unchanged.
    """
    if not lines:
        return lines
    head = lines[0]
    if not head.startswith('%%'):
        return lines
    if re.match(r'%%\w+\?', head):
        # Querying a cell magic (``%%foo?``) is handled by help_end instead.
        return lines
    name, _, rest_of_line = head[2:].rstrip().partition(' ')
    body = ''.join(lines[1:])
    call = 'get_ipython().run_cell_magic({!r}, {!r}, {!r})\n'.format(
        name, rest_of_line, body
    )
    return [call]
def _find_assign_op(token_line) -> Optional[int]:
"""Get the index of the first assignment in the line ('=' not inside brackets)
Note: We don't try to support multiple special assignment (a = b = %foo)
"""
paren_level = 0
for i, ti in enumerate(token_line):
s = ti.string
if s == '=' and paren_level == 0:
return i
if s in {'(','[','{'}:
paren_level += 1
elif s in {')', ']', '}'}:
if paren_level > 0:
paren_level -= 1
return None
def find_end_of_continued_line(lines, start_line: int):
    """Find the last line of a line explicitly extended using backslashes.

    Uses 0-indexed line numbers.  May return ``len(lines)`` when the final
    line itself ends in a backslash continuation.
    """
    current = start_line
    while lines[current].endswith('\\\n'):
        current += 1
        if current == len(lines):
            break
    return current
def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
    r"""Assemble a single line from multiple continued line pieces

    Continued lines are lines ending in ``\``, and the line following the last
    ``\`` in the block.  This uses 0-indexed line numbers; *start* is
    (lineno, colno).

    Used to allow ``%magic`` and ``!system`` commands to be continued over
    multiple lines.
    """
    first_row, first_col = start
    pieces = [lines[first_row][first_col:]]
    pieces.extend(lines[first_row + 1:end_line + 1])
    # Every piece but the last ends in "\<newline>": drop the backslash too.
    cleaned = [piece.rstrip()[:-1] for piece in pieces[:-1]]
    # The final piece only loses its trailing newline/whitespace.
    cleaned.append(pieces[-1].rstrip())
    return ' '.join(cleaned)
class TokenTransformBase:
    """Base class for transformations which examine tokens.

    Special syntax should not be transformed when it occurs inside strings or
    comments. This is hard to reliably avoid with regexes. The solution is to
    tokenise the code as Python, and recognise the special syntax in the tokens.

    IPython's special syntax is not valid Python syntax, so tokenising may go
    wrong after the special syntax starts. These classes therefore find and
    transform *one* instance of special syntax at a time into regular Python
    syntax. After each transformation, tokens are regenerated to find the next
    piece of special syntax.

    Subclasses need to implement one class method (find)
    and one regular method (transform).

    The priority attribute can select which transformation to apply if multiple
    transformers match in the same place. Lower numbers have higher priority.
    This allows "%magic?" to be turned into a help call rather than a magic call.
    """
    # Lower numbers -> higher priority (for matches in the same location)
    priority = 10
    def sortby(self):
        # Sort key used by TransformerManager.do_one_token_transform:
        # earliest match in the cell wins; priority breaks position ties.
        return self.start_line, self.start_col, self.priority
    def __init__(self, start):
        # *start* is a tokenize (row, col) pair; rows are 1-indexed there.
        self.start_line = start[0] - 1 # Shift from 1-index to 0-index
        self.start_col = start[1]
    @classmethod
    def find(cls, tokens_by_line):
        """Find one instance of special syntax in the provided tokens.

        Tokens are grouped into logical lines for convenience,
        so it is easy to e.g. look at the first token of each line.
        *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.

        This should return an instance of its class, pointing to the start
        position it has found, or None if it found no match.
        """
        raise NotImplementedError
    def transform(self, lines: List[str]):
        """Transform one instance of special syntax found by ``find()``

        Takes a list of strings representing physical lines,
        returns a similar list of transformed lines.
        """
        raise NotImplementedError
class MagicAssign(TokenTransformBase):
    """Transformer for assignments from magics (a = %foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first magic assignment (a = %foo) in the cell.
        """
        # A match is a top-level '=' immediately followed by a '%' token and
        # then a NAME token (the magic's name).
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is not None) \
                    and (len(line) >= assign_ix + 2) \
                    and (line[assign_ix+1].string == '%') \
                    and (line[assign_ix+2].type == tokenize.NAME):
                return cls(line[assign_ix+1].start)
    def transform(self, lines: List[str]):
        """Transform a magic assignment found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        # Everything before the '%' (the "a = " part) is kept verbatim.
        lhs = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        # Join backslash-continued lines into one logical magic invocation.
        rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
        assert rhs.startswith('%'), rhs
        magic_name, _, args = rhs[1:].partition(' ')
        lines_before = lines[:start_line]
        call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
        new_line = lhs + call + '\n'
        lines_after = lines[end_line+1:]
        return lines_before + [new_line] + lines_after
class SystemAssign(TokenTransformBase):
    """Transformer for assignments from system commands (a = !foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first system assignment (a = !foo) in the cell.
        """
        # '!' is not valid Python, so the tokenizer emits ERRORTOKENs after
        # the '='; scan them for the '!' that starts the shell command.
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is not None) \
                    and not line[assign_ix].line.strip().startswith('=') \
                    and (len(line) >= assign_ix + 2) \
                    and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
                ix = assign_ix + 1
                while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
                    if line[ix].string == '!':
                        return cls(line[ix].start)
                    elif not line[ix].string.isspace():
                        # Some other stray character: not a system assignment.
                        break
                    ix += 1
    def transform(self, lines: List[str]):
        """Transform a system assignment found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        # Everything before the '!' (the "a = " part) is kept verbatim.
        lhs = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
        assert rhs.startswith('!'), rhs
        cmd = rhs[1:]
        lines_before = lines[:start_line]
        call = "get_ipython().getoutput({!r})".format(cmd)
        new_line = lhs + call + '\n'
        lines_after = lines[end_line + 1:]
        return lines_before + [new_line] + lines_after
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?' # Find information about object
ESC_HELP2 = '??' # Find extra-detailed information about object
ESC_MAGIC = '%' # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';' # Quote all args as a single string, call
ESC_PAREN = '/' # Call first argument with rest of line as arguments
# Single- and double-character escapes recognised by EscapedCommand.
ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
def _make_help_call(target, esc):
"""Prepares a pinfo(2)/psearch call from a target name and the escape
(i.e. ? or ??)"""
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = arg.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return "get_ipython().run_line_magic(%r, %r)" % (t_magic_name, t_magic_arg_s)
def _tr_help(content):
    """Translate lines escaped with: ?

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if content:
        return _make_help_call(content, '?')
    return 'get_ipython().show_usage()'
def _tr_help2(content):
    """Translate lines escaped with: ??

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if content:
        return _make_help_call(content, '??')
    return 'get_ipython().show_usage()'
def _tr_magic(content):
"Translate lines escaped with a percent sign: %"
name, _, args = content.partition(' ')
return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
def _tr_quote(content):
"Translate lines escaped with a comma: ,"
name, _, args = content.partition(' ')
return '%s("%s")' % (name, '", "'.join(args.split()) )
def _tr_quote2(content):
"Translate lines escaped with a semicolon: ;"
name, _, args = content.partition(' ')
return '%s("%s")' % (name, args)
def _tr_paren(content):
"Translate lines escaped with a slash: /"
name, _, args = content.partition(' ')
return '%s(%s)' % (name, ", ".join(args.split()))
# Dispatch table: escape character -> translator producing the replacement
# Python source for the escaped line (used by EscapedCommand.transform).
tr = { ESC_SHELL  : 'get_ipython().system({!r})'.format,
       ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
       ESC_HELP   : _tr_help,
       ESC_HELP2  : _tr_help2,
       ESC_MAGIC  : _tr_magic,
       ESC_QUOTE  : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN  : _tr_paren }
class EscapedCommand(TokenTransformBase):
    """Transformer for escaped commands like %foo, !foo, or /foo"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first escaped command (%foo, !foo, etc.) in the cell.
        """
        for line in tokens_by_line:
            if not line:
                continue
            # Skip INDENT/DEDENT so escapes inside indented blocks still match.
            ix = 0
            ll = len(line)
            while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            if ix >= ll:
                continue
            if line[ix].string in ESCAPE_SINGLES:
                return cls(line[ix].start)
    def transform(self, lines):
        """Transform an escaped line found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        # Preserve the original indentation in front of the replacement call.
        indent = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        line = assemble_continued_line(lines, (start_line, start_col), end_line)
        # Two-character escapes (!!, ??) take precedence over single ones.
        if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
            escape, content = line[:2], line[2:]
        else:
            escape, content = line[:1], line[1:]
        if escape in tr:
            call = tr[escape](content)
        else:
            # Unknown escape: replace the line with an empty statement.
            call = ''
        lines_before = lines[:start_line]
        new_line = indent + call + '\n'
        lines_after = lines[end_line + 1:]
        return lines_before + [new_line] + lines_after
_help_end_re = re.compile(
r"""(%{0,2}
(?!\d)[\w*]+ # Variable name
(\.(?!\d)[\w*]+|\[-?[0-9]+\])* # .etc.etc or [0], we only support literal integers.
)
(\?\??)$ # ? or ??
""",
re.VERBOSE,
)
class HelpEnd(TokenTransformBase):
    """Transformer for help syntax: obj? and obj??"""
    # This needs to be higher priority (lower number) than EscapedCommand so
    # that inspecting magics (%foo?) works.
    priority = 5
    def __init__(self, start, q_locn):
        """*start* is the (row, col) of the target; *q_locn* that of the '?'."""
        super().__init__(start)
        self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
        self.q_col = q_locn[1]
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first help command (foo?) in the cell.
        """
        for line in tokens_by_line:
            # Last token is NEWLINE; look at last but one
            if len(line) > 2 and line[-2].string == '?':
                # Find the first token that's not INDENT/DEDENT
                ix = 0
                while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                    ix += 1
                return cls(line[ix].start, line[-2].start)
    def transform(self, lines):
        """Transform a help command found by the ``find()`` classmethod.

        Raises SyntaxError if the text before the '?' is not a plain
        (dotted/indexed) name that ``_help_end_re`` recognises.
        """
        # Reassemble the logical line(s) from target start to the '?'.
        piece = "".join(lines[self.start_line : self.q_line + 1])
        indent, content = piece[: self.start_col], piece[self.start_col :]
        lines_before = lines[: self.start_line]
        lines_after = lines[self.q_line + 1 :]
        m = _help_end_re.search(content)
        if not m:
            raise SyntaxError(content)
        # (Removed an unreachable ``assert m is not None`` that followed the
        # raise above: m is always truthy at this point.)
        target = m.group(1)
        esc = m.group(3)
        call = _make_help_call(target, esc)
        new_line = indent + call + '\n'
        return lines_before + [new_line] + lines_after
def make_tokens_by_line(lines: List[str]):
    """Tokenize a series of lines and group tokens by line.

    The tokens for a multiline Python string or expression are grouped as one
    line. All lines except the last lines should keep their line ending ('\\n',
    '\\r\\n') for this to properly work. Use `.splitlines(keeplineending=True)`
    for example when passing block of text to this function.
    """
    # NL tokens appear inside multiline expressions, but also after blank
    # lines or comments (see https://bugs.python.org/issue17061).  We group
    # the former with the current logical line and split on the latter, so we
    # track bracket depth much as tokenize does internally.
    # reexported from token on 3.7+
    NEWLINE, NL = tokenize.NEWLINE, tokenize.NL  # type: ignore
    grouped: List[List[Any]] = [[]]
    if len(lines) > 1 and not lines[0].endswith(("\n", "\r", "\r\n", "\x0b", "\x0c")):
        warnings.warn(
            "`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified",
            stacklevel=2,
        )
    depth = 0
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            grouped[-1].append(tok)
            if tok.type == NEWLINE or (tok.type == NL and depth <= 0):
                grouped.append([])
            elif tok.string in {"(", "[", "{"}:
                depth += 1
            elif tok.string in {")", "]", "}"} and depth > 0:
                depth -= 1
    except tokenize.TokenError:
        # Input ended in a multiline string or expression. That's OK for us.
        pass
    if not grouped[-1]:
        grouped.pop()
    return grouped
def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
    """Check if the depth of brackets in the list of tokens drops below 0"""
    depth = 0
    for tok in tokens:
        if tok.string in {"(", "[", "{"}:
            depth += 1
        elif tok.string in {")", "]", "}"}:
            depth -= 1
            # More closers than openers seen so far: unbalanced input.
            if depth < 0:
                return True
    return False
def show_linewise_tokens(s: str):
    """For investigation and debugging"""
    warnings.warn(
        "show_linewise_tokens is deprecated since IPython 8.6",
        DeprecationWarning,
        stacklevel=2,
    )
    # Tokenization needs a trailing newline to behave predictably.
    if not s.endswith("\n"):
        s += "\n"
    for line in make_tokens_by_line(s.splitlines(keepends=True)):
        print("Line -------")
        for tokinfo in line:
            print(" ", tokinfo)
# Arbitrary limit to prevent getting stuck in infinite loops
# (do_token_transforms raises RuntimeError once it is reached).
TRANSFORM_LOOP_LIMIT = 500
class TransformerManager:
    """Applies various transformations to a cell or code block.

    The key methods for external use are ``transform_cell()``
    and ``check_complete()``.
    """
    def __init__(self):
        # Transforms run first: prompt/indent cleanup on raw pasted input.
        self.cleanup_transforms = [
            leading_empty_lines,
            leading_indent,
            classic_prompt,
            ipython_prompt,
        ]
        # Whole-cell line-based transforms (currently just cell magics).
        self.line_transforms = [
            cell_magic,
        ]
        # Token-based transformer classes, applied one match at a time.
        self.token_transformers = [
            MagicAssign,
            SystemAssign,
            EscapedCommand,
            HelpEnd,
        ]
    def do_one_token_transform(self, lines):
        """Find and run the transform earliest in the code.

        Returns (changed, lines).

        This method is called repeatedly until changed is False, indicating
        that all available transformations are complete.

        The tokens following IPython special syntax might not be valid, so
        the transformed code is retokenised every time to identify the next
        piece of special syntax. Hopefully long code cells are mostly valid
        Python, not using lots of IPython special syntax, so this shouldn't be
        a performance issue.
        """
        tokens_by_line = make_tokens_by_line(lines)
        candidates = []
        for transformer_cls in self.token_transformers:
            transformer = transformer_cls.find(tokens_by_line)
            if transformer:
                candidates.append(transformer)
        if not candidates:
            # Nothing to transform
            return False, lines
        # Earliest match wins; priority breaks ties (see TokenTransformBase).
        ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
        for transformer in ordered_transformers:
            try:
                return True, transformer.transform(lines)
            except SyntaxError:
                # This transformer couldn't apply cleanly; try the next one.
                pass
        return False, lines
    def do_token_transforms(self, lines):
        # Repeatedly apply one token transform until nothing changes, with a
        # hard cap to guard against a transformer that never converges.
        for _ in range(TRANSFORM_LOOP_LIMIT):
            changed, lines = self.do_one_token_transform(lines)
            if not changed:
                return lines
        raise RuntimeError("Input transformation still changing after "
                           "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
    def transform_cell(self, cell: str) -> str:
        """Transforms a cell of input code"""
        if not cell.endswith('\n'):
            cell += '\n'  # Ensure the cell has a trailing newline
        lines = cell.splitlines(keepends=True)
        for transform in self.cleanup_transforms + self.line_transforms:
            lines = transform(lines)
        lines = self.do_token_transforms(lines)
        return ''.join(lines)
    def check_complete(self, cell: str):
        """Return whether a block of code is ready to execute, or should be continued

        Parameters
        ----------
        cell : string
            Python input code, which can be multiline.

        Returns
        -------
        status : str
            One of 'complete', 'incomplete', or 'invalid' if source is not a
            prefix of valid code.
        indent_spaces : int or None
            The number of spaces by which to indent the next line of code. If
            status is not 'incomplete', this is None.
        """
        # Remember if the lines ends in a new line.
        ends_with_newline = False
        for character in reversed(cell):
            if character == '\n':
                ends_with_newline = True
                break
            elif character.strip():
                break
            else:
                continue
        if not ends_with_newline:
            # Append an newline for consistent tokenization
            # See https://bugs.python.org/issue33899
            cell += '\n'
        lines = cell.splitlines(keepends=True)
        if not lines:
            return 'complete', None
        if lines[-1].endswith('\\'):
            # Explicit backslash continuation
            return 'incomplete', find_last_indent(lines)
        # Apply prompt/indent cleanup; transforms may raise for bad input.
        try:
            for transform in self.cleanup_transforms:
                if not getattr(transform, 'has_side_effects', False):
                    lines = transform(lines)
        except SyntaxError:
            return 'invalid', None
        if lines[0].startswith('%%'):
            # Special case for cell magics - completion marked by blank line
            if lines[-1].strip():
                return 'incomplete', find_last_indent(lines)
            else:
                return 'complete', None
        # Apply line and token transforms so we judge the translated code.
        try:
            for transform in self.line_transforms:
                if not getattr(transform, 'has_side_effects', False):
                    lines = transform(lines)
            lines = self.do_token_transforms(lines)
        except SyntaxError:
            return 'invalid', None
        tokens_by_line = make_tokens_by_line(lines)
        # Bail if we got one line and there are more closing parentheses than
        # the opening ones
        if (
            len(lines) == 1
            and tokens_by_line
            and has_sunken_brackets(tokens_by_line[0])
        ):
            return "invalid", None
        if not tokens_by_line:
            return 'incomplete', find_last_indent(lines)
        if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
            # We're in a multiline string or expression
            return 'incomplete', find_last_indent(lines)
        newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
        # Pop the last line which only contains DEDENTs and ENDMARKER
        last_token_line = None
        if {t.type for t in tokens_by_line[-1]} in [
            {tokenize.DEDENT, tokenize.ENDMARKER},
            {tokenize.ENDMARKER}
        ] and len(tokens_by_line) > 1:
            last_token_line = tokens_by_line.pop()
        # Strip trailing NEWLINE/COMMENT/ENDMARKER tokens from the last line.
        while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
            tokens_by_line[-1].pop()
        if not tokens_by_line[-1]:
            return 'incomplete', find_last_indent(lines)
        if tokens_by_line[-1][-1].string == ':':
            # The last line starts a block (e.g. 'if foo:')
            ix = 0
            while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            # Suggest indenting one level (4 spaces) past the block opener.
            indent = tokens_by_line[-1][ix].start[1]
            return 'incomplete', indent + 4
        if tokens_by_line[-1][0].line.endswith('\\'):
            return 'incomplete', None
        # At this point, our checks think the code is complete (or invalid).
        # We'll use codeop.compile_command to check this with the real parser
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('error', SyntaxWarning)
                res = compile_command(''.join(lines), symbol='exec')
        except (SyntaxError, OverflowError, ValueError, TypeError,
                MemoryError, SyntaxWarning):
            return 'invalid', None
        else:
            if res is None:
                return 'incomplete', find_last_indent(lines)
        # A dedent-only trailing line means a block just closed: complete if
        # the user pressed Enter (trailing newline), otherwise keep going.
        if last_token_line and last_token_line[0].type == tokenize.DEDENT:
            if ends_with_newline:
                return 'complete', None
            return 'incomplete', find_last_indent(lines)
        # If there's a blank line at the end, assume we're ready to execute
        if not lines[-1].strip():
            return 'complete', None
        return 'complete', None
def find_last_indent(lines):
    """Return the indentation width of the last line, counting tabs as 4 spaces."""
    match = re.match(r"^[ \t]+", lines[-1])
    if match is None:
        return 0
    return len(match.group(0).replace("\t", "    "))
class MaybeAsyncCompile(Compile):
    # codeop.Compile subclass that can carry extra compiler flags, e.g. to
    # allow top-level await in interactive input.
    def __init__(self, extra_flags=0):
        super().__init__()
        self.flags |= extra_flags
class MaybeAsyncCommandCompiler(CommandCompiler):
    # codeop.CommandCompiler using MaybeAsyncCompile as its compiler.
    def __init__(self, extra_flags=0):
        self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
# Allow ``await`` at the top level of a cell (PyCF_ALLOW_TOP_LEVEL_AWAIT).
_extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
# Module-level compiler used by TransformerManager.check_complete.
compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)
| bsd-3-clause | 8051f4f3e27c481bbbe29a6471eb0560 | 34.601004 | 171 | 0.581448 | 4.014999 | false | false | false | false |
ipython/ipython | examples/IPython Kernel/ipython-get-history.py | 1 | 1136 | #!/usr/bin/env python
"""Extract a session from the IPython input history.
Usage:
ipython-get-history.py sessionnumber [outputfile]
If outputfile is not given, the relevant history is written to stdout. If
outputfile has a .py extension, the translated history (without IPython's
special syntax) will be extracted.
Example:
./ipython-get-history.py 57 record.ipy
This script is a simple demonstration of HistoryAccessor. It should be possible
to build much more flexible and powerful tools to browse and pull from the
history database.
"""
import sys
from pathlib import Path
from IPython.core.history import HistoryAccessor
# Session number is the only required argument.
session_number = int(sys.argv[1])
if len(sys.argv) > 2:
    filepath = Path(sys.argv[2])
    dest = open(filepath, "w", encoding="utf-8")
    # A .py target gets the translated (plain Python) history; any other
    # extension gets the raw input with IPython special syntax intact.
    raw = not filepath.name.endswith(".py")
else:
    dest = sys.stdout
    raw = True
# NOTE(review): when no output file is given, this `with` block closes
# sys.stdout at the end — harmless for a one-shot script, but worth confirming.
with dest:
    dest.write("# coding: utf-8\n")
    # Profiles other than 'default' can be specified here with a profile= argument:
    hist = HistoryAccessor()
    for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
        dest.write(cell + '\n')
| bsd-3-clause | c529b5345687c19586f41fa4407cb6ac | 27.4 | 83 | 0.725352 | 3.664516 | false | false | false | false |
ipython/ipython | IPython/core/tests/test_inputtransformer2_line.py | 1 | 2971 | """Tests for the line-based transformers in IPython.core.inputtransformer2
Line-based transformers are the simpler ones; token-based transformers are
more complex. See test_inputtransformer2 for tests for token-based transformers.
"""
from IPython.core import inputtransformer2 as ipt2
# (sample, expected) pair: a %%-cell magic should compile to one
# get_ipython().run_cell_magic(...) call wrapping the rest of the cell.
CELL_MAGIC = ("""\
%%foo arg
body 1
body 2
""", """\
get_ipython().run_cell_magic('foo', 'arg', 'body 1\\nbody 2\\n')
""")
def test_cell_magic():
    for sample, expected in [CELL_MAGIC]:
        assert ipt2.cell_magic(sample.splitlines(keepends=True)) == expected.splitlines(
            keepends=True
        )
# Classic interpreter prompts (>>> / ...) should be stripped entirely.
CLASSIC_PROMPT = ("""\
>>> for a in range(5):
...     print(a)
""", """\
for a in range(5):
    print(a)
""")
# Continuation prompts alone (no initial >>>) should also be stripped.
CLASSIC_PROMPT_L2 = ("""\
for a in range(5):
...     print(a)
...     print(a ** 2)
""", """\
for a in range(5):
    print(a)
    print(a ** 2)
""")
def test_classic_prompt():
    for sample, expected in [CLASSIC_PROMPT, CLASSIC_PROMPT_L2]:
        assert ipt2.classic_prompt(
            sample.splitlines(keepends=True)
        ) == expected.splitlines(keepends=True)
# IPython prompts (In [n]: / ...:) should be stripped, including the
# [ins]/[nav] vi editing-mode prefixes added by prompt_toolkit.
IPYTHON_PROMPT = ("""\
In [1]: for a in range(5):
   ...:     print(a)
""", """\
for a in range(5):
    print(a)
""")
IPYTHON_PROMPT_L2 = ("""\
for a in range(5):
   ...:     print(a)
   ...:     print(a ** 2)
""", """\
for a in range(5):
    print(a)
    print(a ** 2)
""")
IPYTHON_PROMPT_VI_INS = (
    """\
[ins] In [11]: def a():
          ...:     123
          ...:
          ...: 123
""",
    """\
def a():
    123
123
""",
)
IPYTHON_PROMPT_VI_NAV = (
    """\
[nav] In [11]: def a():
          ...:     123
          ...:
          ...: 123
""",
    """\
def a():
    123
123
""",
)
def test_ipython_prompt():
    for sample, expected in [
        IPYTHON_PROMPT,
        IPYTHON_PROMPT_L2,
        IPYTHON_PROMPT_VI_INS,
        IPYTHON_PROMPT_VI_NAV,
    ]:
        assert ipt2.ipython_prompt(
            sample.splitlines(keepends=True)
        ) == expected.splitlines(keepends=True)
# Common leading indentation (spaces or tabs) on the first line should be
# removed from every line that shares the same prefix.
INDENT_SPACES = ("""\
    if True:
        a = 3
""", """\
if True:
    a = 3
""")
INDENT_TABS = ("""\
\tif True:
\t\tb = 4
""", """\
if True:
\tb = 4
""")
def test_leading_indent():
    for sample, expected in [INDENT_SPACES, INDENT_TABS]:
        assert ipt2.leading_indent(
            sample.splitlines(keepends=True)
        ) == expected.splitlines(keepends=True)
# Leading whitespace-only lines are dropped when real code follows; a cell
# made up ONLY of blank lines is returned unchanged.
LEADING_EMPTY_LINES = ("""\
    \t
if True:
    a = 3
b = 4
""", """\
if True:
    a = 3
b = 4
""")
ONLY_EMPTY_LINES = ("""\
    \t
""", """\
    \t
""")
def test_leading_empty_lines():
    for sample, expected in [LEADING_EMPTY_LINES, ONLY_EMPTY_LINES]:
        assert ipt2.leading_empty_lines(
            sample.splitlines(keepends=True)
        ) == expected.splitlines(keepends=True)
# CRLF line endings must not leak into the generated run_cell_magic call.
CRLF_MAGIC = ([
    "%%ls\r\n"
], [
    "get_ipython().run_cell_magic('ls', '', '')\n"
])
def test_crlf_magic():
    for sample, expected in [CRLF_MAGIC]:
        assert ipt2.cell_magic(sample) == expected
| bsd-3-clause | b549096b76bb1eb869bfc221305e9018 | 16.790419 | 88 | 0.539886 | 2.99496 | false | true | false | false |
ipython/ipython | IPython/core/release.py | 1 | 2179 | # -*- coding: utf-8 -*-
"""Release data for the IPython project."""
#-----------------------------------------------------------------------------
# Copyright (c) 2008, IPython Development Team.
# Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# IPython version information. An empty _version_extra corresponds to a full
# release. 'dev' as a _version_extra string means this is a development
# version
# Version components; bump these (and _version_extra) for each release.
_version_major = 8
_version_minor = 7
_version_patch = 0
_version_extra = ".dev"
# _version_extra = "rc1"
# _version_extra = ""  # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor, _version_patch]
__version__ = '.'.join(map(str, _ver))
if _version_extra:
    __version__ = __version__ + _version_extra
version = __version__  # backwards compatibility name
version_info = (_version_major, _version_minor, _version_patch, _version_extra)
# Change this when incrementing the kernel protocol version
kernel_protocol_version_info = (5, 0)
kernel_protocol_version = "%i.%i" % kernel_protocol_version_info
license = "BSD-3-Clause"
# Historical author registry: short key -> (full name, email).
authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'),
           'Janko' : ('Janko Hauser','jhauser@zscout.de'),
           'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
           'Ville' : ('Ville Vainio','vivainio@gmail.com'),
           'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
           'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'),
           'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'),
           'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'),
           'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'),
           }
author = 'The IPython Development Team'
author_email = 'ipython-dev@python.org'
| bsd-3-clause | 0a9488f0a18f1cdf92f15ac3c1618c1e | 39.351852 | 80 | 0.611749 | 3.213864 | false | false | false | false |
ipython/ipython | IPython/terminal/pt_inputhooks/osx.py | 1 | 4789 | """Inputhook for OS X
Calls NSApp / CoreFoundation APIs via ctypes.
"""
# obj-c boilerplate from appnope, used under BSD 2-clause
import ctypes
import ctypes.util
from threading import Event
# Load the Objective-C runtime and declare restype/argtypes for the functions
# we call (ctypes would otherwise assume C int for everything).
objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("objc"))  # type: ignore
void_p = ctypes.c_void_p
objc.objc_getClass.restype = void_p
objc.sel_registerName.restype = void_p
objc.objc_msgSend.restype = void_p
objc.objc_msgSend.argtypes = [void_p, void_p]
# Shorthand for sending Objective-C messages.
msg = objc.objc_msgSend
def _utf8(s):
"""ensure utf8 bytes"""
if not isinstance(s, bytes):
s = s.encode('utf8')
return s
def n(name):
    """create a selector name (for ObjC methods)"""
    # sel_registerName wants bytes; _utf8 handles str input.
    return objc.sel_registerName(_utf8(name))
def C(classname):
    """get an ObjC Class by name"""
    return objc.objc_getClass(_utf8(classname))
# end obj-c boilerplate from appnope
# CoreFoundation C-API calls we will use:
CoreFoundation = ctypes.cdll.LoadLibrary(ctypes.util.find_library("CoreFoundation"))  # type: ignore
# CFFileDescriptor: lets a file descriptor participate in a CFRunLoop.
CFFileDescriptorCreate = CoreFoundation.CFFileDescriptorCreate
CFFileDescriptorCreate.restype = void_p
CFFileDescriptorCreate.argtypes = [void_p, ctypes.c_int, ctypes.c_bool, void_p, void_p]
CFFileDescriptorGetNativeDescriptor = CoreFoundation.CFFileDescriptorGetNativeDescriptor
CFFileDescriptorGetNativeDescriptor.restype = ctypes.c_int
CFFileDescriptorGetNativeDescriptor.argtypes = [void_p]
CFFileDescriptorEnableCallBacks = CoreFoundation.CFFileDescriptorEnableCallBacks
CFFileDescriptorEnableCallBacks.restype = None
CFFileDescriptorEnableCallBacks.argtypes = [void_p, ctypes.c_ulong]
CFFileDescriptorCreateRunLoopSource = CoreFoundation.CFFileDescriptorCreateRunLoopSource
CFFileDescriptorCreateRunLoopSource.restype = void_p
CFFileDescriptorCreateRunLoopSource.argtypes = [void_p, void_p, void_p]
# Run loop management.
CFRunLoopGetCurrent = CoreFoundation.CFRunLoopGetCurrent
CFRunLoopGetCurrent.restype = void_p
CFRunLoopAddSource = CoreFoundation.CFRunLoopAddSource
CFRunLoopAddSource.restype = None
CFRunLoopAddSource.argtypes = [void_p, void_p, void_p]
CFRelease = CoreFoundation.CFRelease
CFRelease.restype = None
CFRelease.argtypes = [void_p]
CFFileDescriptorInvalidate = CoreFoundation.CFFileDescriptorInvalidate
CFFileDescriptorInvalidate.restype = None
CFFileDescriptorInvalidate.argtypes = [void_p]
# From CFFileDescriptor.h
kCFFileDescriptorReadCallBack = 1
kCFRunLoopCommonModes = void_p.in_dll(CoreFoundation, 'kCFRunLoopCommonModes')
def _NSApp():
    """Return the global NSApplication instance (NSApp)"""
    # Reset argtypes: other callers mutate the shared objc_msgSend signature.
    objc.objc_msgSend.argtypes = [void_p, void_p]
    return msg(C('NSApplication'), n('sharedApplication'))
def _wake(NSApp):
    """Wake the Application"""
    # Post a dummy application-defined event so the blocked run loop returns.
    objc.objc_msgSend.argtypes = [
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
        void_p,
    ]
    event = msg(
        C("NSEvent"),
        n(
            "otherEventWithType:location:modifierFlags:"
            "timestamp:windowNumber:context:subtype:data1:data2:"
        ),
        15,  # Type
        0,  # location
        0,  # flags
        0,  # timestamp
        0,  # window
        None,  # context
        0,  # subtype
        0,  # data1
        0,  # data2
    )
    objc.objc_msgSend.argtypes = [void_p, void_p, void_p, void_p]
    msg(NSApp, n('postEvent:atStart:'), void_p(event), True)
# Set when the read callback fires; lets inputhook tell a real wake-up from
# the app quitting on its own.
_triggered = Event()
def _input_callback(fdref, flags, info):
    """Callback to fire when there's input to be read"""
    _triggered.set()
    # One-shot: invalidate and release the CFFileDescriptor immediately.
    CFFileDescriptorInvalidate(fdref)
    CFRelease(fdref)
    NSApp = _NSApp()
    objc.objc_msgSend.argtypes = [void_p, void_p, void_p]
    msg(NSApp, n('stop:'), NSApp)
    # stop: only takes effect after the next event; post one to unblock.
    _wake(NSApp)
_c_callback_func_type = ctypes.CFUNCTYPE(None, void_p, void_p, void_p)
_c_input_callback = _c_callback_func_type(_input_callback)
def _stop_on_read(fd):
    """Register callback to stop eventloop when there's data on fd"""
    _triggered.clear()
    fdref = CFFileDescriptorCreate(None, fd, False, _c_input_callback, None)
    CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack)
    source = CFFileDescriptorCreateRunLoopSource(None, fdref, 0)
    loop = CFRunLoopGetCurrent()
    CFRunLoopAddSource(loop, source, kCFRunLoopCommonModes)
    # The run loop retains the source, so drop our reference here; fdref
    # itself is released by _input_callback when it fires.
    CFRelease(source)
def inputhook(context):
    """Inputhook for Cocoa (NSApp)"""
    NSApp = _NSApp()
    # Arrange for the NSApp run loop to stop as soon as stdin has data.
    _stop_on_read(context.fileno())
    objc.objc_msgSend.argtypes = [void_p, void_p]
    msg(NSApp, n('run'))
    if not _triggered.is_set():
        # app closed without firing callback,
        # probably due to last window being closed.
        # Run the loop manually in this case,
        # since there may be events still to process (#9734)
        CoreFoundation.CFRunLoopRun()
| bsd-3-clause | 9f6bc4d3772d385a4ab63898a2861a51 | 29.503185 | 100 | 0.704322 | 3.377292 | false | false | false | false |
ipython/ipython | IPython/lib/guisupport.py | 3 | 6305 | # coding: utf-8
"""
Support for creating GUI apps and starting event loops.
IPython's GUI integration allows interactive plotting and GUI usage in IPython
session. IPython has two different types of GUI integration:
1. The terminal based IPython supports GUI event loops through Python's
PyOS_InputHook. PyOS_InputHook is a hook that Python calls periodically
whenever raw_input is waiting for a user to type code. We implement GUI
support in the terminal by setting PyOS_InputHook to a function that
iterates the event loop for a short while. It is important to note that
in this situation, the real GUI event loop is NOT run in the normal
manner, so you can't use the normal means to detect that it is running.
2. In the two process IPython kernel/frontend, the GUI event loop is run in
the kernel. In this case, the event loop is run in the normal manner by
calling the function or method of the GUI toolkit that starts the event
loop.
In addition to starting the GUI event loops in one of these two ways, IPython
will *always* create an appropriate GUI application object when GUi
integration is enabled.
If you want your GUI apps to run in IPython you need to do two things:
1. Test to see if there is already an existing main application object. If
there is, you should use it. If there is not an existing application object
you should create one.
2. Test to see if the GUI event loop is running. If it is, you should not
start it. If the event loop is not running you may start it.
This module contains functions for each toolkit that perform these things
in a consistent manner. Because of how PyOS_InputHook runs the event loop
you cannot detect if the event loop is running using the traditional calls
(such as ``wx.GetApp.IsMainLoopRunning()`` in wxPython). If PyOS_InputHook is
set These methods will return a false negative. That is, they will say the
event loop is not running, when is actually is. To work around this limitation
we proposed the following informal protocol:
* Whenever someone starts the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``True``. This should be done
regardless of how the event loop is actually run.
* Whenever someone stops the event loop, they *must* set the ``_in_event_loop``
attribute of the main application object to ``False``.
* If you want to see if the event loop is running, you *must* use ``hasattr``
to see if ``_in_event_loop`` attribute has been set. If it is set, you
*must* use its value. If it has not been set, you can query the toolkit
in the normal manner.
* If you want GUI support and no one else has created an application or
started the event loop you *must* do this. We don't want projects to
attempt to defer these things to someone else if they themselves need it.
The functions below implement this logic for each GUI toolkit. If you need
to create custom application subclasses, you will likely have to modify this
code for your own purposes. This code can be copied into your own project
so you don't have to depend on IPython.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.core.getipython import get_ipython
#-----------------------------------------------------------------------------
# wx
#-----------------------------------------------------------------------------
def get_app_wx(*args, **kwargs):
    """Return the running wx application object, creating one if needed."""
    import wx
    existing = wx.GetApp()
    if existing is not None:
        return existing
    # Default to no stdout/stderr redirection unless the caller asked for it.
    kwargs.setdefault('redirect', False)
    return wx.PySimpleApp(*args, **kwargs)
def is_event_loop_running_wx(app=None):
    """Is the wx event loop running."""
    # New way: check attribute on shell instance
    ip = get_ipython()
    if ip is not None:
        if ip.active_eventloop and ip.active_eventloop == 'wx':
            return True
        # Fall through to checking the application, because Wx has a native way
        # to check if the event loop is running, unlike Qt.
    # Old way: check Wx application, honouring the informal _in_event_loop
    # protocol described in the module docstring before asking wx itself.
    if app is None:
        app = get_app_wx()
    if hasattr(app, '_in_event_loop'):
        return app._in_event_loop
    else:
        return app.IsMainLoopRunning()
def start_event_loop_wx(app=None):
    """Start the wx event loop in a consistent manner."""
    if app is None:
        app = get_app_wx()
    if not is_event_loop_running_wx(app):
        # Follow the informal protocol from the module docstring: mark the
        # loop as running for its whole duration, then clear the flag.
        app._in_event_loop = True
        app.MainLoop()
        app._in_event_loop = False
    else:
        app._in_event_loop = True
#-----------------------------------------------------------------------------
# qt4
#-----------------------------------------------------------------------------
def get_app_qt4(*args, **kwargs):
    """Return the existing Qt application object, creating one if needed."""
    from IPython.external.qt_for_kernel import QtGui
    existing = QtGui.QApplication.instance()
    if existing is not None:
        return existing
    if not args:
        # QApplication requires an argv-like list as its first argument.
        args = ([''],)
    return QtGui.QApplication(*args, **kwargs)
def is_event_loop_running_qt4(app=None):
    """Is the qt4 event loop running."""
    # New way: check attribute on shell instance
    ip = get_ipython()
    if ip is not None:
        # NOTE(review): unlike the wx variant above, this returns even when
        # active_eventloop is unset (a falsy value), so the application-based
        # check below is skipped whenever IPython is running -- confirm this
        # asymmetry is intentional.
        return ip.active_eventloop and ip.active_eventloop.startswith('qt')
    # Old way: check attribute on QApplication singleton
    if app is None:
        app = get_app_qt4([''])
    if hasattr(app, '_in_event_loop'):
        return app._in_event_loop
    else:
        # Does qt4 provide a other way to detect this?
        return False
def start_event_loop_qt4(app=None):
    """Start the qt4 event loop in a consistent manner."""
    if app is None:
        app = get_app_qt4([''])
    if not is_event_loop_running_qt4(app):
        # Follow the informal protocol from the module docstring: mark the
        # loop as running for its whole duration, then clear the flag.
        app._in_event_loop = True
        app.exec_()
        app._in_event_loop = False
    else:
        app._in_event_loop = True
#-----------------------------------------------------------------------------
# Tk
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# gtk
#-----------------------------------------------------------------------------
| bsd-3-clause | efafd2f4a2d0dfce963fd5951df77706 | 39.677419 | 80 | 0.625852 | 4.306694 | false | false | false | false |
ipython/ipython | IPython/external/qt_loaders.py | 1 | 11158 | """
This module contains factory functions that attempt
to return Qt submodules from the various python Qt bindings.
It also protects against double-importing Qt with different
bindings, which is unstable and likely to crash
This is used primarily by qt and qt_for_kernel, and shouldn't
be accessed directly from the outside
"""
import importlib.abc
import sys
import types
from functools import partial, lru_cache
import operator
# ### Available APIs.
# Qt6
QT_API_PYQT6 = "pyqt6"
QT_API_PYSIDE6 = "pyside6"
# Qt5
QT_API_PYQT5 = 'pyqt5'
QT_API_PYSIDE2 = 'pyside2'
# Qt4
QT_API_PYQT = "pyqt"  # Force QString API version 2
QT_API_PYQTv1 = "pyqtv1"  # Force QString API version 1
QT_API_PYSIDE = "pyside"
QT_API_PYQT_DEFAULT = "pyqtdefault"  # use system default for version 1 vs. 2
# Map each API identifier to the top-level package that provides it.
api_to_module = {
    # Qt6
    QT_API_PYQT6: "PyQt6",
    QT_API_PYSIDE6: "PySide6",
    # Qt5
    QT_API_PYQT5: "PyQt5",
    QT_API_PYSIDE2: "PySide2",
    # Qt4
    QT_API_PYSIDE: "PySide",
    QT_API_PYQT: "PyQt4",
    QT_API_PYQTv1: "PyQt4",
    # default
    QT_API_PYQT_DEFAULT: "PyQt6",
}
class ImportDenier(importlib.abc.MetaPathFinder):
    """Meta-path finder that blocks imports of rival Qt bindings.

    Once IPython has committed to a specific binding, importing any module
    registered via :meth:`forbid` raises ImportError instead of loading an
    incompatible second binding.
    """

    def __init__(self):
        self._forbidden = set()

    def forbid(self, module_name):
        # Drop any already-imported copy so the ban takes effect immediately.
        sys.modules.pop(module_name, None)
        self._forbidden.add(module_name)

    def find_spec(self, fullname, path, target=None):
        if path:
            # Submodule import; only top-level binding names are banned.
            return None
        if fullname not in self._forbidden:
            return None
        raise ImportError(
            """
    Importing %s disabled by IPython, which has
    already imported an Incompatible QT Binding: %s
    """
            % (fullname, loaded_api())
        )
# Install the guard as the very first meta-path finder so it can veto imports
# of rival Qt bindings before the regular import machinery sees them.
ID = ImportDenier()
sys.meta_path.insert(0, ID)
def commit_api(api):
    """Lock in a particular Qt API and forbid every other binding.

    After this call, importing any rival binding's package raises
    ImportError.
    """
    chosen = api_to_module[api]
    for module_name in set(api_to_module.values()) - {chosen}:
        ID.forbid(module_name)
def loaded_api():
    """Return which API is loaded, if any

    If this returns anything besides None,
    importing any other Qt binding is unsafe.

    Returns
    -------
    None, 'pyside6', 'pyqt6', 'pyside2', 'pyside', 'pyqt', 'pyqt5', 'pyqtv1'
    """
    # Probe sys.modules in preference order; only a binding whose QtCore
    # submodule is actually imported counts as "loaded".
    if sys.modules.get("PyQt6.QtCore"):
        return QT_API_PYQT6
    if sys.modules.get("PySide6.QtCore"):
        return QT_API_PYSIDE6
    if sys.modules.get("PyQt5.QtCore"):
        return QT_API_PYQT5
    if sys.modules.get("PySide2.QtCore"):
        return QT_API_PYSIDE2
    if sys.modules.get("PyQt4.QtCore"):
        # PyQt4 exposes two incompatible QString APIs; report which is active.
        return QT_API_PYQT if qtapi_version() == 2 else QT_API_PYQTv1
    if sys.modules.get("PySide.QtCore"):
        return QT_API_PYSIDE
    return None
def has_binding(api):
    """Safely check for PyQt4/5/6, PySide, PySide2 or PySide6, without
    importing submodules.

    Parameters
    ----------
    api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyqt6' | 'pyside' | 'pyside2' | 'pyside6' | 'pyqtdefault']
        Which module to check for

    Returns
    -------
    True if the relevant module appears to be importable
    """
    module_name = api_to_module[api]
    from importlib.util import find_spec
    required = ['QtCore', 'QtGui', 'QtSvg']
    if api in (QT_API_PYQT5, QT_API_PYSIDE2, QT_API_PYQT6, QT_API_PYSIDE6):
        # QT5 requires QtWidgets too
        required.append('QtWidgets')
    for submod in required:
        try:
            # find_spec locates the module without executing it, so this
            # never triggers a real (unsafe) Qt import.
            spec = find_spec('%s.%s' % (module_name, submod))
        except ImportError:
            # Package (e.g. PyQt5) not found
            return False
        else:
            if spec is None:
                # Submodule (e.g. PyQt5.QtCore) not found
                return False
    if api == QT_API_PYSIDE:
        # We can also safely check PySide version
        import PySide
        return PySide.__version_info__ >= (1, 0, 3)
    return True
def qtapi_version():
    """Return which QString API has been set, if any

    Returns
    -------
    The QString API version (1 or 2), or None if not set
    """
    # sip moved into the PyQt5 namespace as of PyQt5 5.11; try the
    # top-level module first, then the namespaced one.
    try:
        import sip
    except ImportError:
        try:
            from PyQt5 import sip
        except ImportError:
            return None
    try:
        return sip.getapi('QString')
    except ValueError:
        # No QString API has been selected yet.
        return None
def can_import(api):
    """Safely query whether an API is importable, without importing it"""
    if not has_binding(api):
        return False
    # Importing is safe only if nothing is loaded yet, or the same binding
    # (the system default resolves to PyQt6) is already in use.
    wanted = QT_API_PYQT6 if api == QT_API_PYQT_DEFAULT else api
    return loaded_api() in (wanted, None)
def import_pyqt4(version=2):
    """
    Import PyQt4

    Parameters
    ----------
    version : 1, 2, or None
        Which QString/QVariant API to use. Set to None to use the system
        default

    ImportErrors raised within this function are non-recoverable
    """
    # The new-style string API (version=2) automatically
    # converts QStrings to Unicode Python strings. Also, automatically unpacks
    # QVariants to their underlying objects.
    import sip
    # The API must be selected *before* PyQt4 itself is imported.
    if version is not None:
        sip.setapi('QString', version)
        sip.setapi('QVariant', version)
    from PyQt4 import QtGui, QtCore, QtSvg
    if QtCore.PYQT_VERSION < 0x040700:
        raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
                          QtCore.PYQT_VERSION_STR)
    # Alias PyQt-specific functions for PySide compatibility.
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
    # query for the API version (in case version == None)
    version = sip.getapi('QString')
    api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
    return QtCore, QtGui, QtSvg, api
def import_pyqt5():
    """
    Import PyQt5 and return ``(QtCore, QtGuiCompat, QtSvg, api)``.

    ImportErrors raised within this function are non-recoverable
    """
    from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
    # Alias PyQt-specific functions for PySide compatibility.
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
    # Join QtGui and QtWidgets for Qt4 compatibility.
    QtGuiCompat = types.ModuleType('QtGuiCompat')
    QtGuiCompat.__dict__.update(QtGui.__dict__)
    QtGuiCompat.__dict__.update(QtWidgets.__dict__)
    api = QT_API_PYQT5
    return QtCore, QtGuiCompat, QtSvg, api
def import_pyqt6():
    """
    Import PyQt6 and return ``(QtCore, QtGuiCompat, QtSvg, api)``.

    ImportErrors raised within this function are non-recoverable
    """
    from PyQt6 import QtCore, QtSvg, QtWidgets, QtGui
    # Alias PyQt-specific functions for PySide compatibility.
    QtCore.Signal = QtCore.pyqtSignal
    QtCore.Slot = QtCore.pyqtSlot
    # Join QtGui and QtWidgets for Qt4 compatibility.
    QtGuiCompat = types.ModuleType("QtGuiCompat")
    QtGuiCompat.__dict__.update(QtGui.__dict__)
    QtGuiCompat.__dict__.update(QtWidgets.__dict__)
    api = QT_API_PYQT6
    return QtCore, QtGuiCompat, QtSvg, api
def import_pyside():
    """
    Import PySide and return ``(QtCore, QtGui, QtSvg, api)``.

    ImportErrors raised within this function are non-recoverable
    """
    # PySide already uses the Signal/Slot names, so no aliasing is needed.
    from PySide import QtGui, QtCore, QtSvg
    return QtCore, QtGui, QtSvg, QT_API_PYSIDE
def import_pyside2():
    """
    Import PySide2 and return ``(QtCore, QtGuiCompat, QtSvg, api)``.

    ImportErrors raised within this function are non-recoverable
    """
    from PySide2 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
    # Join QtGui and QtWidgets for Qt4 compatibility.
    QtGuiCompat = types.ModuleType('QtGuiCompat')
    QtGuiCompat.__dict__.update(QtGui.__dict__)
    QtGuiCompat.__dict__.update(QtWidgets.__dict__)
    QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
    return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE2
def import_pyside6():
    """
    Import PySide6 and return ``(QtCore, QtGuiCompat, QtSvg, api)``.

    ImportErrors raised within this function are non-recoverable
    """
    from PySide6 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
    # Join QtGui and QtWidgets for Qt4 compatibility.
    QtGuiCompat = types.ModuleType("QtGuiCompat")
    QtGuiCompat.__dict__.update(QtGui.__dict__)
    QtGuiCompat.__dict__.update(QtWidgets.__dict__)
    QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
    return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE6
def load_qt(api_options):
    """
    Attempt to import Qt, given a preference list
    of permissible bindings

    It is safe to call this function multiple times.

    Parameters
    ----------
    api_options : List of strings
        The order of APIs to try. Valid items are 'pyqt6', 'pyside6',
        'pyqt5', 'pyside2', 'pyside', 'pyqt', 'pyqtv1' and 'pyqtdefault'

    Returns
    -------
    A tuple of QtCore, QtGui, QtSvg, QT_API
    The first three are the Qt modules. The last is the
    string indicating which module was loaded.

    Raises
    ------
    ImportError, if it isn't possible to import any requested
    bindings (either because they aren't installed, or because
    an incompatible library has already been installed)
    """
    loaders = {
        # Qt6
        QT_API_PYQT6: import_pyqt6,
        QT_API_PYSIDE6: import_pyside6,
        # Qt5
        QT_API_PYQT5: import_pyqt5,
        QT_API_PYSIDE2: import_pyside2,
        # Qt4
        QT_API_PYSIDE: import_pyside,
        QT_API_PYQT: import_pyqt4,
        QT_API_PYQTv1: partial(import_pyqt4, version=1),
        # default
        QT_API_PYQT_DEFAULT: import_pyqt6,
    }
    for api in api_options:
        if api not in loaders:
            raise RuntimeError(
                "Invalid Qt API %r, valid values are: %s" %
                (api, ", ".join(["%r" % k for k in loaders.keys()])))
        if not can_import(api):
            continue
        # cannot safely recover from an ImportError during this
        result = loaders[api]()
        api = result[-1]  # changed if api = QT_API_PYQT_DEFAULT
        commit_api(api)
        return result
    else:
        # Nothing requested could be imported.  Report availability of every
        # supported binding -- including the Qt6 ones this loader prefers,
        # which the previous message omitted.
        raise ImportError(
            """
    Could not load requested Qt binding. Please ensure that
    PyQt6, PySide6, PyQt5, PySide2, PyQt4 >= 4.7 or PySide >= 1.0.3
    is available, and only one is imported per session.

    Currently-imported Qt library:                              %r
    PyQt6 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
    PySide6 installed:                                          %s
    PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
    PySide2 installed:                                          %s
    PyQt4 available (requires QtCore, QtGui, QtSvg):            %s
    PySide >= 1.0.3 installed:                                  %s
    Tried to load:                                              %r
    """
            % (
                loaded_api(),
                has_binding(QT_API_PYQT6),
                has_binding(QT_API_PYSIDE6),
                has_binding(QT_API_PYQT5),
                has_binding(QT_API_PYSIDE2),
                has_binding(QT_API_PYQT),
                has_binding(QT_API_PYSIDE),
                api_options,
            )
        )
def enum_factory(QT_API, QtCore):
    """Construct an enum helper to account for PyQt5 <-> PyQt6 changes.

    Returns a memoized function mapping a dotted enum path (written in the
    fully-qualified PyQt6 form) to the enum object in the loaded binding.
    """
    @lru_cache(None)
    def _enum(name):
        # foo.bar.Enum.Entry (PyQt6) <=> foo.bar.Entry (non-PyQt6).
        return operator.attrgetter(
            name if QT_API == QT_API_PYQT6 else name.rpartition(".")[0]
        )(sys.modules[QtCore.__package__])
    return _enum
| bsd-3-clause | 7548a9e8544ff5e72ca3f95595175ee8 | 26.964912 | 83 | 0.6209 | 3.67402 | false | false | false | false |
ipython/ipython | IPython/terminal/pt_inputhooks/wx.py | 2 | 7132 | """Enable wxPython to be used interactively in prompt_toolkit
"""
import sys
import signal
import time
from timeit import default_timer as clock
import wx
def ignore_keyboardinterrupts(func):
    """Decorator which causes KeyboardInterrupt exceptions to be ignored during
    execution of the decorated function.

    This is used by the inputhook functions to handle the event where the user
    presses CTRL+C while IPython is idle, and the inputhook loop is running. In
    this case, we want to ignore interrupts.

    Unlike a bare try/except wrapper, the decorated function's return value
    is propagated (the inputhooks conventionally return 0) and its metadata
    (``__name__``, ``__doc__``) is preserved via ``functools.wraps``.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyboardInterrupt:
            # Swallow the interrupt: the user hit Ctrl-C while idle and the
            # hook should stop quietly rather than propagate.
            return None
    return wrapper
@ignore_keyboardinterrupts
def inputhook_wx1(context):
    """Run the wx event loop by processing pending events only.

    This approach seems to work, but its performance is not great as it
    relies on having PyOS_InputHook called regularly.
    """
    app = wx.GetApp()
    if app is not None:
        assert wx.Thread_IsMain()
        # Make a temporary event loop and process system events until
        # there are no more waiting, then allow idle events (which
        # will also deal with pending or posted wx events.)
        evtloop = wx.EventLoop()
        ea = wx.EventLoopActivator(evtloop)
        while evtloop.Pending():
            evtloop.Dispatch()
        app.ProcessIdle()
        # Deactivate the temporary loop by dropping the activator.
        del ea
    return 0
class EventLoopTimer(wx.Timer):
    """wx.Timer that invokes a fixed zero-argument callable on each tick."""
    def __init__(self, func):
        # func: callable run from Notify() on every timer notification.
        self.func = func
        wx.Timer.__init__(self)
    def Notify(self):
        self.func()
class EventLoopRunner(object):
    """Run a wx event loop until stdin becomes ready.

    A wx.Timer polls ``input_is_ready`` every ``time`` milliseconds and
    exits the event loop as soon as input is waiting.
    """
    def Run(self, time, input_is_ready):
        # time: polling period in milliseconds.
        # input_is_ready: callable returning True once stdin has data.
        self.input_is_ready = input_is_ready
        self.evtloop = wx.EventLoop()
        self.timer = EventLoopTimer(self.check_stdin)
        self.timer.Start(time)
        self.evtloop.Run()
    def check_stdin(self):
        # Timer callback: stop polling and leave the loop when input shows up.
        if self.input_is_ready():
            self.timer.Stop()
            self.evtloop.Exit()
@ignore_keyboardinterrupts
def inputhook_wx2(context):
    """Run the wx event loop, polling for stdin.

    This version runs the wx eventloop for an undetermined amount of time,
    during which it periodically checks to see if anything is ready on
    stdin.  If anything is ready on stdin, the event loop exits.

    The argument to elr.Run controls how often the event loop looks at stdin.
    This determines the responsiveness at the keyboard.  A setting of 1000
    enables a user to type at most 1 char per second.  I have found that a
    setting of 10 gives good keyboard response.  We can shorten it further,
    but eventually performance would suffer from calling select/kbhit too
    often.
    """
    app = wx.GetApp()
    if app is not None:
        assert wx.Thread_IsMain()
        elr = EventLoopRunner()
        # As this time is made shorter, keyboard response improves, but idle
        # CPU load goes up.  10 ms seems like a good compromise.
        elr.Run(time=10,  # CHANGE time here to control polling interval
                input_is_ready=context.input_is_ready)
    return 0
@ignore_keyboardinterrupts
def inputhook_wx3(context):
    """Run the wx event loop by processing pending events only.

    This is like inputhook_wx1, but it keeps processing pending events
    until stdin is ready.  After processing all pending events, a call to
    time.sleep is inserted.  This is needed, otherwise, CPU usage is at 100%.
    This sleep time should be tuned though for best performance.
    """
    app = wx.GetApp()
    if app is not None:
        assert wx.Thread_IsMain()
        # The import of wx on Linux sets the handler for signal.SIGINT
        # to 0.  This is a bug in wx or gtk.  We fix by just setting it
        # back to the Python default.
        if not callable(signal.getsignal(signal.SIGINT)):
            signal.signal(signal.SIGINT, signal.default_int_handler)
        evtloop = wx.EventLoop()
        ea = wx.EventLoopActivator(evtloop)
        t = clock()
        while not context.input_is_ready():
            while evtloop.Pending():
                # Events are flowing; reset the idle timer.
                t = clock()
                evtloop.Dispatch()
            app.ProcessIdle()
            # We need to sleep at this point to keep the idle CPU load
            # low.  However, if sleep to long, GUI response is poor.  As
            # a compromise, we watch how often GUI events are being processed
            # and switch between a short and long sleep time.  Here are some
            # stats useful in helping to tune this.
            # time    CPU load
            # 0.001   13%
            # 0.005    3%
            # 0.01     1.5%
            # 0.05     0.5%
            used_time = clock() - t
            if used_time > 10.0:
                # print 'Sleep for 1 s'  # dbg
                time.sleep(1.0)
            elif used_time > 0.1:
                # Few GUI events coming in, so we can sleep longer
                # print 'Sleep for 0.05 s'  # dbg
                time.sleep(0.05)
            else:
                # Many GUI events coming in, so sleep only very little
                time.sleep(0.001)
        del ea
    return 0
@ignore_keyboardinterrupts
def inputhook_wxphoenix(context):
    """Run the wx event loop until the user provides more input.

    This input hook is suitable for use with wxPython >= 4 (a.k.a. Phoenix).

    It uses the same approach to that used in
    ipykernel.eventloops.loop_wx.  The wx.MainLoop is executed, and a wx.Timer
    is used to periodically poll the context for input.  As soon as input is
    ready, the wx.MainLoop is stopped.
    """
    app = wx.GetApp()
    if app is None:
        return
    if context.input_is_ready():
        return
    assert wx.IsMainThread()
    # Wx uses milliseconds
    poll_interval = 100
    # Use a wx.Timer to periodically check whether input is ready - as soon as
    # it is, we exit the main loop
    timer = wx.Timer()
    def poll(ev):
        if context.input_is_ready():
            timer.Stop()
            app.ExitMainLoop()
    timer.Start(poll_interval)
    timer.Bind(wx.EVT_TIMER, poll)
    # The import of wx on Linux sets the handler for signal.SIGINT to 0.  This
    # is a bug in wx or gtk.  We fix by just setting it back to the Python
    # default.
    if not callable(signal.getsignal(signal.SIGINT)):
        signal.signal(signal.SIGINT, signal.default_int_handler)
    # The SetExitOnFrameDelete call allows us to run the wx mainloop without
    # having a frame open.
    app.SetExitOnFrameDelete(False)
    app.MainLoop()
# Get the major wx version number to figure out what input hook we should use.
major_version = 3
try:
    # Parse the major component robustly ("4.1.1" -> 4); indexing only the
    # first character would misread any future two-digit major version.
    major_version = int(wx.__version__.split(".")[0])
except Exception:
    # Unparseable version string: keep the wx3-era default above.
    pass

# Use the phoenix hook on all platforms for wxpython >= 4
if major_version >= 4:
    inputhook = inputhook_wxphoenix
# On OSX, evtloop.Pending() always returns True, regardless of there being
# any events pending. As such we can't use implementations 1 or 3 of the
# inputhook as those depend on a pending/dispatch loop.
elif sys.platform == 'darwin':
    inputhook = inputhook_wx2
else:
    inputhook = inputhook_wx3
| bsd-3-clause | ba413fa5a600fb7546b17ea8b9b05bf2 | 31.56621 | 79 | 0.640213 | 3.938156 | false | false | false | false |
ipython/ipython | IPython/core/magics/basic.py | 1 | 22932 | """Implementation of basic magic functions."""
from logging import error
import io
import os
from pprint import pformat
import sys
from warnings import warn
from traitlets.utils.importstring import import_item
from IPython.core import magic_arguments, page
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
from IPython.utils.text import format_screen, dedent, indent
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.ipstruct import Struct
class MagicsDisplay(object):
    """Rich-display wrapper around a MagicsManager's magic listing.

    Renders the available line and cell magics as plain text (for the
    terminal) and as a JSON-able dict (for frontends), skipping any magic
    functions passed in *ignore*.
    """
    def __init__(self, magics_manager, ignore=None):
        self.ignore = ignore or []
        self.magics_manager = magics_manager

    def _lsmagic(self):
        """The main implementation of the %lsmagic"""
        mman = self.magics_manager
        magics = mman.lsmagic()

        def listing(escape, kind):
            # One escape-prefixed, space-separated, sorted name per magic,
            # omitting anything the caller asked to ignore.
            names = sorted(name for name, fn in magics[kind].items()
                           if fn not in self.ignore)
            return escape + (' ' + escape).join(names)

        parts = [
            'Available line magics:',
            listing(magic_escapes['line'], 'line'),
            '',
            'Available cell magics:',
            listing(magic_escapes['cell'], 'cell'),
            '',
            mman.auto_status(),
        ]
        return '\n'.join(parts)

    def _repr_pretty_(self, p, cycle):
        p.text(self._lsmagic())

    def __str__(self):
        return self._lsmagic()

    def _jsonable(self):
        """turn magics dict into jsonable dict of the same structure

        replaces object instances with their class names as strings
        """
        def owner_name(fn):
            # Bound magic methods report their defining class; anything
            # else (plain functions etc.) is lumped under 'Other'.
            try:
                return fn.__self__.__class__.__name__
            except AttributeError:
                return 'Other'

        return {
            kind: {name: owner_name(fn) for name, fn in table.items()}
            for kind, table in self.magics_manager.lsmagic().items()
        }

    def _repr_json_(self):
        return self._jsonable()
@magics_class
class BasicMagics(Magics):
"""Magics that provide central IPython functionality.
These are various magics that don't fit into specific categories but that
are all part of the base 'IPython experience'."""
    @skip_doctest
    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        '-l', '--line', action='store_true',
        help="""Create a line magic alias."""
    )
    @magic_arguments.argument(
        '-c', '--cell', action='store_true',
        help="""Create a cell magic alias."""
    )
    @magic_arguments.argument(
        'name',
        help="""Name of the magic to be created."""
    )
    @magic_arguments.argument(
        'target',
        help="""Name of the existing line or cell magic."""
    )
    @magic_arguments.argument(
        '-p', '--params', default=None,
        help="""Parameters passed to the magic function."""
    )
    @line_magic
    def alias_magic(self, line=''):
        """Create an alias for an existing line or cell magic.

        Examples
        --------
        ::

            In [1]: %alias_magic t timeit
            Created `%t` as an alias for `%timeit`.
            Created `%%t` as an alias for `%%timeit`.

            In [2]: %t -n1 pass
            1 loops, best of 3: 954 ns per loop

            In [3]: %%t -n1
               ...: pass
               ...:
            1 loops, best of 3: 954 ns per loop

            In [4]: %alias_magic --cell whereami pwd
            UsageError: Cell magic function `%%pwd` not found.

            In [5]: %alias_magic --line whereami pwd
            Created `%whereami` as an alias for `%pwd`.

            In [6]: %whereami
            Out[6]: u'/home/testuser'

            In [7]: %alias_magic h history "-p -l 30" --line
            Created `%h` as an alias for `%history -l 30`.
        """
        args = magic_arguments.parse_argstring(self.alias_magic, line)
        shell = self.shell
        mman = self.shell.magics_manager
        # Accept names typed with leading escape characters (%, %%).
        escs = ''.join(magic_escapes.values())
        target = args.target.lstrip(escs)
        name = args.name.lstrip(escs)
        params = args.params
        # Unwrap params given as a single- or double-quoted string.
        if (params and
            ((params.startswith('"') and params.endswith('"'))
            or (params.startswith("'") and params.endswith("'")))):
            params = params[1:-1]
        # Find the requested magics.
        m_line = shell.find_magic(target, 'line')
        m_cell = shell.find_magic(target, 'cell')
        if args.line and m_line is None:
            raise UsageError('Line magic function `%s%s` not found.' %
                             (magic_escapes['line'], target))
        if args.cell and m_cell is None:
            raise UsageError('Cell magic function `%s%s` not found.' %
                             (magic_escapes['cell'], target))
        # If --line and --cell are not specified, default to the ones
        # that are available.
        if not args.line and not args.cell:
            if not m_line and not m_cell:
                raise UsageError(
                    'No line or cell magic with name `%s` found.' % target
                )
            args.line = bool(m_line)
            args.cell = bool(m_cell)
        params_str = "" if params is None else " " + params
        if args.line:
            mman.register_alias(name, target, 'line', params)
            print('Created `%s%s` as an alias for `%s%s%s`.' % (
                magic_escapes['line'], name,
                magic_escapes['line'], target, params_str))
        if args.cell:
            mman.register_alias(name, target, 'cell', params)
            print('Created `%s%s` as an alias for `%s%s%s`.' % (
                magic_escapes['cell'], name,
                magic_escapes['cell'], target, params_str))
    @line_magic
    def lsmagic(self, parameter_s=''):
        """List currently available magic functions."""
        # MagicsDisplay knows how to render itself as text and JSON.
        return MagicsDisplay(self.shell.magics_manager, ignore=[])
    def _magic_docs(self, brief=False, rest=False):
        """Return docstrings from magic functions.

        With ``brief`` only the first line of each docstring is used; with
        ``rest`` the output is formatted as reStructuredText.
        """
        mman = self.shell.magics_manager
        docs = mman.lsmagic_docs(brief, missing='No documentation')
        if rest:
            # reST: bold the escaped name and indent the doc as a literal block.
            format_string = '**%s%s**::\n\n%s\n\n'
        else:
            format_string = '%s%s:\n%s\n'
        return ''.join(
            [format_string % (magic_escapes['line'], fname,
                              indent(dedent(fndoc)))
             for fname, fndoc in sorted(docs['line'].items())]
            +
            [format_string % (magic_escapes['cell'], fname,
                              indent(dedent(fndoc)))
             for fname, fndoc in sorted(docs['cell'].items())]
        )
    @line_magic
    def magic(self, parameter_s=''):
        """Print information about the magic function system.

        Supported formats: -latex, -brief, -rest
        """
        mode = ''
        try:
            # The optional argument looks like '-latex'; strip the dash.
            mode = parameter_s.split()[0][1:]
        except IndexError:
            pass
        brief = (mode == 'brief')
        rest = (mode == 'rest')
        magic_docs = self._magic_docs(brief, rest)
        if mode == 'latex':
            print(self.format_latex(magic_docs))
            return
        else:
            magic_docs = format_screen(magic_docs)
        out = ["""
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. There are two kinds of magics, line-oriented and cell-oriented.
Line magics are prefixed with the % character and work much like OS
command-line calls: they get as an argument the rest of the line, where
arguments are passed without parentheses or quotes. For example, this will
time the given statement::
        %timeit range(1000)
Cell magics are prefixed with a double %%, and they are functions that get as
an argument not only the rest of the line, but also the lines below it in a
separate argument. These magics are called with two arguments: the rest of the
call line and the body of the cell, consisting of the lines below the first.
For example::
        %%timeit x = numpy.random.randn((100, 100))
        numpy.linalg.svd(x)
will time the execution of the numpy svd routine, running the assignment of x
as part of the setup phase, which is not timed.
In a line-oriented client (the terminal or Qt console IPython), starting a new
input with %% will automatically enter cell mode, and IPython will continue
reading input until a blank line is given. In the notebook, simply type the
whole cell as one entity, but keep in mind that the %% escape can only be at
the very start of the cell.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly for line
magics; cell magics always require an explicit '%%' escape. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:""",
               magic_docs,
               "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
               str(self.lsmagic()),
               ]
        # Route the (long) combined help text through the pager.
        page.page('\n'.join(out))
@line_magic
def page(self, parameter_s=''):
"""Pretty print the object and display it through a pager.
%page [options] OBJECT
If no object is given, use _ (last output).
Options:
-r: page str(object), don't pretty-print it."""
# After a function contributed by Olivier Aubert, slightly modified.
# Process options/args
opts, args = self.parse_options(parameter_s, 'r')
raw = 'r' in opts
oname = args and args or '_'
info = self.shell._ofind(oname)
if info['found']:
if raw:
txt = str(info["obj"])
else:
txt = pformat(info["obj"])
page.page(txt)
else:
print('Object `%s` not found' % oname)
@line_magic
def pprint(self, parameter_s=''):
"""Toggle pretty printing on/off."""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.pprint = bool(1 - ptformatter.pprint)
print('Pretty printing has been turned',
['OFF','ON'][ptformatter.pprint])
    @line_magic
    def colors(self, parameter_s=''):
        """Switch color scheme for prompts, info system and exception handlers.

        Currently implemented schemes: NoColor, Linux, LightBG.

        Color scheme names are not case-sensitive.

        Examples
        --------
        To get a plain black and white terminal::

            %colors nocolor
        """
        def color_switch_err(name):
            # Warn (rather than abort) when one colour subsystem fails, so
            # the remaining subsystems still get switched.
            warn('Error changing %s color schemes.\n%s' %
                 (name, sys.exc_info()[1]), stacklevel=2)
        new_scheme = parameter_s.strip()
        if not new_scheme:
            raise UsageError(
                "%colors: you must specify a color scheme. See '%colors?'")
        # local shortcut
        shell = self.shell
        # Set shell colour scheme
        try:
            shell.colors = new_scheme
            shell.refresh_style()
        except:
            color_switch_err('shell')
        # Set exception colors
        try:
            shell.InteractiveTB.set_colors(scheme = new_scheme)
            shell.SyntaxTB.set_colors(scheme = new_scheme)
        except:
            color_switch_err('exception')
        # Set info (for 'object?') colors
        if shell.color_info:
            try:
                shell.inspector.set_active_scheme(new_scheme)
            except:
                color_switch_err('object inspector')
        else:
            # Colour info disabled: force the inspector to plain output.
            shell.inspector.set_active_scheme('NoColor')
@line_magic
def xmode(self, parameter_s=''):
"""Switch modes for the exception handlers.
Valid modes: Plain, Context, Verbose, and Minimal.
If called without arguments, acts as a toggle.
When in verbose mode the value `--show` (and `--hide`)
will respectively show (or hide) frames with ``__tracebackhide__ =
True`` value set.
"""
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
shell = self.shell
if parameter_s.strip() == "--show":
shell.InteractiveTB.skip_hidden = False
return
if parameter_s.strip() == "--hide":
shell.InteractiveTB.skip_hidden = True
return
new_mode = parameter_s.strip().capitalize()
try:
shell.InteractiveTB.set_mode(mode=new_mode)
print('Exception reporting mode:',shell.InteractiveTB.mode)
except:
xmode_switch_err('user')
@line_magic
def quickref(self, arg):
""" Show a quick reference sheet """
from IPython.core.usage import quick_reference
qr = quick_reference + self._magic_docs(brief=True)
page.page(qr)
    @line_magic
    def doctest_mode(self, parameter_s=''):
        """Toggle doctest mode on and off.

        This mode is intended to make IPython behave as much as possible like a
        plain Python shell, from the perspective of how its prompts, exceptions
        and output look. This makes it easy to copy and paste parts of a
        session into doctests. It does so by:

        - Changing the prompts to the classic ``>>>`` ones.
        - Changing the exception reporting mode to 'Plain'.
        - Disabling pretty-printing of output.

        Note that IPython also supports the pasting of code snippets that have
        leading '>>>' and '...' prompts in them. This means that you can paste
        doctests from files or docstrings (even if they have leading
        whitespace), and the code will execute correctly. You can then use
        '%history -t' to see the translated history; this will give you the
        input after removal of all the leading prompts and whitespace, which
        can be pasted back into an editor.

        With these features, you can switch into this mode easily whenever you
        need to do testing and changes to doctests, without having to leave
        your existing IPython session.
        """

        # Shorthands
        shell = self.shell
        meta = shell.meta
        disp_formatter = self.shell.display_formatter
        ptformatter = disp_formatter.formatters['text/plain']
        # dstore is a data store kept in the instance metadata bag to track any
        # changes we make, so we can undo them later.
        dstore = meta.setdefault('doctest_mode',Struct())
        save_dstore = dstore.setdefault
        # save a few values we'll need to recover later; setdefault only
        # records a value the first time, so repeated toggles always restore
        # the *original* settings rather than intermediate ones.
        mode = save_dstore('mode',False)
        save_dstore('rc_pprint',ptformatter.pprint)
        save_dstore('xmode',shell.InteractiveTB.mode)
        save_dstore('rc_separate_out',shell.separate_out)
        save_dstore('rc_separate_out2',shell.separate_out2)
        save_dstore('rc_separate_in',shell.separate_in)
        save_dstore('rc_active_types',disp_formatter.active_types)

        if not mode:
            # turn on
            # Prompt separators like plain python
            shell.separate_in = ''
            shell.separate_out = ''
            shell.separate_out2 = ''

            ptformatter.pprint = False
            disp_formatter.active_types = ['text/plain']

            shell.magic('xmode Plain')
        else:
            # turn off: restore every value saved above
            shell.separate_in = dstore.rc_separate_in
            shell.separate_out = dstore.rc_separate_out
            shell.separate_out2 = dstore.rc_separate_out2

            ptformatter.pprint = dstore.rc_pprint
            disp_formatter.active_types = dstore.rc_active_types

            shell.magic('xmode ' + dstore.xmode)

        # mode here is the state before we switch; switch_doctest_mode takes
        # the mode we're switching to.
        shell.switch_doctest_mode(not mode)

        # Store new mode and inform
        dstore.mode = bool(not mode)
        mode_label = ['OFF','ON'][dstore.mode]
        print('Doctest mode is:', mode_label)
@line_magic
def gui(self, parameter_s=''):
"""Enable or disable IPython GUI event loop integration.
%gui [GUINAME]
This magic replaces IPython's threaded shells that were activated
using the (pylab/wthread/etc.) command line flags. GUI toolkits
can now be enabled at runtime and keyboard
interrupts should work without any problems. The following toolkits
are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
%gui wx # enable wxPython event loop integration
%gui qt4|qt # enable PyQt4 event loop integration
%gui qt5 # enable PyQt5 event loop integration
%gui gtk # enable PyGTK event loop integration
%gui gtk3 # enable Gtk3 event loop integration
%gui gtk4 # enable Gtk4 event loop integration
%gui tk # enable Tk event loop integration
%gui osx # enable Cocoa event loop integration
# (requires %matplotlib 1.1)
%gui # disable all event loop integration
WARNING: after any of these has been called you can simply create
an application object, but DO NOT start the event loop yourself, as
we have already handled that.
"""
opts, arg = self.parse_options(parameter_s, '')
if arg=='': arg = None
try:
return self.shell.enable_gui(arg)
except Exception as e:
# print simple error message, rather than traceback if we can't
# hook up the GUI
error(str(e))
@skip_doctest
@line_magic
def precision(self, s=''):
"""Set floating point precision for pretty printing.
Can set either integer precision or a format string.
If numpy has been imported and precision is an int,
numpy display precision will also be set, via ``numpy.set_printoptions``.
If no argument is given, defaults will be restored.
Examples
--------
::
In [1]: from math import pi
In [2]: %precision 3
Out[2]: u'%.3f'
In [3]: pi
Out[3]: 3.142
In [4]: %precision %i
Out[4]: u'%i'
In [5]: pi
Out[5]: 3
In [6]: %precision %e
Out[6]: u'%e'
In [7]: pi**10
Out[7]: 9.364805e+04
In [8]: %precision
Out[8]: u'%r'
In [9]: pi**10
Out[9]: 93648.047476082982
"""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.float_precision = s
return ptformatter.float_format
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'filename', type=str,
help='Notebook name or filename'
)
@line_magic
def notebook(self, s):
"""Export and convert IPython notebooks.
This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
"""
args = magic_arguments.parse_argstring(self.notebook, s)
outfname = os.path.expanduser(args.filename)
from nbformat import write, v4
cells = []
hist = list(self.shell.history_manager.get_range())
if(len(hist)<=1):
raise ValueError('History is empty, cannot export')
for session, execution_count, source in hist[:-1]:
cells.append(v4.new_code_cell(
execution_count=execution_count,
source=source
))
nb = v4.new_notebook(cells=cells)
with io.open(outfname, "w", encoding="utf-8") as f:
write(nb, f, version=4)
@magics_class
class AsyncMagics(BasicMagics):
    """Magics controlling IPython's autoawait (async code) integration."""

    @line_magic
    def autoawait(self, parameter_s):
        """
        Allow to change the status of the autoawait option.

        This allow you to set a specific asynchronous code runner.

        If no value is passed, print the currently used asynchronous
        integration and whether it is activated.

        It can take a number of value evaluated in the following order:

        - False/false/off deactivate autoawait integration
        - True/true/on activate autoawait integration using configured default
          loop
        - asyncio/curio/trio activate autoawait integration and use integration
          with said library.

        - `sync` turn on the pseudo-sync integration (mostly used for
          `IPython.embed()` which does not run IPython with a real eventloop
          and deactivate running asynchronous code. Turning on Asynchronous
          code with the pseudo sync loop is undefined behavior and may lead
          IPython to crash.

        If the passed parameter does not match any of the above and is a python
        identifier, get said object from user namespace and set it as the
        runner, and activate autoawait.

        If the object is a fully qualified object name, attempt to import it
        and set it as the runner, and activate autoawait.

        The exact behavior of autoawait is experimental and subject to change
        across version of IPython and Python.
        """
        param = parameter_s.strip()
        d = {True: "on", False: "off"}

        # No argument: just report the current state without changing it.
        if not param:
            print("IPython autoawait is `{}`, and set to use `{}`".format(
                d[self.shell.autoawait],
                self.shell.loop_runner
            ))
            return None

        # The checks below are evaluated in order: explicit on/off flags
        # first, then known runner names, then user-namespace objects, and
        # finally a fully-qualified import as a last resort.
        if param.lower() in ('false', 'off'):
            self.shell.autoawait = False
            return None
        if param.lower() in ('true', 'on'):
            self.shell.autoawait = True
            return None

        if param in self.shell.loop_runner_map:
            self.shell.loop_runner, self.shell.autoawait = self.shell.loop_runner_map[param]
            return None

        if param in self.shell.user_ns :
            self.shell.loop_runner = self.shell.user_ns[param]
            self.shell.autoawait = True
            return None

        # Not found anywhere else: try importing ``param`` as a fully
        # qualified object path (e.g. ``mypkg.myrunner``).
        runner = import_item(param)
        self.shell.loop_runner = runner
        self.shell.autoawait = True
| bsd-3-clause | d3913db72d4571593746b848447aba0a | 33.69289 | 112 | 0.586866 | 4.199231 | false | false | false | false |
ipython/ipython | IPython/utils/contexts.py | 1 | 1619 | # encoding: utf-8
"""Miscellaneous context managers.
"""
import warnings
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
class preserve_keys(object):
    """Context manager that restores selected keys of a dictionary.

    On entry, the current values of the given keys are recorded.  On exit,
    the dictionary is patched back: keys that existed before are restored
    to their original values, and keys that did not exist are removed.
    All other modifications made inside the ``with`` block are kept.

    Examples
    --------
    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> with preserve_keys(d, 'b', 'c', 'd'):
    ...     del d['a']
    ...     del d['b']      # will be reset to 2
    ...     d['c'] = None   # will be reset to 3
    ...     d['d'] = 4      # will be deleted
    ...     d['e'] = 5
    ...     print(sorted(d.items()))
    ...
    [('c', None), ('d', 4), ('e', 5)]
    >>> print(sorted(d.items()))
    [('b', 2), ('c', 3), ('e', 5)]
    """
    def __init__(self, dictionary, *keys):
        self.dictionary = dictionary
        self.keys = keys

    def __enter__(self):
        # Record, for each tracked key, whether to restore it (it existed)
        # or to delete it (it did not exist) when the block exits.
        d = self.dictionary
        keep = {}
        drop = []
        for key in self.keys:
            if key in d:
                keep[key] = d[key]
            else:
                drop.append(key)
        self.to_update = keep
        self.to_delete = drop

    def __exit__(self, *exc_info):
        d = self.dictionary
        for key in self.to_delete:
            d.pop(key, None)
        d.update(self.to_update)
| bsd-3-clause | 2302c4dc6208821d7c7e10068c105d19 | 25.540984 | 68 | 0.525015 | 3.646396 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/utils/track_version.py | 33 | 7516 | #!/usr/bin/env python
"""
Script to obtain version of Python modules and basic information on the
experiment setup (e.g. cpu, os), e.g.
* numpy: 1.6.1 | pylearn: a6e634b83d | pylearn2: 57a156beb0
* CPU: x86_64
* OS: Linux-2.6.35.14-106.fc14.x86_64-x86_64-with-fedora-14-Laughlin
You can also define the modules to be tracked with the environment
variable `PYLEARN2_TRACK_MODULES`. Use ":" to separate module names
between them, e.g. `PYLEARN2_TRACK_MODULES = module1:module2:module3`
By default, the following modules are tracked: pylearn2, theano, numpy, scipy
"""
__authors__ = "Olivier Dellaleau and Raul Chandias Ferrari"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Olivier Dellaleau", "Raul Chandias Ferrari"]
__license__ = "3-clause BSD"
__maintainer__ = "Raul Chandias Ferrari"
__email__ = "chandiar@iro"
import copy
import logging
import os
import platform
import socket
import subprocess
import sys
import warnings
from theano.compat import six
logger = logging.getLogger(__name__)
class MetaLibVersion(type):
    """
    Metaclass that attaches library-version information to its classes.

    Any class created with this metaclass (e.g. via
    ``__metaclass__ = MetaLibVersion`` in the class body) gets a ``libv``
    class attribute holding a :class:`LibVersion` instance, snapshotted at
    class-definition time.
    """
    def __init__(cls, name, bases, dict):
        """
        Called every time a class using this metaclass is defined.

        Parameters
        ----------
        cls : type
            The class being created.
        name : str
            Name of the class being created.
        bases : tuple
            Base classes of the class being created.
        dict : dict
            Attribute dictionary of the class being created.
        """
        type.__init__(cls, name, bases, dict)
        cls.libv = LibVersion()
class LibVersion(object):
"""
Initialize a LibVersion object that will store the version of python
packages in a dictionary (versions). The python packages that are
supported are: pylearn, pylearn2, theano, jobman, numpy and scipy.
The key for the versions dict is the name of the package and the
associated value is the version number.
"""
def __init__(self):
self.versions = {}
self.str_versions = ''
self.exp_env_info = {}
self._get_lib_versions()
self._get_exp_env_info()
def _get_exp_env_info(self):
"""
Get information about the experimental environment such as the
cpu, os and the hostname of the machine on which the experiment
is running.
"""
self.exp_env_info['host'] = socket.gethostname()
self.exp_env_info['cpu'] = platform.processor()
self.exp_env_info['os'] = platform.platform()
if 'theano' in sys.modules:
self.exp_env_info['theano_config'] = sys.modules['theano'].config
else:
self.exp_env_info['theano_config'] = None
def _get_lib_versions(self):
"""Get version of Python packages."""
repos = os.getenv('PYLEARN2_TRACK_MODULES', '')
default_repos = 'pylearn2:theano:numpy:scipy'
repos = default_repos + ":" + repos
repos = set(repos.split(':'))
for repo in repos:
try:
if repo == '':
continue
__import__(repo)
if hasattr(sys.modules[repo], '__version__'):
v = sys.modules[repo].__version__
if v != 'unknown':
self.versions[repo] = v
continue
self.versions[repo] = self._get_git_version(
self._get_module_parent_path(sys.modules[repo]))
except ImportError:
self.versions[repo] = None
known = copy.copy(self.versions)
# Put together all modules with unknown versions.
unknown = [k for k, w in known.items() if not w]
known = dict((k, w) for k, w in known.items() if w)
# Print versions.
self.str_versions = ' | '.join(
['%s:%s' % (k, w) for k, w in sorted(six.iteritems(known))] +
['%s:?' % ','.join(sorted(unknown))])
def __str__(self):
"""
Return version of the Python packages as a string.
e.g. numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0
"""
return self.str_versions
def _get_git_version(self, root):
"""
Return the git revision of a repository with the letter 'M'
appended to the revision if the repo was modified.
e.g. 10d3046e85 M
Parameters
----------
root : str
Root folder of the repository
Returns
-------
rval : str or None
A string with the revision hash, or None if it could not be
retrieved (e.g. if it is not actually a git repository)
"""
if not os.path.isdir(os.path.join(root, '.git')):
return None
cwd_backup = os.getcwd()
try:
os.chdir(root)
sub_p = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
version = sub_p.communicate()[0][0:10].strip()
sub_p = subprocess.Popen(['git', 'diff', '--name-only'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
modified = sub_p.communicate()[0]
if len(modified):
version += ' M'
return version
except Exception:
pass
finally:
try:
os.chdir(cwd_backup)
except Exception:
warnings.warn("Could not chdir back to " + cwd_backup)
def _get_hg_version(self, root):
"""Same as `get_git_version` but for a Mercurial repository."""
if not os.path.isdir(os.path.join(root, '.hg')):
return None
cwd_backup = os.getcwd()
try:
os.chdir(root)
sub_p = subprocess.Popen(['hg', 'parents'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sub_p_output = sub_p.communicate()[0]
finally:
os.chdir(cwd_backup)
first_line = sub_p_output.split('\n')[0]
# The first line looks like:
# changeset: 1517:a6e634b83d88
return first_line.split(':')[2][0:10]
def _get_module_path(self, module):
"""Return path to a given module."""
return os.path.realpath(module.__path__[0])
def _get_module_parent_path(self, module):
"""Return path to the parent directory of a given module."""
return os.path.dirname(self._get_module_path(module))
def print_versions(self):
"""
Print version of the Python packages as a string.
e.g. numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0
"""
logger.info(self.__str__())
def print_exp_env_info(self, print_theano_config=False):
"""
Return basic information about the experiment setup such as
the hostname of the machine the experiment was run on, the
operating system installed on the machine.
Parameters
----------
print_theano_config : bool, optional
If True, information about the theano configuration will be
displayed.
"""
logger.info('HOST: {0}'.format(self.exp_env_info['host']))
logger.info('CPU: {0}'.format(self.exp_env_info['cpu']))
logger.info('OS: {0}'.format(self.exp_env_info['os']))
if print_theano_config:
logger.info(self.exp_env_info['theano_config'])
| bsd-3-clause | b1220beffd88461e1426ca18e532aab1 | 33.635945 | 77 | 0.56546 | 3.989384 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/utils/iteration.py | 1 | 36271 | """
Iterators providing indices for different kinds of iteration over
datasets.
Presets:
- sequential: iterates through fixed slices of the dataset in sequence
- shuffled_sequential: iterates through a shuffled version of the dataset
in sequence
- random_slice: on each call to next, returns a slice of the dataset,
chosen uniformly at random over contiguous slices.
Samples with replacement, but still reports that
container is empty after num_examples / batch_size calls
- random_uniform: on each call to next, returns a random subset of the
dataset. Samples with replacement, but still reports that
container is empty after num_examples / batch_size calls
"""
from __future__ import division
import warnings
import numpy as np
from theano.compat import six
from pylearn2.space import CompositeSpace
from pylearn2.utils import safe_izip, wraps
from pylearn2.utils.data_specs import is_flat_specs
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_np_rng
import copy
# Make sure that the docstring uses restructured text list format.
# If you change the module-level docstring, please re-run
# pylearn2/doc/scripts/docgen.py and make sure sphinx doesn't issue any
# warnings for this file.
# This particular docstring was being frequently broken prior to the
# addition of this test.
# TODO: have nosetests run docgen.py in warning=error mode, remove
# tests for specific conditions
assert """Presets:
- sequential: iterates through fixed slices of the dataset in sequence
- s""" in __doc__
class SubsetIterator(object):
    """
    An iterator that returns slices or lists of indices into a dataset
    of a given fixed size.

    Parameters
    ----------
    dataset_size : int
        The number of examples, total, in the dataset.
    batch_size : int, optional
        The (typical/maximum) number of examples per batch. Less
        may be returned in the very last batch if batch size
        does not evenly divide `dataset_size`.
    num_batches : int, optional
        The number of batches to return. Needn't be specified
        if `batch_size` is specified. If both `batch_size` and
        `num_batches` are specified then it must be true that
        `batch_size * num_batches <= dataset_size`.
    rng : `np.random.RandomState` or seed, optional
        A `np.random.RandomState` object or the seed to be
        used to create one. A deterministic default seed is
        used otherwise.
    """
    # This breaks the doc generation, so until we figure out why, not in the
    # docstring.
    #
    # Attributes
    # ----------
    # batch_size : int
    # num_batches : int
    # num_examples : int
    # uneven : bool
    # fancy : bool
    #     `True` if this iterator produces lists of indices,
    #     `False` if it produces slices.
    # stochastic : bool
    #     `True` if this iterator makes use of the random number
    #     generator, and will therefore produce different sequences
    #     depending on the RNG state. `False` otherwise.

    def __init__(self, dataset_size, batch_size=None,
                 num_batches=None, rng=None):
        raise NotImplementedError()

    def next(self):
        """
        Retrieves description of the next batch of examples.

        Returns
        -------
        next_batch : `slice` or list of int
            An object describing the indices in the dataset of
            a batch of data. Either a `slice` object or a list
            of integers specifying individual indices of
            examples.

        Raises
        ------
        StopIteration
            When there are no more batches to return.
        """
        raise NotImplementedError()

    def __next__(self):
        # Bug fix: the original failed to return the result of next(), so
        # Python 3 iteration silently yielded None for every batch (compare
        # ForcedEvenIterator.__next__, which returns correctly).
        return self.next()

    def __iter__(self):
        return self

    # Does this return subsets that need fancy indexing? (i.e. lists
    # of indices)
    fancy = False

    # Does this class make use of random number generators?
    stochastic = False

    # Does it ensure that every batch has the same size?
    uniform_batch_size = False

    @property
    def batch_size(self):
        """
        The (maximum) number of examples in each batch.

        Returns
        -------
        batch_size : int
            The (maximum) number of examples in each batch. This is
            either as specified via the constructor, or inferred from
            the dataset size and the number of batches requested.
        """
        return self._batch_size

    @property
    def num_batches(self):
        """
        The total number of batches that the iterator will ever return.

        Returns
        -------
        num_batches : int
            The total number of batches the iterator will ever return.
            This is either as specified via the constructor, or
            inferred from the dataset size and the batch size.
        """
        return self._num_batches

    @property
    def num_examples(self):
        """
        The total number of examples over which the iterator operates.

        Returns
        -------
        num_examples : int
            The total number of examples over which the iterator operates.
            May be less than the dataset size.
        """
        return self.batch_size * self.num_batches

    @property
    def uneven(self):
        """
        Whether every batch will be the same size.

        Returns
        -------
        uneven : bool
            `True` if returned batches may be of differing sizes,
            `False` otherwise.
        """
        raise NotImplementedError()
class ForcedEvenIterator(SubsetIterator):
    """
    A class which wraps other iterators to ensure equal batch size.

    This class needs to be completed using type() metaclass, see
    Examples section to see how to use it.

    Parameters
    ----------
    dataset_size : int
        Total number of examples in the dataset
    batch_size : int or None
        The size of the batches.
        If set to None and num_batches is defined, batch_size will be
        calculated based on dataset_size.
    num_batches : int or None
        The number of batch in the dataset.
        If set to None and batch_size is defined, num_batches will be
        calculated based on dataset_size.
    *args : Variable length argument list for _base_iterator_cls
    **kwargs : Arbitrary keyword arguments for _base_iterator_cls

    Notes
    -----
    This class can not be initialized because it needs to be completed
    using type() metaclass. See Examples section for more details.

    Batches of size unequal to batch_size will be discarded. Those
    examples will never be visited.

    Examples
    --------
    >>> dct = ForcedEvenIterator.__dict__.copy()
    >>> dct["_base_iterator_cls"] = SequentialSubsetIterator
    >>> dct["fancy"] = SequentialSubsetIterator.fancy
    >>> dct["stochastic"] = SequentialSubsetIterator.stochastic
    >>>
    >>> NewForcedEvenClass = type("ForcedEvenDummyIterator",
    ...                           ForcedEvenIterator.__bases__, dct)
    >>>
    >>> even_iterator = NewForcedEvenClass(dataset_size=100,
    ...                                    batch_size=30, num_batches=None)

    For a shortcut use function as_even()

    >>> NewForcedEvenClass = as_even(SequentialSubsetIterator)
    >>> even_iterator = NewForcedEvenClass(dataset_size=100,
    ...                                    batch_size=30, num_batches=None)
    """
    def __init__(self, dataset_size, batch_size, num_batches, *args, **kwargs):
        # The three class attributes below are placeholders (None) on this
        # class itself; they must have been filled in by type() (see the
        # class docstring / as_even()) before instantiation is possible.
        if self.fancy is None or self.stochastic is None or \
           self._base_iterator_cls is None:
            raise ValueError("You must pre-define fancy, stochastic and "
                             "_base_iterator_cls arguments by creating a new "
                             "class using the metaclass type()."
                             "See function as_even() for an example.")

        # Derive whichever of batch_size / num_batches is missing, using
        # integer division so every batch can be full-sized.
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(dataset_size / num_batches)
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = int(dataset_size / batch_size)

                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches of equal size with batch_size"
                                     " %d, but %d batches were requested" %
                                     (dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = int(dataset_size / batch_size)

        self._base_iterator = self._base_iterator_cls(dataset_size, batch_size,
                                                      num_batches, *args,
                                                      **kwargs)

    # Does it ensure that every batch has the same size?
    uniform_batch_size = True

    # Does this return subsets that need fancy indexing? (i.e. lists
    # of indices)
    # Needs to be set before initialization. See Examples section in class docs
    fancy = None

    # Does this class make use of random number generators?
    # Needs to be set before initialization. See Examples section in class docs
    stochastic = None

    # base iterator that ForcedEvenIterator class wraps
    # Needs to be set before initialization. See Examples section in class docs
    _base_iterator_cls = None

    @property
    def _dataset_size(self):
        # Delegate to the wrapped iterator.
        return self._base_iterator._dataset_size

    @property
    def _batch_size(self):
        # Delegate to the wrapped iterator.
        return self._base_iterator._batch_size

    @property
    def _num_batches(self):
        # Delegate to the wrapped iterator.
        return self._base_iterator._num_batches

    @property
    def num_examples(self):
        """
        Number of examples that will be visited
        by the iterator. (May be lower than dataset_size)
        """
        # If a full num_batches worth of full batches would overrun the
        # dataset, the short final batch is discarded by next(), so one
        # batch fewer is actually visited.
        product = self.batch_size * self.num_batches

        if product > self._dataset_size:
            return self.batch_size * (self.num_batches - 1)
        else:
            return product

    def next(self):
        """
        Returns next batch of _base_iterator

        Raises
        ------
        StopException
            When _base_iterator reaches the end of the dataset

        Notes
        -----
        Uneven batches may be discarded and StopException
        will be raised without having iterated throught
        every examples.
        """
        length = -1
        # check if the batch has wrong length, throw it away
        while length != self.batch_size:
            batch = self._base_iterator.next()

            # Batches may be slices or index lists; compute length either way.
            if isinstance(batch, slice):
                length = batch.stop-batch.start
            else:
                length = len(batch)

        return batch

    def __next__(self):
        return self.next()

    @property
    @wraps(SubsetIterator.uneven, assigned=(), updated=())
    def uneven(self):
        return False
def as_even(iterator_cls):
    """
    Build a ForcedEven variant of `iterator_cls` that guarantees equal
    batch sizes.

    Parameters
    ----------
    iterator_cls : class
        An iterator class that inherits from SubsetIterator

    Returns
    -------
    class
        An iterator class named ``ForcedEven<iterator_cls name>``, based
        on ForcedEvenIterator, that wraps iterator_cls.
    """
    assert issubclass(iterator_cls, SubsetIterator)

    # Complete ForcedEvenIterator's attribute dict with the wrapped class
    # and its fancy/stochastic flags, then manufacture the new type.
    attributes = dict(ForcedEvenIterator.__dict__)
    attributes["_base_iterator_cls"] = iterator_cls
    attributes["fancy"] = iterator_cls.fancy
    attributes["stochastic"] = iterator_cls.stochastic

    return type("ForcedEven%s" % iterator_cls.__name__,
                ForcedEvenIterator.__bases__, attributes)
class SequentialSubsetIterator(SubsetIterator):
    """
    Returns mini-batches proceeding sequentially through the dataset.

    Notes
    -----
    Returns slice objects to represent ranges of indices (`fancy = False`).

    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        if rng is not None:
            raise ValueError("non-None rng argument not supported for "
                             "sequential batch iteration")
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        # Derive whichever of batch_size / num_batches is missing.
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                # Consistency fix: wrap np.ceil in int() (np.ceil returns a
                # float), matching the batch_size computation above.
                max_num_batches = int(np.ceil(self._dataset_size / batch_size))
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                # Bug fix: without int(), num_batches (and therefore the
                # num_batches/num_examples properties) was a numpy float.
                num_batches = int(np.ceil(self._dataset_size / batch_size))
        self._batch_size = batch_size
        self._num_batches = num_batches
        self._next_batch_no = 0
        self._idx = 0
        self._batch = 0

    @wraps(SubsetIterator.next, assigned=(), updated=())
    def next(self):
        if self._batch >= self.num_batches or self._idx >= self._dataset_size:
            raise StopIteration()

        # this fix the problem where dataset_size % batch_size != 0
        elif (self._idx + self._batch_size) > self._dataset_size:
            # Short final batch: return whatever remains.
            self._last = slice(self._idx, self._dataset_size)
            self._idx = self._dataset_size
            return self._last

        else:
            self._last = slice(self._idx, self._idx + self._batch_size)
            self._idx += self._batch_size
            self._batch += 1
            return self._last

    def __next__(self):
        return self.next()

    fancy = False
    stochastic = False
    uniform_batch_size = False

    @property
    @wraps(SubsetIterator.num_examples, assigned=(), updated=())
    def num_examples(self):
        product = self.batch_size * self.num_batches
        return min(product, self._dataset_size)

    @property
    @wraps(SubsetIterator.uneven, assigned=(), updated=())
    def uneven(self):
        # Uneven when the nominal batch grid overruns the dataset, i.e. the
        # final batch must be short.
        return self.batch_size * self.num_batches > self._dataset_size
class ShuffledSequentialSubsetIterator(SequentialSubsetIterator):
    """
    Randomly shuffles the example indices and then proceeds sequentially
    through the permutation.

    Notes
    -----
    Returns lists of indices (`fancy = True`).

    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """

    stochastic = True
    fancy = True
    uniform_batch_size = False

    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        # rng=None is passed to the parent on purpose: the parent rejects a
        # non-None rng, and the rng is used here for shuffling instead.
        super(ShuffledSequentialSubsetIterator, self).__init__(
            dataset_size,
            batch_size,
            num_batches,
            None
        )
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        # Shuffle the full index range once up front; iteration then walks
        # this fixed permutation sequentially.
        self._shuffled = np.arange(self._dataset_size)
        self._rng.shuffle(self._shuffled)

    @wraps(SubsetIterator.next)
    def next(self):
        if self._batch >= self.num_batches or self._idx >= self._dataset_size:
            raise StopIteration()

        # this fix the problem where dataset_size % batch_size != 0
        elif (self._idx + self._batch_size) > self._dataset_size:
            # Short final batch: return the remaining shuffled indices.
            rval = self._shuffled[self._idx: self._dataset_size]
            self._idx = self._dataset_size
            return rval

        else:
            rval = self._shuffled[self._idx: self._idx + self._batch_size]
            self._idx += self._batch_size
            self._batch += 1
            return rval

    def __next__(self):
        return self.next()
class RandomUniformSubsetIterator(SubsetIterator):
    """
    Selects minibatches of examples by drawing indices uniformly
    at random, with replacement.

    Notes
    -----
    Returns lists of indices (`fancy = True`).

    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    fancy = True
    stochastic = True
    uniform_batch_size = True

    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        # Both sizes are required: with replacement there is no natural
        # stopping point, so the caller must bound the iteration.
        if batch_size is None:
            raise ValueError("batch_size cannot be None for random uniform "
                             "iteration")
        if num_batches is None:
            raise ValueError("num_batches cannot be None for random uniform "
                             "iteration")
        self._dataset_size = dataset_size
        self._batch_size = batch_size
        self._num_batches = num_batches
        self._next_batch_no = 0

    @wraps(SubsetIterator.next)
    def next(self):
        if self._next_batch_no >= self._num_batches:
            raise StopIteration()
        # Draw batch_size indices uniformly at random, with replacement.
        self._last = self._rng.random_integers(low=0,
                                               high=self._dataset_size - 1,
                                               size=(self._batch_size,))
        self._next_batch_no += 1
        return self._last

    def __next__(self):
        return self.next()
class RandomSliceSubsetIterator(RandomUniformSubsetIterator):
    """
    Returns minibatches that are randomly selected contiguous slices in
    index space.

    Notes
    -----
    Returns slice objects to represent ranges of indices (`fancy = False`).

    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    fancy = False
    stochastic = True
    uniform_batch_size = True

    def __init__(self, dataset_size, batch_size, num_batches, rng=None):
        if batch_size is None:
            raise ValueError("batch_size cannot be None for random slice "
                             "iteration")
        if num_batches is None:
            raise ValueError("num_batches cannot be None for random slice "
                             "iteration")
        super(RandomSliceSubsetIterator, self).__init__(dataset_size,
                                                        batch_size,
                                                        num_batches, rng)
        # Highest valid start index for a full-size contiguous slice.
        self._last_start = self._dataset_size - self._batch_size
        if self._last_start < 0:
            raise ValueError("batch_size > dataset_size not supported for "
                             "random slice iteration")

    @wraps(SubsetIterator.next)
    def next(self):
        if self._next_batch_no >= self._num_batches:
            raise StopIteration()
        # Pick a random start; every slice is exactly batch_size long.
        start = self._rng.random_integers(low=0, high=self._last_start)
        self._last = slice(start, start + self._batch_size)
        self._next_batch_no += 1
        return self._last

    def __next__(self):
        return self.next()
class BatchwiseShuffledSequentialIterator(SequentialSubsetIterator):
    """
    Returns minibatches randomly, but sequential inside each minibatch.

    Notes
    -----
    Returns slice objects to represent ranges of indices (`fancy = False`).
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        # Derive whichever of batch_size / num_batches was not supplied.
        # NOTE(review): `self._dataset_size / num_batches` relies on true
        # division; under Python 2 without `from __future__ import division`
        # this would truncate before the ceil -- confirm the module's
        # imports enable true division.
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = np.ceil(self._dataset_size / batch_size)
        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        # Batches are visited in a shuffled order, but the examples inside
        # each batch stay contiguous.
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)

    @wraps(SubsetIterator.next)
    def next(self):
        if self._next_batch_no >= self._num_batches:
            raise StopIteration()
        else:
            start = self._batch_order[self._next_batch_no] * self._batch_size
            # The last batch may be truncated when the dataset size is not
            # a multiple of the batch size.
            if start + self._batch_size > self._dataset_size:
                self._last = slice(start, self._dataset_size)
            else:
                self._last = slice(start, start + self._batch_size)
            self._next_batch_no += 1
            return self._last

    def __next__(self):
        """Python 3 iteration protocol; delegates to :meth:`next`."""
        return self.next()

    fancy = False
    stochastic = True
    uniform_batch_size = False
class EvenSequencesSubsetIterator(SubsetIterator):
    """
    An iterator for datasets with sequential data (e.g. list of words)
    which returns a list of indices of sequences in the dataset which have
    the same length.

    Within one minibatch all sequences will have the same length, so it
    might return minibatches with different sizes depending on the
    distribution of the lengths of sequences in the data.

    Notes
    -----
    Returns lists of indices (`fancy = True`).

    Parameters
    ----------
    sequence_data : list of lists or ndarray of objects (ndarrays)
        The sequential data used to determine indices within the dataset such
        that within a minibatch all sequences will have same lengths.
    See :py:class:`SubsetIterator` for detailed constructor parameter
    and attribute documentation.
    """
    def __init__(self, sequence_data, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        if batch_size is None:
            raise ValueError("batch_size cannot be None for random uniform "
                             "iteration")
        if num_batches is not None:
            raise ValueError("EvenSequencesSubsetIterator doesn't support"
                             " fixed number of batches")
        if isinstance(sequence_data, list):
            self._dataset_size = len(sequence_data)
        elif isinstance(sequence_data, np.ndarray):
            self._dataset_size = sequence_data.shape[0]
        else:
            raise ValueError("sequence_data must be of type list or"
                             " ndarray")
        self._sequence_data = sequence_data
        self._batch_size = batch_size
        self.prepare()
        self.reset()

    def prepare(self):
        """
        Group example indices by sequence length, filling
        `self.len_unique`, `self.len_indices` and `self.len_counts`.
        """
        # find unique lengths in sequences
        self.lengths = [len(s) for s in self._sequence_data]
        self.len_unique = np.unique(self.lengths)
        # store the indices of sequences for each unique length,
        # and their counts
        self.len_indices = dict()
        self.len_counts = dict()
        for ll in self.len_unique:
            # `self.lengths == ll` broadcasts the Python list against the
            # numpy scalar, producing a boolean array of matching positions.
            self.len_indices[ll] = np.where(self.lengths == ll)[0]
            self.len_counts[ll] = len(self.len_indices[ll])

    def reset(self):
        """
        Begin a new epoch: reshuffle the unique lengths and the per-length
        index lists, and reset all consumption counters.
        """
        # make a copy of the number of sequences that share a specific length
        self.len_curr_counts = copy.copy(self.len_counts)
        # permute the array of unique lengths every epoch
        self.len_unique = self._rng.permutation(self.len_unique)
        self.len_indices_pos = dict()
        # save current total counts to decide when to stop iteration
        self.total_curr_counts = 0
        for ll in self.len_unique:
            # keep a pointer to where we should start picking our minibatch of
            # same length sequences
            self.len_indices_pos[ll] = 0
            # permute the array of indices of sequences with specific lengths
            # every epoch
            self.len_indices[ll] = self._rng.permutation(self.len_indices[ll])
            self.total_curr_counts += len(self.len_indices[ll])
        self.len_idx = -1

    @wraps(SubsetIterator.next)
    def next(self):
        # stop when there are no more sequences left
        if self.total_curr_counts == 0:
            self.reset()
            raise StopIteration()
        # pick a length from the permuted array of lengths
        while True:
            self.len_idx = np.mod(self.len_idx+1, len(self.len_unique))
            curr_len = self.len_unique[self.len_idx]
            if self.len_curr_counts[curr_len] > 0:
                break
        # find the position and the size of the minibatch of sequences
        # to be returned
        curr_batch_size = np.minimum(self._batch_size,
                                     self.len_curr_counts[curr_len])
        curr_pos = self.len_indices_pos[curr_len]
        # get the actual indices for the sequences
        curr_indices = self.len_indices[curr_len][curr_pos:curr_pos +
                                                  curr_batch_size]
        # update the pointer and counts of sequences in the chosen length
        self.len_indices_pos[curr_len] += curr_batch_size
        self.len_curr_counts[curr_len] -= curr_batch_size
        self.total_curr_counts -= curr_batch_size
        return curr_indices

    def __next__(self):
        """Python 3 iteration protocol; delegates to :meth:`next`."""
        return self.next()

    @property
    @wraps(SubsetIterator.num_examples, assigned=(), updated=())
    def num_examples(self):
        return len(self._sequence_data)

    fancy = True
    stochastic = True
    uniform_batch_size = False
# Registry mapping the mode strings accepted by `resolve_iterator_class`
# to their `SubsetIterator` subclasses.  The `even_*` entries wrap a base
# scheme with `as_even` so that every yielded batch has the same size.
_iteration_schemes = {
    'sequential': SequentialSubsetIterator,
    'shuffled_sequential': ShuffledSequentialSubsetIterator,
    'random_slice': RandomSliceSubsetIterator,
    'random_uniform': RandomUniformSubsetIterator,
    'batchwise_shuffled_sequential': BatchwiseShuffledSequentialIterator,
    'even_sequential': as_even(SequentialSubsetIterator),
    'even_shuffled_sequential': as_even(ShuffledSequentialSubsetIterator),
    'even_batchwise_shuffled_sequential':
        as_even(BatchwiseShuffledSequentialIterator),
    'even_sequences': EvenSequencesSubsetIterator,
}
def has_uniform_batch_size(mode):
    """
    Report whether the iteration scheme identified by `mode` always
    yields batches of identical size.

    Parameters
    ----------
    mode: string
        A string defining an iteration scheme in _iteration_schemes

    Returns
    -------
    boolean
        True if the iteration scheme has uniform batch size,
        False otherwise
    """
    scheme = resolve_iterator_class(mode)
    return scheme.uniform_batch_size
def is_stochastic(mode):
    """
    Report whether the iteration scheme identified by `mode` is
    stochastic, i.e. whether the minibatches it produces depend on a
    random number generator.

    Parameters
    ----------
    mode : string
        A string defining an iteration scheme in _iteration_schemes

    Returns
    -------
    boolean
        True if the iteration scheme is stochastic, False otherwise
    """
    return resolve_iterator_class(mode).stochastic
def resolve_iterator_class(mode):
    """
    Map textual representations of default iteration modes to classes.

    Parameters
    ----------
    mode : str or class object
        If a string, identifier string for the built-in iteration modes.
        See the module documentation of :py:mod:`pylearn2.utils.iteration`
        for a list of available modes. If a class, it is expected to
        be a class that respects the constructor and attribute interface
        defined in :py:class:`SubsetIterator`.

    Returns
    -------
    subset_iter_class : class
        The `SubsetIterator` subclass registered under `mode`, or `mode`
        itself when a class object was passed in directly.
    """
    # Guard clause: reject unknown mode strings with an explicit error.
    if isinstance(mode, six.string_types) and mode not in _iteration_schemes:
        raise ValueError("unknown iteration mode string: %s" % mode)
    if mode in _iteration_schemes:
        return _iteration_schemes[mode]
    return mode
class FiniteDatasetIterator(object):
    """
    A wrapper around subset iterators that actually retrieves
    data.

    Parameters
    ----------
    dataset : `Dataset` object
        The dataset over which to iterate.
    data_specs : tuple
        A `(space, source)` tuple. See :ref:`data_specs` for a full
        description. Must not contain nested composite spaces.
    subset_iterator : object
        An iterator object that returns slice objects or lists of
        examples, conforming to the interface specified by
        :py:class:`SubsetIterator`.
    return_tuple : bool, optional
        Always return a tuple, even if there is exactly one source
        of data being returned. Defaults to `False`.
    convert : list of callables
        A list of callables, in the same order as the sources
        in `data_specs`, that will be called on the individual
        source batches prior to any further processing.

    Notes
    -----
    See the documentation for :py:class:`SubsetIterator` for
    attribute documentation.
    The dataset should provide a `get` method which accepts a tuple of source
    identifiers and a list or slice of indexes and returns a tuple of batches
    of examples, one for each source. The old interface using `get_data` is
    still supported for the moment being.
    """
    def __init__(self, dataset, subset_iterator, data_specs=None,
                 return_tuple=False, convert=None):
        self._data_specs = data_specs
        self._dataset = dataset
        self._subset_iterator = subset_iterator
        self._return_tuple = return_tuple
        # Keep only the needed sources in self._raw_data.
        # Remember what source they correspond to in self._source
        assert is_flat_specs(data_specs)
        dataset_space, dataset_source = self._dataset.get_data_specs()
        assert is_flat_specs((dataset_space, dataset_source))
        # the dataset's data spec is either a single (space, source) pair,
        # or a pair of (non-nested CompositeSpace, non-nested tuple).
        # We could build a mapping and call flatten(..., return_tuple=True)
        # but simply putting spaces, sources and data in tuples is simpler.
        if not isinstance(dataset_source, (tuple, list)):
            dataset_source = (dataset_source,)
        if not isinstance(dataset_space, CompositeSpace):
            dataset_sub_spaces = (dataset_space,)
        else:
            dataset_sub_spaces = dataset_space.components
        assert len(dataset_source) == len(dataset_sub_spaces)
        space, source = data_specs
        if not isinstance(source, tuple):
            source = (source,)
        if not isinstance(space, CompositeSpace):
            sub_spaces = (space,)
        else:
            sub_spaces = space.components
        assert len(source) == len(sub_spaces)
        # If `dataset` is incompatible with the new interface, fall back to the
        # old interface
        if not hasattr(self._dataset, 'get'):
            all_data = self._dataset.get_data()
            if not isinstance(all_data, tuple):
                all_data = (all_data,)
            raw_data = []
            for s in source:
                try:
                    raw_data.append(all_data[dataset_source.index(s)])
                except ValueError as e:
                    msg = str(e) + '\nThe dataset does not provide '\
                                   'a source with name: ' + s + '.'
                    reraise_as(ValueError(msg))
            self._raw_data = tuple(raw_data)
        self._source = source
        self._space = sub_spaces
        # NOTE(review): when `convert` is supplied, the caller's list is
        # mutated in place below (self._convert[i] = fn) -- confirm callers
        # do not reuse the list across iterators.
        if convert is None:
            self._convert = [None for s in source]
        else:
            assert len(convert) == len(source)
            self._convert = convert
        for i, (so, sp) in enumerate(safe_izip(source, sub_spaces)):
            try:
                idx = dataset_source.index(so)
            except ValueError as e:
                msg = str(e) + '\nThe dataset does not provide '\
                               'a source with name: ' + so + '.'
                reraise_as(ValueError(msg))
            dspace = dataset_sub_spaces[idx]
            fn = self._convert[i]
            # If there is a fn, it is supposed to take care of the formatting,
            # and it should be an error if it does not. If there was no fn,
            # then the iterator will try to format using the generic
            # space-formatting functions.
            if fn is None:
                # "dspace" and "sp" have to be passed as parameters
                # to lambda, in order to capture their current value,
                # otherwise they would change in the next iteration
                # of the loop.
                fn = (lambda batch, dspace=dspace, sp=sp:
                      dspace.np_format_as(batch, sp))
            self._convert[i] = fn

    def __iter__(self):
        """An iterator is its own iterable."""
        return self

    @wraps(SubsetIterator.next)
    def next(self):
        """
        Retrieves the next batch of examples.

        Returns
        -------
        next_batch : object
            An object representing a mini-batch of data, conforming
            to the space specified in the `data_specs` constructor
            argument to this iterator. Will be a tuple if more
            than one data source was specified or if the constructor
            parameter `return_tuple` was `True`.

        Raises
        ------
        StopIteration
            When there are no more batches to return.
        """
        next_index = self._subset_iterator.next()
        # If the dataset is incompatible with the new interface, fall back to
        # the old one
        if hasattr(self._dataset, 'get'):
            rval = self._next(next_index)
        else:
            rval = self._fallback_next(next_index)
        if not self._return_tuple and len(rval) == 1:
            rval, = rval
        return rval

    def _next(self, next_index):
        """Fetch one batch per source through the dataset's `get` API."""
        return tuple(
            fn(batch) if fn else batch for batch, fn in
            safe_izip(self._dataset.get(self._source, next_index),
                      self._convert)
        )

    def _fallback_next(self, next_index):
        """Fetch one batch per source from data cached via `get_data`."""
        # TODO: handle fancy-index copies by allocating a buffer and
        # using np.take()
        return tuple(
            fn(data[next_index]) if fn else data[next_index]
            for data, fn in safe_izip(self._raw_data, self._convert)
        )

    def __next__(self):
        """Python 3 iteration protocol; delegates to :meth:`next`."""
        return self.next()

    @property
    @wraps(SubsetIterator.batch_size, assigned=(), updated=())
    def batch_size(self):
        return self._subset_iterator.batch_size

    @property
    @wraps(SubsetIterator.num_batches, assigned=(), updated=())
    def num_batches(self):
        return self._subset_iterator.num_batches

    @property
    @wraps(SubsetIterator.num_examples, assigned=(), updated=())
    def num_examples(self):
        return self._subset_iterator.num_examples

    @property
    @wraps(SubsetIterator.uneven, assigned=(), updated=())
    def uneven(self):
        return self._subset_iterator.uneven

    @property
    @wraps(SubsetIterator.stochastic, assigned=(), updated=())
    def stochastic(self):
        return self._subset_iterator.stochastic
lisa-lab/pylearn2 | pylearn2/utils/datasets.py | 44 | 9068 | """
Several utilities to evaluate an ALC on the dataset, to iterate over
minibatches from a dataset, or to merge three data with given proportions
"""
# Standard library imports
import logging
import os
import functools
from itertools import repeat
import warnings
# Third-party imports
import numpy
import scipy
from theano.compat.six.moves import reduce, xrange
import theano
try:
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
warnings.warn("Could not import some dependencies.")
# Local imports
from pylearn2.utils.rng import make_np_rng
logger = logging.getLogger(__name__)
##################################################
# 3D Visualization
##################################################
def do_3d_scatter(x, y, z, figno=None, title=None):
    """
    Draw a 3D scatterplot of the coordinates (x, y, z), optionally into a
    specific figure, and optionally give it a title.

    Parameters
    ----------
    x : WRITEME
    y : WRITEME
    z : WRITEME
    figno : WRITEME
        Figure number, forwarded to `pyplot.figure`.
    title : WRITEME
        Text used as the figure's suptitle.
    """
    figure = pyplot.figure(figno)
    axes = Axes3D(figure)
    axes.scatter(x, y, z)
    axes.set_xlabel("X")
    axes.set_ylabel("Y")
    axes.set_zlabel("Z")
    pyplot.suptitle(title)
def save_plot(repr, path, name="figure.pdf", title="features"):
    """
    Scatter-plot the first three columns of the shared variable `repr`
    and save the figure as a PDF under `path`.
    """
    # TODO: consider running a PCA when shape[1] > 3 instead of asserting
    assert repr.get_value(borrow=True).shape[1] == 3
    # Unpack the three columns as plot coordinates
    x, y, z = repr.get_value(borrow=True).T
    do_3d_scatter(x, y, z)
    # Write the produced figure to disk
    target = os.path.join(path, name)
    pyplot.savefig(target, format="pdf")
    logger.info('... figure saved: {0}'.format(target))
##################################################
# Features or examples filtering
##################################################
def filter_labels(train, label, classes=None):
    """
    Keep only the examples of `train` for which at least one label is set.

    Parameters
    ----------
    train : WRITEME
        Data matrix (numpy array, scipy sparse matrix, or theano shared).
    label : WRITEME
        Label matrix aligned with `train` (one row per example).
    classes : WRITEME
        Optional column subset of `label` to consider.

    Returns
    -------
    WRITEME
        `(train, label)` restricted to the labelled examples.
    """
    # Unwrap theano shared variables into their backing arrays.
    if isinstance(train, theano.tensor.sharedvar.SharedVariable):
        train = train.get_value(borrow=True)
    if isinstance(label, theano.tensor.sharedvar.SharedVariable):
        label = label.get_value(borrow=True)

    if not (isinstance(train, numpy.ndarray) or scipy.sparse.issparse(train)):
        raise TypeError('train must be a numpy array, a scipy sparse matrix,'
                        ' or a theano shared array')

    # Restrict labels to the requested classes, if any.
    if classes is not None:
        label = label[:, classes]

    # Sparse matrices do not support compress(); index by row instead.
    if scipy.sparse.issparse(train):
        labelled_rows = label.sum(axis=1).nonzero()[0]
        return (train[labelled_rows], label[labelled_rows])

    keep = label.any(axis=1)
    return tuple(var.compress(keep, axis=0) for var in (train, label))
def nonzero_features(data, combine=None):
    """
    Find the feature columns that are non-zero across the given datasets.

    Parameters
    ----------
    data : list of matrices
        List of data matrices, either in sparse format or not.
        They must have the same number of features (column number).
    combine : function, optional
        A function to combine elementwise which features to keep.
        Default keeps the intersection of each non-zero columns.

    Returns
    -------
    indices : ndarray object
        Indices of the nonzero features.

    Notes
    -----
    A boolean mask would be a natural return value, but scipy.sparse does
    not fully support advanced indexing, hence indices.
    """
    if combine is None:
        combine = functools.partial(reduce, numpy.logical_and)
    # Column activity per subset; assumes all entries are >= 0, which
    # holds for the sparse datasets this is used on.
    column_sums = [subset.sum(axis=0) for subset in data]
    masks = numpy.asarray(column_sums).squeeze()
    return combine(masks).nonzero()[0]
# TODO: Is this a duplicate?
def filter_nonzero(data, combine=None):
    """
    Restrict every matrix in `data` to the feature columns found non-zero
    by :func:`nonzero_features` under the given combining function.

    Parameters
    ----------
    data : list of matrices
        List of data matrices, either in sparse format or not.
        They must have the same number of features (column number).
    combine : function
        A function to combine elementwise which features to keep.
        Default keeps the intersection of each non-zero columns.

    Returns
    -------
    filtered : list of matrices
        The input matrices, each restricted to the shared columns.
    """
    keep = nonzero_features(data, combine)
    return [subset[:, keep] for subset in data]
##################################################
# Iterator object for minibatches of datasets
##################################################
class BatchIterator(object):
    """
    Builds an iterator object that can be used to go through the minibatches
    of a dataset, with respect to the given proportions in conf

    Parameters
    ----------
    dataset : WRITEME
        Sequence of three data matrices (train, valid, test) or theano
        shared variables wrapping them.
    set_proba : WRITEME
        Relative proportion with which each of the three splits is drawn.
    batch_size : WRITEME
        Number of examples per minibatch.
    seed : WRITEME
        Seed for the deterministic permutation of batch origins.
    """
    def __init__(self, dataset, set_proba, batch_size, seed=300):
        # Local shortcuts for array operations
        flo = numpy.floor
        sub = numpy.subtract
        mul = numpy.multiply
        div = numpy.divide
        mod = numpy.mod

        # Record external parameters
        self.batch_size = batch_size
        # NOTE(review): the isinstance test targets theano.Variable but
        # get_value is only defined on shared variables -- confirm callers
        # only ever pass shared variables here.
        if (isinstance(dataset[0], theano.Variable)):
            self.dataset = [set.get_value(borrow=True) for set in dataset]
        else:
            self.dataset = dataset

        # Compute maximum number of samples for one loop
        set_sizes = [set.shape[0] for set in self.dataset]
        set_batch = [float(self.batch_size) for i in xrange(3)]
        set_range = div(mul(set_proba, set_sizes), set_batch)
        # On Python 3 `map` returns a lazy iterator; materialize it as a
        # list because `set_range` is indexed below (set_range[i]).
        set_range = list(map(int, numpy.ceil(set_range)))

        # Upper bounds for each minibatch indexes
        set_limit = numpy.ceil(numpy.divide(set_sizes, set_batch))
        # Materialized for the same reason: `self.limit[chosen]` is
        # subscripted in `__iter__` and `by_index`.
        self.limit = list(map(int, set_limit))

        # Number of rows in the resulting union
        set_tsign = sub(set_limit, flo(div(set_sizes, set_batch)))
        set_tsize = mul(set_tsign, flo(div(set_range, set_limit)))

        l_trun = mul(flo(div(set_range, set_limit)), mod(set_sizes, set_batch))
        l_full = mul(sub(set_range, set_tsize), set_batch)
        self.length = sum(l_full) + sum(l_trun)

        # Random number generation using a permutation
        index_tab = []
        for i in xrange(3):
            index_tab.extend(repeat(i, set_range[i]))

        # Use a deterministic seed
        self.seed = seed
        rng = make_np_rng(seed, which_method="permutation")
        self.permut = rng.permutation(index_tab)

    def __iter__(self):
        """Generator function to iterate through all minibatches"""
        counter = [0, 0, 0]
        for chosen in self.permut:
            # Retrieve minibatch from chosen set
            index = counter[chosen]
            minibatch = self.dataset[chosen][
                index * self.batch_size:(index + 1) * self.batch_size
            ]
            # Increment the related counter
            counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
            # Return the computed minibatch
            yield minibatch

    def __len__(self):
        """Return length of the weighted union"""
        return self.length

    def by_index(self):
        """Same generator as __iter__, but yield only the chosen indexes"""
        counter = [0, 0, 0]
        for chosen in self.permut:
            index = counter[chosen]
            counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
            yield chosen, index
##################################################
# Miscellaneous
##################################################
def minibatch_map(fn, batch_size, input_data, output_data=None,
                  output_width=None):
    """
    Apply a function on input_data, one minibatch of `batch_size` rows at
    a time, writing the results into `output_data`.

    Storage for the output can be provided. If it is the case,
    it should have appropriate size.
    If output_data is not provided, then output_width should be specified.

    Parameters
    ----------
    fn : WRITEME
        Callable applied to each minibatch of rows.
    batch_size : WRITEME
        Number of rows processed per call to `fn`.
    input_data : WRITEME
        2D array of inputs, one example per row.
    output_data : WRITEME
        Optional pre-allocated output buffer with as many rows as
        `input_data`.
    output_width : WRITEME
        Number of output columns; required when `output_data` is omitted.

    Returns
    -------
    WRITEME
        The filled output buffer (allocated here if not provided).

    Raises
    ------
    ValueError
        If neither `output_data` nor `output_width` is given.
    """
    if output_width is None:
        if output_data is None:
            raise ValueError('output_data or output_width should be provided')
        output_width = output_data.shape[1]

    output_length = input_data.shape[0]
    if output_data is None:
        output_data = numpy.empty((output_length, output_width))
    else:
        assert output_data.shape[0] == input_data.shape[0], ('output_data '
            'should have the same length as input_data',
            output_data.shape[0], input_data.shape[0])

    # Builtin `range` behaves identically to the previous `xrange` shim on
    # Python 3 and removes the theano.compat dependency for this function.
    # Slicing past the end is safe: the final batch is simply smaller.
    for i in range(0, output_length, batch_size):
        output_data[i:i + batch_size] = fn(input_data[i:i + batch_size])

    return output_data
| bsd-3-clause | 8d83e4b1375f9974b47a4136c87c2eb8 | 29.02649 | 79 | 0.600794 | 4.123693 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/scripts/dbm/dbm_metrics.py | 7 | 24693 | #!/usr/bin/env python
__authors__ = "Vincent Dumoulin"
__copyright__ = "Copyright 2013, Universite de Montreal"
__credits__ = ["Guillaume Desjargins", "Vincent Dumoulin"]
__license__ = "3-clause BSD"
__maintainer__ = "Vincent Dumoulin"
"""
This script computes both an estimate of the partition function of the provided
DBM model and an estimate of the log-likelihood on the given training and test
sets.
This is guaranteed to work only for DBMs with a BinaryVector visible layer and
BinaryVectorMaxPool hidden layers with pool sizes of 1.
It uses annealed importance sampling (AIS) to estimate Z, the partition
function.
TODO: add more details, cite paper
usage: dbm_metrics.py [-h] {ais} model_path
positional arguments:
{ais} the desired metric
model_path path to the pickled DBM model
optional arguments:
-h, --help show the help message and exit
"""
import argparse
import warnings
import numpy
import logging
from theano.compat.six.moves import xrange
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import scan
import pylearn2
from pylearn2.compat import OrderedDict
from pylearn2.datasets.mnist import MNIST
from pylearn2.utils import serial
from pylearn2 import utils
floatX = theano.config.floatX
logging.basicConfig(level=logging.INFO)
rng = numpy.random.RandomState(9873242)
theano_rng = RandomStreams(rng.randint(2**30))
def _sample_even_odd(W_list, b_list, samples, beta, odd=True):
    """
    Resample, in place, the layers of `samples` at the chosen parity,
    conditioned on the current states of the remaining layers.

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is unused since a
        Pylearn2 visible layer carries no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    samples : array-like object of theano shared variables
        Current layer states; entries at the selected parity are replaced.
    beta : theano.tensor.scalar
        Inverse temperature parameter
    odd : boolean
        When True resample the odd-indexed layers, otherwise the
        even-indexed ones (defaults to odd).
    """
    start = 1 if odd else 0
    for layer in xrange(start, len(samples), 2):
        samples[layer] = sample_hi_given(samples, layer, W_list, b_list,
                                         beta)
def _activation_even_odd(W_list, b_list, samples, beta, odd=True):
    """
    Overwrite, in place, the layers of `samples` at the chosen parity with
    their pre-sigmoid activations given the remaining layers.

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM; the first entry is unused since a
        Pylearn2 visible layer carries no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    samples : array-like object of theano shared variables
        Current layer states; entries at the selected parity are replaced.
    beta : theano.tensor.scalar
        Inverse temperature parameter
    odd : boolean
        When True compute activations for the odd-indexed layers,
        otherwise for the even-indexed ones (defaults to odd).
    """
    start = 1 if odd else 0
    for layer in xrange(start, len(samples), 2):
        samples[layer] = hi_given(samples, layer, W_list, b_list, beta,
                                  apply_sigmoid=False)
def neg_sampling(W_list, b_list, nsamples, beta=1.0, pa_bias=None,
                 marginalize_odd=True, theano_rng=None):
    """
    Generate a sample from the intermediate distribution defined at inverse
    temperature 'beta', starting from state 'nsamples'. See file docstring for
    equation of p_k(h1).

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM. Its first element is ignored, since in the
        Pylearn2 framework a visible layer does not have a weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    nsamples : array-like object of theano shared variables
        Negative samples corresponding to the previous states
    beta : theano.tensor.scalar
        Inverse temperature parameter
    pa_bias : array-like object of theano shared variables
        Biases of the baseline model A, mixed in with weight (1 - beta)
    marginalize_odd : boolean
        Whether to marginalize odd layers
    theano_rng : theano RandomStreams
        Random number generator

    Returns
    -------
    new_nsamples : array-like object of symbolic matrices
        new_nsamples[i] contains new samples for i-th layer.
    """
    # There's as much layers in the DBM as there are bias vectors
    depth = len(b_list)
    new_nsamples = [nsamples[i] for i in xrange(depth)]
    # Contribution from model B, at temperature beta_k
    _sample_even_odd(W_list, b_list, new_nsamples, beta, odd=marginalize_odd)
    _activation_even_odd(W_list, b_list, new_nsamples, beta,
                         odd=not marginalize_odd)
    # Contribution from model A, at temperature (1 - beta_k).
    # The boolean index selects layer 0 or 1 depending on which parity is
    # being marginalized (bools index like 0/1 integers).
    new_nsamples[not marginalize_odd] += pa_bias * (1. - beta)
    # Loop over all layers (not being marginalized)
    for i in xrange(not marginalize_odd, depth, 2):
        new_nsamples[i] = T.nnet.sigmoid(new_nsamples[i])
        new_nsamples[i] = theano_rng.binomial(
            size=nsamples[i].get_value().shape, n=1, p=new_nsamples[i],
            dtype=floatX
        )
    return new_nsamples
def free_energy_at_beta(W_list, b_list, samples, beta, pa_bias=None,
                        marginalize_odd=True):
    """
    Compute the free-energy of the sample 'h1_sample', for model p_k(h1).

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM. Its first element is ignored, since in the
        Pylearn2 framework a visible layer does not have a weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM
    samples : array-like object of theano shared variable
        Samples from which we extract the samples of layer h1
    beta : theano.tensor.scalar
        Inverse temperature beta_k of model p_k(h1) at which to measure the
        free-energy.
    pa_bias : array-like object of theano shared variables
        Biases for the A model
    marginalize_odd : boolean
        Whether to marginalize odd layers

    Returns
    -------
    fe : symbolic variable
        Free-energy of sample 'h1_sample', at inverse temperature beta
    """
    # There's as much layers in the DBM as there are bias vectors
    depth = len(b_list)
    fe = 0.
    # Contribution of biases of the kept (non-marginalized) layers
    keep_idx = numpy.arange(not marginalize_odd, depth, 2)
    for i in keep_idx:
        fe -= T.dot(samples[i], b_list[i]) * beta
    # Contribution of the marginalized (analytically summed-out) layers:
    # each contributes a softplus of its total input at temperature beta.
    marg_idx = numpy.arange(marginalize_odd, depth, 2)
    for i in marg_idx:
        from_im1 = T.dot(samples[i-1], W_list[i]) if i >= 1 else 0.
        from_ip1 = T.dot(samples[i+1], W_list[i+1].T) if i < depth-1 else 0
        net_input = (from_im1 + from_ip1 + b_list[i]) * beta
        fe -= T.sum(T.nnet.softplus(net_input), axis=1)
    # Contribution from model A, weighted by (1 - beta)
    fe -= T.dot(samples[not marginalize_odd], pa_bias) * (1. - beta)
    return fe
def compute_log_ais_weights(batch_size, free_energy_fn, sample_fn, betas):
    """
    Accumulate the log AIS importance weights along the inverse
    temperature schedule `betas`.

    Parameters
    ----------
    batch_size : scalar
        Number of parallel annealing chains (one weight per chain).
    free_energy_fn : theano.function
        Given an inverse temperature beta_k, returns the free energy of
        the current samples as a vector.
    sample_fn : theano.function
        Given an inverse temperature beta_k, advances the chains to
        samples h1 ~ p_k(h1).
    betas : array-like object of scalars
        Increasing schedule of inverse temperatures.

    Returns
    -------
    log_ais_w : numpy vector
        Accumulated log AIS weights, one entry per chain.
    """
    log_ais_w = numpy.zeros(batch_size, dtype=floatX)
    # Walk the schedule pairwise: (beta_0, beta_1), (beta_1, beta_2), ...
    for step, (beta_k, beta_kp1) in enumerate(zip(betas[:-1], betas[1:])):
        log_ais_w += free_energy_fn(beta_k) - free_energy_fn(beta_kp1)
        sample_fn(beta_kp1)
        # Progress report every 1000 temperature steps (and at step 0).
        if step % 1e3 == 0:
            logging.info('Temperature %f ' % beta_kp1)
    return log_ais_w
def estimate_from_weights(log_ais_w):
    """
    Safely compute the log-average of the ais-weights

    Parameters
    ----------
    log_ais_w : vector of concrete values
        Vector containing log_ais_w^{(m)}.

    Returns
    -------
    dlogz : scalar
        log(Z_B) - log(Z_A).  Although a symbolic graph is built here,
        it is compiled and applied to `log_ais_w`, so a numeric value is
        returned.
    var_dlogz : scalar
        Variance of our estimator
    """
    # Utility function for safely computing log-mean of the ais weights:
    # subtracting the max before exponentiating avoids overflow.
    ais_w = T.vector()
    max_ais_w = T.max(ais_w)
    dlogz = T.log(T.mean(T.exp(ais_w - max_ais_w))) + max_ais_w
    log_mean = theano.function([ais_w], dlogz, allow_input_downcast=False)
    # Estimate the log-mean of the AIS weights
    dlogz = log_mean(log_ais_w)
    # Estimate log-variance of the AIS weights
    # VAR(log(X)) \approx VAR(X) / E(X)^2 = E(X^2)/E(X)^2 - 1
    m = numpy.max(log_ais_w)
    var_dlogz = (log_ais_w.shape[0] *
                 numpy.sum(numpy.exp(2 * (log_ais_w - m))) /
                 numpy.sum(numpy.exp(log_ais_w - m)) ** 2 - 1.)
    return dlogz, var_dlogz
def compute_log_za(b_list, pa_bias, marginalize_odd=True):
    """
    Compute the exact log partition function of the baseline model p_A(h1).

    Parameters
    ----------
    b_list : array-like object of theano shared variables
        Biases of the DBM
    pa_bias : array-like object of theano shared variables
        Biases for the A model
    marginalize_odd : boolean
        Whether the odd layers are marginalized out

    Returns
    -------
    log_za : scalar
        Log partition function of model A
    """
    # Index of the single layer clamped by model A's bias (0 or 1).
    kept_layer = int(not marginalize_odd)
    log_za = 0.
    for layer_idx, bias in enumerate(b_list):
        if layer_idx == kept_layer:
            # Biased layer: each unit contributes log(1 + exp(bias)).
            log_za += numpy.sum(numpy.log(1 + numpy.exp(pa_bias)))
        else:
            # Unbiased binary units each contribute a factor of 2.
            log_za += numpy.log(2) * bias.get_value().shape[0]
    return log_za
def compute_likelihood_given_logz(nsamples, psamples, batch_size, energy_fn,
                                  inference_fn, log_z, test_x):
    """
    Compute test set likelihood as below, where q is the variational
    approximation to the posterior p(h1,h2|v).

    ln p(v) \approx \sum_h q(h) E(v,h1,h2) + H(q) - ln Z

    See section 3.2 of DBM paper for details.

    Parameters
    ----------
    nsamples : array-like object of theano shared variables
        Negative samples
    psamples : array-like object of theano shared variables
        Positive samples
    batch_size : scalar
        Size of a batch of samples
    energy_fn : theano.function
        Function which computes the (temperature 1) energy of the samples. This
        function should return a symbolic vector.
    inference_fn : theano.function
        Inference function for DBM. Function takes a T.matrix as input (data)
        and returns a list of length 'length(b_list)', where the i-th element
        is an ndarray containing approximate samples of layer i.
    log_z : scalar
        Estimate partition function of 'model'.
    test_x : numpy.ndarray
        Test set data, in dense design matrix format.

    Returns
    -------
    likelihood : scalar
        Negative log-likelihood of test data under the model
    """
    # `i` doubles as the loop offset below; initialized in case test_x is
    # empty so the return statement still has a defined `likelihood`.
    i = 0.
    likelihood = 0
    for i in xrange(0, len(test_x), batch_size):
        # Recast data as floatX and apply preprocessing if required
        x = numpy.array(test_x[i:numpy.minimum(test_x.shape[0], i + batch_size), :], dtype=floatX)
        batch_size0 = len(x)
        if len(x) < batch_size:
            # concatenate x to have some dummy entries
            x = numpy.concatenate((x, numpy.zeros((batch_size-len(x),x.shape[1]), dtype=floatX)), axis=0)
        # Perform inference; results are written into the `psamples`
        # shared variables as a side effect.
        inference_fn(x)
        # Entropy of h(q) adds contribution to variational lower-bound.
        # The 1e-5 terms guard the logs against exact 0/1 probabilities.
        hq = 0
        for psample in psamples[1:]:
            temp = \
                - psample.get_value() * numpy.log(1e-5 + psample.get_value()) \
                - (1.-psample.get_value()) \
                * numpy.log(1. - psample.get_value() + 1e-5)
            hq += numpy.sum(temp, axis=1)
        # Copy into negative phase buffers to measure energy
        nsamples[0].set_value(x)
        for ii, psample in enumerate(psamples):
            if ii > 0:
                nsamples[ii].set_value(psample.get_value())
        # Compute sum of likelihood for current buffer; the padded dummy
        # rows are excluded via the [:batch_size0] slice.
        x_likelihood = numpy.sum((-energy_fn(1.0) + hq - log_z)[:batch_size0])
        # Perform moving average of negative likelihood
        # Divide by len(x) and not bufsize, since last buffer might be smaller
        # NOTE(review): the running-average weight uses the loop offset `i`
        # (examples seen so far) rather than a batch count -- confirm this
        # weighting is the intended streaming mean.
        likelihood = (i * likelihood + x_likelihood) / (i + batch_size0)
    return likelihood
def hi_given(samples, i, W_list, b_list, beta=1.0, apply_sigmoid=True):
    """
    Build the symbolic activation of layer `i` given all other layers.

    Parameters
    ----------
    samples : array-like object of theano shared variables
        Current state of every layer: samples[0] is the input (positive
        phase) or the persistent chain (negative phase); samples[j] the
        j-th layer's state.
    i : integer
        Index of the layer whose activation is computed.
    W_list : array-like object of theano shared variables
        DBM weight matrices; entry 0 is unused, since in the Pylearn2
        framework the visible layer has no weight matrix.
    b_list : array-like object of theano shared variables
        Biases of the DBM.
    beta : scalar
        Inverse temperature parameter used when performing AIS.
    apply_sigmoid : boolean
        When False, the raw pre-activation is returned (useful for the
        AIS estimate).

    Returns
    -------
    symbolic variable
        Mean activation (or pre-activation) of the i-th layer.
    """
    num_layers = len(samples)
    pre_act = 0.
    if i < num_layers - 1:
        # Top-down contribution from the layer above (transposed weights).
        pre_act += T.dot(samples[i + 1], W_list[i + 1].T) * beta
    if i > 0:
        # Bottom-up contribution from the layer below.
        pre_act += T.dot(samples[i - 1], W_list[i]) * beta
    # Bias term, also tempered by beta.
    pre_act += b_list[i] * beta
    return T.nnet.sigmoid(pre_act) if apply_sigmoid else pre_act
def sample_hi_given(samples, i, W_list, b_list, beta=1.0):
    """
    Sample the binary state of layer `i` given the current DBM state.

    Parameters
    ----------
    samples : array-like object of theano shared variables
        Current state of every layer (input / persistent chains).
    i : integer
        Index of the layer to sample.
    W_list : array-like object of theano shared variables
        DBM weight matrices (entry 0 unused for the visible layer).
    b_list : array-like object of theano shared variables
        Biases of the DBM.
    beta : scalar
        Inverse temperature parameter used when performing AIS.

    Returns
    -------
    symbolic variable
        Bernoulli sample of the i-th layer's state.
    """
    mean_activation = hi_given(samples, i, W_list, b_list, beta)
    # Draw one Bernoulli sample per unit, with the sigmoid mean as the
    # success probability.
    return theano_rng.binomial(size=samples[i].get_value().shape,
                               n=1,
                               p=mean_activation,
                               dtype=floatX)
def _e_step(psamples, W_list, b_list, n_steps=100, eps=1e-5):
    """
    Performs up to 'n_steps' of mean-field inference (used to compute
    positive phase statistics), stopping early once the layer states stop
    changing by more than `eps`.

    Parameters
    ----------
    psamples : array-like object of theano shared variables
        State of each layer of the DBM (during the inference process).
        psamples[0] points to the input.
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM (entry 0 unused for the visible layer).
    b_list : array-like object of theano shared variables
        Biases of the DBM.
    n_steps : integer
        Maximum number of iterations of mean-field to perform.
    eps : float
        Convergence threshold on the largest mean absolute change of any
        layer between two iterations.

    Returns
    -------
    list of symbolic variables
        Final state of each layer after the scan loop.
    """
    depth = len(psamples)

    # now alternate mean-field inference for even/odd layers
    def mf_iteration(*psamples):
        new_psamples = [p for p in psamples]
        # Update odd layers, then even layers.
        # NOTE(review): both loops read the *pre-update* `psamples`, so the
        # even-layer updates do not see the fresh odd-layer values computed
        # just above -- confirm this is intended before changing it.
        for i in xrange(1, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)
        for i in xrange(2, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)

        # Convergence score: largest mean absolute change over all layers.
        score = 0.
        for i in xrange(1, depth):
            score = T.maximum(T.mean(abs(new_psamples[i] - psamples[i])),
                              score)

        # theano.scan stops early when the `until` condition becomes true.
        return new_psamples, theano.scan_module.until(score < eps)

    new_psamples, updates = scan(
        mf_iteration,
        outputs_info=psamples,
        n_steps=n_steps
    )

    # Keep only the state from the last iteration of each sequence.
    return [x[-1] for x in new_psamples]
def estimate_likelihood(W_list, b_list, trainset, testset, free_energy_fn=None,
                        batch_size=100, large_ais=False, log_z=None,
                        pos_mf_steps=50, pos_sample_steps=0):
    """
    Compute estimate of log-partition function and likelihood of trainset and
    testset

    Parameters
    ----------
    W_list : array-like object of theano shared variables
        Weight matrices of the DBM (one per hidden layer)
    b_list : array-like object of theano shared variables
        Biases of the DBM
    trainset : pylearn2.datasets.dataset.Dataset
        Training set
    testset : pylearn2.datasets.dataset.Dataset
        Test set
    free_energy_fn : theano.function
        Function which, given temperature beta_k, computes the free energy
        of the samples stored in model.samples. This function should return
        a symbolic vector. (Note: this argument is overwritten below.)
    batch_size : integer
        Size of a batch of examples
    large_ais : boolean
        If True, will use 3e5 chains, instead of 3e4
    log_z : log-partition function (if precomputed)
    pos_mf_steps : int
        The number of fixed-point iterations for approximate inference
    pos_sample_steps : int
        Same thing as pos_mf_steps; when both pos_mf_steps > 0 and
        pos_sample_steps > 0, pos_mf_steps has priority

    Returns
    -------
    (train_ll, test_ll, log_z) : tuple of scalars
        Log-likelihood of the training data, log-likelihood of the test
        data, and the estimate of the log-partition function.
    """
    # NOTE(review): 'garanteed' is a typo, but it lives in the runtime
    # warning text, so it is preserved here.
    warnings.warn("This is garanteed to work only for DBMs with a " +
                  "BinaryVector visible layer and BinaryVectorMaxPool " +
                  "hidden layers with pool sizes of 1.")

    # Add a dummy placeholder for visible layer's weights in W_list
    # (rebinds the local name only; the caller's list is not mutated).
    W_list = [None] + W_list

    # Depth of the DBM
    depth = len(b_list)

    # Initialize samples: one shared positive and one shared negative
    # buffer per layer, shaped (batch_size, layer width).
    psamples = []
    nsamples = []
    for i, b in enumerate(b_list):
        psamples += [utils.sharedX(rng.rand(batch_size,
                                            b.get_value().shape[0]),
                                   name='psamples%i' % i)]
        nsamples += [utils.sharedX(rng.rand(batch_size,
                                            b.get_value().shape[0]),
                                   name='nsamples%i' % i)]
    # The visible "positive sample" is a symbolic input, not a buffer.
    psamples[0] = T.matrix('psamples0')

    ##########################
    ## BUILD THEANO FUNCTIONS
    ##########################
    beta = T.scalar()

    # For an even number of layers, we marginalize the odd layers
    # (and vice-versa)
    marginalize_odd = (depth % 2) == 0

    # Build function to retrieve energy of the negative-phase buffers at
    # inverse temperature beta.
    E = -T.dot(nsamples[0], b_list[0]) * beta
    for i in xrange(1, depth):
        E -= T.sum(T.dot(nsamples[i-1], W_list[i] * beta) * nsamples[i],
                   axis=1)
        E -= T.dot(nsamples[i], b_list[i] * beta)
    energy_fn = theano.function([beta], E)

    # Build inference function.
    assert (pos_mf_steps or pos_sample_steps)
    pos_steps = pos_mf_steps if pos_mf_steps else pos_sample_steps
    new_psamples = _e_step(psamples, W_list, b_list, n_steps=pos_steps)
    ups = OrderedDict()
    for psample, new_psample in zip(psamples[1:], new_psamples[1:]):
        ups[psample] = new_psample
    temp = numpy.asarray(trainset.X, dtype=floatX)
    mean_train = numpy.mean(temp, axis=0)
    inference_fn = theano.function(inputs=[psamples[0]], outputs=[],
                                   updates=ups)

    # Configure baserate bias for (h0 if `marginalize_odd` else h1):
    # run inference on the tiled training mean, then convert the resulting
    # mean activation (clamped away from 0/1) into a logit bias.
    inference_fn(numpy.tile(mean_train, (batch_size, 1)))
    numpy_psamples = [mean_train[None, :]] + \
                     [psample.get_value() for psample in psamples[1:]]
    mean_pos = numpy.minimum(numpy_psamples[not marginalize_odd], 1-1e-5)
    mean_pos = numpy.maximum(mean_pos, 1e-5)
    pa_bias = -numpy.log(1./mean_pos[0] - 1.)

    # Build Theano function to sample from interpolating distributions.
    updates = OrderedDict()
    new_nsamples = neg_sampling(W_list, b_list, nsamples, beta=beta,
                                pa_bias=pa_bias,
                                marginalize_odd=marginalize_odd,
                                theano_rng=theano_rng)
    for (nsample, new_nsample) in zip(nsamples, new_nsamples):
        updates[nsample] = new_nsample
    sample_fn = theano.function([beta], [], updates=updates,
                                name='sample_func')

    # Build function to compute free-energy of p_k(h1).
    # (This shadows the `free_energy_fn` parameter.)
    fe_bp_h1 = free_energy_at_beta(W_list, b_list, nsamples, beta,
                                   pa_bias, marginalize_odd=marginalize_odd)
    free_energy_fn = theano.function([beta], fe_bp_h1)

    ###########
    ## RUN AIS
    ###########

    # Generate exact sample for the base model.
    for i, nsample_i in enumerate(nsamples):
        bias = pa_bias if i == 1 else b_list[i].get_value()
        hi_mean_vec = 1. / (1. + numpy.exp(-bias))
        hi_mean = numpy.tile(hi_mean_vec, (batch_size, 1))
        r = rng.random_sample(hi_mean.shape)
        hi_sample = numpy.array(hi_mean > r, dtype=floatX)
        nsample_i.set_value(hi_sample)

    # Default configuration for interpolating distributions.
    # NOTE(review): float counts like `1e5+1` passed to numpy.linspace are
    # rejected by modern NumPy versions -- confirm the targeted NumPy.
    if large_ais:
        betas = numpy.cast[floatX](
            numpy.hstack((numpy.linspace(0, 0.5, 1e5+1)[:-1],
                          numpy.linspace(0.5, 0.9, 1e5+1)[:-1],
                          numpy.linspace(0.9, 1.0, 1e5))))
    else:
        betas = numpy.cast[floatX](
            numpy.hstack((numpy.linspace(0, 0.5, 1e4+1)[:-1],
                          numpy.linspace(0.5, 0.9, 1e4+1)[:-1],
                          numpy.linspace(0.9, 1.0, 1e4))))

    if log_z is None:
        # log Z_B = log Z_A + (log Z_B - log Z_A), the latter from AIS.
        log_ais_w = compute_log_ais_weights(batch_size, free_energy_fn,
                                            sample_fn, betas)
        dlogz, var_dlogz = estimate_from_weights(log_ais_w)
        log_za = compute_log_za(b_list, pa_bias, marginalize_odd)
        log_z = log_za + dlogz
        logging.info('log_z = %f' % log_z)
        logging.info('log_za = %f' % log_za)
        logging.info('dlogz = %f' % dlogz)
        logging.info('var_dlogz = %f' % var_dlogz)

    train_ll = compute_likelihood_given_logz(nsamples, psamples, batch_size,
                                             energy_fn, inference_fn, log_z,
                                             trainset.X)
    logging.info('Training likelihood = %f' % train_ll)
    test_ll = compute_likelihood_given_logz(nsamples, psamples, batch_size,
                                            energy_fn, inference_fn, log_z,
                                            testset.X)
    logging.info('Test likelihood = %f' % test_ll)

    return (train_ll, test_ll, log_z)
if __name__ == '__main__':
    # Registries mapping CLI names onto implementations.
    metrics = {'ais': estimate_likelihood}
    datasets = {'mnist': MNIST}

    # Command-line interface.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("metric", help="the desired metric",
                            choices=metrics.keys())
    arg_parser.add_argument("dataset", help="the dataset used for computing "
                            "the metric", choices=datasets.keys())
    arg_parser.add_argument("model_path", help="path to the pickled DBM model")
    cli_args = arg_parser.parse_args()

    metric_fn = metrics[cli_args.metric]
    dataset_cls = datasets[cli_args.dataset]

    # Rebuild shared weight/bias variables from the pickled DBM.
    dbm_model = serial.load(cli_args.model_path)
    layers = [dbm_model.visible_layer] + dbm_model.hidden_layers
    W_list = [theano.shared(hidden_layer.get_weights())
              for hidden_layer in dbm_model.hidden_layers]
    b_list = [theano.shared(layer.get_biases()) for layer in layers]

    trainset = dataset_cls(which_set='train')
    testset = dataset_cls(which_set='test')
    metric_fn(W_list, b_list, trainset, testset, pos_mf_steps=5)
| bsd-3-clause | c272811532c5b1779aa68f99827312fe | 34.478448 | 105 | 0.615357 | 3.677837 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/utils/serial.py | 33 | 20145 | """
Utilities for serializing and deserializing python objects.
"""
try:
from cPickle import BadPickleGet
except ImportError:
BadPickleGet = KeyError
import pickle
import logging
import numpy as np
from theano.compat import six
from theano.compat.six.moves import cPickle, xrange
import os
import time
import warnings
import sys
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.mem import improve_memory_error_message
io = None
hdf_reader = None
import struct
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.string_utils import match
import shutil
logger = logging.getLogger(__name__)
def load(filepath, retry=True):
    """
    Loads object(s) from file specified by 'filepath'.

    Parameters
    ----------
    filepath : str
        A path to a file to load. Should be a pickle, Matlab, or NumPy
        file; or a .txt or .amat file that numpy.loadtxt can load.
    retry : bool, optional
        If True, will make a handful of attempts to load the file before
        giving up. This can be useful if you are for example calling
        show_weights.py on a file that is actively being written to by a
        training script--sometimes the load attempt might fail if the
        training script writes at the same time show_weights tries to
        read, but if you try again after a few seconds you should be able
        to open the file.

    Returns
    -------
    loaded_object : object
        The object that was stored in the file.
    """
    # Bug fix: `retry` was previously hard-coded to True in this call,
    # silently ignoring the caller's argument.
    return _load(filepath, recurse_depth=0, retry=retry)
def save(filepath, obj, on_overwrite='ignore'):
    """
    Serialize `object` to a file denoted by `filepath`.

    Parameters
    ----------
    filepath : str
        A filename. If the suffix is `.joblib` and joblib can be
        imported, `joblib.dump` is used in place of the regular
        pickling mechanisms; this results in much faster saves by
        saving arrays as separate .npy files on disk. If the file
        suffix is `.npy` than `numpy.save` is attempted on `obj`.
        Otherwise, (c)pickle is used.

    obj : object
        A Python object to be serialized.

    on_overwrite : str, optional
        A string specifying what to do if the file already exists.
        Possible values include:

        - "ignore" : Just overwrite the existing file.
        - "backup" : Make a backup copy of the file (<filepath>.bak).
          Save the new copy. Then delete the backup copy. This allows
          recovery of the old version of the file if saving the new one
          fails.
    """
    filepath = preprocess(filepath)

    if os.path.exists(filepath):
        if on_overwrite == 'backup':
            backup = filepath + '.bak'
            shutil.move(filepath, backup)
            # The original file has been moved aside, so the recursive call
            # can use the default on_overwrite.
            save(filepath, obj)
            try:
                os.remove(backup)
            except Exception as e:
                # Best effort only: a stale .bak file is preferable to
                # failing after the new file was written successfully.
                warnings.warn("Got an error while trying to remove " + backup
                              + ":" + str(e))
            return
        else:
            assert on_overwrite == 'ignore'

    try:
        _save(filepath, obj)
    except RuntimeError as e:
        # Sometimes for large theano graphs, pickle/cPickle exceed the
        # maximum recursion depth. The workaround employed here is the one
        # recommended on stackexchange:
        # http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
        # Obviously this does not scale and could cause a crash, but there
        # is no better solution short of a custom pickle implementation.
        if str(e).find('recursion') != -1:
            logger.warning('pylearn2.utils.save encountered the following '
                           'error: ' + str(e) +
                           '\nAttempting to resolve this error by calling ' +
                           'sys.setrecusionlimit and retrying')
            old_limit = sys.getrecursionlimit()
            try:
                sys.setrecursionlimit(50000)
                _save(filepath, obj)
            finally:
                sys.setrecursionlimit(old_limit)
        else:
            # Bug fix: previously any other RuntimeError was silently
            # swallowed here, hiding real failures from the caller.
            raise
def get_pickle_protocol():
    """
    Return the pickle protocol configured for this machine.

    Reads the PYLEARN2_PICKLE_PROTOCOL environment variable, defaulting
    to protocol 0 (cPickle's default, and the most portable choice).
    The special value 'pickle.HIGHEST_PROTOCOL' selects the highest
    protocol supported by this interpreter. Configuring this per machine
    lets heterogeneous platforms agree on a mutually readable protocol.
    """
    # Missing variable -> '0', matching cPickle's default protocol and
    # giving maximum portability.
    protocol_str = os.environ.get('PYLEARN2_PICKLE_PROTOCOL', '0')
    if protocol_str == 'pickle.HIGHEST_PROTOCOL':
        return pickle.HIGHEST_PROTOCOL
    return int(protocol_str)
def _save(filepath, obj):
    """
    Serialize `obj` to `filepath`, trying joblib / cPickle / pickle in turn.

    Helper for `save`; see that function for the public contract.
    """
    try:
        import joblib
        joblib_available = True
    except ImportError:
        joblib_available = False
    if filepath.endswith('.npy'):
        np.save(filepath, obj)
        return
    # This is dumb
    # assert filepath.endswith('.pkl')
    save_dir = os.path.dirname(filepath)
    # Handle current working directory case.
    if save_dir == '':
        save_dir = '.'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if os.path.exists(save_dir) and not os.path.isdir(save_dir):
        raise IOError("save path %s exists, not a directory" % save_dir)
    elif not os.access(save_dir, os.W_OK):
        raise IOError("permission error creating %s" % filepath)
    try:
        # Prefer joblib for .joblib files (much faster for array-heavy
        # objects); otherwise fall through to cPickle.
        if joblib_available and filepath.endswith('.joblib'):
            joblib.dump(obj, filepath)
        else:
            if filepath.endswith('.joblib'):
                warnings.warn('Warning: .joblib suffix specified but joblib '
                              'unavailable. Using ordinary pickle.')
            with open(filepath, 'wb') as filehandle:
                cPickle.dump(obj, filehandle, get_pickle_protocol())
    except Exception as e:
        logger.exception("cPickle has failed to write an object to "
                         "{0}".format(filepath))
        if str(e).find('maximum recursion depth exceeded') != -1:
            # Let the caller (`save`) retry with a larger recursion limit.
            raise
        try:
            # Fall back to the pure-Python pickle, which sometimes succeeds
            # where the C implementation fails.
            logger.info('retrying with pickle')
            with open(filepath, "wb") as f:
                pickle.dump(obj, f)
        except Exception as e2:
            if str(e) == '' and str(e2) == '':
                # Both picklers failed without a usable message; re-run the
                # cPickle dump outside the try/except so the raw failure is
                # visible to the user.
                logger.exception('neither cPickle nor pickle could write to '
                                 '{0}'.format(filepath))
                logger.exception(
                    'moreover, neither of them raised an exception that '
                    'can be converted to a string'
                )
                logger.exception(
                    'now re-attempting to write with cPickle outside the '
                    'try/catch loop so you can see if it prints anything '
                    'when it dies'
                )
                with open(filepath, 'wb') as f:
                    cPickle.dump(obj, f, get_pickle_protocol())
                logger.info('Somehow or other, the file write worked once '
                            'we quit using the try/catch.')
            else:
                # NOTE(review): 'env' appears to be a debugging sentinel --
                # confirm whether this branch is still needed.
                if str(e2) == 'env':
                    raise
                # NOTE(review): this reaches the stdlib traceback module via
                # pdb's import of it; `import traceback` directly would be
                # clearer.
                import pdb
                tb = pdb.traceback.format_exc()
                reraise_as(IOError(str(obj) +
                                   ' could not be written to ' +
                                   str(filepath) +
                                   ' by cPickle due to ' + str(e) +
                                   ' nor by pickle due to ' + str(e2) +
                                   '. \nTraceback ' + tb))
        logger.warning('{0} was written by pickle instead of cPickle, due to '
                       '{1} (perhaps your object'
                       ' is really big?)'.format(filepath, e))
def clone_via_serialize(obj):
    """
    Make a "deep copy" of an object by round-tripping it through
    serialization.

    Parameters
    ----------
    obj : object
        The object to clone.

    Returns
    -------
    obj2 : object
        A copy of the object.
    """
    serialized = cPickle.dumps(obj, get_pickle_protocol())
    return cPickle.loads(serialized)
def to_string(obj):
    """
    Serialize an object to a pickled string.

    Parameters
    ----------
    obj : object
        The object to serialize.

    Returns
    -------
    string : str
        The object serialized as a string.
    """
    protocol = get_pickle_protocol()
    return cPickle.dumps(obj, protocol)
def from_string(s):
    """
    Deserialize an object from its pickled string representation.

    Parameters
    ----------
    s : str
        The object serialized as a string (as produced by `to_string`).

    Returns
    -------
    obj : object
        The deserialized object.
    """
    obj = cPickle.loads(s)
    return obj
def mkdir(filepath):
    """
    Create a directory, including any missing parent directories.

    Succeeds silently when the directory already exists. Raises if the
    directory cannot be made (e.g. permissions, or the path exists but is
    not a directory).

    Parameters
    ----------
    filepath : str
        Path of the directory to create.
    """
    try:
        os.makedirs(filepath)
    except OSError:
        # Only swallow the error when the directory is already in place.
        if os.path.isdir(filepath):
            return
        raise
def read_int(fin, n=1):
    """
    Read `n` native-endian 4-byte integers from a binary file.

    Parameters
    ----------
    fin : file
        Readable binary file object.
    n : int
        Number of ints to read.

    Returns
    -------
    rval : int or list
        A single int when n == 1, otherwise a list of n ints.

    Raises
    ------
    ValueError
        If fewer than 4 bytes remain in the file for a read.
    """
    if n != 1:
        # Read the ints one at a time, in file order.
        return [read_int(fin) for _ in xrange(n)]
    raw = fin.read(4)
    if len(raw) != 4:
        raise ValueError('fin did not contain 4 bytes')
    return struct.unpack('i', raw)[0]
# Dictionary mapping lush binary-matrix magic numbers (the first 4-byte int
# of the file header) to the numpy dtype string of the stored elements.
# Used by read_bin_lush_matrix below.
lush_magic = {
    507333717: 'uint8',
    507333716: 'int32',
    507333713: 'float32',
    507333715: 'float64'
}
def read_bin_lush_matrix(filepath):
    """
    Read a binary matrix saved by the lush library.

    Parameters
    ----------
    filepath : str
        The path to the file.

    Returns
    -------
    matrix : ndarray
        A NumPy version of the stored matrix.

    Raises
    ------
    ValueError
        If the header cannot be read, the magic number is unknown, or the
        file contains trailing bytes beyond what the header describes.
    """
    # Use a context manager so the file handle is released on all paths;
    # the previous implementation leaked it whenever parsing failed.
    with open(filepath, 'rb') as f:
        try:
            magic = read_int(f)
        except ValueError:
            reraise_as("Couldn't read magic number")
        ndim = read_int(f)
        if ndim == 0:
            shape = ()
        else:
            # The lush header always stores at least 3 dimension fields;
            # presumably the unused trailing dims are 1 -- TODO confirm.
            shape = read_int(f, max(3, ndim))
        total_elems = 1
        for dim in shape:
            total_elems *= dim
        try:
            dtype = lush_magic[magic]
        except KeyError:
            reraise_as(ValueError('Unrecognized lush magic number ' +
                                  str(magic)))
        rval = np.fromfile(file=f, dtype=dtype, count=total_elems)
        # read(-1) consumes the rest of the file; any leftover bytes mean
        # the header and content disagree.
        excess = f.read(-1)
        if excess:
            raise ValueError(str(len(excess)) +
                             ' extra bytes found at end of file.'
                             ' This indicates mismatch between header '
                             'and content')
    return rval.reshape(*shape)
def load_train_file(config_file_path, environ=None):
    """
    Load and parse a yaml file for a Train object, publishing the relevant
    training environment variables along the way.

    Parameters
    ----------
    config_file_path : str
        Path to a config file containing a YAML string describing a
        pylearn2.train.Train object
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables when parsing the YAML file. If a key appears
        both in `os.environ` and this dictionary, the value in this
        dictionary is used.

    Returns
    -------
    Object described by the YAML string stored in the config file
    """
    from pylearn2.config import yaml_parse

    # Publish environment variables related to file name.
    suffix_to_strip = '.yaml'
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path
    os.environ["PYLEARN2_TRAIN_FILE_FULL_STEM"] = config_file_full_stem

    # Paths are split on '/' (matching historical behaviour), not os.sep.
    path_parts = config_file_path.split('/')
    directory = '/'.join(path_parts[:-1])
    if directory != '':
        directory += '/'
    os.environ["PYLEARN2_TRAIN_DIR"] = directory
    os.environ["PYLEARN2_TRAIN_BASE_NAME"] = path_parts[-1]
    os.environ["PYLEARN2_TRAIN_FILE_STEM"] = \
        config_file_full_stem.split('/')[-1]

    return yaml_parse.load_path(config_file_path, environ=environ)
def _load(filepath, recurse_depth=0, retry=True):
    """
    Recursively tries to load a file until success or maximum number of
    attempts.

    Parameters
    ----------
    filepath : str
        A path to a file to load. Should be a pickle, Matlab, or NumPy
        file; or a .txt or .amat file that numpy.loadtxt can load.
    recurse_depth : int, optional
        End users should not use this argument. It is used by the function
        itself to implement the `retry` option recursively.
    retry : bool, optional
        If True, will make a handful of attempts to load the file before
        giving up. This can be useful if you are for example calling
        show_weights.py on a file that is actively being written to by a
        training script--sometimes the load attempt might fail if the
        training script writes at the same time show_weights tries to
        read, but if you try again after a few seconds you should be able
        to open the file.

    Returns
    -------
    loaded_object : object
        The object that was stored in the file.
    """
    try:
        import joblib
        joblib_available = True
    except ImportError:
        joblib_available = False
    if recurse_depth == 0:
        # Expand ${VAR} references / user dirs once, on the first attempt.
        filepath = preprocess(filepath)

        # Non-pickle formats are dispatched directly and never retried.
        if filepath.endswith('.npy') or filepath.endswith('.npz'):
            return np.load(filepath)

        if filepath.endswith('.amat') or filepath.endswith('txt'):
            try:
                return np.loadtxt(filepath)
            except Exception:
                reraise_as("{0} cannot be loaded by serial.load (trying "
                           "to use np.loadtxt)".format(filepath))

        if filepath.endswith('.mat'):
            # Lazily import scipy.io the first time a .mat file is read.
            global io
            if io is None:
                import scipy.io
                io = scipy.io
            try:
                return io.loadmat(filepath)
            except NotImplementedError as nei:
                if str(nei).find('HDF reader') != -1:
                    # Matlab v7.3 files are HDF5; fall back to h5py.
                    global hdf_reader
                    if hdf_reader is None:
                        import h5py
                        hdf_reader = h5py
                    return hdf_reader.File(filepath, 'r')
                else:
                    raise
            # this code should never be reached
            assert False

    # for loading PY2 pickle in PY3
    encoding = {'encoding': 'latin-1'} if six.PY3 else {}

    def exponential_backoff():
        # Retry with exponentially growing sleeps; after 10 attempts, fall
        # back to reading the whole file into memory and unpickling that.
        if recurse_depth > 9:
            logger.info('Max number of tries exceeded while trying to open '
                        '{0}'.format(filepath))
            logger.info('attempting to open via reading string')
            with open(filepath, 'rb') as f:
                content = f.read()
            return cPickle.loads(content, **encoding)
        else:
            nsec = 0.5 * (2.0 ** float(recurse_depth))
            logger.info("Waiting {0} seconds and trying again".format(nsec))
            time.sleep(nsec)
            return _load(filepath, recurse_depth + 1, retry)

    try:
        if not joblib_available:
            with open(filepath, 'rb') as f:
                obj = cPickle.load(f, **encoding)
        else:
            try:
                obj = joblib.load(filepath)
            except Exception as e:
                if os.path.exists(filepath) and not os.path.isdir(filepath):
                    raise
                raise_cannot_open(filepath)
    except MemoryError as e:
        # We want to explicitly catch this exception because for MemoryError
        # __str__ returns the empty string, so some of our default printouts
        # below don't make a lot of sense.
        # Also, a lot of users assume any exception is a bug in the library,
        # so we can cut down on mail to pylearn-users by adding a message
        # that makes it clear this exception is caused by their machine not
        # meeting requirements.
        if os.path.splitext(filepath)[1] == ".pkl":
            improve_memory_error_message(e,
                                         ("You do not have enough memory to "
                                          "open %s \n"
                                          " + Try using numpy.{save,load} "
                                          "(file with extension '.npy') "
                                          "to save your file. It uses less "
                                          "memory when reading and "
                                          "writing files than pickled files.")
                                         % filepath)
        else:
            improve_memory_error_message(e,
                                         "You do not have enough memory to "
                                         "open %s" % filepath)
    except (BadPickleGet, EOFError, KeyError) as e:
        if not retry:
            reraise_as(e.__class__('Failed to open {0}'.format(filepath)))
        obj = exponential_backoff()
    except ValueError:
        # Bug fix: this handler previously contained a bare
        # `logger.exception` attribute access (a no-op); actually log the
        # traceback before retrying or re-raising.
        logger.exception('Encountered a ValueError while opening '
                         '{0}'.format(filepath))
        if not retry:
            reraise_as(ValueError('Failed to open {0}'.format(filepath)))
        obj = exponential_backoff()
    except Exception:
        # assert False
        reraise_as("Couldn't open {0}".format(filepath))

    # if the object has no yaml_src, we give it one that just says it
    # came from this file. could cause trouble if you save obj again
    # to a different location
    if not hasattr(obj, 'yaml_src'):
        try:
            obj.yaml_src = '!pkl: "' + os.path.abspath(filepath) + '"'
        except Exception:
            pass
    return obj
def raise_cannot_open(path):
    """
    Raise an IOError explaining *why* we can't open `path`.

    Walks the path components from the root, finds the first one that does
    not exist, and raises an error describing the failure; when the parent
    directory is small enough, suggests the closest-matching existing name.

    Parameters
    ----------
    path : str
        The path we cannot open
    """
    # Walk prefixes of the path ('/'-separated) until one does not exist.
    pieces = path.split('/')
    for i in xrange(1, len(pieces) + 1):
        so_far = '/'.join(pieces[0:i])

        if not os.path.exists(so_far):
            if i == 1:
                if so_far == '':
                    # Absolute path: the first piece is empty; keep walking.
                    continue
                reraise_as(IOError('Cannot open ' + path + ' (' + so_far +
                                   ' does not exist)'))
            # The prefix up to (but excluding) the missing component exists.
            parent = '/'.join(pieces[0:i - 1])
            bad = pieces[i - 1]

            if not os.path.isdir(parent):
                reraise_as(IOError("Cannot open " + path + " because " +
                                   parent + " is not a directory."))

            candidates = os.listdir(parent)

            if len(candidates) == 0:
                reraise_as(IOError("Cannot open " + path + " because " +
                                   parent + " is empty."))

            if len(candidates) > 100:
                # Don't attempt to guess the right name if the directory is
                # huge
                reraise_as(IOError("Cannot open " + path + " but can open " +
                                   parent + "."))

            if os.path.islink(path):
                reraise_as(IOError(path + " appears to be a symlink to a "
                                   "non-existent file"))
            # Suggest the closest-matching sibling name.
            reraise_as(IOError("Cannot open " + path + " but can open " +
                               parent + ". Did you mean " +
                               match(bad, candidates) +
                               " instead of " + bad + "?"))
        # end if
    # end for
    # Every prefix exists, so the path itself should have been openable;
    # callers must only invoke this function when opening actually failed.
    assert False
| bsd-3-clause | 9f1635ba770f56492e91b962a0949c0e | 31.544426 | 115 | 0.558451 | 4.441138 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/costs/ebm_estimation.py | 34 | 11066 | """
Training costs for unsupervised learning of energy-based models
"""
from functools import wraps
import logging
import numpy as np
import sys
from theano import scan
import theano.tensor as T
from theano.compat.six.moves import zip as izip
from pylearn2.compat import OrderedDict
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
from pylearn2.utils import py_integer_types
from pylearn2.utils.rng import make_theano_rng
from pylearn2.models.rbm import BlockGibbsSampler
# Module-level logger for this costs module.
logger = logging.getLogger(__name__)

logger.debug("Cost changing the recursion limit.")

# We need this to be high enough that the big theano graphs we make
# when unrolling inference don't cause python to complain.
# python intentionally declares stack overflow well before the stack
# segment is actually exceeded. But we can't make this value too big
# either, or we'll get seg faults when the python interpreter really
# does go over the stack segment.

# IG encountered seg faults on eos3 (a machine at LISA labo) when using
# 50000 so for now it is set to 40000.

# I think the actual safe recursion limit can't be predicted in advance
# because you don't know how big of a stack frame each function will
# make, so there is not really a "correct" way to do this. Really the
# python interpreter should provide an option to raise the error
# precisely when you're going to exceed the stack segment.

# NOTE: this is a module-import side effect that changes interpreter-wide
# state for any program importing this module.
sys.setrecursionlimit(40000)
class NCE(DefaultDataSpecsMixin, Cost):
    """
    Noise-Contrastive Estimation

    See "Noise-Contrastive Estimation: A new estimation principle for
    unnormalized models" by Gutmann and Hyvarinen

    Parameters
    ----------
    noise : WRITEME
        A Distribution from which noisy examples are generated
    noise_per_clean : int
        Number of noisy examples to generate for each clean example given
    """

    # NOTE: __init__ is defined after the other methods in this class.

    def h(self, X, model):
        """
        Computes `h` from the NCE paper: -sigmoid(G(X)).

        Parameters
        ----------
        X : Theano matrix
            Batch of input data
        model : Model
            Any model with a `log_prob` method.

        Returns
        -------
        h : A theano symbol for the `h` function from the paper.
        """
        return - T.nnet.sigmoid(self.G(X, model))

    def G(self, X, model):
        """
        Computes `G` from the NCE paper: the log-ratio of model and noise
        probabilities, log p_model(X) - log p_noise(X).

        Parameters
        ----------
        X : Theano matrix
            Batch of input data
        model : Model
            Any model with a `log_prob` method.

        Returns
        -------
        G : A theano symbol for the `G` function from the paper.
        """
        return model.log_prob(X) - self.noise.log_prob(X)

    def expr(self, model, data, noisy_data=None):
        """
        Computes the NCE objective (to be minimized).

        Parameters
        ----------
        model : Model
            Any Model that implements a `log_probs` method.
        data : Theano matrix
            Batch of clean examples.
        noisy_data : Theano matrix, optional
            The noise samples used for noise-contrastive
            estimation. Will be generated internally if not
            provided. The keyword argument allows FixedVarDescr
            to provide the same noise across several steps of
            a line search.

        Returns
        -------
        rval : theano scalar
            The negated NCE objective of eqn. 3 of the paper (negated
            because the paper maximizes it and Cost objects minimize).
        """
        space, source = self.get_data_specs(model)
        space.validate(data)
        X = data
        if X.name is None:
            X_name = 'X'
        else:
            X_name = X.name

        m_data = X.shape[0]
        # `noise_per_clean` noise samples per clean example.
        m_noise = m_data * self.noise_per_clean

        if noisy_data is not None:
            space.validate(noisy_data)
            Y = noisy_data
        else:
            Y = self.noise.random_design_matrix(m_noise)

        # Numerically stable log sigmoid(G) and log(1 - sigmoid(G))
        # written via softplus.
        log_hx = -T.nnet.softplus(-self.G(X, model))
        log_one_minus_hy = -T.nnet.softplus(self.G(Y, model))

        # based on equation 3 of the paper
        # ours is the negative of theirs because
        # they maximize it and we minimize it
        rval = -T.mean(log_hx)-T.mean(log_one_minus_hy)
        rval.name = 'NCE('+X_name+')'

        return rval

    def __init__(self, noise, noise_per_clean):
        self.noise = noise
        assert isinstance(noise_per_clean, py_integer_types)
        self.noise_per_clean = noise_per_clean
class SM(DefaultDataSpecsMixin, Cost):
    """
    (Regularized) Score Matching

    See:
    - "Regularized estimation of image statistics by Score Matching",
      D. Kingma, Y. LeCun, NIPS 2010
    - eqn. 4 of "On Autoencoders and Score Matching for Energy Based Models"
      Swersky et al 2011

    Uses the mean over visible units rather than sum over visible units
    so that hyperparameters won't depend as much on the # of visible units

    Parameters
    ----------
    lambd : float
        Non-negative regularization coefficient on the squared second
        derivative term (the `lambd * ddx**2` term below).
    """

    def __init__(self, lambd=0):
        assert lambd >= 0
        self.lambd = lambd

    @wraps(Cost.expr)
    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        X = data
        X_name = 'X' if X.name is None else X.name

        def f(i, _X, _dx):
            # i-th diagonal entry of the Jacobian of the score wrt X,
            # evaluated per example in the batch.
            return T.grad(_dx[:, i].sum(), _X)[:, i]

        # dx = model score (gradient of log density wrt X).
        dx = model.score(X)
        # Scan over input dimensions to assemble the diagonal second
        # derivatives; transpose back to (batch, dim).
        ddx, _ = scan(f, sequences=[T.arange(X.shape[1])],
                      non_sequences=[X, dx])
        ddx = ddx.T
        assert len(ddx.type.broadcastable) == 2
        # Mean (not sum) over units of: 0.5*score^2 + second derivative
        # + regularizer.
        rval = T.mean(0.5 * dx**2 + ddx + self.lambd * ddx**2)
        rval.name = 'sm('+X_name+')'

        return rval
class SMD(DefaultDataSpecsMixin, Cost):
    """
    Denoising Score Matching

    See eqn. 4.3 of
    "A Connection Between Score Matching and Denoising Autoencoders"
    by Pascal Vincent for details

    Note that instead of using half the squared norm we use the mean
    squared error, so that hyperparameters don't depend as much on
    the # of visible units

    Parameters
    ----------
    corruptor : callable
        Object applied to the data to produce corrupted examples; must
        also provide a `corruption_free_energy(corrupted_X, X)` method.
    """

    def __init__(self, corruptor):
        super(SMD, self).__init__()
        self.corruptor = corruptor

    @wraps(Cost.expr)
    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        X = data
        X_name = 'X' if X.name is None else X.name

        corrupted_X = self.corruptor(X)
        if corrupted_X.name is None:
            corrupted_X.name = 'corrupt('+X_name+')'

        # Model score evaluated at the corrupted input.
        model_score = model.score(corrupted_X)
        assert len(model_score.type.broadcastable) == len(X.type.broadcastable)

        # Score of the Parzen (corruption) density around the clean input,
        # obtained by differentiating the corruption free energy.
        parzen_score = T.grad(
            - T.sum(self.corruptor.corruption_free_energy(corrupted_X, X)),
            corrupted_X)
        assert \
            len(parzen_score.type.broadcastable) == len(X.type.broadcastable)

        score_diff = model_score - parzen_score
        score_diff.name = 'smd_score_diff('+X_name+')'
        assert len(score_diff.type.broadcastable) == len(X.type.broadcastable)

        # TODO: this could probably be faster as a tensordot,
        # but we don't have tensordot for gpu yet
        sq_score_diff = T.sqr(score_diff)
        # sq_score_diff = Print('sq_score_diff',attrs=['mean'])(sq_score_diff)

        # Mean squared difference between the two scores.
        smd = T.mean(sq_score_diff)

        smd.name = 'SMD('+X_name+')'

        return smd
class SML(Cost):
    """
    Stochastic Maximum Likelihood.

    See "On the convergence of Markovian stochastic algorithms with rapidly
    decreasing ergodicity rates" by Laurent Younes (1998)

    Also known as Persistent Contrastive Divergence (PCD). See "Training
    restricted boltzmann machines using approximations to the likelihood
    gradient" by Tijmen Tieleman (2008)

    The number of particles fits the batch size.

    Parameters
    ----------
    batch_size : int
        Batch size of the training algorithm
    nsteps : int
        Number of steps made by the block Gibbs sampler between each epoch
    """

    def __init__(self, batch_size, nsteps):
        super(SML, self).__init__()
        self.nchains = batch_size
        self.nsteps = nsteps

    @wraps(Cost.get_gradients)
    def get_gradients(self, model, data, **kwargs):
        cost = self._cost(model, data, **kwargs)
        params = list(model.get_params())
        # the persistent chain is held constant while differentiating
        grads = T.grad(cost, params, disconnected_inputs='ignore',
                       consider_constant=[self.sampler.particles])
        gradients = OrderedDict(izip(params, grads))
        updates = OrderedDict()
        sampler_updates = self.sampler.updates()
        updates.update(sampler_updates)
        return gradients, updates

    def _cost(self, model, data, **kwargs):
        """
        A fake cost that we differentiate symbolically to derive the SML
        update rule.

        Parameters
        ----------
        model : Model
        data : Batch in get_data_specs format
        **kwargs : dict
            Extra keyword arguments forwarded by ``get_gradients``;
            accepted (and ignored) here. Bug fix: previously this method
            did not accept them, so any caller passing kwargs through
            ``get_gradients`` raised a TypeError.

        Returns
        -------
        cost : 0-d Theano tensor
            The fake cost
        """
        if not hasattr(self, 'sampler'):
            # lazily build the persistent Gibbs chain on first use
            self.sampler = BlockGibbsSampler(
                rbm=model,
                particles=0.5+np.zeros((self.nchains, model.get_input_dim())),
                rng=model.rng,
                steps=self.nsteps)

        # compute negative phase updates
        sampler_updates = self.sampler.updates()

        # Compute SML cost
        pos_v = data
        neg_v = self.sampler.particles
        ml_cost = (model.free_energy(pos_v).mean() -
                   model.free_energy(neg_v).mean())
        return ml_cost

    @wraps(Cost.expr)
    def expr(self, model, data):
        # SML has no monitorable scalar expression of its own
        return None

    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        return (model.get_input_space(), model.get_input_source())
class CDk(Cost):
    """
    Contrastive Divergence (CD-k).

    See "Training products of experts by minimizing contrastive divergence"
    by Geoffrey E. Hinton (2002)

    Parameters
    ----------
    nsteps : int
        Number of Markov chain steps for the negative sample
    seed : int
        Seed for the random number generator
    """

    def __init__(self, nsteps, seed=42):
        super(CDk, self).__init__()
        self.nsteps = nsteps
        self.rng = make_theano_rng(seed, which_method='binomial')

    def _cost(self, model, data, **kwargs):
        """
        Builds the CD-k "fake" cost and the negative-phase sample.

        Parameters
        ----------
        model : Model
        data : Batch in get_data_specs format
        **kwargs : dict
            Extra keyword arguments forwarded by ``get_gradients``;
            accepted (and ignored) here. Bug fix: previously this method
            did not accept them, so any caller passing kwargs through
            ``get_gradients`` raised a TypeError.

        Returns
        -------
        ml_cost : 0-d Theano tensor
            The fake cost to differentiate.
        neg_v : Theano tensor
            The negative sample, to be held constant when
            differentiating.
        """
        pos_v = data
        neg_v = data
        # run k steps of Gibbs sampling to obtain the negative sample
        for k in range(self.nsteps):
            [neg_v, _locals] = model.gibbs_step_for_v(neg_v, self.rng)

        # Compute CD cost
        ml_cost = (model.free_energy(pos_v).mean() -
                   model.free_energy(neg_v).mean())
        return ml_cost, neg_v

    @wraps(Cost.get_gradients)
    def get_gradients(self, model, data, **kwargs):
        cost, neg_v = self._cost(model, data, **kwargs)
        params = list(model.get_params())
        grads = T.grad(cost, params, disconnected_inputs='ignore',
                       consider_constant=[neg_v])
        gradients = OrderedDict(izip(params, grads))
        updates = OrderedDict()
        return gradients, updates

    @wraps(Cost.expr)
    def expr(self, model, data):
        # CD has no monitorable scalar expression of its own
        return None

    # bug fix: this was decorated @wraps(Cost.expr), attaching the wrong
    # docstring/metadata; SML above wraps Cost.get_data_specs correctly
    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        return (model.get_input_space(), model.get_input_source())
| bsd-3-clause | 3a5a1649e2403c52c14b5639b41045ed | 28.121053 | 79 | 0.604735 | 3.827741 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/train_extensions/__init__.py | 49 | 5909 | """Plugins for the Train object."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import functools
import logging
import numpy as np
logger = logging.getLogger(__name__)
class TrainExtension(object):
    """
    An object called by pylearn2.train.Train at various
    points during learning.

    Useful for adding custom features to the basic learning procedure.

    Every callback in this base class is a no-op; subclass it and
    override any subset of the methods to hook into training.
    """

    def on_save(self, model, dataset, algorithm):
        """
        Called by Train immediately before it saves the model.

        Parameters
        ----------
        model : pylearn2.models.Model
            The model object being trained.
        dataset : pylearn2.datasets.Dataset
            The dataset object used for training.
        algorithm : pylearn2.training_algorithms.TrainingAlgorithm
            The training algorithm being used to train the model.
        """

    def on_monitor(self, model, dataset, algorithm):
        """
        Called by Train immediately after each call to the Monitor
        (i.e., when training begins, and at the end of each epoch).

        Parameters
        ----------
        model : pylearn2.models.Model
            The model object being trained.
        dataset : pylearn2.datasets.Dataset
            The dataset object used for training.
        algorithm : pylearn2.training_algorithms.TrainingAlgorithm
            The training algorithm being used to train the model.
        """

    def setup(self, model, dataset, algorithm):
        """
        Called by Train immediately upon instantiation,
        before any monitoring is done.

        Parameters
        ----------
        model : pylearn2.models.Model
            The model object being trained.
        dataset : pylearn2.datasets.Dataset
            The dataset object used for training.
        algorithm : pylearn2.training_algorithms.TrainingAlgorithm
            The training algorithm being used to train the model.
        """
class SharedSetter(TrainExtension):
    """
    Sets shared variables to take on the specified values after the
    specified amounts of epochs have taken place.

    epoch_updates = [ [i, x, y] ]

    means run x.set_value(cast(y)) after i epochs have passed.

    Parameters
    ----------
    epoch_updates : list of [int, SharedVariable, value] triples
        Each [i, x, y] schedules x.set_value(cast(y)) after i epochs.
        Every variable must be named and expose get_value, since it is
        also registered as a monitoring channel.
    """

    def __init__(self, epoch_updates):
        self._count = 0
        self._epoch_to_updates = {}
        self._vars = set([])
        for epoch, var, val in epoch_updates:
            self._vars.add(var)
            if epoch not in self._epoch_to_updates:
                self._epoch_to_updates[epoch] = []
            # monitoring below needs a named shared variable
            assert hasattr(var, 'get_value')
            assert var.name is not None
            self._epoch_to_updates[epoch].append((var, val))

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        if self._count == 0:
            monitor = model.monitor
            # TODO: make Monitor support input-less channels so this hack
            # isn't necessary
            # bug fix: wrap in list() -- on Python 3, dict .values()
            # returns a view that does not support indexing
            hack = list(monitor.channels.values())[0]
            for var in self._vars:
                monitor.add_channel(name=var.name, val=var,
                                    ipt=hack.graph_input,
                                    dataset=hack.dataset)
        if self._count in self._epoch_to_updates:
            for var, val in self._epoch_to_updates[self._count]:
                var.set_value(np.cast[var.dtype](val))
        self._count += 1
class ChannelSmoother(TrainExtension):
    """
    Makes a smoothed version of a monitoring channel by averaging together
    the k most recent values of that channel.

    This is a little bit dangerous because if other TrainExtensions depend
    on the channel being up to date they must appear after this one in the
    extensions list. A better long term solution would be to make the Monitor
    support this kind of channel directly instead of hacking it in.

    Note that the Monitor will print this channel as having a value of -1,
    and then the extension will print the right value.

    Parameters
    ----------
    channel_to_smooth : str
        Name of the existing monitoring channel to smooth.
    channel_to_publish : str
        Name of the new channel that receives the smoothed values.
    k : int, optional
        Number of most recent values to average together (default 5).
    """

    def __init__(self, channel_to_smooth, channel_to_publish, k=5):
        self.channel_to_smooth = channel_to_smooth
        self.channel_to_publish = channel_to_publish
        self.k = k

    @functools.wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        # Register the output channel with a placeholder value of -1;
        # the real value is patched into val_record in on_monitor.
        monitor = model.monitor
        channels = monitor.channels
        channel_to_smooth = channels[self.channel_to_smooth]
        ipt = channel_to_smooth.graph_input
        dataset = channel_to_smooth.dataset
        monitor.add_channel(name=self.channel_to_publish,
                            ipt=ipt,
                            val=-1.,
                            dataset=dataset)
        self.in_ch = channel_to_smooth
        self.out_ch = channels[self.channel_to_publish]

    @functools.wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        # Overwrite the -1 placeholder with the mean of the last k values.
        val_record = self.in_ch.val_record
        # bug fix: use len - k (not len - k + 1) so that exactly k values
        # are averaged once enough records exist, as documented above
        start = max(0, len(val_record) - self.k)
        values = val_record[start:]
        mean = sum(values) / float(len(values))
        self.out_ch.val_record[-1] = mean
        logger.info('\t{0}: {1}'.format(self.channel_to_publish, mean))
| bsd-3-clause | 6c7714a92efc601a428708dfdfea5d67 | 32.196629 | 79 | 0.620917 | 4.412995 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/scripts/summarize_model.py | 44 | 2999 | #!/usr/bin/env python
"""
This script summarizes a model by showing some statistics about
the parameters and checking whether the model completed
training succesfully
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import argparse
import numpy as np
from pylearn2.compat import first_key
from pylearn2.utils import serial
def summarize(path):
    """
    Summarize the model stored at ``path``: per-parameter (min, mean,
    max) statistics and shapes, row/column norms for matrices, and --
    when the model carries a monitor -- training-progress information.

    Parameters
    ----------
    path : str
        The path to the pickled model to summarize
    """
    model = serial.load(path)
    for param in model.get_params():
        _summarize_param(param)
    if hasattr(model, 'monitor'):
        _summarize_monitor(model.monitor)


def _summarize_param(param):
    """Print statistics (and norms, for 2-D parameters) of one parameter."""
    name = param.name if param.name is not None else '<anon>'
    v = param.get_value()
    print(name + ': ' + str((v.min(), v.mean(), v.max())), end='')
    print(str(v.shape))
    if np.sign(v.min()) != np.sign(v.max()):
        # signs differ: the absolute-value statistics are informative too
        v = np.abs(v)
        print('abs(' + name + '): ' + str((v.min(), v.mean(), v.max())))
    if v.ndim == 2:
        row_norms = np.sqrt(np.square(v).sum(axis=1))
        print(name + " row norms:", end='')
        print((row_norms.min(), row_norms.mean(), row_norms.max()))
        col_norms = np.sqrt(np.square(v).sum(axis=0))
        print(name + " col norms:", end='')
        print((col_norms.min(), col_norms.mean(), col_norms.max()))


def _summarize_monitor(monitor):
    """Print how much data the model saw and whether training completed."""
    print('trained on', monitor.get_examples_seen(), 'examples')
    print('which corresponds to ', end='')
    print(monitor.get_batches_seen(), 'batches')
    key = first_key(monitor.channels)
    hour = float(monitor.channels[key].time_record[-1]) / 3600.
    print('Trained for {0} hours'.format(hour))
    try:
        print(monitor.get_epochs_seen(), 'epochs')
    except Exception:
        # older monitors may not track epochs; best-effort only
        pass
    if hasattr(monitor, 'training_succeeded'):
        if monitor.training_succeeded:
            print('Training succeeded')
        else:
            print('Training was not yet completed ' +
                  'at the time of this save.')
    else:
        print('This pickle file is damaged, or was made before the ' +
              'Monitor tracked whether training completed.')
def make_argument_parser():
    """
    Creates an ArgumentParser to read the options for this script from
    sys.argv

    Returns
    -------
    argparse.ArgumentParser
        Parser accepting a single positional ``path`` argument.
    """
    description = ("Print some parameter statistics of a pickled model "
                   "and check if it completed training succesfully.")
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('path', help='The pickled model to summarize')
    return parser
if __name__ == "__main__":
    # command-line entry point: parse args, then summarize the model
    cli_args = make_argument_parser().parse_args()
    summarize(cli_args.path)
| bsd-3-clause | a2449dff6375002e8a64399017b2a66e | 31.956044 | 76 | 0.579193 | 3.935696 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/models/mnd.py | 49 | 4148 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.models.model import Model
from pylearn2.utils import sharedX
import numpy as np
import theano.tensor as T
class DiagonalMND(Model):
    """
    A model based on the multivariate normal distribution. This variant is
    constrained to have diagonal covariance.

    The density is parameterized by a mean vector ``mu`` and a precision
    vector ``beta`` (elementwise inverse variances; see
    log_partition_function, which treats beta as inverse covariance).

    Parameters
    ----------
    nvis : int
        Number of visible units (dimensionality of the data).
    init_beta : float
        Initial value broadcast to every element of the precision vector.
    init_mu : float
        Initial value broadcast to every element of the mean vector.
    min_beta : float
        Lower bound that updates to ``beta`` are clipped to.
    max_beta : float
        Upper bound that updates to ``beta`` are clipped to.
    """
    # TODO: unify this with distribution.mnd
    def __init__(self, nvis,
            init_beta,
            init_mu,
            min_beta,
            max_beta):
        #copy all arguments to the object
        self.__dict__.update( locals() )
        del self.self
        super(DiagonalMND,self).__init__()
        #build the object
        self.redo_everything()

    def redo_everything(self):
        """
        (Re)allocates the model parameters ``beta`` and ``mu`` as shared
        variables initialized to the constructor values, then rebuilds
        the theano-dependent state via redo_theano.
        """
        self.beta = sharedX(np.ones((self.nvis,))*self.init_beta,'beta')
        self.mu = sharedX(np.ones((self.nvis,))*self.init_mu,'mu')
        self.redo_theano()

    def free_energy(self, X):
        """
        Free energy of each row of `X`:
        0.5 * sum_i beta_i * (X_i - mu_i)^2, as a theano expression.
        """
        diff = X-self.mu
        sq = T.sqr(diff)
        return 0.5 * T.dot( sq, self.beta )

    def log_prob(self, X):
        """
        Log probability of each row of `X` under the model:
        negative free energy minus the log partition function.
        """
        return -self.free_energy(X) - self.log_partition_function()

    def log_partition_function(self):
        """
        Log of the normalization constant Z, as a theano scalar.
        """
        # Z^-1 = (2pi)^{-nvis/2} det( beta^-1 )^{-1/2}
        # Z = (2pi)^(nvis/2) sqrt( det( beta^-1) )
        # log Z = (nvis/2) log 2pi - (1/2) sum(log(beta))
        return float(self.nvis)/2. * np.log(2*np.pi) - 0.5 * T.sum(T.log(self.beta))

    def redo_theano(self):
        """
        Rebuilds theano-dependent state: resets the per-parameter record
        of already-censored updates, and registers every attribute
        created here for deletion on pickling (hence the dir(self)
        snapshot before/after).
        """
        init_names = dir(self)

        # maps each parameter to the set of update expressions that have
        # already been clipped by _modify_updates
        self.censored_updates = {}
        for param in self.get_params():
            self.censored_updates[param] = set([])

        final_names = dir(self)
        self.register_names_to_del( [name for name in final_names if name not in init_names])

    def _modify_updates(self, updates):
        """
        Clips any not-yet-censored update to ``beta`` into
        [min_beta, max_beta], then records every parameter update seen so
        the same update expression is not clipped twice.
        """
        if self.beta in updates and updates[self.beta] not in self.censored_updates[self.beta]:
            updates[self.beta] = T.clip(updates[self.beta], self.min_beta, self.max_beta )
        params = self.get_params()
        for param in updates:
            if param in params:
                self.censored_updates[param] = self.censored_updates[param].union(set([updates[param]]))

    def get_params(self):
        """
        Returns the model parameters as the list [mu, beta].
        """
        return [self.mu, self.beta ]
def kl_divergence(q,p):
    """
    KL divergence KL(q || p) between two DiagonalMND models of the same
    dimensionality, returned as a theano scalar expression.

    Uses the closed form for normal distributions
    (http://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#KL_divergence_for_Normal_Distributions):
    D_KL(q||p) = 0.5 ( tr(Sigma_p^-1 Sigma_q)
                       + (mu_p-mu_q)^T Sigma_p^-1 (mu_p-mu_q)
                       - log(det Sigma_q / det Sigma_p) - k )

    Parameters
    ----------
    q : DiagonalMND
    p : DiagonalMND
    """
    assert isinstance(q, DiagonalMND)
    assert isinstance(p, DiagonalMND)
    assert q.nvis == p.nvis
    k = q.nvis

    # trace term: tr(Sigma_p^{-1} Sigma_q) = sum_i beta_p_i / beta_q_i
    trace_term = T.dot(1. / q.beta, p.beta)
    assert trace_term.ndim == 0

    # quadratic term: (mu_p - mu_q)^T diag(beta_p) (mu_p - mu_q)
    quad_term = T.dot(p.beta, T.sqr(p.mu - q.mu))
    assert quad_term.ndim == 0

    # - log ( det Sigma_q / det Sigma_p)
    # = log det Beta_p_inv - log det Beta_q_inv
    # = sum(log(beta_q)) - sum(log(beta_p))
    log_term = T.sum(T.log(q.beta)) - T.sum(T.log(p.beta))
    assert log_term.ndim == 0

    return 0.5 * (trace_term + quad_term + log_term - k)
| bsd-3-clause | c6b9c32aa570dc2d2fa491ffa122bbac | 23.544379 | 108 | 0.543877 | 3.222999 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/utils/tests/test_general.py | 45 | 1096 | """
Tests for pylearn2.utils.general functions.
"""
from pylearn2.utils import contains_nan, contains_inf, isfinite
import numpy as np
def test_contains_nan():
    """
    contains_nan must be False on all-finite data and become True as
    soon as a single `np.nan` is inserted.
    """
    data = np.random.random(100)
    assert not contains_nan(data)
    data[0] = np.nan
    assert contains_nan(data)
def test_contains_inf():
    """
    contains_inf must ignore finite values and `np.nan`, but detect
    both `np.inf` and `-np.inf`.
    """
    data = np.random.random(100)
    assert not contains_inf(data)
    # NaN is not an infinity
    data[0] = np.nan
    assert not contains_inf(data)
    data[1] = np.inf
    assert contains_inf(data)
    data[1] = -np.inf
    assert contains_inf(data)
def test_isfinite():
    """
    isfinite must be True only when the array contains neither
    `np.nan` nor `np.inf` (of either sign).
    """
    data = np.random.random(100)
    assert isfinite(data)
    data[0] = np.nan
    assert not isfinite(data)
    data[0] = np.inf
    assert not isfinite(data)
    data[0] = -np.inf
    assert not isfinite(data)
| bsd-3-clause | 47c760e1650f3b491f82a8e22503a7de | 22.826087 | 63 | 0.644161 | 3.321212 | false | true | false | false |
lisa-lab/pylearn2 | pylearn2/dataset_get/dataset-get.py | 44 | 36792 | #!/usr/bin/env python
# -*- coding: utf-8
########################################
#
#
# This file is intentionally monolithic.
# It also intentionally restricts itself
# to standard library modules, with no
# extra dependencies.
#
from __future__ import print_function
__authors__ = "Steven Pigeon"
__copyright__ = "(c) 2012, Université de Montréal"
__contact__ = "Steven Pigeon: pigeon@iro.umontreal.ca"
__version__ = "dataset-get 0.1"
__licence__ = "BSD 3-Clause http://www.opensource.org/licenses/BSD-3-Clause "
import logging
import re,os,sys,shutil,time
import warnings
import urllib,urllib2
import tarfile
import subprocess
from theano.compat.six.moves import input
logger = logging.getLogger(__name__)
########################################
class package_info:
    """
    Plain record holding everything known about one dataset package.
    """
    def __init__(self, cf, name, ts, rs, src, whr):
        """
        :param cf: configuration file in which the package was found
        :param name: short package name, e.g. "mnist"
        :param ts: last-modification time (unix ctime); coerced to int
        :param rs: human-readable size, e.g. "401.3MB"
        :param src: URL of the package on the web
        :param whr: install location on this machine (None if not installed)
        """
        self.configuration_file = cf
        self.name = name
        self.timestamp = int(ts)
        self.readable_size = rs
        self.source = src
        self.where = whr
########################################
#
# Global variables for the whole module.
#
dataset_sources="sources.lst"                        # basename of the package-source list file
dataset_web="http://www.stevenpigeon.org/secret"     # default remote repository
dataset_conf_path=""    # active configuration directory (set by set_defaults)
dataset_data_path=""    # active data directory (set by set_defaults)
root_conf_path=None     # system-wide configuration directory (set by set_defaults)
root_data_path=None     # system-wide data directory (set by set_defaults)
user_conf_path=None     # per-user configuration directory (set by set_defaults)
user_data_path=None     # per-user data directory (see NOTE in set_defaults)
super_powers=False      # True when the program runs as root (set by set_defaults)

# both dictionaries for fast search
# (but are semantically lists)
packages_sources={}          # name -> package_info, packages available remotely
installed_packages_list={}   # name -> package_info, packages installed locally
########################################
def local_path_as_url( filename ):
    """
    Turns a local, OS-specific path or filename into a file:// url,
    which lets the rest of the code treat local and remote files
    uniformly.

    :param filename: a relative or absolute pathname
    :returns: the urlified absolute path
    """
    absolute_path = os.path.abspath(filename)
    return "file://" + urllib.pathname2url(absolute_path)
########################################
def has_super_powers():
    """
    Tells whether the program is currently running as root.

    :returns: true if run as root, false otherwise
    """
    return os.geteuid() == 0
########################################
def corename( filename ):
    """
    Returns the 'corename' of a file: the basename with every extension
    stripped. For example, corename("thingie.tar.bz2") returns
    "thingie". (Note that since ALL extensions are removed,
    "my.data.tar.bz2" yields only "my"; a fully correct solution would
    distinguish MIME-approved standard extensions.)

    :param filename: a (base) filename
    :returns: the "core" filename
    """
    base = os.path.basename(filename)
    # peel off the right-most extension until nothing changes
    while True:
        stem, _ext = os.path.splitext(base)
        if stem == base:
            return stem
        base = stem
########################################
def get_timestamp_from_url( url ):
    """
    Reads the Last-Modified field from the http header associated with
    the file pointed to by the url, and returns it as a time-struct.
    Raises whatever exception urllib2.urlopen raises. Local files can
    only be looked up when presented as file:/// urls.

    :param url: a filename or an url
    :returns: the last-modified timestamp
    """
    headers = urllib2.urlopen(url).info()
    # RFC 2822 date format
    return time.strptime(headers["Last-Modified"],
                         "%a, %d %b %Y %H:%M:%S GMT")
########################################
def download_from_url( url, filename=None, progress_hook=None ):
    """
    Downloads the file at `url` into `filename` (or into a freshly
    created temporary file when `filename` is None) and returns the
    location of the downloaded file.

    :param url: url of the file to download
    :param filename: filename to download to (None means a temp file is created)
    :param progress_hook: a download hook to display progress
    :returns: the filename where the file was downloaded
    """
    (where, _headers) = urllib.urlretrieve(url, filename, progress_hook)
    return where
########################################
def file_access_rights( filename, rights, check_above=False ):
    """
    Determines whether a file grants the given access rights. When the
    file does not exist and check_above is True, the containing
    directory is tested instead (useful to check write/creation
    rights); otherwise a missing file yields False.

    :param filename: filename of the file to assess
    :param rights: rights to be tested
    :param check_above: Check directory rights if file does not exist.
    :returns: boolean, whether 'rights' rights are OK
    """
    if os.path.exists(filename):
        return os.access(filename, rights)
    if check_above:
        parent = os.path.dirname(os.path.abspath(filename))
        return os.access(parent, rights)
    return False
########################################
def atomic_replace( src_filename, dst_filename ):
    """
    Replaces dst_filename by src_filename "atomically".

    When both files reside on the same FS device a regular (atomic)
    move is performed. Otherwise the source is first copied to a
    temporary location on the destination's FS, then moved into place
    atomically.

    caveat: the destination FS must have enough storage left for the
    temporary file.

    :param src_filename: The file to replace from
    :param dst_filename: The file to be replaced
    :raises: whatever shutil raises
    """
    ####################################
    def on_same_device( filename_a, filename_b ):
        """
        Tells whether both files reside on the same FS device.
        """
        return os.stat(filename_a).st_dev == os.stat(filename_b).st_dev

    if os.path.exists(dst_filename) and not on_same_device(src_filename, dst_filename):
        # cross-device: stage a copy next to the destination first
        # (the copy is non-atomic, the final move is atomic)
        dst_path = os.path.dirname(os.path.abspath(dst_filename))
        staging = os.tempnam(dst_path)
        shutil.copy(src_filename, staging)
        shutil.move(staging, dst_filename)
    else:
        # same device, or the destination does not exist:
        # a single atomic move suffices
        shutil.move(src_filename, dst_filename)
########################################
def set_defaults():
    """
    Detects whether the program is run
    as an ordinary user or as root, and
    then sets defauts directories for
    packages, configurations, and sources.

    Side effects: assigns the module-level path globals, creates the
    configuration and data directories if missing, downloads
    sources.lst on first run, then loads both package lists.

    caveat: this is an FreeDesktop-friendly
    version, and we will need eventually
    to have Windows- and OSX-friendly
    versions.

    See: http://freedesktop.org/wiki/Home
    and: http://www.linuxfoundation.org/collaborate/workgroups/lsb/fhs
    """
    global dataset_conf_path, \
           dataset_data_path, \
           root_conf_path, \
           root_data_path, \
           user_conf_path, \
           super_powers
    # NOTE(review): user_data_path is assigned below but is not listed in
    # the global statement above, so the module-level user_data_path is
    # never updated by this function -- confirm whether that is intended.

    # a conspicuously LINUX version
    # (on windows, if we ever do a
    # windows version, these would
    # be different, and we may even
    # not have 'root' per se.)
    #
    root_conf_path="/etc/pylearn/"
    root_data_path="/usr/share/pylearn/dataset/"
    user_conf_path=os.path.join(os.environ["HOME"],".local/share/pylearn/")
    user_data_path=os.path.join(os.environ["HOME"],".local/share/pylearn/dataset/")

    if has_super_powers():
        # running as root: use the system-wide locations
        dataset_conf_path=root_conf_path
        dataset_data_path=root_data_path
        super_powers=True
    else:
        # ordinary user: use the per-user locations under $HOME
        dataset_conf_path=user_conf_path
        dataset_data_path=user_data_path
        super_powers=False

    # check if directories exist, and if not,
    # create them, and then fetch source.lst
    #
    if not os.path.exists(dataset_conf_path):
        os.makedirs(dataset_conf_path)

    if not os.path.exists(os.path.join(dataset_conf_path,dataset_sources)):
        # first run: fetch the package-source list from the web
        atomic_update(os.path.join(dataset_web,dataset_sources),
                      os.path.join(dataset_conf_path,dataset_sources),
                      progress_bar)

    if not os.path.exists(dataset_data_path):
        os.makedirs(dataset_data_path)

    read_packages_sources()
    read_installed_packages_list();
########################################
def read_packages_sources():
    """
    Reads the sources.lst file and
    populates the available packages
    list.

    Lookup order when not root: the system-wide sources.lst, the
    user's, then any locations named in $PYLEARN2_DATA_PATH; entries
    read later override earlier entries with the same package name.
    When run as root, only the system-wide file is read.

    caveat: parsing of the sources.lst
    is pathetic

    Assigns: packages_sources

    :raises: RuntimeError if sources.lst cannot be read
    """
    def read_from_file(config_filename):
        """
        Reads a sources.lst file from a given location

        :param config_filename: the configuration file to read
        """
        global packages_sources
        try:
            f=open(config_filename,"r")
        except Exception as e:
            # not a problem if not found in a given location
            pass
        else:
            # file opened; each line is: name timestamp size url
            for line in f:
                t=line.rstrip().split(' ') # rstrips strips whitespaces at the end (\n)
                packages_sources[t[0]]=\
                    this_package=package_info(
                        config_filename,
                        t[0], # name
                        t[1], # timestamp
                        t[2], # human-readable size
                        urllib.unquote(t[3]), # source on the web
                        None) # None as not installed (from source) (may be overridden later)

    if super_powers:
        read_from_file(os.path.join(dataset_conf_path,dataset_sources))
    else:
        # read root, user, then paths.
        paths=[ os.path.join(root_conf_path,dataset_sources),
                os.path.join(user_conf_path,dataset_sources) ]
        try:
            paths+=[ os.path.join(x,dataset_sources) for x in re.split(":|;",os.environ["PYLEARN2_DATA_PATH"]) ]
        except Exception:
            # PYLEARN2_DATA_PATH may or mayn't be defined
            pass
        for path in paths:
            read_from_file(path)

    if len(packages_sources)==0:
        raise RuntimeError( "[cf] fatal: could not find/read sources.lst (unexpected!)" )
########################################
def read_installed_packages_list():
    """
    Reads the various installed.lst files
    found on the system. First it searches
    for the root-installed installed.lst,
    then the user's, then searches the
    locations specified by the environment
    variable PYLEARN2_DATA_PATH (which is
    a standard :-separated list of locations)

    Assigns: installed_packages_list
    """
    # note: we add and overwrite rather
    # than clearing and filling (so we can
    # read many installed.lst, but the last
    # ones read overrides the earlier ones)
    #
    def read_from_file(config_filename):
        """
        Reads an installed.lst file from a given location

        :param config_filename: the configuration file to read
        """
        global installed_packages_list
        try:
            installed_list_file=open(config_filename)
        except IOError:
            # not a problem if not found in a location
            pass
        else:
            # read from file and
            # create a dictionary
            # (each line is: name timestamp size url install-location)
            #
            for line in installed_list_file:
                l=line.rstrip().split(' ') # removes trailing whitespaces (\n)
                if l:
                    installed_packages_list[l[0]]=\
                        this_package=package_info(
                            config_filename,
                            l[0], # name
                            l[1], # timestamp
                            l[2], # human-readable size
                            urllib.unquote(l[3]), # source on the web
                            urllib.unquote(l[4])) # where installed
                else:
                    pass # skip blank lines (there shouldn't be any)

    if super_powers:
        # then read only root
        read_from_file(os.path.join(dataset_conf_path,"installed.lst"))
    else:
        # read root, user, then paths.
        paths=[ os.path.join(root_conf_path,"installed.lst"),
                os.path.join(user_conf_path,"installed.lst") ]
        try:
            paths+=[ os.path.join(x,"installed.lst") for x in re.split(":|;",os.environ["PYLEARN2_DATA_PATH"]) ]
        except Exception:
            # PYLEARN2_DATA_PATH may or mayn't be defined
            pass
        for path in paths:
            read_from_file(path)

    if len(installed_packages_list)==0:
        logger.warning("[cf] no install.lst found "
                       "(will be created on install/upgrade)")
########################################
def write_installed_packages_list():
    """
    Saves the installed package list and
    their location (file over-writen depends
    on run as root or as a normal user)

    The list is first written to installed.lst.2 in the active
    configuration directory, then atomically swapped over
    installed.lst. Only packages whose install location is readable by
    the current user are written out.
    """
    global installed_packages_list
    try:
        tmp=open(os.path.join(dataset_conf_path,"installed.lst.2"),"w")
    except IOError:
        raise RuntimeError("[cf] fatal: cannot create temp file")
    else:
        # ok, probably worked?
        for package in installed_packages_list.values():
            # adds only packages that are readable for
            # this user (maybe some site-installed datasets
            # are out of his reach)
            #
            if package.where!=None and \
               file_access_rights(os.path.join(package.where,package.name),
                                  os.F_OK | os.R_OK):
                # one line per package: name timestamp size url location
                # (url and location are %-quoted to survive the
                # space-separated format)
                print(
                    " ".join(map(str,[ package.name,
                                       package.timestamp,
                                       package.readable_size,
                                       urllib.quote(package.source,"/:~"),
                                       urllib.quote(package.where,"/:~") ] )),
                    file=tmp)

    # replace the installed.lst in
    # a safe way
    atomic_replace(os.path.join(dataset_conf_path,"installed.lst.2"),
                   os.path.join(dataset_conf_path,"installed.lst"))
########################################
def atomic_update( remote_src, local_dst, hook=None ):
    """
    Takes a (possibly) remote file an checks
    if it is newer than a(n obligatoritly)
    local file. If the source is newer, an
    "atomic update" is performed.

    Atomic here means that the source is
    downloaded in a distinct location, and
    only if download is successful is the
    destination file replaced atomically.

    Error-message tags: [ts] timestamp lookup, [dl] download,
    [ac] atomic replace, [rw] permissions.

    :param remote_src: Url to a (possibly) remote file
    :param local_dst: file to update
    :param hook: download progress hook
    :raises: various IOErrors
    """
    global hook_download_filename # hook-related

    try:
        # failure here means the remote file is unreachable (or lacks a
        # usable Last-Modified header)
        remote_date = get_timestamp_from_url(remote_src);
    except IOError as e:
        raise IOError("[ts] %s %s" % (str(e),remote_src))
    else:
        if os.path.exists(local_dst):
            # it exists, test for update
            try:
                local_date = get_timestamp_from_url(local_path_as_url(local_dst))
            except Exception as e:
                raise IOError("[ts] %s %s" % (str(e),local_dst))
            else:
                if (local_date<remote_date):
                    # OK, the file seems to be out-of-date
                    # let's update it
                    #
                    if file_access_rights(local_dst,os.W_OK,check_above=True):
                        # we have write access to the file, or if it doesn't
                        # exist, to the directory where we want to write it.
                        #
                        try:
                            hook_download_filename=remote_src # hook-related
                            temp_filename=download_from_url(remote_src, filename=None, progress_hook=hook)
                        except Exception as e:
                            raise IOError("[dl] %s %s" % (str(e),remote_src))
                        else:
                            # download to temporary was successful,
                            # let's (try to) perform the atomic replace
                            #
                            try:
                                atomic_replace(temp_filename,local_dst)
                            except Exception as e:
                                raise IOError("[ac] %s %s --> %s" % (str(e),temp_filename,local_dst))
                    else:
                        raise IOError("[rw] no write access to %s " % local_dst )
                else:
                    # file's up to date, everything's fine
                    # and there's nothing else to do
                    #
                    pass
        else:
            # file does not exist, just download!
            # (same download-then-replace sequence as above)
            #
            if file_access_rights(local_dst,os.W_OK,check_above=True):
                try:
                    hook_download_filename=remote_src # hook-related
                    temp_filename=download_from_url(remote_src, filename=None, progress_hook=hook)
                except Exception as e:
                    raise IOError("[dl] %s %s" % (str(e),remote_src))
                else:
                    # yay! download successful!
                    #
                    try:
                        atomic_replace(temp_filename,local_dst)
                    except Exception as e:
                        raise IOError("[ac] %s %s --> %s" % (str(e),temp_filename,local_dst))
            else:
                raise IOError("[rw] no right access to %s" % local_dst)
########################################
def unpack_tarball( tar_filename, dest_path ):
    """
    Unpacks a (bzipped2) tarball into a destination directory.

    :param tar_filename: the bzipped2 tar file
    :param dest_path: a path to where expand the tarball
    :raises: various IOErrors
    """
    # guard clauses: source must exist, destination must be writable
    if not os.path.exists(tar_filename):
        raise IOError("'%s' not found" % tar_filename)
    if not file_access_rights(dest_path, os.W_OK, check_above=False):
        raise IOError("[tar] no right access to '%s'" % dest_path)

    try:
        # open the tarball as read, bz2
        tarball = tarfile.open(tar_filename, "r:bz2")
    except Exception:
        raise IOError("[tar] cannot open '%s'" % tar_filename)

    try:
        tarball.extractall(dest_path)
    except Exception:
        raise IOError("[tar] error while extracting '%s'" % tar_filename)
########################################
def run_scripts( package_location, scripts ):
    """
    Runs, in order, the package scripts found in the package's scripts/
    subdirectory. Scripts that do not exist are silently skipped; the
    caller's working directory is restored whether or not a script
    fails.

    :param package_location: "root" path for the package
    :param scripts: list of scripts to look for (and execute)
    :raises: subprocess exceptions
    """
    scripts_path = os.path.join(package_location, "scripts/")
    saved_cwd = os.getcwd()
    os.chdir(scripts_path)
    for script in scripts:
        if not os.path.exists(script):
            continue
        try:
            # check_call raises CalledProcessError on a non-zero
            # return code
            subprocess.check_call(script, stdout=sys.stdout, stderr=sys.stderr)
        except Exception:
            os.chdir(saved_cwd)
            raise
    # success (or nothing to run): restore the working directory
    os.chdir(saved_cwd)
########################################
def install_package( package, src, dst ):
    """
    Unpacks a (bzipped2) tarball and
    expands it to the given location.
    If unpacking is successful, installation
    scripts are run.
    :param package: package information (must expose a .name
                    attribute; see NOTE below)
    :param src: the source tarball
    :param dst: the destination directory
    :raises: IOErrors and subprocess exceptions
    """
    #FIXME: change creation flags to group-public
    # readable when invoked with super-powers
    #
    # NOTE(review): install_packages_from_file passes corename(...)
    # (a plain string) as `package`; that would fail on .name below —
    # confirm intended caller contract.
    unpack_tarball(src,dst)
    # os.path.join is correct whether or not dst carries a trailing
    # separator; the previous dst+package.name concatenation required
    # the caller to supply one.
    run_scripts(os.path.join(dst,package.name), scripts=["getscript","postinst"] )
########################################
def remove_package(package,dst):
    """
    Removes an installed package by running the
    various removal scripts, then by
    deleting its files and directories.
    :param package: package information
    :param dst: packages root (where packages are installed)
    """
    #path=os.path.join(dst,package.name)
    # the package's own recorded install root takes precedence over
    # the dst argument, so a package installed elsewhere is removed
    # from where it actually lives
    path=os.path.join(package.where,package.name)
    #print path
    # run the pre-removal script while data/docs/scripts still exist
    run_scripts(path,scripts=["prerm"])
    shutil.rmtree(os.path.join(path,"data/"))
    shutil.rmtree(os.path.join(path,"docs/"))
    # NOTE(review): this step builds its path from `dst` while every
    # other step uses package.where — if the two differ, "postrm" runs
    # from the wrong tree; confirm whether dst is intentional here.
    run_scripts(os.path.join(dst,package.name),scripts=["postrm"])
    shutil.rmtree(os.path.join(path,"scripts/"))
    shutil.rmtree(path)
    # drop the package from the persisted installed-packages list
    update_installed_list("r",package)
########################################
def update_installed_list( op, package ):
    """
    Updates the internal list of installed
    packages and persists it to disk.  Valid
    operations are "i" (install/update an entry)
    and "r" (remove an entry); anything else is a
    fatal configuration error.
    :param op: the operation performed, "i" or "r"
    :param package: the package information
    :raises: RuntimeError on an unknown operation
    """
    # reject unknown operations up front
    if op not in ("i", "r"):
        raise RuntimeError("[cf] fatal: invalid configuration op '%s'." % op)
    if op == "i":
        # install or update: (over)write the entry for this package
        installed_packages_list[package.name] = package
    else:
        # removal: forget the package entirely
        del installed_packages_list[package.name]
    # persist the updated list
    write_installed_packages_list()
########################################
def show_packages():
    """
    List all available packages, both
    installed or from remote sources, flagging
    each as installed ('i'), upgradable ('u') or
    not installed ('-').
    """
    logger.info("These packages are available:")
    for pkg in packages_sources.values():
        # classify the package relative to what is installed
        if pkg.name not in installed_packages_list:
            state = "-"
        elif installed_packages_list[pkg.name].timestamp < pkg.timestamp:
            # a newer version exists upstream
            state = "u"
        else:
            state = "i"
        package_time = time.strftime("%a, %d %b %Y %H:%M:%S",
                                     time.gmtime(pkg.timestamp))
        logger.info("{0} {1:<20} {2:<8} "
                    "{3:<30} {4}".format(state,
                                         pkg.name,
                                         pkg.readable_size,
                                         package_time,
                                         pkg.source))
########################################
def install_upgrade( package, upgrade=False,progress_hook=None ):
    """
    This function installs or upgrades a package.
    :param package: package information
    :param upgrade: If True, performs an upgrade, installs otherwise
    :param progress_hook: a download progress hook
    """
    global hook_download_filename # hook-related
    if upgrade:
        operation = "[up] upgrading"
    else:
        operation = "[in] installing"
    logger.info("{0} '{1}' to {2}".format(operation,
                package.name, dataset_data_path))
    remote_src=package.source
    # install location is determined by super-powers
    # (so a root package can be upgraded locally!)
    package.where=dataset_data_path;
    # TODO: to add caching, first lookup the
    # tarball in the package cache (but there's'nt
    # one for now)
    #
    cached=False;
    if not cached:
        hook_download_filename=remote_src # hook-related
        temp_filename=download_from_url(remote_src,filename=None,progress_hook=progress_hook)
    else:
        # assign filename to cached package
        # NOTE(review): if caching is ever enabled, temp_filename is
        # left unassigned on this branch and install_package below
        # would raise NameError — set it when implementing the cache.
        pass
    logger.info("[in] running install scripts "
                "for package '{0}'".format(package.name))
    # runs through the .../package_name/scripts/
    # directory and executes the scripts in a
    # specific order (which shouldn't display
    # much unless they fail)
    #
    install_package(package,temp_filename,dataset_data_path)
    # record the (possibly new) package in installed.lst
    update_installed_list("i",package)
########################################
def upgrade_packages(packages_to_upgrade, hook=None ):
    """
    Upgrades packages.
    If no packages are supplied, it will perform
    an "update-all" operation, finding all packages
    that are out of date.
    If packages names are supplied, only those
    are checked for upgrade (and upgraded if out
    of date)
    :param packages_to_upgrade: list of package names.
    :param hook: a download progress hook, forwarded to install_upgrade
    :raises: IOErrors (from downloads/rights)
    """
    # get names only
    if packages_to_upgrade==[]:
        packages_to_upgrade=installed_packages_list.keys() # all installed!
        all_packages=True
    else:
        all_packages=False
    # check what packages are in the list,
    # and really to be upgraded.
    #
    packages_really_to_upgrade=[]
    for this_package in packages_to_upgrade:
        if this_package in installed_packages_list:
            # check if there's a date
            installed_date=installed_packages_list[this_package].timestamp
            if this_package in packages_sources:
                repo_date=packages_sources[this_package].timestamp
                if installed_date < repo_date:
                    # ok, there's a newer version
                    logger.info(this_package)
                    packages_really_to_upgrade.append(this_package)
                else:
                    # no newer version, nothing to update
                    pass
            else:
                # installed but absent from the remote sources list,
                # so we cannot know whether it is out of date
                logger.warning("[up] '{0}' is unknown "
                               "(installed from file?).".format(this_package))
        else:
            # not installed?
            # only warn when the user named this package explicitly;
            # in "upgrade everything" mode the name came from our own list
            if not all_packages:
                logger.warning("[up] '{0}' is not installed, "
                               "cannot upgrade.".format(this_package))
            pass
    # once we have determined which packages
    # are to be updated, we show them to the
    # user for him to confirm
    #
    if packages_really_to_upgrade!=[]:
        logger.info("[up] the following package(s) will be upgraded:")
        for this_package in packages_really_to_upgrade:
            readable_size = packages_sources[this_package].readable_size
            logger.info("{0} ({1})".format(this_package, readable_size))
        # anything but an explicit yes is treated as no
        r = input("Proceed? [yes/N] ")
        if r=='y' or r=='yes':
            for this_package in packages_really_to_upgrade:
                install_upgrade( packages_sources[this_package], upgrade=True, progress_hook=hook )
        else:
            logger.info("[up] Taking '{0}' for no, so there.".format(r))
    else:
        # ok, nothing to upgrade,
        # move along.
        pass
########################################
#
# installs the packages, and forces if
# they already exist
#
# packages must be supplied as argument.
#
#
def install_packages( packages_to_install, force_install=False, hook=None ):
    """
    Installs packages by name, optionally forcing
    a re-install of packages that are already present.
    :param packages_to_install: list of package names
    :param force_install: if True, re-installs even if installed.
    :param hook: download progress hook
    :raises: RuntimeError when no names are given, IOErrors otherwise
    """
    if packages_to_install==[]:
        raise RuntimeError("[in] fatal: need packages names to install.")
    if force_install:
        logger.warning("[in] using the force")
    # keep only names that are known and (unless forced) not yet installed
    selected = []
    for name in packages_to_install:
        if name not in packages_sources:
            logger.warning("[in] unknown package '{0}'".format(name))
            continue
        if not force_install and name in installed_packages_list:
            logger.warning("[in] package '{0}' "
                           "is already installed".format(name))
            continue
        selected.append(name)
    if selected == []:
        # nothing left to install, move along
        return
    # show the selection and ask for confirmation;
    # anything but an explicit yes counts as no
    logger.info("[in] The following package(s) will be installed:")
    for name in selected:
        logger.info("{0} ({1})".format(name, packages_sources[name].readable_size))
    answer = input("Proceed? [yes/N] ")
    if answer in ('y', 'yes'):
        for name in selected:
            install_upgrade( packages_sources[name], upgrade=False, progress_hook=hook )
    else:
        logger.info("[in] Taking '{0}' for no, so there.".format(answer))
########################################
def install_packages_from_file( packages_to_install ):
    """
    (Force)Installs packages from files, but does
    not update installed.lst files.
    caveat: not as tested as everything else.
    :param packages_to_install: list of files to install
    :raises: IOErrors
    """
    if packages_to_install==[]:
        raise RuntimeError("[in] fatal: need packages names to install.")
    # keep only the files that actually exist on disk
    packages_really_to_install=[]
    for this_package in packages_to_install:
        if os.path.exists(this_package):
            packages_really_to_install.append(this_package)
        else:
            logger.warning("[in] package '{0}' not found".format(this_package))
    if packages_really_to_install!=[]:
        logger.info("[in] The following package(s) will be installed:")
        packages = []
        for this_package in packages_really_to_install:
            packages.append(corename(this_package))
        logger.info(' '.join(packages))
        # global confirmation first; per-package overwrite prompts follow
        r = input("Proceed? [yes/N] ")
        if r=='y' or r=='yes':
            for this_package in packages_really_to_install:
                #install_upgrade( this_package, upgrade=False, progress_hook=hook )
                if os.path.exists(dataset_data_path+corename(this_package)):
                    # already present: ask before clobbering this one
                    r = input("[in] '%s' already installed, overwrite? [yes/N] " % corename(this_package))
                    if r!='y' and r!='yes':
                        logger.info("[in] skipping package "
                                    "'{0}'".format(corename(this_package)))
                        continue
                # NOTE(review): corename(...) is a plain string, but
                # install_package dereferences package.name — confirm
                # this call path actually works.
                install_package( corename(this_package), this_package, dataset_data_path)
                #update_installed_list("i",(make a package object here),dataset_data_path)
        else:
            logger.info("[in] Taking '{0}' for no, so there.".format(r))
########################################
#
# uninstall packages, whether or not they
# are found in the sources.lst file (to
# account for the packages installed from
# file)
#
# like install, it expects a list, if there's
# no list, nothing happens. It will test
# whether or not the packages are installed, and
# will ask the user for a confirmation.
#
def remove_packages( packages_to_remove ):
    """
    Uninstall packages, whether or not they
    are found in the source.lst (so it can
    remove datasets installed from file).
    :param packages_to_remove: list of package names
    :raises: RuntimeError when no names are given
    :raises: IOErrors
    """
    if packages_to_remove==[]:
        raise RuntimeError("[rm] fatal: need packages names to remove.")
    packages_really_to_remove=[]
    for this_package in packages_to_remove:
        # a package is only removable if it is known, recorded as
        # installed, actually present on disk, and writable by us;
        # each failed check gets its own warning
        if this_package in packages_sources:
            if this_package in installed_packages_list:
                this_data_set_location=os.path.join( installed_packages_list[this_package].where,
                                                     installed_packages_list[this_package].name )
                if os.path.exists(this_data_set_location):
                    if (file_access_rights(this_data_set_location,os.W_OK)):
                        # ok, you may have rights to delete it
                        packages_really_to_remove.append(this_package)
                    else:
                        logger.warning("[rm] insufficient rights "
                                       "to remove '{0}'".format(this_package))
                else:
                    logger.warning("[rm] package '{0}' found in config file "
                                   "but not installed".format(this_package))
            else:
                logger.warning("[rm] package '{0}' "
                               "not installed".format(this_package))
        else:
            logger.warning("[rm] unknown package '{0}'".format(this_package))
    if packages_really_to_remove!=[]:
        logger.info("[rm] the following packages will be removed permanently:")
        logger.info(' '.join(packages_really_to_remove))
        r = input("Proceed? [yes/N] ")
        if r=='y' or r=='yes':
            for this_package in packages_really_to_remove:
                remove_package( installed_packages_list[this_package], dataset_data_path )
        else:
            # fixed: this message was mistagged "[up]" (it belongs to remove)
            logger.info("[rm] Taking '{0}' for no, so there.".format(r))
    else:
        # ok, nothing to remove, filenames where bad.
        pass
########################################
hook_download_filename=""
def progress_bar( blocks, blocksize, totalsize ):
    """
    Download-progress hook: rewrites the current console
    line with the percentage fetched so far and the name
    of the file being downloaded.
    caveat: not that great-looking, fix later to
    a cooler progress bar or something.
    """
    # never report more than 100%, even on the last (short) block
    fetched = min(totalsize, blocks * blocksize)
    percent = fetched * 100.0 / totalsize
    print("\r[dl] %6.2f%% %s" % (percent, hook_download_filename), end='')
    sys.stdout.flush()
########################################
def process_arguments():
    """
    Processes the installation arguments (from
    the command line)
    The possible arguments are:
    list
        lists available datasets from
        sources.lst
    update
        updates sources.lst
    upgrade
        upgrades datasets that are out
        of date
    install <dataset1> <dataset2> ... <datasetn>
        uses sources.lst to locate the
        package and perform the installation
    force-install <dataset1> ... <datasetn>
        performs an install even if the data
        sets seem to be there.
    remove <dataset1> <dataset2> ... <datasetn>
        removes the dataset
    clean
        empties package cache (does nothing
        for now, because no cache.)
    """
    # due to the relative simplicity of the
    # arguments, we dispatch by hand rather than with
    # optparse (2.3-2.6) or argparse (2.7+), although
    # in the future it may pose problems
    if len(sys.argv) <= 1:
        raise RuntimeError("[cl] missing command")
    command = sys.argv[1]
    args = sys.argv[2:]
    if command == "list":
        show_packages()
    elif command == "update":
        atomic_update( os.path.join(dataset_web,dataset_sources),
                       os.path.join(dataset_conf_path,dataset_sources),
                       hook=progress_bar)
    elif command == "upgrade":
        upgrade_packages(args, hook=progress_bar)
    elif command == "install":
        install_packages(args, hook=progress_bar)
    elif command == "install-from-file":
        install_packages_from_file(args)
    elif command == "force-install":
        install_packages(args,
                         force_install=True,
                         hook=progress_bar)
    elif command == "remove":
        remove_packages(args)
    elif command == "clean":
        # does nothing, no cache implemented yet
        pass
    elif command == "version":
        logger.info(__version__)
    else:
        raise RuntimeError("[cl] unknown command '%s'" % command)
########################################
if __name__ == "__main__":
    # to remove RuntimeWarnings about how
    # tempfilename is unsafe.
    #
    warnings.simplefilter("ignore", RuntimeWarning)
    # OK, let's construct the environment
    # needed by dataset-get
    try:
        set_defaults()
    except Exception as e:
        # configuration errors are fatal: log the traceback and
        # exit with a non-zero status
        logger.exception(e)
        exit(1) # fail!
    try:
        process_arguments()
    except Exception as e:
        # any command failure (bad usage, download/tar errors,
        # missing rights) is also fatal and logged the same way
        logger.exception(e)
        exit(1)
| bsd-3-clause | ba0c3a2ae7a7502fc39fbdb8cf6d19d3 | 31.528736 | 114 | 0.56336 | 4.251213 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/datasets/binarizer.py | 45 | 1455 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.datasets.transformer_dataset import TransformerDataset
from pylearn2.expr.sampling import SampleBernoulli
class Binarizer(TransformerDataset):
    """
    A TransformerDataset that takes examples with features in the interval
    [0,1], and uses these as Bernoulli parameters to sample examples
    with features in {0,1}.

    Parameters
    ----------
    raw : pylearn2 Dataset
        It must provide examples with features in the interval [0,1].
    seed : integer or list of integers, optional
        The seed passed to MRG_RandomStreams to make the Bernoulli
        samples. If not specified, all class instances default to
        the same seed so two instances can be run synchronized side
        by side.
    """

    def __init__(self, raw, seed=None):
        sampler = SampleBernoulli(seed=seed)
        super(Binarizer, self).__init__(raw, sampler,
                                        space_preserving=True)

    def get_design_matrix(self, topo=None):
        """
        Return a design matrix of Bernoulli samples drawn using the
        raw dataset's [0,1] features as probabilities.  When `topo`
        is given, the call is forwarded to the raw dataset and the
        result is returned without sampling.
        """
        if topo is None:
            probabilities = self.raw.get_design_matrix()
            return self.transformer.perform(probabilities)
        return self.raw.get_design_matrix(topo)
| bsd-3-clause | 8351d572eacba029adee7f11f86d0fb7 | 28.1 | 74 | 0.644674 | 4.064246 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/models/dbm/layer.py | 31 | 127336 | """
Common DBM Layer classes
"""
from __future__ import print_function
__authors__ = ["Ian Goodfellow", "Vincent Dumoulin"]
__copyright__ = "Copyright 2012-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import functools
import logging
import numpy as np
import operator
from theano.compat.six.moves import input, reduce, xrange
import time
import warnings
from theano import tensor as T, function, config
import theano
from theano.gof.op import get_debug_values
from theano.printing import Print
from pylearn2.compat import OrderedDict
from pylearn2.expr.nnet import sigmoid_numpy
from pylearn2.expr.probabilistic_max_pooling import max_pool_channels, max_pool_b01c, max_pool, max_pool_c01b
from pylearn2.linear.conv2d import make_random_conv2D, make_sparse_random_conv2D
from pylearn2.linear.conv2d_c01b import setup_detector_layer_c01b
from pylearn2.linear.matrixmul import MatrixMul
from pylearn2.model_extensions.norm_constraint import MaxL2FilterNorm
from pylearn2.models import Model
from pylearn2.models.dbm import init_sigmoid_bias_from_marginals
from pylearn2.space import VectorSpace, CompositeSpace, Conv2DSpace, Space
from pylearn2.utils import is_block_gradient
from pylearn2.utils import sharedX, safe_zip, py_integer_types, block_gradient
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.rng import make_theano_rng
from pylearn2.utils import safe_union
logger = logging.getLogger(__name__)
class Layer(Model):
    """
    Abstract class.
    A layer of a DBM.
    May only belong to one DBM.
    Each layer has a state ("total state") that can be split into
    the piece that is visible to the layer above ("upward state")
    and the piece that is visible to the layer below ("downward state").
    (Since visible layers don't have a downward state, the downward_state
    method only appears in the DBM_HiddenLayer subclass)
    For simple layers, all three of these are the same thing.
    """

    def get_dbm(self):
        """
        Returns the DBM that this layer belongs to, or None
        if it has not been assigned to a DBM yet.
        """
        if hasattr(self, 'dbm'):
            return self.dbm
        return None

    def set_dbm(self, dbm):
        """
        Assigns this layer to a DBM.

        Parameters
        ----------
        dbm : DBM
            The DBM to assign this layer to. The layer must not
            already belong to a DBM (asserted below).
        """
        assert self.get_dbm() is None
        self.dbm = dbm

    def get_total_state_space(self):
        """
        Returns the Space that the layer's total state lives in.
        """
        raise NotImplementedError(str(type(self))+" does not implement " +\
                "get_total_state_space()")

    def get_monitoring_channels(self):
        """
        Returns monitoring channels for this layer.

        Returns
        -------
        channels : OrderedDict
            Empty by default; subclasses may override to report
            layer-specific monitoring quantities.
        """
        return OrderedDict()

    def get_monitoring_channels_from_state(self, state):
        """
        Returns monitoring channels computed from a state of this layer.

        Parameters
        ----------
        state : member of this layer's total state space
            Unused here; subclasses may override.

        Returns
        -------
        channels : OrderedDict
            Empty by default.
        """
        return OrderedDict()

    def upward_state(self, total_state):
        """
        Takes total_state and turns it into the state that layer_above should
        see when computing P( layer_above | this_layer).
        So far this has two uses:

        * If this layer consists of a detector sub-layer h that is pooled
          into a pooling layer p, then total_state = (p,h) but layer_above
          should only see p.
        * If the conditional P( layer_above | this_layer) depends on
          parameters of this_layer, sometimes you can play games with
          the state to avoid needing the layers to communicate. So far
          the only instance of this usage is when the visible layer
          is N( Wh, beta). This makes the hidden layer be
          sigmoid( v beta W + b). Rather than having the hidden layer
          explicitly know about beta, we can just pass v beta as
          the upward state.

        Parameters
        ----------
        total_state : member of this layer's total state space

        Notes
        -----
        This method should work both for computing sampling updates
        and for computing mean field updates. So far I haven't encountered
        a case where it needs to do different things for those two
        contexts.
        """
        return total_state

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable containing an actual state (not a mean field
        state) for this variable.

        Parameters
        ----------
        num_examples : int
            Number of examples the state should hold.
        numpy_rng : numpy.random.RandomState
            Source of randomness for drawing the state.

        Returns
        -------
        state : shared variable
            A shared variable holding a sampled state.
        """
        raise NotImplementedError("%s doesn't implement make_state" %
                                  type(self))

    def make_symbolic_state(self, num_examples, theano_rng):
        """
        Returns a theano symbolic variable containing an actual state (not a
        mean field state) for this variable.

        Parameters
        ----------
        num_examples : int
            Number of examples the state should hold.
        theano_rng : MRG_RandomStreams
            Source of symbolic randomness.

        Returns
        -------
        state : tensor_like
            A symbolic expression for a sampled state.
        """
        raise NotImplementedError("%s doesn't implement make_symbolic_state" %
                                  type(self))

    def sample(self, state_below = None, state_above = None,
               layer_above = None,
               theano_rng = None):
        """
        Returns an expression for samples of this layer's state, conditioned on
        the layers above and below Should be valid as an update to the shared
        variable returned by self.make_state

        Parameters
        ----------
        state_below : WRITEME
            Corresponds to layer_below.upward_state(full_state_below),
            where full_state_below is the same kind of object as you get
            out of layer_below.make_state
        state_above : WRITEME
            Corresponds to layer_above.downward_state(full_state_above)
        theano_rng : WRITEME
            An MRG_RandomStreams instance

        Returns
        -------
        sample : tensor_like or tuple of tensor_like
            Symbolic expression(s) for the sampled state.

        Notes
        -----
        This can return multiple expressions if this layer's total state
        consists of more than one shared variable.
        """
        # guard against subclasses still using the pre-rename API
        if hasattr(self, 'get_sampling_updates'):
            raise AssertionError("Looks like "+str(type(self))+" needs to rename get_sampling_updates to sample.")
        raise NotImplementedError("%s doesn't implement sample" %
                                  type(self))

    def expected_energy_term(self, state,
                             average,
                             state_below,
                             average_below):
        """
        Returns a term of the expected energy of the entire model.
        This term should correspond to the expected value of terms
        of the energy function that:

        - involve this layer only
        - if there is a layer below, include terms that involve both this layer
          and the layer below

        Do not include terms that involve the layer below only.
        Do not include any terms that involve the layer above, if it
        exists, in any way (the interface doesn't let you see the layer
        above anyway).

        Parameters
        ----------
        state_below : WRITEME
            Upward state of the layer below.
        state : WRITEME
            Total state of this layer
        average_below : bool
            If True, the layer below is one of the variables to integrate
            over in the expectation, and state_below gives its variational
            parameters. If False, that layer is to be held constant and
            state_below gives a set of assignments to it average: like
            average_below, but for 'state' rather than 'state_below'

        Returns
        -------
        rval : tensor_like
            A 1D theano tensor giving the expected energy term for each example
        """
        raise NotImplementedError(str(type(self))+" does not implement expected_energy_term.")

    def finalize_initialization(self):
        """
        Some layers' initialization depends on layer above being initialized,
        which is why this method is called after `set_input_space` has been
        called.
        """
        pass
class VisibleLayer(Layer):
    """
    Abstract class.
    A layer of a DBM that may be used as a visible layer.
    Currently, all implemented layer classes may be either visible
    or hidden but not both. It may be worth making classes that can
    play both roles though. This would allow getting rid of the BinaryVector
    class.
    """

    def get_total_state_space(self):
        """
        Returns the space the layer's total state lives in.

        Returns
        -------
        total_state : Space
            For a visible layer, identical to the input space.
        """
        total_space = self.get_input_space()
        return total_space
class HiddenLayer(Layer):
    """
    Abstract class.
    A layer of a DBM that may be used as a hidden layer.
    """

    def downward_state(self, total_state):
        """
        Returns the piece of the total state that the layer below
        interacts with.  By default the entire state is exposed.
        """
        return total_state

    def get_stdev_rewards(self, state, coeffs):
        """
        Not implemented at this level; concrete layers must override.
        """
        msg = str(type(self)) + " does not implement get_stdev_rewards"
        raise NotImplementedError(msg)

    def get_range_rewards(self, state, coeffs):
        """
        Not implemented at this level; concrete layers must override.
        """
        msg = str(type(self)) + " does not implement get_range_rewards"
        raise NotImplementedError(msg)

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        Not implemented at this level; concrete layers must override.
        """
        msg = str(type(self)) + " does not implement get_l1_act_cost"
        raise NotImplementedError(msg)

    def get_l2_act_cost(self, state, target, coeff):
        """
        Not implemented at this level; concrete layers must override.
        """
        msg = str(type(self)) + " does not implement get_l2_act_cost"
        raise NotImplementedError(msg)
class BinaryVector(VisibleLayer):
    """
    A DBM visible layer consisting of binary random variables living
    in a VectorSpace.

    Parameters
    ----------
    nvis : int
        Dimension of the space
    bias_from_marginals : pylearn2.datasets.dataset.Dataset
        Dataset, whose marginals are used to initialize the visible biases
    center : bool
        If True, use Gregoire Montavon's centering trick
    copies : int
        Use this number of virtual copies of the state. All the copies
        still share parameters. This can be useful for balancing the
        amount of influencing two neighboring layers have on each other
        if the layers have different numbers or different types of units.
        Without this replication, layers with more units or units with
        a greater dynamic range would dominate the interaction due to
        the symmetric nature of undirected interactions.
    learn_init_inpainting_state : bool
        If True, gradients are allowed to flow through the initial
        inpainting state (see `init_inpainting_state`); when False
        (the default) that state is wrapped in block_gradient.
    """

    def __init__(self,
            nvis,
            bias_from_marginals = None,
            center = False,
            copies = 1, learn_init_inpainting_state = False):

        super(BinaryVector, self).__init__()

        self.__dict__.update(locals())
        del self.self
        # Don't serialize the dataset
        del self.bias_from_marginals

        self.space = VectorSpace(nvis)
        self.input_space = self.space

        origin = self.space.get_origin()

        if bias_from_marginals is None:
            init_bias = np.zeros((nvis,))
        else:
            init_bias = init_sigmoid_bias_from_marginals(bias_from_marginals)

        self.bias = sharedX(init_bias, 'visible_bias')

        if center:
            # centering offset is the mean activation under the prior
            self.offset = sharedX(sigmoid_numpy(init_bias))

    def get_biases(self):
        """
        Returns
        -------
        biases : ndarray
            The numpy value of the biases
        """
        return self.bias.get_value()

    def set_biases(self, biases, recenter=False):
        """
        Sets the value of the visible biases.

        Parameters
        ----------
        biases : ndarray
            New value for the biases.
        recenter : bool
            If True (only valid when centering is enabled), reset the
            centering offset to sigmoid(bias) so the state stays
            centered around the new prior mean.
        """
        self.bias.set_value(biases)
        if recenter:
            assert self.center
            self.offset.set_value(sigmoid_numpy(self.bias.get_value()))

    def upward_state(self, total_state):
        """
        Returns the state as seen by the layer above: the state minus
        the centering offset when centering is enabled, scaled by the
        number of virtual copies.
        """
        # hasattr guards keep old pickled models (predating these
        # attributes) working
        if not hasattr(self, 'center'):
            self.center = False

        if self.center:
            rval = total_state - self.offset
        else:
            rval = total_state

        if not hasattr(self, 'copies'):
            self.copies = 1

        return rval * self.copies

    def get_params(self):
        """
        Returns
        -------
        params : list
            The layer's only parameter: the visible bias.
        """
        return [self.bias]

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """
        Samples a binary state for this layer given the downward
        message from the layer above: a Bernoulli draw with
        p = sigmoid(message + bias).  `state_below` must be None
        since this is a visible layer; only copies == 1 is supported.
        """
        assert state_below is None

        if self.copies != 1:
            raise NotImplementedError()

        msg = layer_above.downward_message(state_above)

        bias = self.bias

        z = msg + bias

        phi = T.nnet.sigmoid(z)

        rval = theano_rng.binomial(size = phi.shape, p = phi, dtype = phi.dtype,
                       n = 1 )

        return rval

    def mf_update(self, state_above, layer_above):
        """
        Mean field update for this layer: sigmoid(downward message
        from the layer above + bias).
        """
        msg = layer_above.downward_message(state_above)
        mu = self.bias

        z = msg + mu

        rval = T.nnet.sigmoid(z)

        return rval

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable holding binary samples of shape
        (num_examples, nvis) drawn from the prior sigmoid(bias).
        Only copies == 1 is supported.
        """
        if not hasattr(self, 'copies'):
            self.copies = 1
        if self.copies != 1:
            raise NotImplementedError()
        driver = numpy_rng.uniform(0.,1., (num_examples, self.nvis))
        mean = sigmoid_numpy(self.bias.get_value())
        sample = driver < mean

        rval = sharedX(sample, name = 'v_sample_shared')

        return rval

    def make_symbolic_state(self, num_examples, theano_rng):
        """
        Returns a symbolic (num_examples, nvis) tensor of binary
        samples drawn from the prior sigmoid(bias) via theano_rng.
        Only copies == 1 is supported.
        """
        if not hasattr(self, 'copies'):
            self.copies = 1

        if self.copies != 1:
            raise NotImplementedError()

        mean = T.nnet.sigmoid(self.bias)
        rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean,
                                   dtype=theano.config.floatX)

        return rval

    def expected_energy_term(self, state, average, state_below = None, average_below = None):
        """
        Visible layer's contribution to the expected energy:
        -state . bias for each example (with the offset subtracted
        first when centering is enabled), scaled by the number of
        copies.  Since the term is linear in the state, the
        `average` flag does not change the computation.
        """
        if self.center:
            state = state - self.offset

        assert state_below is None
        assert average_below is None

        assert average in [True, False]
        self.space.validate(state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        rval = -T.dot(state, self.bias)

        assert rval.ndim == 1

        return rval * self.copies

    def init_inpainting_state(self, V, drop_mask, noise = False, return_unmasked = False):
        """
        Builds the initial mean-field guess for inpainting: the prior
        mean sigmoid(bias) on the dropped entries (or random means
        when noise is True) combined with the observed values of V
        elsewhere.  With return_unmasked, the pre-masking means are
        returned as well.
        """
        assert drop_mask is None or drop_mask.ndim > 1

        unmasked = T.nnet.sigmoid(self.bias.dimshuffle('x',0))
        # this condition is needed later if unmasked is used as V_hat
        assert unmasked.ndim == 2
        # this condition is also needed later if unmasked is used as V_hat
        assert hasattr(unmasked.owner.op, 'scalar_op')
        if drop_mask is not None:
            masked_mean = unmasked * drop_mask
        else:
            masked_mean = unmasked
        if not hasattr(self, 'learn_init_inpainting_state'):
            self.learn_init_inpainting_state = 0
        if not self.learn_init_inpainting_state:
            # keep gradients from flowing into the initial guess
            masked_mean = block_gradient(masked_mean)
        masked_mean.name = 'masked_mean'

        if noise:
            theano_rng = theano.sandbox.rng_mrg.MRG_RandomStreams(42)
            # we want a set of random mean field parameters, not binary samples
            unmasked = T.nnet.sigmoid(theano_rng.normal(avg = 0.,
                    std = 1., size = masked_mean.shape,
                    dtype = masked_mean.dtype))
            masked_mean = unmasked * drop_mask
            masked_mean.name = 'masked_noise'

        if drop_mask is None:
            rval = masked_mean
        else:
            # keep the observed (non-dropped) entries of V as-is
            masked_V  = V  * (1-drop_mask)
            rval = masked_mean + masked_V
        rval.name = 'init_inpainting_state'

        if return_unmasked:
            assert unmasked.ndim > 1
            return rval, unmasked

        return rval

    def inpaint_update(self, state_above, layer_above, drop_mask = None, V = None, return_unmasked = False):
        """
        Inpainting update: the posterior mean sigmoid(downward message
        + bias), written only into the dropped entries of V (or
        returned wholesale when no drop_mask is given).  With
        return_unmasked, the full posterior mean is returned too.
        """
        msg = layer_above.downward_message(state_above)
        mu = self.bias

        z = msg + mu
        z.name = 'inpainting_z_[unknown_iter]'

        unmasked = T.nnet.sigmoid(z)

        if drop_mask is not None:
            rval = drop_mask * unmasked + (1-drop_mask) * V
        else:
            rval = unmasked

        rval.name = 'inpainted_V[unknown_iter]'

        if return_unmasked:
            # recons_cost relies on V_hat_unmasked being the direct
            # output of a sigmoid, so verify that here
            owner = unmasked.owner
            assert owner is not None
            op = owner.op
            assert hasattr(op, 'scalar_op')
            assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
            return rval, unmasked

        return rval

    def recons_cost(self, V, V_hat_unmasked, drop_mask = None, use_sum=False):
        """
        Mean (masked) binary cross-entropy between V and the
        reconstruction.  The cost is computed from the pre-sigmoid
        input z recovered from V_hat_unmasked's graph (numerically
        stable via softplus), so V_hat_unmasked must be the direct
        output of a sigmoid (possibly wrapped in block_gradient).
        use_sum is not implemented.
        """
        if use_sum:
            raise NotImplementedError()

        V_hat = V_hat_unmasked

        assert hasattr(V_hat, 'owner')
        owner = V_hat.owner
        assert owner is not None
        op = owner.op
        block_grad = False
        if is_block_gradient(op):
            assert isinstance(op.scalar_op, theano.scalar.Identity)
            block_grad = True
            real, = owner.inputs
            owner = real.owner
            op = owner.op

        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected V_hat_unmasked to be generated by an Elemwise op, got "+str(op)+" of type "+str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        # recover the pre-sigmoid input of the reconstruction
        z ,= owner.inputs
        if block_grad:
            z = block_gradient(z)

        if V.ndim != V_hat.ndim:
            raise ValueError("V and V_hat_unmasked should have same ndim, but are %d and %d." % (V.ndim, V_hat.ndim))
        # stable cross-entropy expressed with softplus of +/- z
        unmasked_cost = V * T.nnet.softplus(-z) + (1 - V) * T.nnet.softplus(z)
        assert unmasked_cost.ndim == V_hat.ndim

        if drop_mask is None:
            masked_cost = unmasked_cost
        else:
            # only the dropped (inpainted) entries contribute
            masked_cost = drop_mask * unmasked_cost

        return masked_cost.mean()
class BinaryVectorMaxPool(HiddenLayer):
"""
A hidden layer that does max-pooling on binary vectors.
It has two sublayers, the detector layer and the pooling
layer. The detector layer is its downward state and the pooling
layer is its upward state.
Parameters
----------
detector_layer_dim : int
Number of units in the detector layer
pool_size : int
Number of detector units per pooling unit
(Pools are disjoint)
layer_name : str
Name of the layer
irange : float
If specified, initialize the weights in U(-irange, irange)
include_prob : , optional
Probability of including a weight element in the set of weights
initialized to U(-irange, irange). If not included it is
initialized to 0.
sparse_init : int
If specified, initialize this many weights in each column
to be nonzero.
sparse_stdev : float
When using sparse_init, the non-zero weights are drawn from
a Gaussian distribution with mean 0 and standard deviation
`sparse_stdev`
init_bias : float or ndarray
Initialize the biases to this value
W_lr_scale : float
Multiply the learning rate on the weights by this number
b_lr_scale : float
Multiply the learning rate on the biases by this number
center : bool
If True, use Gregoire Montavon's centering trick
mask_weights : WRITEME
max_col_norm : float
Constrain the columns of the weight matrix to have at most
this norm
copies : int
See BinaryVector docstring for explanation
"""
# TODO: this layer uses (pooled, detector) as its total state,
# which can be confusing when listing all the states in
# the network left to right. Change this and
# pylearn2.expr.probabilistic_max_pooling to use
# (detector, pooled)
def __init__(self,
detector_layer_dim,
pool_size,
layer_name,
irange = None,
sparse_init = None,
sparse_stdev = 1.,
include_prob = 1.0,
init_bias = 0.,
W_lr_scale = None,
b_lr_scale = None,
center = False,
mask_weights = None,
max_col_norm = None,
copies = 1):
super(BinaryVectorMaxPool, self).__init__()
self.__dict__.update(locals())
del self.self
self.b = sharedX( np.zeros((self.detector_layer_dim,)) + init_bias, name = layer_name + '_b')
if self.center:
if self.pool_size != 1:
raise NotImplementedError()
self.offset = sharedX(sigmoid_numpy(self.b.get_value()))
if max_col_norm is not None:
self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0))
def get_lr_scalers(self):
    """
    Return a dictionary mapping parameters to learning-rate
    multipliers. Only parameters whose scale is configured (not None)
    appear in the result.
    """
    # Patch old pickle files that predate these attributes.
    for attr in ('W_lr_scale', 'b_lr_scale'):
        if not hasattr(self, attr):
            setattr(self, attr, None)
    scalers = OrderedDict()
    if self.W_lr_scale is not None:
        weights, = self.transformer.get_params()
        scalers[weights] = self.W_lr_scale
    if self.b_lr_scale is not None:
        scalers[self.b] = self.b_lr_scale
    return scalers
def set_input_space(self, space):
    """
    Configure the layer for the given input space and initialize the
    weight matrix.

    Notes
    -----
    This resets parameters!
    """
    self.input_space = space
    # Non-vector input spaces are flattened into a vector before the
    # linear transform is applied.
    if isinstance(space, VectorSpace):
        self.requires_reformat = False
        self.input_dim = space.dim
    else:
        self.requires_reformat = True
        self.input_dim = space.get_total_dimension()
        self.desired_space = VectorSpace(self.input_dim)
    # Detectors are partitioned into disjoint pools, so the detector
    # count must be a multiple of the pool size.
    if not (self.detector_layer_dim % self.pool_size == 0):
        raise ValueError("detector_layer_dim = %d, pool_size = %d. Should be divisible but remainder is %d" %
                (self.detector_layer_dim, self.pool_size, self.detector_layer_dim % self.pool_size))
    self.h_space = VectorSpace(self.detector_layer_dim)
    self.pool_layer_dim = self.detector_layer_dim / self.pool_size
    self.output_space = VectorSpace(self.pool_layer_dim)
    rng = self.dbm.rng
    if self.irange is not None:
        # Dense initialization: U(-irange, irange), with each weight
        # kept only with probability include_prob (others set to 0).
        assert self.sparse_init is None
        W = rng.uniform(-self.irange,
                self.irange,
                (self.input_dim, self.detector_layer_dim)) * \
            (rng.uniform(0.,1., (self.input_dim, self.detector_layer_dim))
                < self.include_prob)
    else:
        # Sparse initialization: exactly sparse_init nonzero Gaussian
        # entries per column (per detector unit).
        assert self.sparse_init is not None
        W = np.zeros((self.input_dim, self.detector_layer_dim))
        def mask_rejects(idx, i):
            # True if the weight mask forces position (idx, i) to zero.
            if self.mask_weights is None:
                return False
            return self.mask_weights[idx, i] == 0.
        for i in xrange(self.detector_layer_dim):
            assert self.sparse_init <= self.input_dim
            for j in xrange(self.sparse_init):
                idx = rng.randint(0, self.input_dim)
                # Resample until we find a free, unmasked position.
                while W[idx, i] != 0 or mask_rejects(idx, i):
                    idx = rng.randint(0, self.input_dim)
                W[idx, i] = rng.randn()
        W *= self.sparse_stdev
    W = sharedX(W)
    W.name = self.layer_name + '_W'
    self.transformer = MatrixMul(W)
    W ,= self.transformer.get_params()
    assert W.name is not None
    if self.mask_weights is not None:
        expected_shape = (self.input_dim, self.detector_layer_dim)
        if expected_shape != self.mask_weights.shape:
            raise ValueError("Expected mask with shape "+str(expected_shape)+" but got "+str(self.mask_weights.shape))
        self.mask = sharedX(self.mask_weights)
@functools.wraps(Model._modify_updates)
def _modify_updates(self, updates):
    """
    Apply the weight mask (if any) to the pending update for W so that
    masked-out weights never change.
    """
    # Patch old pickle files that lack these attributes.
    for missing in ('mask_weights', 'max_col_norm'):
        if not hasattr(self, missing):
            setattr(self, missing, None)
    if self.mask_weights is None:
        return
    weights, = self.transformer.get_params()
    if weights in updates:
        updates[weights] = updates[weights] * self.mask
def get_total_state_space(self):
    """
    Return the composite space (pooled output space, detector space)
    that holds this layer's full state.
    """
    components = (self.output_space, self.h_space)
    return CompositeSpace(components)
def get_params(self):
    """
    Return the learnable parameters: the weight matrix followed by
    the detector biases.
    """
    assert self.b.name is not None
    weights, = self.transformer.get_params()
    assert weights.name is not None
    transformer_params = self.transformer.get_params()
    assert not isinstance(transformer_params, set)
    params = list(transformer_params)
    assert self.b not in params
    params.append(self.b)
    return params
def get_weight_decay(self, coeff):
    """
    Return `coeff` times the sum of squared weights, as a theano
    expression suitable for use as an L2 weight-decay penalty.
    """
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
    weights, = self.transformer.get_params()
    return coeff * T.sqr(weights).sum()
def get_weights(self):
    """
    Return the weight matrix as a numpy array.

    Raises
    ------
    NotImplementedError
        If the input had to be reformatted from a topological space:
        we do not know how to lay the weights out in design space
        here, since we have no access to the dataset.
    """
    if self.requires_reformat:
        raise NotImplementedError()
    weights, = self.transformer.get_params()
    return weights.get_value()
def set_weights(self, weights):
    """
    Overwrite the weight matrix with `weights` (a numpy array).
    """
    param, = self.transformer.get_params()
    param.set_value(weights)
def set_biases(self, biases, recenter = False):
    """
    Overwrite the detector biases.

    Parameters
    ----------
    biases : numpy array
        New values for the bias vector.
    recenter : bool, optional
        If True (requires centering to be enabled and pool_size == 1),
        recompute the centering offset as sigmoid(b).
    """
    self.b.set_value(biases)
    if not recenter:
        return
    assert self.center
    if self.pool_size != 1:
        raise NotImplementedError()
    self.offset.set_value(sigmoid_numpy(self.b.get_value()))
def get_biases(self):
    """
    Return the current detector bias vector as a numpy array.
    """
    bias_values = self.b.get_value()
    return bias_values
def get_weights_format(self):
    """
    Return the axis semantics of the weight matrix: rows index the
    visible units ('v'), columns index the hidden units ('h').
    """
    fmt = ('v', 'h')
    return fmt
def get_weights_view_shape(self):
    """
    Return (rows, cols) for arranging weight visualizations: one row
    per pooling unit, one column per detector within the pool.

    Raises
    ------
    NotImplementedError
        When pool_size == 1; the PatchViewer should decide the
        arrangement for unpooled units.
    """
    num_detectors = self.detector_layer_dim
    pool_width = self.pool_size
    if pool_width == 1:
        raise NotImplementedError()
    # One row per pool. NOTE(review): written for Python 2, where `/`
    # on ints is floor division.
    num_pools = num_detectors / pool_width
    return num_pools, pool_width
def get_weights_topo(self):
    """
    Return the weights as a topological numpy array in
    ('b', 0, 1, 'c') axis order, one "example" per detector unit,
    for visualization. Only valid for Conv2DSpace inputs.
    """
    if not isinstance(self.input_space, Conv2DSpace):
        raise NotImplementedError()
    W ,= self.transformer.get_params()
    W = W.T
    # Reshape each column of W into the shape of one input image.
    W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
        self.input_space.shape[1], self.input_space.num_channels))
    W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
    # Evaluate the symbolic conversion to get a concrete numpy array.
    return function([], W)()
def upward_state(self, total_state):
    """
    Return the state this layer presents to the layer above.

    `total_state` is a (pooled, detector) pair; only the pooled state
    propagates upward. With centering enabled the offset is
    subtracted; otherwise the state is scaled by `copies`.
    """
    pooled, detector = total_state
    self.h_space.validate(detector)
    self.output_space.validate(pooled)
    # Patch old pickle files missing the centering flag.
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        return pooled - self.offset
    # Patch old pickle files missing the copies attribute.
    if not hasattr(self, 'copies'):
        self.copies = 1
    return pooled * self.copies
def downward_state(self, total_state):
    """
    Return the state this layer presents to the layer below: the
    detector state, centered if enabled, otherwise scaled by `copies`.
    """
    _pooled, detector = total_state
    # Patch old pickle files missing the centering flag.
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        return detector - self.offset
    return detector * self.copies
def get_monitoring_channels(self):
    """
    Return monitoring channels reporting the min/mean/max of the row
    and column norms of the weight matrix.
    """
    weights, = self.transformer.get_params()
    assert weights.ndim == 2
    squared = T.sqr(weights)
    row_norms = T.sqrt(squared.sum(axis=1))
    col_norms = T.sqrt(squared.sum(axis=0))
    channels = OrderedDict()
    channels['row_norms_min'] = row_norms.min()
    channels['row_norms_mean'] = row_norms.mean()
    channels['row_norms_max'] = row_norms.max()
    channels['col_norms_min'] = col_norms.min()
    channels['col_norms_mean'] = col_norms.mean()
    channels['col_norms_max'] = col_norms.max()
    return channels
def get_monitoring_channels_from_state(self, state):
    """
    Return monitoring channels (min/mean/max/range statistics over
    examples and units) for the pooled and detector states.
    """
    P, H = state
    rval = OrderedDict()
    if self.pool_size == 1:
        # If the pool size is 1 the pooled and detector states coincide,
        # so only report one set of statistics (no prefix).
        vars_and_prefixes = [ (P,'') ]
    else:
        vars_and_prefixes = [ (P, 'p_'), (H, 'h_') ]
    for var, prefix in vars_and_prefixes:
        v_max = var.max(axis=0)
        v_min = var.min(axis=0)
        v_mean = var.mean(axis=0)
        v_range = v_max - v_min
        # max_x.mean_u is "the mean over *u*nits of the max over e*x*amples"
        # The x and u are included in the name because otherwise its hard
        # to remember which axis is which when reading the monitor
        # I use inner.outer rather than outer_of_inner or something like that
        # because I want mean_x.* to appear next to each other in the alphabetical
        # list, as these are commonly plotted together
        for key, val in [
                ('max_x.max_u', v_max.max()),
                ('max_x.mean_u', v_max.mean()),
                ('max_x.min_u', v_max.min()),
                ('min_x.max_u', v_min.max()),
                ('min_x.mean_u', v_min.mean()),
                ('min_x.min_u', v_min.min()),
                ('range_x.max_u', v_range.max()),
                ('range_x.mean_u', v_range.mean()),
                ('range_x.min_u', v_range.min()),
                ('mean_x.max_u', v_mean.max()),
                ('mean_x.mean_u', v_mean.mean()),
                ('mean_x.min_u', v_mean.min())
                ]:
            rval[prefix+key] = val
    return rval
def get_stdev_rewards(self, state, coeffs):
    """
    Return a reward term encouraging each unit's standard deviation
    (over the batch) to approach 0.5, weighted by `coeffs`.

    Parameters
    ----------
    state : tuple
        (pooled, detector) batch of states.
    coeffs : float or pair of floats
        Weight for each state component; a single float is only
        allowed when pool_size == 1.
    """
    rval = 0.
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        # If the pool size is 1 then pools = detectors
        # and we should not penalize pools and detectors separately
        assert len(state) == 2
        if isinstance(coeffs, str):
            coeffs = float(coeffs)
        assert isinstance(coeffs, float)
        _, state = state
        state = [state]
        coeffs = [coeffs]
    else:
        assert all([len(elem) == 2 for elem in [state, coeffs]])
    for s, c in safe_zip(state, coeffs):
        assert all([isinstance(elem, float) for elem in [c]])
        if c == 0.:
            continue
        # Per-unit standard deviation over the batch axis.
        mn = s.mean(axis=0)
        dev = s - mn
        stdev = T.sqrt(T.sqr(dev).mean(axis=0))
        rval += (0.5 - stdev).mean()*c
    return rval
def get_range_rewards(self, state, coeffs):
    """
    Return a reward term encouraging each unit's range (max minus min
    over the batch) to approach 1, weighted by `coeffs`.

    Parameters
    ----------
    state : tuple
        (pooled, detector) batch of states.
    coeffs : float or pair of floats
        Weight for each state component; a single float is only
        allowed when pool_size == 1.
    """
    rval = 0.
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        # If the pool size is 1 then pools = detectors
        # and we should not penalize pools and detectors separately
        assert len(state) == 2
        if isinstance(coeffs, str):
            coeffs = float(coeffs)
        assert isinstance(coeffs, float)
        _, state = state
        state = [state]
        coeffs = [coeffs]
    else:
        assert all([len(elem) == 2 for elem in [state, coeffs]])
    for s, c in safe_zip(state, coeffs):
        assert all([isinstance(elem, float) for elem in [c]])
        if c == 0.:
            continue
        # max/min must be differentiable for this to work as a cost.
        mx = s.max(axis=0)
        assert hasattr(mx.owner.op, 'grad')
        assert mx.ndim == 1
        mn = s.min(axis=0)
        assert hasattr(mn.owner.op, 'grad')
        assert mn.ndim == 1
        r = mx - mn
        rval += (1 - r).mean()*c
    return rval
def get_l1_act_cost(self, state, target, coeff, eps = None):
    """
    Return an L1 activation penalty: the mean over units of
    max(|mean activation - target| - eps, 0), weighted by `coeff`.

    Parameters
    ----------
    state : tuple
        (pooled, detector) batch of states.
    target : float or pair of floats
        Desired mean activation; a single float is only allowed when
        pool_size == 1.
    coeff : float or pair of floats
        Weight on the penalty for each state component.
    eps : float or pair of floats, optional
        Tolerance around the target that incurs no penalty.
    """
    rval = 0.
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        # If the pool size is 1 then pools = detectors
        # and we should not penalize pools and detectors separately
        assert len(state) == 2
        if not isinstance(target, float):
            raise TypeError("BinaryVectorMaxPool.get_l1_act_cost expected target of type float " + \
                    " but an instance named "+self.layer_name + " got target "+str(target) + " of type "+str(type(target)))
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        _, state = state
        state = [state]
        target = [target]
        coeff = [coeff]
        if eps is None:
            eps = [0.]
        else:
            eps = [eps]
    else:
        assert all([len(elem) == 2 for elem in [state, target, coeff]])
        if eps is None:
            eps = [0., 0.]
        if target[1] > target[0]:
            warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
    for s, t, c, e in safe_zip(state, target, coeff, eps):
        assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c, e]])
        if c == 0.:
            continue
        m = s.mean(axis=0)
        assert m.ndim == 1
        rval += T.maximum(abs(m-t)-e,0.).mean()*c
    return rval
def get_l2_act_cost(self, state, target, coeff):
    """
    Return an L2 activation penalty: the mean over units of the
    squared difference between each unit's mean activation (over the
    batch) and its target, weighted by `coeff`.

    Parameters
    ----------
    state : tuple
        (pooled, detector) batch of states.
    target : float or pair of floats
        Desired mean activation; a single float is only allowed when
        pool_size == 1.
    coeff : float or pair of floats
        Weight on the penalty for each state component.
    """
    rval = 0.
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        # If the pool size is 1 then pools = detectors
        # and we should not penalize pools and detectors separately
        assert len(state) == 2
        if not isinstance(target, float):
            # Bug fix: the message previously named get_l1_act_cost.
            raise TypeError("BinaryVectorMaxPool.get_l2_act_cost expected target of type float " + \
                    " but an instance named "+self.layer_name + " got target "+str(target) + " of type "+str(type(target)))
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        _, state = state
        state = [state]
        target = [target]
        coeff = [coeff]
    else:
        assert all([len(elem) == 2 for elem in [state, target, coeff]])
        if target[1] > target[0]:
            warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
    for s, t, c in safe_zip(state, target, coeff):
        assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c]])
        if c == 0.:
            continue
        m = s.mean(axis=0)
        assert m.ndim == 1
        rval += T.square(m-t).mean()*c
    return rval
def sample(self, state_below = None, state_above = None,
        layer_above = None,
        theano_rng = None):
    """
    Draw (pooled, detector) samples conditioned on the layer below
    and, optionally, a top-down message from the layer above.
    """
    if self.copies != 1:
        raise NotImplementedError()
    if theano_rng is None:
        raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")
    if state_above is not None:
        msg = layer_above.downward_message(state_above)
    else:
        msg = None
    if self.requires_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)
    # Total bottom-up input to the detector units.
    z = self.transformer.lmul(state_below) + self.b
    p, h, p_sample, h_sample = max_pool_channels(z,
            self.pool_size, msg, theano_rng)
    return p_sample, h_sample
def downward_message(self, downward_state):
    """
    Compute the message this layer sends to the layer below: the
    transposed weight transform of the detector state, reformatted
    into the input space if needed, and scaled by `copies`.
    """
    self.h_space.validate(downward_state)
    message = self.transformer.lmul_T(downward_state)
    if self.requires_reformat:
        message = self.desired_space.format_as(message, self.input_space)
    return message * self.copies
def init_mf_state(self):
    """
    Return the initial mean field state: the pooling structure applied
    to the biases alone (no bottom-up input).
    """
    # work around theano bug with broadcasted vectors
    z = T.alloc(0., self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + \
        self.b.dimshuffle('x', 0)
    rval = max_pool_channels(z = z,
            pool_size = self.pool_size)
    return rval
def make_state(self, num_examples, numpy_rng):
    """
    Return shared variables (p_state, h_state) holding an actual
    sampled state (not a mean field state) for this layer, drawn from
    the bias-only distribution.
    """
    """ Returns a shared variable containing an actual state
    (not a mean field state) for this variable.
    """
    # Patch old pickle files missing the copies attribute.
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    empty_input = self.h_space.get_origin_batch(num_examples)
    empty_output = self.output_space.get_origin_batch(num_examples)
    h_state = sharedX(empty_input)
    p_state = sharedX(empty_output)
    theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method="binomial")
    # Detector input from the biases only (no bottom-up signal).
    default_z = T.zeros_like(h_state) + self.b
    p_exp, h_exp, p_sample, h_sample = max_pool_channels(
            z = default_z,
            pool_size = self.pool_size,
            theano_rng = theano_rng)
    assert h_sample.dtype == default_z.dtype
    # Compile and run a function filling the shared variables with a sample.
    f = function([], updates = [
        (p_state , p_sample),
        (h_state , h_sample)
        ])
    f()
    p_state.name = 'p_sample_shared'
    h_state.name = 'h_sample_shared'
    return p_state, h_state
def make_symbolic_state(self, num_examples, theano_rng):
    """
    Return symbolic (pooled, detector) samples for this layer — an
    actual state, not a mean field state — drawn from the bias-only
    distribution.
    """
    """
    Returns a theano symbolic variable containing an actual state
    (not a mean field state) for this variable.
    """
    # Patch old pickle files missing the copies attribute.
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    default_z = T.alloc(self.b, num_examples, self.detector_layer_dim)
    p_exp, h_exp, p_sample, h_sample = max_pool_channels(z=default_z,
                                                         pool_size=self.pool_size,
                                                         theano_rng=theano_rng)
    assert h_sample.dtype == default_z.dtype
    return p_sample, h_sample
def expected_energy_term(self, state, average, state_below, average_below):
    """
    Contribution of this layer to the expected energy:
    -u^T W d - b^T d, where u is the upward state of the layer below
    and d is this layer's downward state. The expression is linear in
    the states, so it is valid for both samples and averages.
    """
    # Don't need to do anything special for centering, upward_state / downward state
    # make it all just work
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)
    downward_state = self.downward_state(state)
    self.h_space.validate(downward_state)
    # Energy function is linear so it doesn't matter if we're averaging or not
    # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
    # and d is the downward state of this layer
    bias_term = T.dot(downward_state, self.b)
    weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)
    rval = -bias_term - weights_term
    assert rval.ndim == 1
    return rval * self.copies
def linear_feed_forward_approximation(self, state_below):
    """
    Used to implement TorontoSparsity. Unclear exactly what properties of
    it are important or how to implement it for other layers.

    Properties it must have: output is same kind of data structure (ie,
    tuple of theano 2-tensors) as mf_update.

    Properties it probably should have for other layer types: an
    infinitesimal change in state_below or the parameters should cause the
    same sign of change in the output of linear_feed_forward_approximation
    and in mf_update

    Should not have any non-linearities that cause the gradient to shrink

    Should disregard top-down feedback

    Parameters
    ----------
    state_below : WRITEME
        Upward state of the layer below.
    """
    # Linear pre-activation; returned twice to mirror the (p, h)
    # structure produced by mf_update.
    z = self.transformer.lmul(state_below) + self.b
    if self.pool_size != 1:
        # Should probably implement sum pooling for the non-pooled version,
        # but in reality it's not totally clear what the right answer is
        raise NotImplementedError()
    return z, z
def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
    """
    One mean field update of this layer given the layer below and,
    optionally, a top-down message from the layer above.

    Parameters
    ----------
    state_below : member of input_space
        Upward state of the layer below.
    state_above : downward state of the layer above, or None
        Used to form a top-down message.
    layer_above : Layer, optional
        Required when `state_above` is given.
    double_weights : bool, optional
        If True, double the bottom-up input.
    iter_name : str, optional
        Tag used when naming the intermediate theano variables.
    """
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
    else:
        msg = None
    if double_weights:
        state_below = 2. * state_below
        state_below.name = self.layer_name + '_'+iter_name + '_2state'
    z = self.transformer.lmul(state_below) + self.b
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    p,h = max_pool_channels(z, self.pool_size, msg)
    p.name = self.layer_name + '_p_' + iter_name
    h.name = self.layer_name + '_h_' + iter_name
    return p, h
class Softmax(HiddenLayer):
    """
    A layer representing a single softmax distribution of a
    set of discrete categories.

    Parameters
    ----------
    n_classes : int
        The number of discrete categories.
    layer_name : str
        The name of the layer.
    irange : float
        If not None, initialize the weights in U(-irange, irange)
    sparse_init : int
        If not None, initialize `sparse_init` weights per column
        to N(0, sparse_istdev^2)
    sparse_istdev : float
        see above
    W_lr_scale : float
        Scale the learning rate on the weights by this amount
    b_lr_scale : float
        Scale the learning rate on the biases by this amount
    max_col_norm : float
        If not None, constrain the columns of the weight matrix
        to have at most this L2 norm
    copies : int
        Make this many copies of the random variables, all sharing
        the same weights. This allows the undirected model to
        behave as if it has asymmetric connections.
    center : bool
        If True, use Gregoire Montavon's centering trick.
    learn_init_inpainting_state : bool
        If True, and using inpainting-based methods (MP-DBM), learn
        a parameter controlling the initial value of the mean field
        state for this layer.
    """

    # Key under which install_presynaptic_outputs stores the shared
    # batch holding the presynaptic (pre-softmax) output.
    presynaptic_name = "presynaptic_Y_hat"

    def __init__(self, n_classes, layer_name, irange = None,
                 sparse_init = None, sparse_istdev = 1., W_lr_scale = None,
                 b_lr_scale = None,
                 max_col_norm = None,
                 copies = 1, center = False,
                 learn_init_inpainting_state = True):
        """
        Construct the layer. See the class docstring for parameter
        documentation. The weight matrix is created later, in
        set_input_space.
        """
        super(Softmax, self).__init__()
        if isinstance(W_lr_scale, str):
            W_lr_scale = float(W_lr_scale)
        # Store every constructor argument as an attribute of the same name.
        self.__dict__.update(locals())
        del self.self
        assert isinstance(n_classes, py_integer_types)
        self.output_space = VectorSpace(n_classes)
        self.b = sharedX( np.zeros((n_classes,)), name = 'softmax_b')
        if self.center:
            # Centering offset: the softmax of the biases.
            b = self.b.get_value()
            self.offset = sharedX(np.exp(b) / np.exp(b).sum())
        if max_col_norm is not None:
            self.extensions.append(MaxL2FilterNorm(max_col_norm, axis=0))

    @functools.wraps(Model.get_lr_scalers)
    def get_lr_scalers(self):
        rval = OrderedDict()
        # Patch old pickle files
        if not hasattr(self, 'W_lr_scale'):
            self.W_lr_scale = None
        if self.W_lr_scale is not None:
            assert isinstance(self.W_lr_scale, float)
            rval[self.W] = self.W_lr_scale
        if not hasattr(self, 'b_lr_scale'):
            self.b_lr_scale = None
        if self.b_lr_scale is not None:
            assert isinstance(self.b_lr_scale, float)
            rval[self.b] = self.b_lr_scale
        return rval

    def get_total_state_space(self):
        """
        Return the space of this layer's total state: just the output
        space (the softmax has no separate pooled/detector structure).
        """
        return self.output_space

    def get_monitoring_channels_from_state(self, state):
        """
        Return channels monitoring the largest class probability per
        example: its mean, max and min over the batch.
        """
        mx = state.max(axis=1)
        return OrderedDict([
                ('mean_max_class' , mx.mean()),
                ('max_max_class' , mx.max()),
                ('min_max_class' , mx.min())
        ])

    def set_input_space(self, space):
        """
        Configure the layer for the given input space and initialize
        the weight matrix. This resets parameters.
        """
        self.input_space = space
        if not isinstance(space, Space):
            raise TypeError("Expected Space, got "+
                    str(space)+" of type "+str(type(space)))
        self.input_dim = space.get_total_dimension()
        self.needs_reformat = not isinstance(space, VectorSpace)
        self.desired_space = VectorSpace(self.input_dim)
        if not self.needs_reformat:
            assert self.desired_space == self.input_space
        rng = self.dbm.rng
        if self.irange is not None:
            # Dense initialization: U(-irange, irange).
            assert self.sparse_init is None
            W = rng.uniform(-self.irange,self.irange, (self.input_dim,self.n_classes))
        else:
            # Sparse initialization: sparse_init nonzero Gaussian
            # entries per class column.
            assert self.sparse_init is not None
            W = np.zeros((self.input_dim, self.n_classes))
            for i in xrange(self.n_classes):
                for j in xrange(self.sparse_init):
                    idx = rng.randint(0, self.input_dim)
                    # Resample until we find a position that is still zero.
                    while W[idx, i] != 0.:
                        idx = rng.randint(0, self.input_dim)
                    W[idx, i] = rng.randn() * self.sparse_istdev
        self.W = sharedX(W, 'softmax_W' )
        self._params = [ self.b, self.W ]

    def get_weights_topo(self):
        """
        Return the weights formatted topologically in ('b', 0, 1, 'c')
        axis order for visualization. Only valid for Conv2DSpace
        inputs.
        """
        if not isinstance(self.input_space, Conv2DSpace):
            raise NotImplementedError()
        desired = self.W.get_value().T
        ipt = self.desired_space.format_as(desired, self.input_space)
        rval = Conv2DSpace.convert_numpy(ipt, self.input_space.axes, ('b', 0, 1, 'c'))
        return rval

    def get_weights(self):
        """
        Return the weight matrix as a numpy array (VectorSpace input
        only).
        """
        if not isinstance(self.input_space, VectorSpace):
            raise NotImplementedError()
        return self.W.get_value()

    def set_weights(self, weights):
        """
        Overwrite the weight matrix with `weights` (a numpy array).
        """
        self.W.set_value(weights)

    def set_biases(self, biases, recenter=False):
        """
        Overwrite the class biases; if `recenter`, recompute the
        centering offset as softmax(biases).
        """
        self.b.set_value(biases)
        if recenter:
            assert self.center
            self.offset.set_value( (np.exp(biases) / np.exp(biases).sum()).astype(self.offset.dtype))

    def get_biases(self):
        """
        Return the bias vector as a numpy array.
        """
        return self.b.get_value()

    def get_weights_format(self):
        """
        Return the axis semantics of the weight matrix: ('v', 'h').
        """
        return ('v', 'h')

    def sample(self, state_below = None, state_above = None,
            layer_above = None,
            theano_rng = None):
        """
        Draw a one-hot sample from the softmax distribution conditioned
        on the layer below.
        """
        if self.copies != 1:
            raise NotImplementedError("need to draw self.copies samples and average them together.")
        if state_above is not None:
            # If you implement this case, also add a unit test for it.
            # Or at least add a warning that it is not tested.
            raise NotImplementedError()
        if theano_rng is None:
            raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")
        self.input_space.validate(state_below)
        # patch old pickle files
        if not hasattr(self, 'needs_reformat'):
            self.needs_reformat = self.needs_reshape
            del self.needs_reshape
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        self.desired_space.validate(state_below)
        z = T.dot(state_below, self.W) + self.b
        h_exp = T.nnet.softmax(z)
        h_sample = theano_rng.multinomial(pvals = h_exp, dtype = h_exp.dtype)
        return h_sample

    def mf_update(self, state_below, state_above = None, layer_above = None, double_weights = False, iter_name = None):
        """
        Mean field update: the softmax of the affine transform of the
        state below. Top-down input and double weights are not
        implemented for this layer.
        """
        if state_above is not None:
            raise NotImplementedError()
        if double_weights:
            raise NotImplementedError()
        self.input_space.validate(state_below)
        # patch old pickle files
        if not hasattr(self, 'needs_reformat'):
            self.needs_reformat = self.needs_reshape
            del self.needs_reshape
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        for value in get_debug_values(state_below):
            if value.shape[0] != self.dbm.batch_size:
                raise ValueError("state_below should have batch size "+str(self.dbm.batch_size)+" but has "+str(value.shape[0]))
        self.desired_space.validate(state_below)
        assert self.W.ndim == 2
        assert state_below.ndim == 2
        b = self.b
        Z = T.dot(state_below, self.W) + b
        rval = T.nnet.softmax(Z)
        for value in get_debug_values(rval):
            assert value.shape[0] == self.dbm.batch_size
        return rval

    def downward_message(self, downward_state):
        """
        Message to the layer below: W^T applied to the softmax state,
        scaled by `copies` and formatted into the input space.
        """
        if not hasattr(self, 'copies'):
            self.copies = 1
        rval = T.dot(downward_state, self.W.T) * self.copies
        rval = self.desired_space.format_as(rval, self.input_space)
        return rval

    def recons_cost(self, Y, Y_hat_unmasked, drop_mask_Y, scale):
        """
        The cost of reconstructing `Y` as `Y_hat`. Specifically,
        the negative log probability.

        This cost is for use with multi-prediction training.

        Parameters
        ----------
        Y : target space batch
            The data labels
        Y_hat_unmasked : target space batch
            The output of this layer's `mf_update`; the predicted
            values of `Y`. Even though the model is only predicting
            the dropped values, we take predictions for all the
            values here.
        drop_mask_Y : 1-D theano tensor
            A batch of 0s/1s, with 1s indicating that variables
            have been dropped, and should be included in the
            reconstruction cost. One indicator per example in the
            batch, since each example in this layer only has one
            random variable in it.
        scale : float
            Multiply the cost by this amount.
            We need to do this because the visible layer also goes into
            the cost. We use the mean over units and examples, so that
            the scale of the cost doesn't change too much with batch
            size or example size.
            We need to multiply this cost by scale to make sure that
            it is put on the same scale as the reconstruction cost
            for the visible units. ie, scale should be 1/nvis
        """
        Y_hat = Y_hat_unmasked
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        # Strip a debugging Print op, if present, to reach the softmax.
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Numerically stable log-softmax computed from the presynaptic z.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        masked = log_prob_of * drop_mask_Y
        assert masked.ndim == 1
        rval = masked.mean() * scale * self.copies
        return - rval

    def init_mf_state(self):
        """
        Initial mean field state: softmax of the biases, broadcast to
        the batch size.
        """
        # Adding T.alloc(0., ...) forces a concrete batch dimension
        # (same broadcast workaround as BinaryVectorMaxPool.init_mf_state).
        rval = T.nnet.softmax(self.b.dimshuffle('x', 0)) + T.alloc(0., self.dbm.batch_size, self.n_classes).astype(config.floatX)
        return rval

    def make_state(self, num_examples, numpy_rng):
        """
        Return a shared variable containing an actual sampled state
        (not a mean field state) for this layer, drawn from
        softmax(b).
        """
        """ Returns a shared variable containing an actual state
        (not a mean field state) for this variable.
        """
        if self.copies != 1:
            raise NotImplementedError("need to make self.copies samples and average them together.")
        t1 = time.time()
        empty_input = self.output_space.get_origin_batch(num_examples)
        h_state = sharedX(empty_input)
        default_z = T.zeros_like(h_state) + self.b
        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")
        h_exp = T.nnet.softmax(default_z)
        h_sample = theano_rng.multinomial(pvals = h_exp, dtype = h_exp.dtype)
        h_state = sharedX( self.output_space.get_origin_batch(
            num_examples))
        t2 = time.time()
        # Compile and run a function filling the shared variable with a sample.
        f = function([], updates = [(
            h_state , h_sample
            )])
        t3 = time.time()
        f()
        t4 = time.time()
        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))
        h_state.name = 'softmax_sample_shared'
        return h_state

    def make_symbolic_state(self, num_examples, theano_rng):
        """
        Return a symbolic variable containing an actual sampled state
        (not a mean field state) for this layer, drawn from
        softmax(b).
        """
        """
        Returns a symbolic variable containing an actual state
        (not a mean field state) for this variable.
        """
        if self.copies != 1:
            raise NotImplementedError("need to make self.copies samples and average them together.")
        default_z = T.alloc(self.b, num_examples, self.n_classes)
        h_exp = T.nnet.softmax(default_z)
        h_sample = theano_rng.multinomial(pvals=h_exp, dtype=h_exp.dtype)
        return h_sample

    def get_weight_decay(self, coeff):
        """
        Return `coeff` times the sum of squared weights (L2 decay
        term).
        """
        if isinstance(coeff, str):
            coeff = float(coeff)
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        return coeff * T.sqr(self.W).sum()

    def upward_state(self, state):
        """
        Return the state as presented upward (centered if the
        centering trick is enabled).
        """
        if self.center:
            return state - self.offset
        return state

    def downward_state(self, state):
        """
        Return the state as presented to the layer below (centered if
        the centering trick is enabled).
        """
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            """TODO: write a unit test verifying that inference or sampling
            below a centered Softmax layer works"""
            return state - self.offset
        return state

    def expected_energy_term(self, state, average, state_below, average_below):
        """
        Contribution of this layer to the expected energy:
        -u^T W d - b^T d, where u is the upward state of the layer
        below and d is this layer's downward state. Linear in the
        states, so valid for both samples and averages.
        """
        if self.center:
            state = state - self.offset
        self.input_space.validate(state_below)
        if self.needs_reformat:
            state_below = self.input_space.format_as(state_below, self.desired_space)
        self.desired_space.validate(state_below)
        # Energy function is linear so it doesn't matter if we're averaging or not
        # Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
        # and d is the downward state of this layer
        bias_term = T.dot(state, self.b)
        weights_term = (T.dot(state_below, self.W) * state).sum(axis=1)
        rval = -bias_term - weights_term
        rval *= self.copies
        assert rval.ndim == 1
        return rval

    def init_inpainting_state(self, Y, noise):
        """
        Initial state for inpainting: either softmax of Gaussian noise
        (when `noise`) or softmax of the biases, with the gradient
        blocked if the initial state is not learned.
        """
        if noise:
            theano_rng = make_theano_rng(None, 2012+10+30, which_method="binomial")
            return T.nnet.softmax(theano_rng.normal(avg=0., size=Y.shape, std=1., dtype='float32'))
        rval = T.nnet.softmax(self.b)
        # Patch old pickle files missing this attribute.
        if not hasattr(self, 'learn_init_inpainting_state'):
            self.learn_init_inpainting_state = 1
        if not self.learn_init_inpainting_state:
            rval = block_gradient(rval)
        return rval

    def install_presynaptic_outputs(self, outputs_dict, batch_size):
        """
        Install a shared batch in `outputs_dict` under
        `self.presynaptic_name` for recording the presynaptic softmax
        input.
        """
        assert self.presynaptic_name not in outputs_dict
        outputs_dict[self.presynaptic_name] = self.output_space.make_shared_batch(batch_size, self.presynaptic_name)
class GaussianVisLayer(VisibleLayer):
"""
Implements a visible layer that is conditionally gaussian with
diagonal variance. The layer lives in a Conv2DSpace.
Parameters
----------
rows, cols, channels : WRITEME
the shape of the space
learn_init_inpainting : bool, optional
WRITEME
nvis : WRITEME
init_beta : WRITEME
the initial value of the precision parameter
min_beta : WRITEME
clip beta so it is at least this big (default 1)
init_mu : WRITEME
the initial value of the mean parameter
tie_beta : WRITEME
None or a string specifying how to tie beta 'locations' = tie beta
across locations, ie beta should be a vector with one elem per channel
tie_mu : WRITEME
None or a string specifying how to tie mu 'locations' = tie mu across
locations, ie mu should be a vector with one elem per channel
bias_from_marginals : WRITEME
beta_lr_scale : WRITEME
axes : tuple
WRITEME
"""
def __init__(self,
        rows = None,
        cols = None,
        learn_init_inpainting_state=True,
        channels = None,
        nvis = None,
        init_beta = 1.,
        min_beta = 1.,
        init_mu = None,
        tie_beta = None,
        tie_mu = None,
        bias_from_marginals = None,
        beta_lr_scale = 'by_sharing',
        axes = ('b', 0, 1, 'c')):
    """
    Construct the layer, either as a topological Conv2DSpace layer
    (rows/cols/channels given) or a flat VectorSpace layer (nvis
    given). See the class docstring for parameter documentation.
    """
    super(type(self), self).__init__()
    warnings.warn("GaussianVisLayer math very faith based, need to finish working through gaussian.lyx")
    # Store every constructor argument as an attribute of the same name.
    self.__dict__.update(locals())
    del self.self
    if bias_from_marginals is not None:
        # Don't keep a reference to the dataset wrapper; only its mean is used.
        del self.bias_from_marginals
        if self.nvis is None:
            raise NotImplementedError()
        assert init_mu is None
        init_mu = bias_from_marginals.X.mean(axis=0)
    if init_mu is None:
        init_mu = 0.
    if nvis is None:
        # Topological configuration.
        assert rows is not None
        assert cols is not None
        assert channels is not None
        self.space = Conv2DSpace(shape=[rows,cols], num_channels=channels, axes=axes)
        # To make GaussianVisLayer compatible with any axis ordering
        self.batch_axis=list(axes).index('b')
        self.axes_to_sum = list(range(len(axes)))
        self.axes_to_sum.remove(self.batch_axis)
    else:
        # Flat vector configuration.
        assert rows is None
        assert cols is None
        assert channels is None
        self.space = VectorSpace(nvis)
        self.axes_to_sum = 1
        self.batch_axis = None
    self.input_space = self.space
    origin = self.space.get_origin()
    beta_origin = origin.copy()
    assert tie_beta in [ None, 'locations']
    if tie_beta == 'locations':
        assert nvis is None
        # One beta per channel, shared across spatial locations.
        beta_origin = np.zeros((self.space.num_channels,))
    self.beta = sharedX(beta_origin + init_beta,name = 'beta')
    assert self.beta.ndim == beta_origin.ndim
    mu_origin = origin.copy()
    assert tie_mu in [None, 'locations']
    if tie_mu == 'locations':
        assert nvis is None
        # One mu per channel, shared across spatial locations.
        mu_origin = np.zeros((self.space.num_channels,))
    self.mu = sharedX( mu_origin + init_mu, name = 'mu')
    assert self.mu.ndim == mu_origin.ndim
def get_monitoring_channels(self):
    """
    Return monitoring channels summarizing the precision parameter
    beta: its min, mean and max.
    """
    channels = OrderedDict()
    channels['beta_min'] = self.beta.min()
    channels['beta_mean'] = self.beta.mean()
    channels['beta_max'] = self.beta.max()
    return channels
def get_params(self):
    """
    Return the layer parameters: beta, plus mu when it exists.
    """
    params = [self.beta]
    if self.mu is not None:
        params.append(self.mu)
    return params
def get_lr_scalers(self):
"""
.. todo::
WRITEME
"""
rval = OrderedDict()
if self.nvis is None:
rows, cols = self.space.shape
num_loc = float(rows * cols)
assert self.tie_beta in [None, 'locations']
if self.beta_lr_scale == 'by_sharing':
if self.tie_beta == 'locations':
assert self.nvis is None
rval[self.beta] = 1. / num_loc
elif self.beta_lr_scale == None:
pass
else:
rval[self.beta] = self.beta_lr_scale
assert self.tie_mu in [None, 'locations']
if self.tie_mu == 'locations':
warn = True
assert self.nvis is None
rval[self.mu] = 1./num_loc
logger.warning("mu lr_scaler hardcoded to 1/sharing")
return rval
    @functools.wraps(Model._modify_updates)
    def _modify_updates(self, updates):
        # Keep the precision parameter beta in [min_beta, 1e6] after any
        # gradient update, preventing degenerate (too small or exploding)
        # precisions.
        if self.beta in updates:
            updated_beta = updates[self.beta]
            updates[self.beta] = T.clip(updated_beta,
                    self.min_beta,1e6)
    def set_biases(self, bias):
        """
        Set the mean parameter ``mu``.

        Parameters
        ----------
        bias : numpy vector
            Vector of size nvis. A fresh shared variable named 'mu' is
            created (the old one is replaced, not updated in place).
        """
        self.mu = sharedX(bias, name = 'mu')
    def broadcasted_mu(self):
        """
        Returns mu, broadcasted to have the same shape as a batch of data
        """
        if self.tie_mu == 'locations':
            # mu holds one value per channel; put the real axis on the
            # channel dimension and broadcast over batch/spatial axes.
            def f(x):
                if x == 'c':
                    return 0
                return 'x'
            axes = [f(ax) for ax in self.axes]
            rval = self.mu.dimshuffle(*axes)
        else:
            assert self.tie_mu is None
            if self.nvis is None:
                # Topological case: mu is a full 3-D tensor; only the
                # batch axis needs broadcasting.
                axes = [0, 1, 2]
                axes.insert(self.axes.index('b'), 'x')
                rval = self.mu.dimshuffle(*axes)
            else:
                # Flat (VectorSpace) case: broadcast over the batch axis.
                rval = self.mu.dimshuffle('x', 0)
        self.input_space.validate(rval)
        return rval
def broadcasted_beta(self):
"""
Returns beta, broadcasted to have the same shape as a batch of data
"""
return self.broadcast_beta(self.beta)
    def broadcast_beta(self, beta):
        """
        Broadcast an arbitrary beta tensor (not necessarily self.beta)
        to the shape of a batch of data.
        """
        """
        Returns beta, broadcasted to have the same shape as a batch of data
        """
        if self.tie_beta == 'locations':
            # beta holds one value per channel; put the real axis on the
            # channel dimension and broadcast everywhere else.
            def f(x):
                if x == 'c':
                    return 0
                return 'x'
            axes = [f(ax) for ax in self.axes]
            rval = beta.dimshuffle(*axes)
        else:
            assert self.tie_beta is None
            if self.nvis is None:
                # Topological case: broadcast only over the batch axis.
                axes = [0, 1, 2]
                axes.insert(self.axes.index('b'), 'x')
                rval = beta.dimshuffle(*axes)
            else:
                # Flat (VectorSpace) case.
                rval = beta.dimshuffle('x', 0)
        self.input_space.validate(rval)
        return rval
    def init_inpainting_state(self, V, drop_mask, noise = False, return_unmasked = False):
        """
        Build the initial guess for inpainting: dropped entries are filled
        with mu (or Gaussian noise when ``noise`` is True), kept entries
        come from V. Returns the inpainted batch, plus the unmasked fill
        values when ``return_unmasked`` is True.
        """
        """for Vv, drop_mask_v in get_debug_values(V, drop_mask):
            assert Vv.ndim == 4
            assert drop_mask_v.ndim in [3,4]
            for i in xrange(drop_mask.ndim):
                if Vv.shape[i] != drop_mask_v.shape[i]:
                    print(Vv.shape)
                    print(drop_mask_v.shape)
                    assert False
        """
        unmasked = self.broadcasted_mu()
        if drop_mask is None:
            # Nothing is masked: the initial guess is just mu everywhere.
            assert not noise
            assert not return_unmasked
            return unmasked
        masked_mu = unmasked * drop_mask
        # Backwards-compatibility default for models pickled before this
        # attribute existed.
        if not hasattr(self, 'learn_init_inpainting_state'):
            self.learn_init_inpainting_state = True
        if not self.learn_init_inpainting_state:
            masked_mu = block_gradient(masked_mu)
        masked_mu.name = 'masked_mu'
        if noise:
            theano_rng = make_theano_rng(None, 42, which_method="binomial")
            unmasked = theano_rng.normal(avg = 0.,
                    std = 1., size = masked_mu.shape,
                    dtype = masked_mu.dtype)
            masked_mu = unmasked * drop_mask
            masked_mu.name = 'masked_noise'
        masked_V  = V  * (1-drop_mask)
        rval = masked_mu + masked_V
        rval.name = 'init_inpainting_state'
        if return_unmasked:
            return rval, unmasked
        return rval
    def expected_energy_term(self, state, average, state_below = None, average_below = None):
        """
        Energy contribution of the visible layer:
        0.5 * beta * (v - mu)^2 summed over all non-batch axes.
        This is the bottom layer, so state_below / average_below must be
        None. Integrating out variational parameters is not supported.
        """
        assert state_below is None
        assert average_below is None
        self.space.validate(state)
        if average:
            raise NotImplementedError(str(type(self))+" doesn't support integrating out variational parameters yet.")
        else:
            rval =  0.5 * (self.beta * T.sqr(state - self.mu)).sum(axis=self.axes_to_sum)
        assert rval.ndim == 1
        return rval
    def inpaint_update(self, state_above, layer_above, drop_mask = None, V = None,
                        return_unmasked = False):
        """
        Mean-field inpainting update for the visible layer: the fill value
        is mu plus the downward message from the layer above; masked
        entries take the fill value, unmasked entries keep V.
        """
        msg = layer_above.downward_message(state_above)
        mu = self.broadcasted_mu()
        z = msg + mu
        z.name = 'inpainting_z_[unknown_iter]'
        if drop_mask is not None:
            rval = drop_mask * z + (1-drop_mask) * V
        else:
            rval = z
        rval.name = 'inpainted_V[unknown_iter]'
        if return_unmasked:
            return rval, z
        return rval
    def sample(self, state_below = None, state_above = None,
                layer_above = None,
                theano_rng = None):
        """
        Sample v ~ N(mu + downward message, 1/beta). As the bottom layer
        this takes no state from below.
        """
        assert state_below is None
        msg = layer_above.downward_message(state_above)
        mu = self.mu
        z = msg + mu
        rval = theano_rng.normal(size = z.shape, avg = z, dtype = z.dtype,
                       std = 1. / T.sqrt(self.beta) )
        return rval
def recons_cost(self, V, V_hat_unmasked, drop_mask = None, use_sum=False):
"""
.. todo::
WRITEME
"""
return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, drop_mask=drop_mask, use_sum=use_sum, beta=self.beta)
def _recons_cost(self, V, V_hat_unmasked, beta, drop_mask=None, use_sum=False):
"""
.. todo::
WRITEME
"""
V_hat = V_hat_unmasked
assert V.ndim == V_hat.ndim
beta = self.broadcasted_beta()
unmasked_cost = 0.5 * beta * T.sqr(V-V_hat) - 0.5*T.log(beta / (2*np.pi))
assert unmasked_cost.ndim == V_hat.ndim
if drop_mask is None:
masked_cost = unmasked_cost
else:
masked_cost = drop_mask * unmasked_cost
if use_sum:
return masked_cost.mean(axis=0).sum()
return masked_cost.mean()
return masked_cost.mean()
def upward_state(self, total_state):
"""
.. todo::
WRITEME
"""
if self.nvis is None and total_state.ndim != 4:
raise ValueError("total_state should have 4 dimensions, has "+str(total_state.ndim))
assert total_state is not None
V = total_state
self.input_space.validate(V)
upward_state = (V - self.broadcasted_mu()) * self.broadcasted_beta()
return upward_state
    def make_state(self, num_examples, numpy_rng):
        """
        Draw an actual sample (not a mean-field state) from the Gaussian
        visible distribution N(mu, 1/beta) and return it as a shared
        variable of shape (num_examples, ...) matching this layer's space.
        """
        shape = [num_examples]
        if self.nvis is None:
            # Topological case: (examples, rows, cols, channels).
            rows, cols = self.space.shape
            channels = self.space.num_channels
            shape.append(rows)
            shape.append(cols)
            shape.append(channels)
        else:
            shape.append(self.nvis)
        sample = numpy_rng.randn(*shape)
        # Scale unit normals by the standard deviation 1/sqrt(beta), then
        # shift by the mean mu.
        sample *= 1./np.sqrt(self.beta.get_value())
        sample += self.mu.get_value()
        rval = sharedX(sample, name = 'v_sample_shared')
        return rval
def install_presynaptic_outputs(self, outputs_dict, batch_size):
"""
.. todo::
WRITEME
"""
outputs_dict['output_V_weighted_pred_sum'] = self.space.make_shared_batch(batch_size)
def ensemble_prediction(self, symbolic, outputs_dict, ensemble):
"""
.. todo::
WRITEME
"""
"""
Output a symbolic expression for V_hat_unmasked based on taking the
geometric mean over the ensemble and renormalizing.
n - 1 members of the ensemble have modified outputs_dict and the nth
gives its prediction in "symbolic". The parameters for the nth one
are currently loaded in the model.
"""
weighted_pred_sum = outputs_dict['output_V_weighted_pred_sum'] \
+ self.broadcasted_beta() * symbolic
beta_sum = sum(ensemble.get_ensemble_variants(self.beta))
unmasked_V_hat = weighted_pred_sum / self.broadcast_beta(beta_sum)
return unmasked_V_hat
def ensemble_recons_cost(self, V, V_hat_unmasked, drop_mask=None,
use_sum=False, ensemble=None):
"""
.. todo::
WRITEME
"""
beta = sum(ensemble.get_ensemble_variants(self.beta)) / ensemble.num_copies
return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, beta=beta, drop_mask=drop_mask,
use_sum=use_sum)
class ConvMaxPool(HiddenLayer):
    """
    Implements a hidden convolutional layer. The layer lives in a Conv2DSpace.

    Parameters
    ----------
    output_channels : int
        Number of convolution kernels / detector channels.
    kernel_rows : int
        Kernel height.
    kernel_cols : int
        Kernel width.
    pool_rows : int
        Pooling region height; must divide the detector height.
    pool_cols : int
        Pooling region width; must divide the detector width.
    layer_name : str
        Name of the layer
    center : bool
        If True, use Gregoire Montavon's centering trick
    irange : float
        If specified, initialize the weights in U(-irange, irange)
    sparse_init : int, optional
        Alternative sparse initialization; exactly one of
        `irange` / `sparse_init` must be given.
    scale_by_sharing : bool
        If True, scale learning rates by 1 / (times each parameter
        is reused spatially).
    init_bias : float
        Initial value of the biases.
    border_mode : str
        'valid' or 'full' convolution.
    output_axes : tuple
        Axis ordering of the detector / pooling spaces.
    """

    def __init__(self,
                 output_channels,
                 kernel_rows,
                 kernel_cols,
                 pool_rows,
                 pool_cols,
                 layer_name,
                 center = False,
                 irange = None,
                 sparse_init = None,
                 scale_by_sharing = True,
                 init_bias = 0.,
                 border_mode = 'valid',
                 output_axes = ('b', 'c', 0, 1)):
        super(ConvMaxPool, self).__init__()
        # Capture all constructor arguments as attributes.
        self.__dict__.update(locals())
        del self.self

        # Exactly one weight-initialization scheme must be chosen.
        assert (irange is None) != (sparse_init is None)

        self.b = sharedX(np.zeros((output_channels,)) + init_bias,
                         name = layer_name + '_b')
        assert border_mode in ['full', 'valid']

    def broadcasted_bias(self):
        """
        Return the bias vector reshaped for broadcasting against a 4-D
        batch, with the real axis on the channel dimension.
        """
        assert self.b.ndim == 1
        shuffle = ['x'] * 4
        shuffle[self.output_axes.index('c')] = 0
        return self.b.dimshuffle(*shuffle)

    def get_total_state_space(self):
        """
        The total state is the pair (detector units, pooling units).
        """
        return CompositeSpace((self.h_space, self.output_space))

    def set_input_space(self, space):
        """
        Configure detector/pool spaces and (re)initialize parameters for
        the given input space. Note: this resets parameters!
        """
        if not isinstance(space, Conv2DSpace):
            raise TypeError("ConvMaxPool can only act on a Conv2DSpace, but received " +
                            str(type(space))+" as input.")
        self.input_space = space
        self.input_rows, self.input_cols = space.shape
        self.input_channels = space.num_channels

        if self.border_mode == 'valid':
            self.h_rows = self.input_rows - self.kernel_rows + 1
            self.h_cols = self.input_cols - self.kernel_cols + 1
        else:
            assert self.border_mode == 'full'
            self.h_rows = self.input_rows + self.kernel_rows - 1
            self.h_cols = self.input_cols + self.kernel_cols - 1

        if not (self.h_rows % self.pool_rows == 0):
            raise ValueError("h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d" %
                             (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
        assert self.h_cols % self.pool_cols == 0

        self.h_space = Conv2DSpace(shape = (self.h_rows, self.h_cols),
                                   num_channels = self.output_channels,
                                   axes = self.output_axes)
        # BUG FIX: integer division keeps the pooled shape integral under
        # Python 3 (true division would produce floats).
        self.output_space = Conv2DSpace(shape = (self.h_rows // self.pool_rows,
                                                 self.h_cols // self.pool_cols),
                                        num_channels = self.output_channels,
                                        axes = self.output_axes)

        logger.info('{0}: detector shape: {1} '
                    'pool shape: {2}'.format(self.layer_name,
                                             self.h_space.shape,
                                             self.output_space.shape))

        # Pick the pooling implementation that matches the axis ordering.
        if tuple(self.output_axes) == ('b', 0, 1, 'c'):
            self.max_pool = max_pool_b01c
        elif tuple(self.output_axes) == ('b', 'c', 0, 1):
            self.max_pool = max_pool
        else:
            raise NotImplementedError()

        if self.irange is not None:
            self.transformer = make_random_conv2D(
                self.irange, input_space = space,
                output_space = self.h_space,
                kernel_shape = (self.kernel_rows, self.kernel_cols),
                batch_size = self.dbm.batch_size,
                border_mode = self.border_mode, rng = self.dbm.rng)
        else:
            self.transformer = make_sparse_random_conv2D(
                self.sparse_init, input_space = space,
                output_space = self.h_space,
                kernel_shape = (self.kernel_rows, self.kernel_cols),
                batch_size = self.dbm.batch_size,
                border_mode = self.border_mode, rng = self.dbm.rng)
        self.transformer._filters.name = self.layer_name + '_W'

        W, = self.transformer.get_params()
        assert W.name is not None

        if self.center:
            # Centering trick: record the mean-field offsets once.
            p_ofs, h_ofs = self.init_mf_state()
            self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
            self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
            f = function([], updates={self.p_offset: p_ofs[0,:,:,:],
                                      self.h_offset: h_ofs[0,:,:,:]})
            f()

    def get_params(self):
        """
        Trainable parameters: convolution filters and biases.
        """
        assert self.b.name is not None
        W, = self.transformer.get_params()
        assert W.name is not None
        return [W, self.b]

    def state_to_b01c(self, state):
        """
        Convert each element of the (p, h) state to ('b', 0, 1, 'c')
        axis ordering; no-op when already in that ordering.
        """
        if tuple(self.output_axes) == ('b', 0, 1, 'c'):
            return state
        return [Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c'))
                for elem in state]

    def get_range_rewards(self, state, coeffs):
        """
        Reward each channel for having a large dynamic range (max - min)
        over batch and spatial positions.
        """
        rval = 0.
        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])
        for s, c in safe_zip(state, coeffs):
            if c == 0.:
                continue
            # Range over everything but the channel index.
            # theano can only take gradient through max if the max is over
            # 1 axis or all axes, so the reduction is unrolled manually.
            assert self.h_space.axes == ('b', 'c', 0, 1)
            assert self.output_space.axes == ('b', 'c', 0, 1)
            mx = s.max(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            # BUG FIX: the inner reductions previously used .max, so `mn`
            # was not the per-channel minimum.
            mn = s.min(axis=3).min(axis=2).min(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mx.ndim == 1
            assert mn.ndim == 1
            r = mx - mn
            rval += (1. - r).mean() * c
        return rval

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        L1 activation penalty driving mean activations toward `target`.

        target: if pools contain more than one element, should be a list
        with two elements; the first element is for the pooling units and
        the second for the detector units.
        """
        rval = 0.
        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(target, float)
            assert isinstance(coeff, float)
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = 0.
            eps = [eps]
        else:
            if eps is None:
                eps = [0., 0.]
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            p_target, h_target = target
            if h_target > p_target and (coeff[0] != 0. and coeff[1] != 0.):
                # note that, within each group, E[p] is the sum of E[h]
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
        for s, t, c, e in safe_zip(state, target, coeff, eps):
            if c == 0.:
                continue
            # Average over everything but the channel index
            m = s.mean(axis=[ax for ax in range(4) if self.output_axes[ax] != 'c'])
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e,0.).mean()*c
        return rval

    def get_lr_scalers(self):
        """
        When scale_by_sharing is on, divide learning rates by the number
        of spatial positions each parameter is reused at.
        """
        if self.scale_by_sharing:
            # scale each learning rate by 1 / # times param is reused
            h_rows, h_cols = self.h_space.shape
            num_h = float(h_rows * h_cols)
            return OrderedDict([(self.transformer._filters, 1. / num_h),
                                (self.b, 1. / num_h)])
        else:
            return OrderedDict()

    def upward_state(self, total_state):
        """
        State passed upward: the pooling units (centered if enabled).
        """
        p, h = total_state
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            p -= self.p_offset
            h -= self.h_offset
        return p

    def downward_state(self, total_state):
        """
        State passed downward: the detector units (centered if enabled).
        """
        p, h = total_state
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            p -= self.p_offset
            h -= self.h_offset
        return h

    def get_monitoring_channels_from_state(self, state):
        """
        Per-channel statistics of the pooling-unit activations.
        """
        P, H = state
        if tuple(self.output_axes) == ('b', 0, 1, 'c'):
            p_max = P.max(axis=(0, 1, 2))
            p_min = P.min(axis=(0, 1, 2))
            p_mean = P.mean(axis=(0, 1, 2))
        else:
            assert tuple(self.output_axes) == ('b', 'c', 0, 1)
            p_max = P.max(axis=(0, 2, 3))
            p_min = P.min(axis=(0, 2, 3))
            p_mean = P.mean(axis=(0, 2, 3))
        p_range = p_max - p_min
        rval = {
            'p_max_max': p_max.max(),
            'p_max_mean': p_max.mean(),
            'p_max_min': p_max.min(),
            'p_min_max': p_min.max(),
            'p_min_mean': p_min.mean(),
            # BUG FIX: this key used to duplicate 'p_min_max', silently
            # shadowing it; the min of the per-channel minima was lost.
            'p_min_min': p_min.min(),
            'p_range_max': p_range.max(),
            'p_range_mean': p_range.mean(),
            'p_range_min': p_range.min(),
            'p_mean_max': p_mean.max(),
            'p_mean_mean': p_mean.mean(),
            'p_mean_min': p_mean.min()
        }
        return rval

    def get_weight_decay(self, coeffs):
        """
        L2 weight decay: coeffs * sum(W^2).
        """
        W, = self.transformer.get_params()
        return coeffs * T.sqr(W).sum()

    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """
        One mean-field update: convolve the state below, add bias and the
        downward message from above, then max-pool. Returns (p, h).
        """
        self.input_space.validate(state_below)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if not hasattr(state_below, 'ndim'):
            raise TypeError("state_below should be a TensorType, got " +
                            str(state_below) + " of type " + str(type(state_below)))
        if state_below.ndim != 4:
            raise ValueError("state_below should have ndim 4, has "+str(state_below.ndim))

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p, h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h

    def sample(self, state_below = None, state_above = None,
               layer_above = None,
               theano_rng = None):
        """
        Sample (p, h) given the layer below and (optionally) the downward
        message from the layer above.
        """
        if state_above is not None:
            msg = layer_above.downward_message(state_above)
            try:
                self.output_space.validate(msg)
            except TypeError as e:
                reraise_as(TypeError(str(type(layer_above))+".downward_message gave something that was not the right type: "+str(e)))
        else:
            msg = None

        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        p, h, p_sample, h_sample = self.max_pool(z,
                (self.pool_rows, self.pool_cols), msg, theano_rng)

        return p_sample, h_sample

    def downward_message(self, downward_state):
        """
        Message to the layer below: transpose convolution of the detector
        units.
        """
        self.h_space.validate(downward_state)
        return self.transformer.lmul_T(downward_state)

    def set_batch_size(self, batch_size):
        """
        Propagate a batch-size change to the convolution transformer.
        """
        self.transformer.set_batch_size(batch_size)

    def get_weights_topo(self):
        """
        Return the filters in (output_channel, rows, cols, input_channel)
        order for visualization.
        """
        outp, inp, rows, cols = range(4)
        raw = self.transformer._filters.get_value()
        return np.transpose(raw, (outp, rows, cols, inp))

    def init_mf_state(self):
        """
        Mean-field state implied by the biases alone (no input).
        """
        default_z = self.broadcasted_bias()
        shape = {
            'b': self.dbm.batch_size,
            0: self.h_space.shape[0],
            1: self.h_space.shape[1],
            'c': self.h_space.num_channels
        }
        # work around theano bug with broadcasted stuff
        default_z += T.alloc(*([0.] + [shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
        assert default_z.ndim == 4

        p, h = self.max_pool(
            z = default_z,
            pool_shape = (self.pool_rows, self.pool_cols))

        return p, h

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable containing an actual state
        (not a mean field state) for this variable.
        """
        t1 = time.time()

        empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
        h_state = sharedX(empty_input)

        default_z = T.zeros_like(h_state) + self.broadcasted_bias()

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")

        p_exp, h_exp, p_sample, h_sample = self.max_pool(
            z = default_z,
            pool_shape = (self.pool_rows, self.pool_cols),
            theano_rng = theano_rng)

        p_state = sharedX(self.output_space.get_origin_batch(
            self.dbm.batch_size))

        t2 = time.time()

        f = function([], updates = [
            (p_state, p_sample),
            (h_state, h_sample)
        ])

        t3 = time.time()

        f()

        t4 = time.time()

        # BUG FIX: the elapsed time was passed to format() but the string
        # had no {1} placeholder, so it was never printed.
        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state

    def expected_energy_term(self, state, average, state_below, average_below):
        """
        Energy contribution -u^T W d - b^T d, where u is the upward state
        of the layer below and d is this layer's downward state.
        """
        self.input_space.validate(state_below)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1, 2, 3))
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1, 2, 3))

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval
class ConvC01B_MaxPool(HiddenLayer):
    """
    Convolutional max-pooling hidden layer using the cuda-convnet
    ('c', 0, 1, 'b') axis ordering. Several methods (`sample`,
    `make_state`, `expected_energy_term`) are not yet ported to c01b and
    raise NotImplementedError.
    """

    def __init__(self,
                 output_channels,
                 kernel_shape,
                 pool_rows,
                 pool_cols,
                 layer_name,
                 center = False,
                 irange = None,
                 sparse_init = None,
                 scale_by_sharing = True,
                 init_bias = 0.,
                 pad = 0,
                 partial_sum = 1):
        # NOTE(review): unlike ConvMaxPool, no super().__init__() call is
        # made here — presumably intentional, but verify against
        # HiddenLayer's requirements.
        self.__dict__.update(locals())
        del self.self

        # Exactly one weight-initialization scheme must be chosen.
        assert (irange is None) != (sparse_init is None)
        self.output_axes = ('c', 0, 1, 'b')
        self.detector_channels = output_channels
        self.tied_b = 1

    def broadcasted_bias(self):
        """
        Return the bias vector reshaped for broadcasting against a 4-D
        batch, with the real axis on the channel dimension.
        """
        if self.b.ndim != 1:
            raise NotImplementedError()
        shuffle = ['x'] * 4
        shuffle[self.output_axes.index('c')] = 0
        return self.b.dimshuffle(*shuffle)

    def get_total_state_space(self):
        """
        The total state is the pair (detector units, pooling units).
        """
        return CompositeSpace((self.h_space, self.output_space))

    def set_input_space(self, space):
        """
        Configure detector/pool spaces and (re)initialize parameters for
        the given input space. Note: this resets parameters!
        """
        setup_detector_layer_c01b(layer=self,
                                  input_space=space,
                                  rng=self.dbm.rng,)

        if not tuple(space.axes) == ('c', 0, 1, 'b'):
            raise AssertionError("You're not using c01b inputs. Ian is enforcing c01b inputs while developing his pipeline to make sure it runs at maximal speed. If you really don't want to use c01b inputs, you can remove this check and things should work. If they don't work it's only because they're not tested.")
        if self.dummy_channels != 0:
            raise NotImplementedError(str(type(self))+" does not support adding dummy channels for cuda-convnet compatibility yet, you must implement that feature or use inputs with <=3 channels or a multiple of 4 channels")

        self.input_rows = self.input_space.shape[0]
        self.input_cols = self.input_space.shape[1]
        self.h_rows = self.detector_space.shape[0]
        self.h_cols = self.detector_space.shape[1]

        if not (self.h_rows % self.pool_rows == 0):
            raise ValueError(self.layer_name + ": h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d" %
                             (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
        assert self.h_cols % self.pool_cols == 0

        self.h_space = Conv2DSpace(shape = (self.h_rows, self.h_cols),
                                   num_channels = self.output_channels,
                                   axes = self.output_axes)
        # BUG FIX: integer division keeps the pooled shape integral under
        # Python 3 (true division would produce floats).
        self.output_space = Conv2DSpace(shape = (self.h_rows // self.pool_rows,
                                                 self.h_cols // self.pool_cols),
                                        num_channels = self.output_channels,
                                        axes = self.output_axes)

        logger.info('{0} : detector shape: {1} '
                    'pool shape: {2}'.format(self.layer_name,
                                             self.h_space.shape,
                                             self.output_space.shape))

        assert tuple(self.output_axes) == ('c', 0, 1, 'b')
        self.max_pool = max_pool_c01b

        if self.center:
            # Centering trick: record the mean-field offsets once.
            p_ofs, h_ofs = self.init_mf_state()
            self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
            self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
            f = function([], updates={self.p_offset: p_ofs[:,:,:,0],
                                      self.h_offset: h_ofs[:,:,:,0]})
            f()

    def get_params(self):
        """
        Trainable parameters: convolution filters and biases.
        """
        assert self.b.name is not None
        W, = self.transformer.get_params()
        assert W.name is not None
        return [W, self.b]

    def state_to_b01c(self, state):
        """
        Convert each element of the (p, h) state to ('b', 0, 1, 'c')
        axis ordering; no-op when already in that ordering.
        """
        if tuple(self.output_axes) == ('b', 0, 1, 'c'):
            return state
        return [Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c'))
                for elem in state]

    def get_range_rewards(self, state, coeffs):
        """
        Reward each channel for having a large dynamic range (max - min)
        over batch and spatial positions.
        """
        rval = 0.
        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(coeffs, float)
            _, state = state
            state = [state]
            coeffs = [coeffs]
        else:
            assert all([len(elem) == 2 for elem in [state, coeffs]])
        for s, c in safe_zip(state, coeffs):
            if c == 0.:
                continue
            # Range over everything but the channel index.
            # theano can only take gradient through max if the max is over
            # 1 axis or all axes, so the reduction is unrolled manually.
            # NOTE(review): these asserts demand ('b', 'c', 0, 1) ordering,
            # but this layer fixes output_axes to ('c', 0, 1, 'b') — this
            # method appears to have been copied from ConvMaxPool and
            # never adapted; confirm before relying on it.
            assert self.h_space.axes == ('b', 'c', 0, 1)
            assert self.output_space.axes == ('b', 'c', 0, 1)
            mx = s.max(axis=3).max(axis=2).max(axis=0)
            assert hasattr(mx.owner.op, 'grad')
            # BUG FIX: the inner reductions previously used .max, so `mn`
            # was not the per-channel minimum.
            mn = s.min(axis=3).min(axis=2).min(axis=0)
            assert hasattr(mn.owner.op, 'grad')
            assert mx.ndim == 1
            assert mn.ndim == 1
            r = mx - mn
            rval += (1. - r).mean() * c
        return rval

    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        L1 activation penalty driving mean activations toward `target`.

        Parameters
        ----------
        state : tuple
            (pooling units, detector units).
        target : float or list
            If pools contain more than one element, a list with two
            elements: first for the pooling units, second for the
            detector units.
        coeff : float or list
            Penalty coefficient(s), matching `target`.
        eps : float, list or None
            Tolerance(s) around the target.
        """
        rval = 0.
        if self.pool_rows == 1 and self.pool_cols == 1:
            # If the pool size is 1 then pools = detectors
            # and we should not penalize pools and detectors separately
            assert len(state) == 2
            assert isinstance(target, float)
            assert isinstance(coeff, float)
            _, state = state
            state = [state]
            target = [target]
            coeff = [coeff]
            if eps is None:
                eps = 0.
            eps = [eps]
        else:
            if eps is None:
                eps = [0., 0.]
            assert all([len(elem) == 2 for elem in [state, target, coeff]])
            p_target, h_target = target
            if h_target > p_target and (coeff[0] != 0. and coeff[1] != 0.):
                # note that, within each group, E[p] is the sum of E[h]
                warnings.warn("Do you really want to regularize the detector units to be more active than the pooling units?")
        for s, t, c, e in safe_zip(state, target, coeff, eps):
            if c == 0.:
                continue
            # Average over everything but the channel index
            m = s.mean(axis=[ax for ax in range(4) if self.output_axes[ax] != 'c'])
            assert m.ndim == 1
            rval += T.maximum(abs(m-t)-e,0.).mean()*c
        return rval

    def get_lr_scalers(self):
        """
        When scale_by_sharing is on, divide learning rates by the number
        of spatial positions each parameter is reused at.
        """
        rval = OrderedDict()
        if self.scale_by_sharing:
            # scale each learning rate by 1 / # times param is reused
            h_rows, h_cols = self.h_space.shape
            num_h = float(h_rows * h_cols)
            rval[self.transformer._filters] = 1. / num_h
            rval[self.b] = 1. / num_h
        return rval

    def upward_state(self, total_state):
        """
        State passed upward: the pooling units (centered if enabled).
        """
        p, h = total_state
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            p -= self.p_offset
            h -= self.h_offset
        return p

    def downward_state(self, total_state):
        """
        State passed downward: the detector units (centered if enabled).
        """
        p, h = total_state
        if not hasattr(self, 'center'):
            self.center = False
        if self.center:
            p -= self.p_offset
            h -= self.h_offset
        return h

    def get_monitoring_channels_from_state(self, state):
        """
        Per-channel statistics of the pooling-unit activations.
        """
        P, H = state
        # Reduce over every axis except the channel axis; for the c01b
        # ordering used by this layer that is axes (1, 2, 3).
        axes = tuple([i for i, ax in enumerate(self.output_axes) if ax != 'c'])
        # BUG FIX: `axes` was computed but the reductions hard-coded
        # axis=(0, 1, 2), which includes the channel axis (axis 0 in c01b)
        # and drops a spatial axis.
        p_max = P.max(axis=axes)
        p_min = P.min(axis=axes)
        p_mean = P.mean(axis=axes)
        p_range = p_max - p_min
        rval = {
            'p_max_max': p_max.max(),
            'p_max_mean': p_max.mean(),
            'p_max_min': p_max.min(),
            'p_min_max': p_min.max(),
            'p_min_mean': p_min.mean(),
            # BUG FIX: this key used to duplicate 'p_min_max', silently
            # shadowing it; the min of the per-channel minima was lost.
            'p_min_min': p_min.min(),
            'p_range_max': p_range.max(),
            'p_range_mean': p_range.mean(),
            'p_range_min': p_range.min(),
            'p_mean_max': p_mean.max(),
            'p_mean_mean': p_mean.mean(),
            'p_mean_min': p_mean.min()
        }
        return rval

    def get_weight_decay(self, coeffs):
        """
        L2 weight decay: coeffs * sum(W^2).
        """
        W, = self.transformer.get_params()
        return coeffs * T.sqr(W).sum()

    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """
        One mean-field update: convolve the state below, add bias and the
        downward message from above, then max-pool. Returns (p, h).
        """
        self.input_space.validate(state_below)

        if iter_name is None:
            iter_name = 'anon'

        if state_above is not None:
            assert layer_above is not None
            msg = layer_above.downward_message(state_above)
            msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
        else:
            msg = None

        if not hasattr(state_below, 'ndim'):
            raise TypeError("state_below should be a TensorType, got " +
                            str(state_below) + " of type " + str(type(state_below)))
        if state_below.ndim != 4:
            raise ValueError("state_below should have ndim 4, has "+str(state_below.ndim))

        if double_weights:
            state_below = 2. * state_below
            state_below.name = self.layer_name + '_'+iter_name + '_2state'
        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        if self.layer_name is not None and iter_name is not None:
            z.name = self.layer_name + '_' + iter_name + '_z'
        p, h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)

        p.name = self.layer_name + '_p_' + iter_name
        h.name = self.layer_name + '_h_' + iter_name

        return p, h

    def sample(self, state_below = None, state_above = None,
               layer_above = None,
               theano_rng = None):
        """
        Not ported to c01b yet; the code after the raise is the intended
        reference implementation, kept for when it is ported.
        """
        raise NotImplementedError("Need to update for C01B")

        if state_above is not None:
            msg = layer_above.downward_message(state_above)
            try:
                self.output_space.validate(msg)
            except TypeError as e:
                reraise_as(TypeError(str(type(layer_above))+".downward_message gave something that was not the right type: "+str(e)))
        else:
            msg = None

        z = self.transformer.lmul(state_below) + self.broadcasted_bias()
        p, h, p_sample, h_sample = self.max_pool(z,
                (self.pool_rows, self.pool_cols), msg, theano_rng)

        return p_sample, h_sample

    def downward_message(self, downward_state):
        """
        Message to the layer below: transpose convolution of the detector
        units.
        """
        self.h_space.validate(downward_state)
        return self.transformer.lmul_T(downward_state)

    def set_batch_size(self, batch_size):
        """
        Propagate a batch-size change to the convolution transformer.
        """
        self.transformer.set_batch_size(batch_size)

    def get_weights_topo(self):
        """
        Return the filters in topological layout for visualization.
        """
        return self.transformer.get_weights_topo()

    def init_mf_state(self):
        """
        Mean-field state implied by the biases alone (no input).
        """
        default_z = self.broadcasted_bias()
        shape = {
            'b': self.dbm.batch_size,
            0: self.h_space.shape[0],
            1: self.h_space.shape[1],
            'c': self.h_space.num_channels
        }
        # work around theano bug with broadcasted stuff
        default_z += T.alloc(*([0.] + [shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
        assert default_z.ndim == 4

        p, h = self.max_pool(
            z = default_z,
            pool_shape = (self.pool_rows, self.pool_cols))

        return p, h

    def make_state(self, num_examples, numpy_rng):
        """
        Returns a shared variable containing an actual state (not a mean
        field state) for this variable. Not ported to c01b yet; the code
        after the raise is the intended reference implementation.
        """
        raise NotImplementedError("Need to update for C01B")

        t1 = time.time()

        empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
        h_state = sharedX(empty_input)

        default_z = T.zeros_like(h_state) + self.broadcasted_bias()

        theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
                                     which_method="binomial")

        p_exp, h_exp, p_sample, h_sample = self.max_pool(
            z = default_z,
            pool_shape = (self.pool_rows, self.pool_cols),
            theano_rng = theano_rng)

        p_state = sharedX(self.output_space.get_origin_batch(
            self.dbm.batch_size))

        t2 = time.time()

        f = function([], updates = [
            (p_state, p_sample),
            (h_state, h_sample)
        ])

        t3 = time.time()

        f()

        t4 = time.time()

        logger.info('{0}.make_state took {1}'.format(self, t4-t1))
        logger.info('\tcompose time: {0}'.format(t2-t1))
        logger.info('\tcompile time: {0}'.format(t3-t2))
        logger.info('\texecute time: {0}'.format(t4-t3))

        p_state.name = 'p_sample_shared'
        h_state.name = 'h_sample_shared'

        return p_state, h_state

    def expected_energy_term(self, state, average, state_below, average_below):
        """
        Energy contribution -u^T W d - b^T d. Not ported to c01b yet; the
        code after the raise is the intended reference implementation.
        """
        raise NotImplementedError("Need to update for C01B")

        self.input_space.validate(state_below)

        downward_state = self.downward_state(state)
        self.h_space.validate(downward_state)

        # Energy function is linear so it doesn't matter if we're averaging or not
        bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1, 2, 3))
        weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1, 2, 3))

        rval = -bias_term - weights_term

        assert rval.ndim == 1

        return rval
class BVMP_Gaussian(BinaryVectorMaxPool):
"""
Like BinaryVectorMaxPool, but must have GaussianVisLayer
as its input. Uses its beta to bias the hidden units appropriately.
See gaussian.lyx
beta is *not* considered a parameter of this layer, it's just an
external factor influencing how this layer behaves.
Gradient can still flow to beta, but it will only be included in
the parameters list if some class other than this layer includes it.
.. todo::
WRITEME : parameter list
"""
    def __init__(self,
            input_layer,
            detector_layer_dim,
            pool_size,
            layer_name,
            irange = None,
            sparse_init = None,
            sparse_stdev = 1.,
            include_prob = 1.0,
            init_bias = 0.,
            W_lr_scale = None,
            b_lr_scale = None,
            center = False,
            mask_weights = None,
            max_col_norm = None,
            copies = 1):
        """
        Construct a BVMP layer bound to a GaussianVisLayer input.
        All arguments except `input_layer` are forwarded unchanged to
        BinaryVectorMaxPool.__init__.
        """
        warnings.warn("BVMP_Gaussian math is very faith-based, need to complete gaussian.lyx")
        # locals() must be captured before any other local variable is
        # created, so it contains exactly self + the constructor args.
        args = locals()

        del args['input_layer']
        del args['self']
        super(BVMP_Gaussian, self).__init__(**args)
        self.input_layer = input_layer
    def get_weights(self):
        """
        Return the weight matrix as a numpy array, optionally scaled by
        the input layer's beta.

        NOTE(review): prompts interactively on stdin ('y'/'n'); unusable
        in non-interactive contexts.
        """
        if self.requires_reformat:
            # This is not really an unimplemented case.
            # We actually don't know how to format the weights
            # in design space. We got the data in topo space
            # and we don't have access to the dataset
            raise NotImplementedError()
        W ,= self.transformer.get_params()
        W = W.get_value()

        x = input("multiply by beta?")
        if x == 'y':
            beta = self.input_layer.beta.get_value()
            return (W.T * beta).T
        assert x == 'n'
        return W
def set_weights(self, weights):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("beta would make get_weights for visualization not correspond to set_weights")
W, = self.transformer.get_params()
W.set_value(weights)
def set_biases(self, biases, recenter = False):
"""
.. todo::
WRITEME
"""
self.b.set_value(biases)
if recenter:
assert self.center
if self.pool_size != 1:
raise NotImplementedError()
self.offset.set_value(sigmoid_numpy(self.b.get_value()))
def get_biases(self):
"""
.. todo::
WRITEME
"""
return self.b.get_value() - self.beta_bias().eval()
def sample(self, state_below = None, state_above = None,
layer_above = None,
theano_rng = None):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("need to account for beta")
if self.copies != 1:
raise NotImplementedError()
if theano_rng is None:
raise ValueError("theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.")
if state_above is not None:
msg = layer_above.downward_message(state_above)
else:
msg = None
if self.requires_reformat:
state_below = self.input_space.format_as(state_below, self.desired_space)
z = self.transformer.lmul(state_below) + self.b
p, h, p_sample, h_sample = max_pool_channels(z,
self.pool_size, msg, theano_rng)
return p_sample, h_sample
def downward_message(self, downward_state):
"""
.. todo::
WRITEME
"""
rval = self.transformer.lmul_T(downward_state)
if self.requires_reformat:
rval = self.desired_space.format_as(rval, self.input_space)
return rval * self.copies
def init_mf_state(self):
"""
.. todo::
WRITEME
"""
# work around theano bug with broadcasted vectors
z = T.alloc(0., self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + \
self.b.dimshuffle('x', 0) + self.beta_bias()
rval = max_pool_channels(z = z,
pool_size = self.pool_size)
return rval
def make_state(self, num_examples, numpy_rng):
"""
.. todo::
WRITEME properly
Returns a shared variable containing an actual state
(not a mean field state) for this variable.
"""
raise NotImplementedError("need to account for beta")
if not hasattr(self, 'copies'):
self.copies = 1
if self.copies != 1:
raise NotImplementedError()
empty_input = self.h_space.get_origin_batch(num_examples)
empty_output = self.output_space.get_origin_batch(num_examples)
h_state = sharedX(empty_input)
p_state = sharedX(empty_output)
theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16),
which_method="binomial")
default_z = T.zeros_like(h_state) + self.b
p_exp, h_exp, p_sample, h_sample = max_pool_channels(
z = default_z,
pool_size = self.pool_size,
theano_rng = theano_rng)
assert h_sample.dtype == default_z.dtype
f = function([], updates = [
(p_state , p_sample),
(h_state , h_sample)
])
f()
p_state.name = 'p_sample_shared'
h_state.name = 'h_sample_shared'
return p_state, h_state
def expected_energy_term(self, state, average, state_below, average_below):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("need to account for beta, and maybe some oether stuff")
# Don't need to do anything special for centering, upward_state / downward state
# make it all just work
self.input_space.validate(state_below)
if self.requires_reformat:
if not isinstance(state_below, tuple):
for sb in get_debug_values(state_below):
if sb.shape[0] != self.dbm.batch_size:
raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
state_below = self.input_space.format_as(state_below, self.desired_space)
downward_state = self.downward_state(state)
self.h_space.validate(downward_state)
# Energy function is linear so it doesn't matter if we're averaging or not
# Specifically, our terms are -u^T W d - b^T d where u is the upward state of layer below
# and d is the downward state of this layer
bias_term = T.dot(downward_state, self.b)
weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)
rval = -bias_term - weights_term
assert rval.ndim == 1
return rval * self.copies
def linear_feed_forward_approximation(self, state_below):
"""
.. todo::
WRITEME properly
Used to implement TorontoSparsity. Unclear exactly what properties of it are
important or how to implement it for other layers.
Properties it must have:
output is same kind of data structure (ie, tuple of theano 2-tensors)
as mf_update
Properties it probably should have for other layer types:
An infinitesimal change in state_below or the parameters should cause the same sign of change
in the output of linear_feed_forward_approximation and in mf_update
Should not have any non-linearities that cause the gradient to shrink
Should disregard top-down feedback
"""
raise NotImplementedError("need to account for beta")
z = self.transformer.lmul(state_below) + self.b
if self.pool_size != 1:
# Should probably implement sum pooling for the non-pooled version,
# but in reality it's not totally clear what the right answer is
raise NotImplementedError()
return z, z
def beta_bias(self):
"""
.. todo::
WRITEME
"""
W, = self.transformer.get_params()
beta = self.input_layer.beta
assert beta.ndim == 1
return - 0.5 * T.dot(beta, T.sqr(W))
def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
"""
.. todo::
WRITEME
"""
self.input_space.validate(state_below)
if self.requires_reformat:
if not isinstance(state_below, tuple):
for sb in get_debug_values(state_below):
if sb.shape[0] != self.dbm.batch_size:
raise ValueError("self.dbm.batch_size is %d but got shape of %d" % (self.dbm.batch_size, sb.shape[0]))
assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
state_below = self.input_space.format_as(state_below, self.desired_space)
if iter_name is None:
iter_name = 'anon'
if state_above is not None:
assert layer_above is not None
msg = layer_above.downward_message(state_above)
msg.name = 'msg_from_'+layer_above.layer_name+'_to_'+self.layer_name+'['+iter_name+']'
else:
msg = None
if double_weights:
state_below = 2. * state_below
state_below.name = self.layer_name + '_'+iter_name + '_2state'
z = self.transformer.lmul(state_below) + self.b + self.beta_bias()
if self.layer_name is not None and iter_name is not None:
z.name = self.layer_name + '_' + iter_name + '_z'
p,h = max_pool_channels(z, self.pool_size, msg)
p.name = self.layer_name + '_p_' + iter_name
h.name = self.layer_name + '_h_' + iter_name
return p, h
class CompositeLayer(HiddenLayer):
    """
    A Layer constructing by aligning several other Layer
    objects side by side

    Parameters
    ----------
    components : list of HiddenLayer
        A list of layers that are combined to form this layer
    inputs_to_components : None or dict mapping int to list of int
        Should be None unless the input space is a CompositeSpace
        If inputs_to_components[i] contains j, it means input i will
        be given as input to component j.
        If an input does not appear in the dictionary, it will be given
        to all components.

        This field allows one CompositeLayer to have another as input
        without forcing each component to connect to all members
        of the CompositeLayer below. For example, you might want to
        have both densely connected and convolutional units in all
        layers, but a convolutional unit is incapable of taking a
        non-topological input space.
    """
    def __init__(self, layer_name, components, inputs_to_components = None):
        # See the class docstring for the meaning of the parameters.
        self.layer_name = layer_name
        self.components = list(components)
        assert isinstance(components, list)
        for component in components:
            assert isinstance(component, HiddenLayer)
        self.num_components = len(components)
        # NOTE(review): self.components was already assigned above; this
        # second assignment is redundant.
        self.components = list(components)
        if inputs_to_components is None:
            self.inputs_to_components = None
        else:
            if not isinstance(inputs_to_components, dict):
                raise TypeError("CompositeLayer expected inputs_to_components to be a dict, got "+str(type(inputs_to_components)))
            # Copy the mapping into an OrderedDict, validating that keys
            # are non-negative input indices and values are lists of
            # valid component indices.
            self.inputs_to_components = OrderedDict()
            for key in inputs_to_components:
                assert isinstance(key, int)
                assert key >= 0
                value = inputs_to_components[key]
                assert isinstance(value, list)
                assert all([isinstance(elem, int) for elem in value])
                assert min(value) >= 0
                assert max(value) < self.num_components
                self.inputs_to_components[key] = list(value)
    def set_input_space(self, space):
        """
        Records the input space and precomputes the routing needed to
        feed each component only its requested subset of the inputs,
        then sets each component's (possibly restricted) input space.

        Parameters
        ----------
        space : Space
            The input space. Routing only applies when this is a
            CompositeSpace.
        """
        self.input_space = space
        if not isinstance(space, CompositeSpace):
            assert self.inputs_to_components is None
            self.routing_needed = False
        else:
            if self.inputs_to_components is None:
                self.routing_needed = False
            else:
                self.routing_needed = True
                assert max(self.inputs_to_components) < space.num_components
                # Invert the dictionary
                self.components_to_inputs = OrderedDict()
                for i in xrange(self.num_components):
                    inputs = []
                    for j in xrange(space.num_components):
                        # NOTE(review): raises KeyError when input j is
                        # absent from inputs_to_components, although the
                        # class docstring says absent inputs go to all
                        # components -- confirm intended behavior.
                        if i in self.inputs_to_components[j]:
                            # NOTE(review): appends the component index i
                            # rather than the input index j; looks
                            # suspicious -- verify against
                            # restrict()/restrict_batch() usage.
                            inputs.append(i)
                    if len(inputs) < space.num_components:
                        self.components_to_inputs[i] = inputs
        for i, component in enumerate(self.components):
            if self.routing_needed and i in self.components_to_inputs:
                cur_space = space.restrict(self.components_to_inputs[i])
            else:
                cur_space = space
            component.set_input_space(cur_space)
        self.output_space = CompositeSpace([ component.get_output_space() for component in self.components ])
    def make_state(self, num_examples, numpy_rng):
        """
        Returns a tuple with one sampled state per component.
        """
        return tuple(component.make_state(num_examples, numpy_rng) for
                component in self.components)
    def get_total_state_space(self):
        """
        Returns the CompositeSpace of the components' total state spaces.
        """
        return CompositeSpace([component.get_total_state_space() for component in self.components])
    def set_batch_size(self, batch_size):
        """
        Propagates the batch size to every component.
        """
        for component in self.components:
            component.set_batch_size(batch_size)
    def set_dbm(self, dbm):
        """
        Propagates the owning DBM to every component.
        """
        for component in self.components:
            component.set_dbm(dbm)
    def mf_update(self, state_below, state_above, layer_above = None, double_weights = False, iter_name = None):
        """
        Runs each component's mf_update and returns the results as a
        tuple. When routing is configured, each component receives only
        its restricted slice of state_below; top-down messages from
        layer_above are unpacked per component via a small adapter.
        """
        rval = []
        for i, component in enumerate(self.components):
            if self.routing_needed and i in self.components_to_inputs:
                cur_state_below =self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
            else:
                cur_state_below = state_below
            # Adapter that makes layer_above look like a layer whose
            # downward message is just the i-th component of the real
            # layer's downward message.
            class RoutingLayer(object):
                def __init__(self, idx, layer):
                    self.__dict__.update(locals())
                    del self.self
                    self.layer_name = 'route_'+str(idx)+'_'+layer.layer_name
                def downward_message(self, state):
                    return self.layer.downward_message(state)[self.idx]
            if layer_above is not None:
                cur_layer_above = RoutingLayer(i, layer_above)
            else:
                cur_layer_above = None
            mf_update = component.mf_update(state_below = cur_state_below,
                    state_above = state_above,
                    layer_above = cur_layer_above,
                    double_weights = double_weights,
                    iter_name = iter_name)
            rval.append(mf_update)
        return tuple(rval)
    def init_mf_state(self):
        """
        Returns a tuple with each component's initial mean-field state.
        """
        return tuple([component.init_mf_state() for component in self.components])
    def get_weight_decay(self, coeffs):
        """
        Returns the sum of each component's weight decay, with one
        coefficient per component.
        """
        return sum([component.get_weight_decay(coeff) for component, coeff
            in safe_zip(self.components, coeffs)])
    def upward_state(self, total_state):
        """
        Maps upward_state over the (component, state) pairs.
        """
        return tuple([component.upward_state(elem)
            for component, elem in
            safe_zip(self.components, total_state)])
    def downward_state(self, total_state):
        """
        Maps downward_state over the (component, state) pairs.
        """
        return tuple([component.downward_state(elem)
            for component, elem in
            safe_zip(self.components, total_state)])
    def downward_message(self, downward_state):
        """
        Combines the components' downward messages into one message per
        input, summing contributions that target the same input.

        Returns a single message when the input space is not composite,
        otherwise a tuple with one entry per input component.
        """
        if isinstance(self.input_space, CompositeSpace):
            num_input_components = self.input_space.num_components
        else:
            num_input_components = 1
        rval = [ None ] * num_input_components
        # None acts as the additive identity so missing contributions
        # don't need special-casing.
        def add(x, y):
            if x is None:
                return y
            if y is None:
                return x
            return x + y
        for i, packed in enumerate(safe_zip(self.components, downward_state)):
            component, state = packed
            if self.routing_needed and i in self.components_to_inputs:
                input_idx = self.components_to_inputs[i]
            else:
                input_idx = range(num_input_components)
            partial_message = component.downward_message(state)
            if len(input_idx) == 1:
                partial_message = [ partial_message ]
            assert len(input_idx) == len(partial_message)
            for idx, msg in safe_zip(input_idx, partial_message):
                rval[idx] = add(rval[idx], msg)
        if len(rval) == 1:
            rval = rval[0]
        else:
            rval = tuple(rval)
        self.input_space.validate(rval)
        return rval
    def get_l1_act_cost(self, state, target, coeff, eps):
        """
        Returns the sum of the components' L1 activation costs, with
        per-component state, target, coeff and eps.
        """
        return sum([ comp.get_l1_act_cost(s, t, c, e) \
            for comp, s, t, c, e in safe_zip(self.components, state, target, coeff, eps)])
    def get_range_rewards(self, state, coeffs):
        """
        Returns the sum of the components' range rewards.
        """
        return sum([comp.get_range_rewards(s, c)
            for comp, s, c in safe_zip(self.components, state, coeffs)])
    def get_params(self):
        """
        Returns the union of all components' parameters.
        """
        return reduce(lambda x, y: safe_union(x, y),
                [component.get_params() for component in self.components])
    def get_weights_topo(self):
        """
        Interactively asks (on stdin) which component's topological
        weights to return, then delegates to that component.
        """
        logger.info('Get topological weights for which layer?')
        for i, component in enumerate(self.components):
            logger.info('{0} {1}'.format(i, component.layer_name))
        x = input()
        return self.components[int(x)].get_weights_topo()
    def get_monitoring_channels_from_state(self, state):
        """
        Collects each component's monitoring channels, prefixing every
        channel name with the component's layer_name.
        """
        rval = OrderedDict()
        for layer, s in safe_zip(self.components, state):
            d = layer.get_monitoring_channels_from_state(s)
            for key in d:
                rval[layer.layer_name+'_'+key] = d[key]
        return rval
    def sample(self, state_below = None, state_above = None,
               layer_above = None,
               theano_rng = None):
        """
        Samples each component and returns the samples as a tuple,
        using the same input routing and per-component top-down message
        adapter as mf_update.
        """
        rval = []
        for i, component in enumerate(self.components):
            if self.routing_needed and i in self.components_to_inputs:
                cur_state_below =self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
            else:
                cur_state_below = state_below
            # Same adapter as in mf_update: exposes only the i-th slice
            # of layer_above's downward message.
            class RoutingLayer(object):
                def __init__(self, idx, layer):
                    self.__dict__.update(locals())
                    del self.self
                    self.layer_name = 'route_'+str(idx)+'_'+layer.layer_name
                def downward_message(self, state):
                    return self.layer.downward_message(state)[self.idx]
            if layer_above is not None:
                cur_layer_above = RoutingLayer(i, layer_above)
            else:
                cur_layer_above = None
            sample = component.sample(state_below = cur_state_below,
                    state_above = state_above,
                    layer_above = cur_layer_above,
                    theano_rng = theano_rng)
            rval.append(sample)
        return tuple(rval)
| bsd-3-clause | b4a763d630728f22cf0d394d21c9bdf9 | 29.594906 | 315 | 0.53508 | 4.019698 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/space/__init__.py | 34 | 99992 | """
Classes that define how vector spaces are formatted
Most of our models can be viewed as linearly transforming
one vector space to another. These classes define how the
vector spaces should be represented as theano/numpy
variables.
For example, the VectorSpace class just represents a
vector space with a vector, and the model can transform
between spaces with a matrix multiply. The Conv2DSpace
represents a vector space as an image, and the model
can transform between spaces with a 2D convolution.
To make models as general as possible, models should be
written in terms of Spaces, rather than in terms of
numbers of hidden units, etc. The model should also be
written to transform between spaces using a generic
linear transformer from the pylearn2.linear module.
The Space class is needed so that the model can specify
what kinds of inputs it needs and what kinds of outputs
it will produce when communicating with other parts of
the library. The model also uses Space objects internally
to allocate parameters like hidden unit bias terms in
the right space.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import functools
import warnings
import numpy as np
from theano.compat.six.moves import xrange
import theano
import theano.sparse
from theano import tensor
from theano.tensor import TensorType
from theano.gof.op import get_debug_values
from theano.sandbox.cuda.type import CudaNdarrayType
from pylearn2.utils import py_integer_types, safe_zip, sharedX, wraps
from pylearn2.format.target_format import OneHotFormatter
if theano.sparse.enable_sparse:
# We know scipy.sparse is available
import scipy.sparse
def _is_batch_all(batch, predicate):
"""
Implementation of is_symbolic_batch() and is_numeric_batch().
Returns True iff predicate() returns True for all components of
(possibly composite) batch.
Parameters
----------
batch : any numeric or symbolic batch.
This includes numpy.ndarray, theano.gof.Variable, None, or a (nested)
tuple thereof.
predicate : function.
A unary function of any non-composite batch that returns True or False.
"""
# Catches any CompositeSpace batches that were mistakenly hand-constructed
# using nested lists rather than nested tuples.
assert not isinstance(batch, list)
# Data-less batches such as None or () are valid numeric and symbolic
# batches.
#
# Justification: we'd like
# is_symbolic_batch(space.make_theano_batch()) to always be True, even if
# space is an empty CompositeSpace.
if batch is None or (isinstance(batch, tuple) and len(batch) == 0):
return True
if isinstance(batch, tuple):
subbatch_results = tuple(_is_batch_all(b, predicate)
for b in batch)
result = all(subbatch_results)
# The subbatch_results must be all true, or all false, not a mix.
assert result == any(subbatch_results), ("composite batch had a "
"mixture of numeric and "
"symbolic subbatches. This "
"should never happen.")
return result
else:
return predicate(batch)
def is_symbolic_batch(batch):
    """
    Returns True if batch is a symbolic variable.
    Note that a batch may be both a symbolic and numeric variable
    (e.g. () for empty CompositeSpaces, None for NullSpaces).

    Parameters
    ----------
    batch : a simple or (nested) tuple batch; see _is_batch_all.

    Returns
    -------
    bool
        True iff every non-composite component is a theano.gof.Variable
        (vacuously True for None and the empty tuple).
    """
    return _is_batch_all(batch, lambda x: isinstance(x, theano.gof.Variable))
def is_numeric_batch(batch):
    """
    Returns True if batch is a numeric variable.
    Note that a batch may be both a symbolic and numeric variable
    (e.g. () for empty CompositeSpaces, None for NullSpaces).
    """
    def _is_numeric_leaf(leaf):
        # The 'CudaNdarray' string comparison avoids importing
        # theano.sandbox.cuda when it is not available.
        if isinstance(leaf, np.ndarray):
            return True
        if scipy.sparse.issparse(leaf):
            return True
        return str(type(leaf)) == "<type 'CudaNdarray'>"
    return _is_batch_all(batch, _is_numeric_leaf)
def _dense_to_sparse(batch):
    """
    Casts dense batches to sparse (CSR) batches (non-composite).
    Supports both symbolic and numeric variables.

    Parameters
    ----------
    batch : theano.tensor.TensorVariable or numpy.ndarray
        A dense, non-composite batch.

    Returns
    -------
    A theano.sparse CSR variable for symbolic input, or a
    scipy.sparse.csr_matrix for numeric input.
    """
    if isinstance(batch, tuple):
        raise TypeError("Composite batches not supported.")
    assert not isinstance(batch, list)
    if is_symbolic_batch(batch):
        assert isinstance(batch, theano.tensor.TensorVariable)
        return theano.sparse.csr_from_dense(batch)
    else:
        assert isinstance(batch, np.ndarray), "type of batch: %s" % type(batch)
        return scipy.sparse.csr_matrix(batch)
def _reshape(arg, shape):
    """
    Reshapes a tensor. Supports both symbolic and numeric variables.

    This is a hack that first converts from sparse to dense, reshapes
    the dense tensor, then re-converts from dense to sparse. It is
    therefore memory-inefficient and unsuitable for large tensors. It
    will be replaced by a proper sparse reshaping Op once Theano
    implements that.

    Parameters
    ----------
    arg : numpy.ndarray, theano.tensor.TensorVariable, or \
theano.sparse.SparseVariable
        The (non-composite) batch to reshape.
    shape : tuple
        The target shape.

    Returns
    -------
    The reshaped batch, of the same type (and, for sparse input, the
    same 'csr'/'csc' format) as arg.
    """
    if isinstance(arg, tuple):
        raise TypeError("Composite batches not supported.")
    assert not isinstance(arg, list)
    if isinstance(arg, (np.ndarray, theano.tensor.TensorVariable)):
        # Dense arrays and symbols reshape natively.
        return arg.reshape(shape)
    elif isinstance(arg, theano.sparse.SparseVariable):
        warnings.warn("Using pylearn2.space._reshape(), which is a "
                      "memory-inefficient hack for reshaping sparse tensors. "
                      "Do not use this on large tensors. This will eventually "
                      "be replaced by a proper Theano Op for sparse "
                      "reshaping, once that is written.")
        # Round-trip through a dense representation, preserving the
        # original sparse format.
        dense = theano.sparse.dense_from_sparse(arg)
        dense = dense.reshape(shape)
        if arg.format == 'csr':
            return theano.sparse.csr_from_dense(dense)
        elif arg.format == 'csc':
            return theano.sparse.csc_from_dense(dense)
        else:
            raise ValueError('Unexpected sparse format "%s".' % arg.format)
    else:
        raise TypeError('Unexpected batch type "%s"' % str(type(arg)))
def _cast(arg, dtype):
    """
    Does element-wise casting to dtype.
    Supports symbolic, numeric, simple, and composite batches.

    Returns <arg> untouched if <dtype> is None, or dtype is unchanged
    (i.e. casting a float32 batch to float32).
    (One exception: composite batches are never returned as-is.
    A new tuple will always be returned. However, any components
    with unchanged dtypes will be returned untouched.)

    Parameters
    ----------
    arg : a simple or (nested) tuple batch, numeric or symbolic.
    dtype : str or None
        A theano-recognized dtype string, or None for a no-op.

    Returns
    -------
    The batch cast to dtype (see above for the no-op cases).
    """
    if dtype is None:
        return arg
    assert dtype in tuple(t.dtype for t in theano.scalar.all_types)
    if isinstance(arg, tuple):
        # Composite: cast each component, always returning a new tuple.
        return tuple(_cast(a, dtype) for a in arg)
    elif isinstance(arg, np.ndarray):
        # theano._asarray is a safer drop-in replacement to numpy.asarray.
        return theano._asarray(arg, dtype=dtype)
    elif str(type(arg)) == "<type 'CudaNdarray'>":  # numeric CUDA array
        # CUDA arrays only exist in float32; any other target is an error.
        if str(dtype) != 'float32':
            raise TypeError("Can only cast a numeric CudaNdarray to "
                            "float32, not %s" % dtype)
        else:
            return arg
    elif (isinstance(arg, theano.gof.Variable) and
          isinstance(arg.type, CudaNdarrayType)):  # symbolic CUDA array
        if str(dtype) != 'float32':
            raise TypeError("Can only cast a theano CudaNdArrayType to "
                            "float32, not %s" % dtype)
        else:
            return arg
    elif scipy.sparse.issparse(arg):
        return arg.astype(dtype)
    elif isinstance(arg, theano.tensor.TensorVariable):
        return theano.tensor.cast(arg, dtype)
    elif isinstance(arg, theano.sparse.SparseVariable):
        return theano.sparse.cast(arg, dtype)
    elif isinstance(arg, theano.sandbox.cuda.var.CudaNdarrayVariable):
        # Already a (float32-only) symbolic CUDA batch; nothing to do.
        return arg
    else:
        raise TypeError("Unsupported arg type '%s'" % str(type(arg)))
def _undo_op(arg, string, strict=False):
"""
Undo symbolic op if string is in str(op).
Returns <arg> untouched if there was no symbolic op.
Parameters
----------
arg : any symbolic variable.
string : str
String that specifies op.
strict : bool
Whether to force op undo or not (default False).
"""
if hasattr(arg.owner, 'op'):
owner = arg.owner
if string in str(owner.op):
return owner.inputs[0]
elif strict:
raise ValueError(string + ' not found in op ' +
str(owner.op) + '.')
elif strict:
raise ValueError(string + ' op not found in variable ' +
str(arg) + '.')
return arg
class Space(object):
"""
A vector space that can be transformed by a linear operator.
Space and its subclasses are used to transform a data batch's geometry
(e.g. vectors <--> matrices) and optionally, its dtype (e.g. float <-->
int).
Batches may be one of the following types:
- numpy.ndarray
- scipy.sparse.csr_matrix
- theano.gof.Variable
- None (for NullSpace)
- A (nested) tuple of the above, possibly empty
(for CompositeSpace).
Parameters
----------
validate_callbacks : list
Callbacks that are run at the start of a call to validate.
Each should be a callable with the same signature as validate.
An example use case is installing an instance-specific error
handler that provides extra instructions for how to correct an
input that is in a bad space.
np_validate_callacks : list
similar to validate_callbacks, but run on calls to np_validate
"""
def __init__(self, validate_callbacks=None,
np_validate_callbacks=None):
if validate_callbacks is None:
validate_callbacks = []
if np_validate_callbacks is None:
np_validate_callbacks = []
self.validate_callbacks = validate_callbacks
self.np_validate_callbacks = np_validate_callbacks
    # Forces subclasses to implement __eq__.
    # This is necessary for _format_as to work correctly.
    def __eq__(self, other):
        """
        Returns true iff
        space.format_as(batch, self) and
        space.format_as(batch, other) return the same formatted batch.

        Raises
        ------
        NotImplementedError
            Always, in this base class; every concrete Space must
            override this method.
        """
        raise NotImplementedError("__eq__ not implemented in class %s." %
                                  type(self))
    def get_batch_axis(self):
        """
        Returns the batch axis of the output space.

        Returns
        -------
        batch_axis : int
            the axis of the batch in the output space. Always 0 here;
            subclasses with a different layout may override this.
        """
        return 0
    def __ne__(self, other):
        """
        Returns the negation of __eq__; see __eq__ for the equality
        contract.
        """
        return not (self == other)
    def __repr__(self):
        """
        Delegates to __str__, so subclasses only need to override
        __str__ to customize both representations.
        """
        return str(self)
    @property
    def dtype(self):
        """
        An object representing the data type used by this space.

        For simple spaces, this will be a dtype string, as used by numpy,
        scipy, and theano (e.g. 'float32').
        For data-less spaces like NoneType, this will be some other string.
        For composite spaces, this will be a nested tuple of such strings.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError()
    @dtype.setter
    def dtype(self, new_value):
        """
        Sets the data type used by this space.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError()
    @dtype.deleter
    def dtype(self):
        """
        Deleting the dtype is forbidden; set it to None instead.

        Raises
        ------
        RuntimeError
            Always.
        """
        raise RuntimeError("You may not delete the dtype of a space, "
                           "though you can set it to None.")
    def get_origin(self):
        """
        Returns the origin in this space.

        Returns
        -------
        origin : ndarray
            A NumPy array, the shape of a single point in this
            space, representing the origin.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError()
    def get_origin_batch(self, batch_size, dtype=None):
        """
        Returns a batch containing `batch_size` copies of the origin.

        Parameters
        ----------
        batch_size : int
            The number of examples in the batch to be returned.
        dtype : str, optional
            The dtype of the batch to be returned. Default = None.
            If None, use self.dtype.

        Returns
        -------
        batch : ndarray
            A NumPy array in the shape of a batch of `batch_size` points in
            this space (with points being indexed along the first axis),
            each `batch[i]` being a copy of the origin.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError()
    def make_shared_batch(self, batch_size, name=None, dtype=None):
        """
        Returns a Theano shared variable initialized to a batch of
        `batch_size` copies of the origin.

        Parameters
        ----------
        batch_size : int
            Number of examples in the shared batch.
        name : str, optional
            Name for the returned shared variable.
        dtype : str, optional
            Data type for the batch; normalized by self._clean_dtype_arg
            (defined outside this excerpt; presumably defaults to
            self.dtype when None -- verify).

        Returns
        -------
        A Theano shared variable wrapping the origin batch.
        """
        dtype = self._clean_dtype_arg(dtype)
        origin_batch = self.get_origin_batch(batch_size, dtype)
        return theano.shared(origin_batch, name=name)
    def make_theano_batch(self, name=None, dtype=None, batch_size=None):
        """
        Returns a symbolic variable representing a batch of points
        in this space.

        Parameters
        ----------
        name : str
            Variable name for the returned batch.
        dtype : str
            Data type for the returned batch.
            If omitted (None), self.dtype is used.
        batch_size : int
            Number of examples in the returned batch.

        Returns
        -------
        batch : TensorVariable, SparseVariable, or tuple thereof
            A batch with the appropriate number of dimensions and
            appropriate broadcast flags to represent a batch of
            points in this space.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError()
    def make_batch_theano(self, name=None, dtype=None, batch_size=None):
        """
        An alias to make_theano_batch; see that method for the
        parameter documentation.
        """
        return self.make_theano_batch(name=name,
                                      dtype=dtype,
                                      batch_size=batch_size)
    @wraps(make_theano_batch)
    def get_theano_batch(self, *args, **kwargs):
        # Backward-compatible alias; @wraps copies make_theano_batch's
        # docstring and signature metadata onto this method.
        return self.make_theano_batch(*args, **kwargs)
    def get_total_dimension(self):
        """
        Returns a Python int (not a theano iscalar) representing
        the dimensionality of a point in this space.

        If you format a batch of examples in this space as a
        design matrix (i.e., VectorSpace batch) then the
        number of columns will be equal to the total dimension.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError(str(type(self)) +
                                  " does not implement get_total_dimension.")
    def np_format_as(self, batch, space):
        """
        Returns a numeric batch (e.g. a numpy.ndarray or scipy.sparse
        sparse array), formatted to lie in this space.

        This is just a wrapper around self._format_as, with an extra check
        to throw an exception if <batch> is symbolic.

        Should be invertible, i.e. batch should equal
        `space.format_as(self.format_as(batch, space), self)`

        Parameters
        ----------
        batch : numpy.ndarray, or one of the scipy.sparse matrices.
            Array which lies in this space.
        space : Space
            Target space to format batch to.

        Returns
        -------
        A numeric batch
            The batch formatted to lie in `space`.
        """
        self._check_is_numeric(batch)
        return self._format_as(is_numeric=True,
                               batch=batch,
                               space=space)
def _check_sizes(self, space):
"""
Called by self._format_as(space), to check whether self and space
have compatible sizes. Throws a ValueError if they don't.
"""
my_dimension = self.get_total_dimension()
other_dimension = space.get_total_dimension()
if my_dimension != other_dimension:
raise ValueError(str(self) + " with total dimension " +
str(my_dimension) +
" can't format a batch into " +
str(space) + "because its total dimension is " +
str(other_dimension))
    def format_as(self, batch, space):
        """
        Returns a symbolic batch formatted to lie in the target space.

        Symbolic counterpart of np_format_as: checks that `batch` is
        symbolic, then delegates to _format_as.

        Parameters
        ----------
        batch : a symbolic (Theano) batch lying in this space.
        space : Space
            Target space to format batch to.

        Returns
        -------
        The formatted symbolic batch.
        """
        self._check_is_symbolic(batch)
        return self._format_as(is_numeric=False,
                               batch=batch,
                               space=space)
    def _format_as(self, is_numeric, batch, space):
        """
        The shared implementation of format_as() and np_format_as().
        Agnostic to whether batch is symbolic or numeric, which avoids
        duplicating a lot of code between format_as() and np_format_as().
        Calls the appropriate callbacks, then calls self._format_as_impl().

        Should be invertible, i.e. batch should equal
        `space._format_as(self._format_as(batch, space), self)`

        Parameters
        ----------
        is_numeric : bool
            Set to True to call np_validate_callbacks().
            Set to False to call validate_callbacks().
        batch : a numeric or symbolic batch lying in this space.
        space : Space
            The target space to format batch to.

        Returns
        -------
        The batch, formatted to lie in `space`.
        """
        assert isinstance(is_numeric, bool)
        # Checks if batch belongs to this space
        self._validate(is_numeric, batch)
        # checks if self and space have compatible sizes for formatting.
        self._check_sizes(space)
        return self._format_as_impl(is_numeric, batch, space)
    def _format_as_impl(self, is_numeric, batch, target_space):
        """
        Actual implementation of format_as/np_format_as. Formats batch to
        target_space.

        Should be invertible, i.e. batch should equal
        `space._format_as_impl(self._format_as_impl(batch, space), self)`

        Parameters
        ----------
        is_numeric : bool
            Set to True to treat batch as a numeric batch, False to
            treat it as a symbolic batch. This is necessary because
            sometimes a batch's numeric/symbolicness can be ambiguous,
            i.e. when it's the empty tuple ().
        batch : a numpy.ndarray, scipy.sparse matrix, theano symbol, or a \
nested tuple thereof
            Implementations of this method may assume that batch lies in this
            space (i.e. that it passed self._validate(batch) without throwing
            an exception).
        target_space : A Space subclass
            The space to transform batch into.

        Returns
        -------
        The batch, converted to the target_space.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError("%s does not implement _format_as_impl()." %
                                  type(self))
    def undo_np_format_as(self, batch, space):
        """
        Returns a numeric batch (e.g. a numpy.ndarray or scipy.sparse
        sparse array), with formatting from space undone.

        Implemented simply by asking `space` to format the batch back
        into this space, with an extra check to throw an exception if
        <batch> is not numeric.

        Parameters
        ----------
        batch : numpy.ndarray, or one of the scipy.sparse matrices.
            Array which lies in `space`.
        space : Space
            Space to undo formatting from.

        Returns
        -------
        numpy.ndarray or one of the scipy.sparse matrices
            The batch re-expressed in this space.
        """
        self._check_is_numeric(batch)
        return space.np_format_as(batch=batch,
                                  space=self)
    def undo_format_as(self, batch, space):
        """
        Returns a symbolic batch (e.g. a theano.tensor or theano.sparse
        variable) with the formatting from `space` undone, i.e. the
        batch re-expressed in this space.

        Unlike undo_np_format_as, this checks that <batch> is symbolic,
        delegates to self._undo_format_as_impl, and re-validates the
        result against this space.

        Parameters
        ----------
        batch : a symbolic (Theano) variable.
            Batch which lies in `space`.
        space : Space
            Space to undo formatting from.

        Returns
        -------
        A symbolic Theano variable
            The batch formatted as self.
        """
        self._check_is_symbolic(batch)
        space.validate(batch)
        self._check_sizes(space)
        batch = self._undo_format_as_impl(batch=batch,
                                          space=space)
        # Checks if batch belongs to this space
        self.validate(batch)
        return batch
    def _undo_format_as_impl(self, batch, target_space):
        """
        Actual implementation of undo_format_as.
        Undoes target_space formatting.

        Note that undo_np_format_as does not use this; it calls
        np_format_as on the other space instead.

        Parameters
        ----------
        batch : a theano symbol, or a nested tuple thereof
            Implementations of this method may assume that batch lies in
            space (i.e. that it passed self._validate(batch) without throwing
            an exception).
        target_space : A Space subclass
            The space to undo batch formatting from.

        Returns
        -------
        A symbolic Theano variable
            The batch, converted from target_space, back to self.

        Raises
        ------
        NotImplementedError
            Always, in this base class; subclasses must override.
        """
        raise NotImplementedError("%s does not implement "
                                  "_undo_format_as_impl()." %
                                  type(self))
    def validate(self, batch):
        """
        Runs all validate_callbacks, then checks that batch lies in this space.
        Raises an exception if the batch isn't symbolic, or if any of these
        checks fails.

        Parameters
        ----------
        batch : a symbolic (Theano) variable that lies in this space.
        """
        # Symbolic-ness must be established before the symbolic
        # callbacks run.
        self._check_is_symbolic(batch)
        self._validate(is_numeric=False, batch=batch)
def np_validate(self, batch):
"""
Runs all np_validate_callbacks, then checks that batch lies in this
space. Raises an exception if the batch isn't numeric, or if any of
these checks fails.
Parameters
----------
batch : a numeric (numpy/scipy.sparse) variable that lies in this \
space
"""
self._check_is_numeric(batch)
self._validate(is_numeric=True, batch=batch)
def _validate(self, is_numeric, batch):
"""
Shared implementation of validate() and np_validate().
Calls validate_callbacks or np_validate_callbacks as appropriate,
then calls self._validate_impl(batch) to verify that batch belongs
to this space.
Parameters
----------
is_numeric : bool.
Set to True to call np_validate_callbacks,
False to call validate_callbacks.
Necessary because it can be impossible to tell from the
batch whether it should be treated as a numeric of symbolic
batch, for example when the batch is the empty tuple (),
or NullSpace batch None.
batch : a theano variable, numpy ndarray, scipy.sparse matrix \
or a nested tuple thereof
Represents a batch belonging to this space.
"""
if is_numeric:
self._check_is_numeric(batch)
callbacks_name = "np_validate_callbacks"
else:
self._check_is_symbolic(batch)
callbacks_name = "validate_callbacks"
if not hasattr(self, callbacks_name):
raise TypeError("The " + str(type(self)) + " Space subclass "
"is required to call the Space superclass "
"constructor but does not.")
else:
callbacks = getattr(self, callbacks_name)
for callback in callbacks:
callback(batch)
self._validate_impl(is_numeric, batch)
def _validate_impl(self, is_numeric, batch):
"""
Subclasses must override this method so that it throws an
exception if the batch is the wrong shape or dtype for this Space.
Parameters
----------
is_numeric : bool
Set to True to treat batch as a numeric type
(numpy.ndarray or scipy.sparse matrix).
Set to False to treat batch as a symbolic (Theano) variable.
Necessary because batch could be (), which could be numeric
or symbolic.
batch : A numpy ndarray, scipy.sparse matrix, theano variable \
or a nested tuple thereof.
Must be a valid batch belonging to this space.
"""
raise NotImplementedError('Class "%s" does not implement '
'_validate_impl()' % type(self))
def batch_size(self, batch):
"""
Returns the batch size of a symbolic batch.
Parameters
----------
batch : WRITEME
"""
return self._batch_size(is_numeric=False, batch=batch)
def np_batch_size(self, batch):
"""
Returns the batch size of a numeric (numpy/scipy.sparse) batch.
Parameters
----------
batch : WRITEME
"""
return self._batch_size(is_numeric=True, batch=batch)
    def _batch_size(self, is_numeric, batch):
        """
        Shared implementation of batch_size() and np_batch_size():
        validates the batch as numeric or symbolic (per `is_numeric`),
        then defers to self._batch_size_impl for the actual size.
        """
        self._validate(is_numeric, batch)
        return self._batch_size_impl(is_numeric, batch)
def _batch_size_impl(self, is_numeric, batch):
"""
Returns the batch size of a batch.
Parameters
----------
batch : WRITEME
"""
raise NotImplementedError("%s does not implement batch_size" %
type(self))
def get_batch(self, data, start, end):
"""
Returns a batch of data starting from index `start` to index `stop`
Parameters
----------
data : WRITEME
start : WRITEME
end : WRITEME
"""
raise NotImplementedError(str(type(self)) + " does not implement " +
"get_batch")
@staticmethod
def _check_is_numeric(batch):
"""
.. todo::
WRITEME
"""
if not is_numeric_batch(batch):
raise TypeError('Expected batch to be a numeric variable, but '
'instead it was of type "%s"' % type(batch))
@staticmethod
def _check_is_symbolic(batch):
"""
.. todo::
WRITEME
"""
if not is_symbolic_batch(batch):
raise TypeError('Expected batch to be a symbolic variable, but '
'instead it was of type "%s"' % type(batch))
def _clean_dtype_arg(self, dtype):
"""
Checks dtype string for validity, and returns it if it is.
If dtype is 'floatX', returns the theano.config.floatX dtype (this will
either be 'float32' or 'float64'.
"""
if isinstance(dtype, np.dtype):
dtype = str(dtype)
if dtype == 'floatX':
return theano.config.floatX
if dtype is None or \
dtype in tuple(x.dtype for x in theano.scalar.all_types):
return dtype
raise TypeError('Unrecognized value "%s" (type %s) for dtype arg' %
(dtype, type(dtype)))
class SimplyTypedSpace(Space):
    """
    An abstract base class for Spaces that use a numpy/theano dtype string
    for their .dtype property.
    """

    def __init__(self, dtype='floatX', **kwargs):
        super(SimplyTypedSpace, self).__init__(**kwargs)
        # Canonicalize at construction time so .dtype is always a valid
        # dtype string (or None for dtype-agnostic subclasses).
        self._dtype = super(SimplyTypedSpace, self)._clean_dtype_arg(dtype)

    def _clean_dtype_arg(self, dtype):
        """
        If dtype is None, checks that self.dtype is not None and returns it.
        Otherwise, same as superclass' implementation.
        """
        if dtype is None:
            if self.dtype is None:
                raise TypeError("self.dtype is None, so you must provide a "
                                "non-None dtype argument to this method.")
            return self.dtype

        return super(SimplyTypedSpace, self)._clean_dtype_arg(dtype)

    def _validate_impl(self, is_numeric, batch):
        """
        Checks that `batch` is a non-composite batch whose dtype can be
        safely cast to this space's dtype.
        """
        if isinstance(batch, tuple):
            raise TypeError("This space only supports simple dtypes, but "
                            "received a composite batch.")

        # Checks for information-destroying casts.
        #
        # To be maximally strict, we'd guard against all loss of precision by
        # checking if np.can_cast(batch.dtype, self.dtype).
        #
        # Because this prohibits float64->float32, it breaks too much of the
        # codebase (float64 is default float, float32 is default CUDA float
        # for many graphics cards).
        #
        # Therefore, we only prohibit the following:
        #   * non-integral type to integral type
        #   * complex to non-complex

        def is_complex(dtype):
            # np.complexfloating is the abstract parent of all complex
            # dtypes. The original used the `np.complex` builtin alias,
            # which was deprecated in NumPy 1.20 and removed in 1.24;
            # np.issubdtype treats the two identically.
            return np.issubdtype(dtype, np.complexfloating)

        def is_integral(dtype):
            return np.issubdtype(dtype, np.integer)

        if self.dtype is not None:
            if (is_complex(batch.dtype) and not is_complex(self.dtype)) or \
               (not is_integral(batch.dtype) and is_integral(self.dtype)):
                raise TypeError("Cannot safely cast batch dtype %s to "
                                "space's dtype %s. " %
                                (batch.dtype, self.dtype))

    @property
    def dtype(self):
        """The numpy/theano dtype string of this space (may be None)."""
        return self._dtype

    @dtype.setter
    def dtype(self, new_dtype):
        """Sets the dtype, validating/canonicalizing it first."""
        self._dtype = super(SimplyTypedSpace, self)._clean_dtype_arg(new_dtype)

    def __setstate__(self, state_dict):
        """
        Support for unpickling old instances.
        """
        self.__dict__.update(state_dict)

        # When unpickling a Space that was pickled before Spaces had dtypes,
        # we need to set the _dtype to the default value.
        if '_dtype' not in state_dict:
            self._dtype = theano.config.floatX
class IndexSpace(SimplyTypedSpace):
    """
    A space representing indices, for example MNIST labels (0-10) or the
    indices of words in a dictionary for NLP tasks. A single space can
    contain multiple indices, for example the word indices of an n-gram.

    IndexSpaces can be converted to VectorSpaces in two ways: Either the
    labels are converted into one-hot vectors which are then concatenated,
    or they are converted into a single vector where 1s indicate labels
    present i.e. for 4 possible labels we have [0, 2] -> [1 0 1 0] or
    [0, 2] -> [1 0 0 0 0 0 1 0].

    Parameters
    ----------
    max_labels : int
        The number of possible classes/labels. This means that
        all labels should be < max_labels. Example: For MNIST
        there are 10 numbers and hence max_labels = 10.
    dim : int
        The number of indices in one space e.g. for MNIST there is
        one target label and hence dim = 1. If we have an n-gram
        of word indices as input to a neurel net language model, dim = n.
    dtype : str
        A numpy dtype string indicating this space's dtype.
        Must be an integer type e.g. int32 or int64.
    kwargs : dict
        Passes on to superclass constructor
    """

    def __init__(self, max_labels, dim, dtype='int64', **kwargs):
        if 'int' not in dtype:
            raise ValueError("The dtype of IndexSpace must be an integer type")

        super(IndexSpace, self).__init__(dtype, **kwargs)

        self.max_labels = max_labels
        self.dim = dim
        # Expands index batches into one-hot vector batches on demand.
        self.formatter = OneHotFormatter(self.max_labels)

    def __str__(self):
        """Return a string representation"""
        return ('%(classname)s(dim=%(dim)s, max_labels=%(max_labels)s, '
                'dtype=%(dtype)s)') % dict(classname=self.__class__.__name__,
                                           dim=self.dim,
                                           max_labels=self.max_labels,
                                           dtype=self.dtype)

    def __hash__(self):
        """Hash consistent with __eq__."""
        return hash((type(self), self.dim, self.max_labels, self.dtype))

    def __eq__(self, other):
        """Structural equality on type, max_labels, dim and dtype."""
        return (type(self) == type(other) and
                self.max_labels == other.max_labels and
                self.dim == other.dim and
                self.dtype == other.dtype)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return (not self == other)

    @functools.wraps(Space.get_total_dimension)
    def get_total_dimension(self):
        return self.dim

    @functools.wraps(Space.get_origin)
    def get_origin(self):
        return np.zeros((1, self.dim,))

    @functools.wraps(Space.get_origin_batch)
    def get_origin_batch(self, batch_size, dtype=None):
        dtype = self._clean_dtype_arg(dtype)
        return np.zeros((batch_size, self.dim), dtype=dtype)

    @functools.wraps(Space._check_sizes)
    def _check_sizes(self, space):
        if isinstance(space, VectorSpace):
            if space.dim not in (self.max_labels,  # merged onehots
                                 self.dim * self.max_labels):  # concatenated
                raise ValueError("Can't convert to VectorSpace of dim %d. "
                                 "Expected either dim=%d (merged one-hots) or "
                                 "%d (concatenated one-hots)" %
                                 (space.dim,
                                  self.max_labels,
                                  self.dim * self.max_labels))
        elif isinstance(space, IndexSpace):
            if space.dim != self.dim or space.max_labels != self.max_labels:
                # Bug fix: the message describes the *target* space, so it
                # must report space.max_labels (the original printed
                # self.max_labels, which always matches and is misleading).
                raise ValueError("Can't convert to IndexSpace of dim %d and "
                                 "max_labels %d." %
                                 (space.dim, space.max_labels))
        else:
            raise ValueError("Can't convert to " + str(space.__class__))

    @functools.wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        if isinstance(space, VectorSpace):
            if self.max_labels == space.dim:
                mode = 'merge'
            elif self.dim * self.max_labels == space.dim:
                mode = 'concatenate'
            else:
                raise ValueError("There is a bug. Couldn't format to a "
                                 "VectorSpace because it had an incorrect "
                                 "size, but this should've been caught in "
                                 "IndexSpace._check_sizes().")

            format_func = (self.formatter.format if is_numeric else
                           self.formatter.theano_expr)
            return _cast(format_func(batch, sparse=space.sparse, mode=mode),
                         space.dtype)
        elif isinstance(space, IndexSpace):
            if space.dim != self.dim or space.max_labels != self.max_labels:
                raise ValueError("The two IndexSpaces' dim and max_labels "
                                 "values don't match. This should have been "
                                 "caught by IndexSpace._check_sizes().")

            return _cast(batch, space.dtype)
        else:
            raise ValueError("Can't convert %s to %s"
                             % (self, space))

    @functools.wraps(Space.make_theano_batch)
    def make_theano_batch(self, name=None, dtype=None, batch_size=None):
        # Index batches are integer ('l' = int64) row/matrix variables.
        if batch_size == 1:
            rval = tensor.lrow(name=name)
        else:
            rval = tensor.lmatrix(name=name)

        if theano.config.compute_test_value != 'off':
            if batch_size == 1:
                n = 1
            else:
                # TODO: try to extract constant scalar value from batch_size
                n = 4
            rval.tag.test_value = self.get_origin_batch(batch_size=n,
                                                        dtype=dtype)
        return rval

    @functools.wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        return batch.shape[0]

    @functools.wraps(Space._validate_impl)
    def _validate_impl(self, is_numeric, batch):
        """
        Checks that `batch` is a 2D integer batch of width self.dim.
        """
        # checks that batch isn't a tuple, checks batch.type against
        # self.dtype
        super(IndexSpace, self)._validate_impl(is_numeric, batch)

        if is_numeric:
            # Use the 'CudaNdarray' string to avoid importing
            # theano.sandbox.cuda when it is not available
            if not isinstance(batch, np.ndarray) \
               and str(type(batch)) != "<type 'CudaNdarray'>":
                raise TypeError("The value of a IndexSpace batch should be a "
                                "numpy.ndarray, or CudaNdarray, but is %s."
                                % str(type(batch)))
            if batch.ndim != 2:
                raise ValueError("The value of a IndexSpace batch must be "
                                 "2D, got %d dimensions for %s." % (batch.ndim,
                                                                    batch))
            if batch.shape[1] != self.dim:
                raise ValueError("The width of a IndexSpace batch must match "
                                 "with the space's dimension, but batch has "
                                 "shape %s and dim = %d." % (str(batch.shape),
                                                             self.dim))
        else:
            if not isinstance(batch, theano.gof.Variable):
                raise TypeError("IndexSpace batch should be a theano "
                                "Variable, got " + str(type(batch)))
            if not isinstance(batch.type, (theano.tensor.TensorType,
                                           CudaNdarrayType)):
                raise TypeError("IndexSpace batch should be TensorType or "
                                "CudaNdarrayType, got " + str(batch.type))
            if batch.ndim != 2:
                raise ValueError('IndexSpace batches must be 2D, got %d '
                                 'dimensions' % batch.ndim)
            for val in get_debug_values(batch):
                self.np_validate(val)
class VectorSpace(SimplyTypedSpace):
    """
    A space whose points are defined as fixed-length vectors.

    Parameters
    ----------
    dim : int
        Dimensionality of a vector in this space.
    sparse : bool, optional
        Sparse vector or not
    dtype : str, optional
        A numpy dtype string (e.g. 'float32') indicating this space's
        dtype, or None for a dtype-agnostic space.
    kwargs : dict
        Passed on to superclass constructor.
    """

    def __init__(self,
                 dim,
                 sparse=False,
                 dtype='floatX',
                 **kwargs):
        super(VectorSpace, self).__init__(dtype, **kwargs)
        self.dim = dim
        self.sparse = sparse

    def __str__(self):
        """Return a summary of dim, sparsity and dtype."""
        return ('%s(dim=%d%s, dtype=%s)' %
                (self.__class__.__name__,
                 self.dim,
                 ', sparse' if self.sparse else '',
                 self.dtype))

    @functools.wraps(Space.get_origin)
    def get_origin(self):
        return np.zeros((self.dim,))

    @functools.wraps(Space.get_origin_batch)
    def get_origin_batch(self, batch_size, dtype=None):
        dtype = self._clean_dtype_arg(dtype)

        if self.sparse:
            return scipy.sparse.csr_matrix((batch_size, self.dim), dtype=dtype)
        else:
            return np.zeros((batch_size, self.dim), dtype=dtype)

    @functools.wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        return batch.shape[0]

    @functools.wraps(Space.make_theano_batch)
    def make_theano_batch(self, name=None, dtype=None, batch_size=None):
        dtype = self._clean_dtype_arg(dtype)

        if self.sparse:
            if batch_size is not None:
                raise NotImplementedError("batch_size not implemented "
                                          "for sparse case")
            rval = theano.sparse.csr_matrix(name=name, dtype=dtype)
        else:
            if batch_size == 1:
                rval = tensor.row(name=name, dtype=dtype)
            else:
                rval = tensor.matrix(name=name, dtype=dtype)

        if theano.config.compute_test_value != 'off':
            if batch_size == 1:
                n = 1
            else:
                # TODO: try to extract constant scalar value from batch_size
                n = 4
            rval.tag.test_value = self.get_origin_batch(batch_size=n,
                                                        dtype=dtype)
        return rval

    @functools.wraps(Space.get_total_dimension)
    def get_total_dimension(self):
        return self.dim

    @functools.wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        # dtype the result will be cast to at the end (None = no cast).
        to_type = None

        def is_sparse(batch):
            return (isinstance(batch, theano.sparse.SparseVariable) or
                    scipy.sparse.issparse(batch))

        if not isinstance(space, IndexSpace):
            my_dimension = self.get_total_dimension()
            other_dimension = space.get_total_dimension()
            if my_dimension != other_dimension:
                raise ValueError(str(self) + " with total dimension " +
                                 str(my_dimension) +
                                 " can't format a batch into " +
                                 str(space) +
                                 "because its total dimension is " +
                                 str(other_dimension))

        if isinstance(space, CompositeSpace):
            if isinstance(batch, theano.sparse.SparseVariable):
                warnings.warn('Formatting from a sparse VectorSpace to a '
                              'CompositeSpace is currently (2 Jan 2014) a '
                              'non-differentiable action. This is because it '
                              'calls slicing operations on a sparse batch '
                              '(e.g. "my_matrix[r:R, c:C]", which Theano does '
                              'not yet have a gradient operator for. If '
                              'autodifferentiation is reporting an error, '
                              'this may be why. Formatting batch type %s '
                              'from space %s to space %s' %
                              (type(batch), self, space))

            # Slice out each component's share of the vector and recursively
            # format it into the component's space.
            pos = 0
            pieces = []
            for component in space.components:
                width = component.get_total_dimension()
                subtensor = batch[:, pos:pos + width]
                pos += width
                vector_subspace = VectorSpace(dim=width,
                                              dtype=self.dtype,
                                              sparse=self.sparse)
                formatted = vector_subspace._format_as(is_numeric,
                                                       subtensor,
                                                       component)
                pieces.append(formatted)
            result = tuple(pieces)
        elif isinstance(space, Conv2DSpace):
            if is_sparse(batch):
                raise TypeError("Formatting a SparseVariable to a Conv2DSpace "
                                "is not supported, since neither scipy nor "
                                "Theano has sparse tensors with more than 2 "
                                "dimensions. We need 4 dimensions to "
                                "represent a Conv2DSpace batch")

            dims = {'b': batch.shape[0],
                    'c': space.num_channels,
                    0: space.shape[0],
                    1: space.shape[1]}

            if space.axes != space.default_axes:
                # Always use default_axes, so conversions like
                # Conv2DSpace(c01b) -> VectorSpace -> Conv2DSpace(b01c) work
                shape = [dims[ax] for ax in space.default_axes]
                batch = _reshape(batch, shape)
                batch = batch.transpose(*[space.default_axes.index(ax)
                                          for ax in space.axes])
                result = batch
            else:
                shape = tuple([dims[elem] for elem in space.axes])
                result = _reshape(batch, shape)

            to_type = space.dtype
        elif isinstance(space, VectorSpace):
            if self.dim != space.dim:
                raise ValueError("Can't convert between VectorSpaces of "
                                 "different sizes (%d to %d)."
                                 % (self.dim, space.dim))

            if space.sparse != is_sparse(batch):
                if space.sparse:
                    batch = _dense_to_sparse(batch)
                elif isinstance(batch, theano.sparse.SparseVariable):
                    batch = theano.sparse.dense_from_sparse(batch)
                elif scipy.sparse.issparse(batch):
                    batch = batch.todense()
                else:
                    assert False, ("Unplanned-for branch in if-elif-elif "
                                   "chain. This is a bug in the code.")

            result = batch
            to_type = space.dtype
        else:
            raise NotImplementedError("%s doesn't know how to format as %s" %
                                      (self, space))

        return _cast(result, dtype=to_type)

    @functools.wraps(Space._undo_format_as_impl)
    def _undo_format_as_impl(self, batch, space):
        def is_sparse(batch):
            return isinstance(batch, theano.sparse.SparseVariable)

        if not isinstance(space, IndexSpace):
            my_dimension = self.get_total_dimension()
            other_dimension = space.get_total_dimension()
            if my_dimension != other_dimension:
                raise ValueError(str(self) + " with total dimension " +
                                 str(my_dimension) +
                                 " can't undo format a batch from " +
                                 str(space) +
                                 "because its total dimension is " +
                                 str(other_dimension))

        if isinstance(space, CompositeSpace):
            if isinstance(batch, theano.sparse.SparseVariable):
                warnings.warn('Undo formatting from a sparse VectorSpace to a '
                              'CompositeSpace is currently (2 Jan 2014) a '
                              'non-differentiable action. This is because it '
                              'calls slicing operations on a sparse batch '
                              '(e.g. "my_matrix[r:R, c:C]", which Theano does '
                              'not yet have a gradient operator for. If '
                              'autodifferentiation is reporting an error, '
                              'this may be why. Formatting batch type %s '
                              'from space %s to space %s' %
                              (type(batch), self, space))

            # Recursively try and find a non-Composite, non-Null space
            # to extract underlying theano variable
            def extract_vector_variable(composite_space, batch_tuple):
                found = False
                for sp, el in safe_zip(composite_space.components,
                                       batch_tuple):
                    dim = sp.get_total_dimension()
                    if not isinstance(sp, NullSpace) and dim > 0:
                        if isinstance(sp, CompositeSpace):
                            var, found = extract_vector_variable(sp, el)
                            var = var.owner.inputs[0]
                        else:
                            dummy_sp = VectorSpace(dim=dim,
                                                   sparse=sp.sparse,
                                                   dtype=sp.dtype
                                                   )
                            var = dummy_sp.undo_format_as(el, sp)
                            found = True
                    if found:
                        break
                return var, found

            var, found = extract_vector_variable(space, batch)
            batch = var
            if not found:
                raise TypeError("Could not find a valid space "
                                "to undo format from in the "
                                "CompositeSpace.")
            else:
                # Undo subtensor slice
                owner = batch.owner
                assert 'Subtensor' in str(owner.op)
                batch = owner.inputs[0]
        elif isinstance(space, Conv2DSpace):
            if is_sparse(batch):
                raise TypeError("Undo formatting a SparseVariable to a "
                                "Conv2DSpace is not supported, since "
                                "neither scipy nor Theano has sparse "
                                "tensors with more than 2 dimensions. "
                                "We need 4 dimensions to represent a "
                                "Conv2DSpace batch")
            # Check for cast
            batch = _undo_op(batch, 'Cast')
            # Undo axes shuffle
            if space.axes != space.default_axes:
                batch = _undo_op(batch, 'DimShuffle', strict=True)
            # Undo reshape
            batch = _undo_op(batch, 'Reshape{4}', strict=True)
        elif isinstance(space, VectorSpace):
            if self.dim != space.dim:
                raise ValueError("Can't convert between VectorSpaces of "
                                 "different sizes (%d to %d)."
                                 % (self.dim, space.dim))
            # Check for cast
            batch = _undo_op(batch, 'Cast')
            # Undo any sparse-dense switches
            if self.sparse != is_sparse(batch):
                if space.sparse:
                    batch = _undo_op(batch, 'SparseFromDense', strict=True)
                elif isinstance(batch, theano.sparse.SparseVariable):
                    batch = _undo_op(batch, 'DenseFromSparse', strict=True)
                else:
                    assert False, ("Unplanned-for branch in if-elif "
                                   "chain. This is a bug in the code.")
        else:
            raise NotImplementedError("%s doesn't know how to format as %s" %
                                      (self, space))

        return batch

    def __eq__(self, other):
        """Structural equality on type, dim, sparsity and dtype."""
        return (type(self) == type(other) and
                self.dim == other.dim and
                self.sparse == other.sparse and
                self.dtype == other.dtype)

    def __hash__(self):
        """Hash consistent with __eq__."""
        return hash((type(self), self.dim, self.sparse, self.dtype))

    @functools.wraps(Space._validate_impl)
    def _validate_impl(self, is_numeric, batch):
        """
        Checks that `batch` is a 2D (sparse or dense) batch of width
        self.dim.
        """
        # checks that batch isn't a tuple, checks batch.type against
        # self.dtype
        super(VectorSpace, self)._validate_impl(is_numeric, batch)

        if isinstance(batch, theano.gof.Variable):
            if self.sparse:
                if not isinstance(batch.type, theano.sparse.SparseType):
                    raise TypeError('This VectorSpace is%s sparse, but the '
                                    'provided batch is not. (batch type: "%s")'
                                    % ('' if self.sparse else ' not',
                                       type(batch)))
            elif not isinstance(batch.type, (theano.tensor.TensorType,
                                             CudaNdarrayType)):
                raise TypeError("VectorSpace batch should be TensorType or "
                                "CudaNdarrayType, got " + str(batch.type))
            if batch.ndim != 2:
                raise ValueError('VectorSpace batches must be 2D, got %d '
                                 'dimensions' % batch.ndim)
            for val in get_debug_values(batch):
                self.np_validate(val)  # sic; val is numeric, not symbolic
        else:
            # Use the 'CudaNdarray' string to avoid importing
            # theano.sandbox.cuda when it is not available.
            # Bug fix: the original compared `type(batch) != 'CudaNdarray'`,
            # i.e. a type object against a string, which is always True and
            # so rejected CudaNdarray batches; compare str(type(batch))
            # like every other Space subclass does.
            if (not self.sparse
                    and not isinstance(batch, np.ndarray)
                    and str(type(batch)) != "<type 'CudaNdarray'>"):
                raise TypeError("The value of a VectorSpace batch should be a "
                                "numpy.ndarray, or CudaNdarray, but is %s."
                                % str(type(batch)))
            if self.sparse:
                if not theano.sparse.enable_sparse:
                    raise TypeError("theano.sparse is not enabled, cannot "
                                    "have a value for a sparse VectorSpace.")
                if not scipy.sparse.issparse(batch):
                    raise TypeError("The value of a sparse VectorSpace batch "
                                    "should be a sparse scipy matrix, got %s "
                                    "of type %s." % (batch, type(batch)))
            if batch.ndim != 2:
                raise ValueError("The value of a VectorSpace batch must be "
                                 "2D, got %d dimensions for %s." % (batch.ndim,
                                                                    batch))
            if batch.shape[1] != self.dim:
                raise ValueError("The width of a VectorSpace batch must match "
                                 "with the space's dimension, but batch has "
                                 "shape %s and dim = %d." %
                                 (str(batch.shape), self.dim))
class VectorSequenceSpace(SimplyTypedSpace):
    """
    A space representing a single, variable-length sequence of fixed-sized
    vectors.

    Parameters
    ----------
    dim : int
        Vector size
    dtype : str, optional
        A numpy dtype string indicating this space's dtype.
    kwargs : dict
        Passes on to superclass constructor
    """

    def __init__(self, dim, dtype='floatX', **kwargs):
        super(VectorSequenceSpace, self).__init__(dtype, **kwargs)
        self.dim = dim

    def __str__(self):
        """Return a string representation"""
        return ('%(classname)s(dim=%(dim)s, dtype=%(dtype)s)' %
                dict(classname=self.__class__.__name__,
                     dim=self.dim,
                     dtype=self.dtype))

    @wraps(Space.__eq__)
    def __eq__(self, other):
        return (type(self) == type(other) and
                self.dim == other.dim and
                self.dtype == other.dtype)

    def __hash__(self):
        # Defined alongside __eq__ (like VectorSpace/IndexSpace do) so
        # that equal spaces hash equally; defining __eq__ alone leaves
        # hashing inconsistent with equality.
        return hash((type(self), self.dim, self.dtype))

    @wraps(Space._check_sizes)
    def _check_sizes(self, space):
        if not isinstance(space, VectorSequenceSpace):
            raise ValueError("Can't convert to " + str(space.__class__))
        else:
            if space.dim != self.dim:
                raise ValueError("Can't convert to VectorSequenceSpace of "
                                 "dim %d" %
                                 (space.dim,))

    @wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        if isinstance(space, VectorSequenceSpace):
            if space.dim != self.dim:
                raise ValueError("The two VectorSequenceSpaces' dim "
                                 "values don't match. This should have been "
                                 "caught by "
                                 "VectorSequenceSpace._check_sizes().")

            return _cast(batch, space.dtype)
        else:
            raise ValueError("Can't convert %s to %s" % (self, space))

    @wraps(Space.make_theano_batch)
    def make_theano_batch(self, name=None, dtype=None, batch_size=None):
        if batch_size == 1:
            return tensor.matrix(name=name)
        else:
            # A sequence space holds exactly one sequence, so only
            # batch_size == 1 is meaningful. Bug fix: the original
            # *returned* the ValueError instance instead of raising it.
            raise ValueError("VectorSequenceSpace does not support batches "
                             "of sequences.")

    @wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        # Only batch size of 1 is supported
        return 1

    @wraps(Space._validate_impl)
    def _validate_impl(self, is_numeric, batch):
        # checks that batch isn't a tuple, checks batch.type against
        # self.dtype
        super(VectorSequenceSpace, self)._validate_impl(is_numeric, batch)

        if is_numeric:
            # Use the 'CudaNdarray' string to avoid importing
            # theano.sandbox.cuda when it is not available
            if not isinstance(batch, np.ndarray) \
               and str(type(batch)) != "<type 'CudaNdarray'>":
                raise TypeError("The value of a VectorSequenceSpace batch "
                                "should be a numpy.ndarray, or CudaNdarray, "
                                "but is %s." % str(type(batch)))
            if batch.ndim != 2:
                raise ValueError("The value of a VectorSequenceSpace batch "
                                 "must be 2D, got %d dimensions for %s."
                                 % (batch.ndim, batch))
            if batch.shape[1] != self.dim:
                raise ValueError("The width of a VectorSequenceSpace 'batch' "
                                 "must match with the space's window"
                                 "dimension, but batch has dim %d and "
                                 "this space's dim is %d."
                                 % (batch.shape[1], self.dim))
        else:
            if not isinstance(batch, theano.gof.Variable):
                raise TypeError("VectorSequenceSpace batch should be a theano "
                                "Variable, got " + str(type(batch)))
            if not isinstance(batch.type, (theano.tensor.TensorType,
                                           CudaNdarrayType)):
                raise TypeError("VectorSequenceSpace batch should be "
                                "TensorType or CudaNdarrayType, got " +
                                str(batch.type))
            if batch.ndim != 2:
                raise ValueError("VectorSequenceSpace 'batches' must be 2D, "
                                 "got %d dimensions" % batch.ndim)
            for val in get_debug_values(batch):
                self.np_validate(val)
class IndexSequenceSpace(SimplyTypedSpace):
    """
    A space representing a single, variable-length sequence of indexes.

    Parameters
    ----------
    max_labels : int
        The number of possible classes/labels. This means that
        all labels should be < max_labels.
    dim : int
        The number of indices in one element of the sequence
    dtype : str
        A numpy dtype string indicating this space's dtype.
        Must be an integer type e.g. int32 or int64.
    kwargs : dict
        Passes on to superclass constructor
    """

    def __init__(self, max_labels, dim, dtype='int64', **kwargs):
        if 'int' not in dtype:
            raise ValueError("The dtype of IndexSequenceSpace must be an "
                             "integer type")

        super(IndexSequenceSpace, self).__init__(dtype, **kwargs)

        self.max_labels = max_labels
        self.dim = dim
        # Expands index sequences into one-hot vector sequences on demand.
        self.formatter = OneHotFormatter(self.max_labels)

    def __str__(self):
        """Return a string representation"""
        return ('%(classname)s(dim=%(dim)s, max_labels=%(max_labels)s, '
                'dtype=%(dtype)s)') % dict(classname=self.__class__.__name__,
                                           dim=self.dim,
                                           max_labels=self.max_labels,
                                           dtype=self.dtype)

    def __eq__(self, other):
        """Structural equality on type, max_labels, dim and dtype."""
        return (type(self) == type(other) and
                self.max_labels == other.max_labels and
                self.dim == other.dim and
                self.dtype == other.dtype)

    def __hash__(self):
        # Defined alongside __eq__ (like IndexSpace does) so that equal
        # spaces hash equally; defining __eq__ alone leaves hashing
        # inconsistent with equality.
        return hash((type(self), self.dim, self.max_labels, self.dtype))

    @wraps(Space._check_sizes)
    def _check_sizes(self, space):
        if isinstance(space, VectorSequenceSpace):
            # self.max_labels -> merged onehots
            # self.dim * self.max_labels -> concatenated
            if space.dim not in (self.max_labels, self.dim * self.max_labels):
                raise ValueError("Can't convert to VectorSequenceSpace of "
                                 "dim %d. Expected either "
                                 "dim=%d (merged one-hots) or %d "
                                 "(concatenated one-hots)" %
                                 (space.dim,
                                  self.max_labels,
                                  self.dim * self.max_labels))
        elif isinstance(space, IndexSequenceSpace):
            if space.dim != self.dim or space.max_labels != self.max_labels:
                raise ValueError("Can't convert to IndexSequenceSpace of "
                                 "dim %d and max_labels %d." %
                                 (space.dim, self.max_labels))
        else:
            raise ValueError("Can't convert to " + str(space.__class__))

    @wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        if isinstance(space, VectorSequenceSpace):
            if self.max_labels == space.dim:
                mode = 'merge'
            elif self.dim * self.max_labels == space.dim:
                mode = 'concatenate'
            else:
                raise ValueError("There is a bug. Couldn't format to a "
                                 "VectorSequenceSpace because it had an "
                                 "incorrect size, but this should've been "
                                 "caught in "
                                 "IndexSequenceSpace._check_sizes().")

            format_func = (self.formatter.format if is_numeric else
                           self.formatter.theano_expr)
            return _cast(format_func(batch, mode=mode), space.dtype)
        elif isinstance(space, IndexSequenceSpace):
            if space.dim != self.dim or space.max_labels != self.max_labels:
                raise ValueError("The two IndexSequenceSpaces' dim and "
                                 "max_labels values don't match. This should "
                                 "have been caught by "
                                 "IndexSequenceSpace._check_sizes().")

            return _cast(batch, space.dtype)
        else:
            raise ValueError("Can't convert %s to %s"
                             % (self, space))

    @wraps(Space.make_theano_batch)
    def make_theano_batch(self, name=None, dtype=None, batch_size=None):
        if batch_size == 1:
            return tensor.matrix(name=name)
        else:
            # A sequence space holds exactly one sequence, so only
            # batch_size == 1 is meaningful. Bug fix: the original
            # *returned* the ValueError instance instead of raising it.
            raise ValueError("IndexSequenceSpace does not support batches "
                             "of sequences.")

    @wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        # Only batch size of 1 is supported
        return 1

    @wraps(Space._validate_impl)
    def _validate_impl(self, is_numeric, batch):
        # checks that batch isn't a tuple, checks batch.type against
        # self.dtype
        super(IndexSequenceSpace, self)._validate_impl(is_numeric, batch)

        if is_numeric:
            # Use the 'CudaNdarray' string to avoid importing
            # theano.sandbox.cuda when it is not available
            if not isinstance(batch, np.ndarray) \
               and str(type(batch)) != "<type 'CudaNdarray'>":
                raise TypeError("The value of a IndexSequenceSpace batch "
                                "should be a numpy.ndarray, or CudaNdarray, "
                                "but is %s." % str(type(batch)))
            if batch.ndim != 2:
                raise ValueError("The value of a IndexSequenceSpace batch "
                                 "must be 2D, got %d dimensions for %s." %
                                 (batch.ndim, batch))
            if batch.shape[1] != self.dim:
                raise ValueError("The width of a IndexSequenceSpace batch "
                                 "must match with the space's dimension, but "
                                 "batch has shape %s and dim = %d." %
                                 (str(batch.shape), self.dim))
        else:
            if not isinstance(batch, theano.gof.Variable):
                raise TypeError("IndexSequenceSpace batch should be a theano "
                                "Variable, got " + str(type(batch)))
            if not isinstance(batch.type, (theano.tensor.TensorType,
                                           CudaNdarrayType)):
                raise TypeError("IndexSequenceSpace batch should be "
                                "TensorType or CudaNdarrayType, got " +
                                str(batch.type))
            if batch.ndim != 2:
                raise ValueError('IndexSequenceSpace batches must be 2D, got '
                                 '%d dimensions' % batch.ndim)
            for val in get_debug_values(batch):
                self.np_validate(val)
class Conv2DSpace(SimplyTypedSpace):
"""
A space whose points are 3-D tensors representing (potentially
multi-channel) images.
Parameters
----------
shape : sequence, length 2
The shape of a single image, i.e. (rows, cols).
num_channels : int (synonym: channels)
Number of channels in the image, i.e. 3 if RGB.
axes : tuple
A tuple indicating the semantics of each axis, containing the
following elements in some order:
- 'b' : this axis is the batch index of a minibatch.
- 'c' : this axis the channel index of a minibatch.
- 0 : topological axis 0 (rows)
- 1 : topological axis 1 (columns)
For example, a PIL image has axes (0, 1, 'c') or (0, 1).
The pylearn2 image displaying functionality uses
('b', 0, 1, 'c') for batches and (0, 1, 'c') for images.
theano's conv2d operator uses ('b', 'c', 0, 1) images.
dtype : str
A numpy dtype string (e.g. 'float32') indicating this space's
dtype, or None for a dtype-agnostic space.
kwargs : dict
Passed on to superclass constructor
"""
# Assume pylearn2's get_topological_view format, since this is how
# data is currently served up. If we make better iterators change
# default to ('b', 'c', 0, 1) for theano conv2d
default_axes = ('b', 0, 1, 'c')
    def __init__(self,
                 shape,
                 channels=None,
                 num_channels=None,
                 axes=None,
                 dtype='floatX',
                 **kwargs):
        """
        Validates and stores the image geometry.

        Exactly one of `channels` / `num_channels` must be given (they
        are synonyms). `shape` must be a length-2 sequence of positive
        ints; `axes` defaults to the class-level `default_axes`.
        """
        super(Conv2DSpace, self).__init__(dtype, **kwargs)
        # Exactly one of the two synonymous channel arguments may be given.
        assert (channels is None) + (num_channels is None) == 1
        if num_channels is None:
            num_channels = channels
        assert isinstance(num_channels, py_integer_types)
        if not hasattr(shape, '__len__'):
            raise ValueError("shape argument for Conv2DSpace must have a "
                             "length. Got %s." % str(shape))
        if len(shape) != 2:
            raise ValueError("shape argument to Conv2DSpace must be length 2, "
                             "not %d" % len(shape))
        assert all(isinstance(elem, py_integer_types) for elem in shape)
        assert all(elem > 0 for elem in shape)
        # NOTE(review): this repeats the isinstance(num_channels, ...) check
        # from above — presumably redundant.
        assert isinstance(num_channels, py_integer_types)
        assert num_channels > 0
        # Converts shape to a tuple, so it can be hashable, and self can be too
        self.shape = tuple(shape)
        self.num_channels = num_channels
        if axes is None:
            axes = self.default_axes
        assert len(axes) == 4
        self.axes = tuple(axes)
def __str__(self):
"""
.. todo::
WRITEME
"""
return ("%s(shape=%s, num_channels=%d, axes=%s, dtype=%s)" %
(self.__class__.__name__,
str(self.shape),
self.num_channels,
str(self.axes),
self.dtype))
def __eq__(self, other):
"""
.. todo::
WRITEME
"""
assert isinstance(self.axes, tuple)
if isinstance(other, Conv2DSpace):
assert isinstance(other.axes, tuple)
return (type(self) == type(other) and
self.shape == other.shape and
self.num_channels == other.num_channels and
self.axes == other.axes and
self.dtype == other.dtype)
def __hash__(self):
"""
.. todo::
WRITEME
"""
return hash((type(self),
self.shape,
self.num_channels,
self.axes,
self.dtype))
    @functools.wraps(Space.get_batch_axis)
    def get_batch_axis(self):
        # Position of the batch ('b') axis within self.axes.
        return self.axes.index('b')
@functools.wraps(Space.get_origin)
def get_origin(self):
dims = {0: self.shape[0], 1: self.shape[1], 'c': self.num_channels}
shape = [dims[elem] for elem in self.axes if elem != 'b']
return np.zeros(shape, dtype=self.dtype)
@functools.wraps(Space.get_origin_batch)
def get_origin_batch(self, batch_size, dtype=None):
dtype = self._clean_dtype_arg(dtype)
if not isinstance(batch_size, py_integer_types):
raise TypeError("Conv2DSpace.get_origin_batch expects an int, "
"got %s of type %s" % (str(batch_size),
type(batch_size)))
assert batch_size > 0
dims = {'b': batch_size,
0: self.shape[0],
1: self.shape[1],
'c': self.num_channels}
shape = [dims[elem] for elem in self.axes]
return np.zeros(shape, dtype=dtype)
@functools.wraps(Space.make_theano_batch)
def make_theano_batch(self, name=None, dtype=None, batch_size=None):
dtype = self._clean_dtype_arg(dtype)
broadcastable = [False] * 4
broadcastable[self.axes.index('c')] = (self.num_channels == 1)
broadcastable[self.axes.index('b')] = (batch_size == 1)
broadcastable = tuple(broadcastable)
rval = TensorType(dtype=dtype,
broadcastable=broadcastable
)(name=name)
if theano.config.compute_test_value != 'off':
if batch_size == 1:
n = 1
else:
# TODO: try to extract constant scalar value from batch_size
n = 4
rval.tag.test_value = self.get_origin_batch(batch_size=n,
dtype=dtype)
return rval
    @functools.wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        # The batch size is the length of the batch ('b') axis.
        return batch.shape[self.axes.index('b')]
@staticmethod
def convert(tensor, src_axes, dst_axes):
"""
Returns a view of tensor using the axis semantics defined
by dst_axes. (If src_axes matches dst_axes, returns
tensor itself)
Useful for transferring tensors between different
Conv2DSpaces.
Parameters
----------
tensor : tensor_like
A 4-tensor representing a batch of images
src_axes : WRITEME
Axis semantics of tensor
dst_axes : WRITEME
WRITEME
"""
src_axes = tuple(src_axes)
dst_axes = tuple(dst_axes)
assert len(src_axes) == 4
assert len(dst_axes) == 4
if src_axes == dst_axes:
return tensor
shuffle = [src_axes.index(elem) for elem in dst_axes]
if is_symbolic_batch(tensor):
return tensor.dimshuffle(*shuffle)
else:
return tensor.transpose(*shuffle)
    @staticmethod
    def convert_numpy(tensor, src_axes, dst_axes):
        """
        Alias for `Conv2DSpace.convert`, which handles both numeric and
        symbolic batches; kept for backward compatibility.
        """
        return Conv2DSpace.convert(tensor, src_axes, dst_axes)
    @functools.wraps(Space.get_total_dimension)
    def get_total_dimension(self):
        # Patch old pickle files: the attribute used to be called `nchannels`.
        if not hasattr(self, 'num_channels'):
            self.num_channels = self.nchannels
        # Flattened size of one example: rows * cols * channels.
        return self.shape[0] * self.shape[1] * self.num_channels
@functools.wraps(Space._validate_impl)
def _validate_impl(self, is_numeric, batch):
# checks batch.type against self.dtype
super(Conv2DSpace, self)._validate_impl(is_numeric, batch)
if not is_numeric:
if isinstance(batch, theano.sparse.SparseVariable):
raise TypeError("Conv2DSpace cannot use SparseVariables, "
"since as of this writing (28 Dec 2013), "
"there is not yet a SparseVariable type with "
"4 dimensions")
if not isinstance(batch, theano.gof.Variable):
raise TypeError("Conv2DSpace batches must be theano "
"Variables, got " + str(type(batch)))
if not isinstance(batch.type, (theano.tensor.TensorType,
CudaNdarrayType)):
raise TypeError('Expected TensorType or CudaNdArrayType, got '
'"%s"' % type(batch.type))
if batch.ndim != 4:
raise ValueError("The value of a Conv2DSpace batch must be "
"4D, got %d dimensions for %s." %
(batch.ndim, batch))
for val in get_debug_values(batch):
self.np_validate(val)
else:
if scipy.sparse.issparse(batch):
raise TypeError("Conv2DSpace cannot use sparse batches, since "
"scipy.sparse does not support 4 dimensional "
"tensors currently (28 Dec 2013).")
if (not isinstance(batch, np.ndarray)) \
and type(batch) != 'CudaNdarray':
raise TypeError("The value of a Conv2DSpace batch should be a "
"numpy.ndarray, or CudaNdarray, but is %s."
% str(type(batch)))
if batch.ndim != 4:
raise ValueError("The value of a Conv2DSpace batch must be "
"4D, got %d dimensions for %s." %
(batch.ndim, batch))
d = self.axes.index('c')
actual_channels = batch.shape[d]
if actual_channels != self.num_channels:
raise ValueError("Expected axis %d to be number of channels "
"(%d) but it is %d" %
(d, self.num_channels, actual_channels))
assert batch.shape[self.axes.index('c')] == self.num_channels
for coord in [0, 1]:
d = self.axes.index(coord)
actual_shape = batch.shape[d]
expected_shape = self.shape[coord]
if actual_shape != expected_shape:
raise ValueError("Conv2DSpace with shape %s and axes %s "
"expected dimension %s of a batch (%s) "
"to have length %s but it has %s"
% (str(self.shape),
str(self.axes),
str(d),
str(batch),
str(expected_shape),
str(actual_shape)))
    @functools.wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        # Formats a Conv2DSpace batch as a VectorSpace batch (flatten each
        # example) or as another Conv2DSpace batch (permute axes).
        if isinstance(space, VectorSpace):
            # We need to ensure that the resulting batch will always be
            # the same in `space`, no matter what the axes of `self` are.
            if self.axes != self.default_axes:
                # The batch index goes on the first axis
                assert self.default_axes[0] == 'b'
                batch = batch.transpose(*[self.axes.index(axis)
                                          for axis in self.default_axes])
            # Flatten everything but the batch axis into one vector per row.
            result = batch.reshape((batch.shape[0],
                                    self.get_total_dimension()))
            if space.sparse:
                result = _dense_to_sparse(result)
        elif isinstance(space, Conv2DSpace):
            result = Conv2DSpace.convert(batch, self.axes, space.axes)
        else:
            raise NotImplementedError("%s doesn't know how to format as %s"
                                      % (str(self), str(space)))
        # Cast to the target space's dtype (no-op when already matching).
        return _cast(result, space.dtype)
    @functools.wraps(Space._undo_format_as_impl)
    def _undo_format_as_impl(self, batch, space):
        # Walks back up the symbolic graph, peeling off the ops that
        # _format_as_impl applied (in reverse order).
        # Check for cast
        batch = _undo_op(batch, 'Cast')
        if isinstance(space, VectorSpace):
            # Check for SparseFromDense
            batch = _undo_op(batch, 'SparseFromDense')
            # Undo reshape op
            batch = _undo_op(batch, 'Reshape', strict=True)
            # Check to see if axis ordering was changed
            if self.axes != self.default_axes:
                batch = _undo_op(batch, 'DimShuffle', strict=True)
        elif isinstance(space, Conv2DSpace):
            # Check to see if axis ordering was changed
            if space.axes != self.axes:
                batch = _undo_op(batch, 'DimShuffle', strict=True)
        else:
            raise NotImplementedError("%s doesn't know how to format as %s"
                                      % (str(self), str(space)))
        return batch
class CompositeSpace(Space):
    """
    A Space whose points are tuples of points in other spaces.
    May be nested, in which case the points are nested tuples.

    Parameters
    ----------
    components : list or tuple of Space
        The subspaces whose points are combined into tuples.
    kwargs : dict
        Passed on to the superclass constructor.
    """
    def __init__(self, components, **kwargs):
        super(CompositeSpace, self).__init__(**kwargs)
        assert isinstance(components, (list, tuple))
        for i, component in enumerate(components):
            if not isinstance(component, Space):
                raise TypeError("component %d is %s of type %s, expected "
                                "Space instance. " %
                                (i, str(component), str(type(component))))
        self.components = list(components)

    def __eq__(self, other):
        """
        Equal iff `other` is the same class and has pairwise-equal
        components.
        """
        return (type(self) == type(other) and
                len(self.components) == len(other.components) and
                all(my_component == other_component for
                    my_component, other_component in
                    zip(self.components, other.components)))

    def __hash__(self):
        """
        Hash on class and components, consistently with __eq__.
        """
        return hash((type(self), tuple(self.components)))

    def __str__(self):
        """
        Render the class name with the string forms of all components.
        """
        return '%(classname)s(%(components)s)' % \
            dict(classname=self.__class__.__name__,
                 components=', '.join([str(c) for c in self.components]))

    @property
    def dtype(self):
        """
        Returns a nested tuple of dtype strings. NullSpaces will yield a bogus
        dtype string (see NullSpace.dtype).
        """
        def get_dtype_of_space(space):
            if isinstance(space, CompositeSpace):
                return tuple(get_dtype_of_space(c) for c in space.components)
            elif isinstance(space, NullSpace):
                return NullSpace().dtype
            else:
                return space.dtype
        return get_dtype_of_space(self)

    @dtype.setter
    def dtype(self, new_dtype):
        """
        If new_dtype is None or a string, it will be applied to all components
        (except any NullSpaces).
        If new_dtype is a (nested) tuple, its elements will be applied to
        corresponding components.
        """
        if isinstance(new_dtype, tuple):
            for component, new_dt in safe_zip(self.components, new_dtype):
                component.dtype = new_dt
        elif new_dtype is None or isinstance(new_dtype, str):
            for component in self.components:
                if not isinstance(component, NullSpace):
                    component.dtype = new_dtype

    def restrict(self, subset):
        """
        Returns a new Space containing only the components whose indices
        are given in subset.
        The new space will contain the components in the order given in the
        subset list.

        Parameters
        ----------
        subset : list or tuple of int
            Indices of the components to keep.

        Notes
        -----
        The returned Space may not be a CompositeSpace if `subset` contains
        only one index.
        """
        assert isinstance(subset, (list, tuple))
        if len(subset) == 1:
            idx, = subset
            return self.components[idx]
        return CompositeSpace([self.components[i] for i in subset])

    def restrict_batch(self, batch, subset):
        """
        Returns a batch containing only the components whose indices are
        present in subset.
        May not be a tuple anymore if there is only one index.
        Outputs will be ordered in the order that they appear in subset.
        Only supports symbolic batches.

        Parameters
        ----------
        batch : tuple
            A symbolic batch of this space.
        subset : list or tuple of int
            Indices of the components to keep.
        """
        self._validate(is_numeric=False, batch=batch)
        assert isinstance(subset, (list, tuple))
        if len(subset) == 1:
            idx, = subset
            return batch[idx]
        return tuple([batch[i] for i in subset])

    @functools.wraps(Space.get_total_dimension)
    def get_total_dimension(self):
        return sum([component.get_total_dimension() for component in
                    self.components])

    @functools.wraps(Space.make_shared_batch)
    def make_shared_batch(self, batch_size, name=None, dtype=None):
        dtype = self._clean_dtype_arg(dtype)
        batch = self.get_origin_batch(batch_size, dtype)
        # Wrap each leaf ndarray of the (possibly nested) tuple in a
        # theano shared variable, preserving the tuple structure.
        def recursive_shared(batch):
            if isinstance(batch, tuple):
                return tuple(recursive_shared(b) for b in batch)
            else:
                return theano.shared(batch, name=name)
        return recursive_shared(batch)

    @functools.wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        """
        Supports formatting to a single VectorSpace, or to a CompositeSpace.
        CompositeSpace->VectorSpace:
        Traverses the nested components in depth-first order, serializing the
        leaf nodes (i.e. the non-composite subspaces) into the VectorSpace.
        CompositeSpace->CompositeSpace:
        Only works for two CompositeSpaces that have the same nested
        structure. Traverses both CompositeSpaces' nested components in
        parallel, converting between corresponding non-composite components
        in <self> and <space> as:
        `self_component._format_as(is_numeric,
                                   batch_component,
                                   space_component)`

        Parameters
        ----------
        batch : tuple
            A batch of this composite space.
        space : Space
            Target space (VectorSpace or CompositeSpace).

        Returns
        -------
        The batch formatted as a point in `space`.
        """
        if isinstance(space, VectorSpace):
            # Format each component into its own VectorSpace slice, then
            # concatenate the slices along the feature axis.
            pieces = []
            for component, input_piece in zip(self.components, batch):
                subspace = VectorSpace(dim=component.get_total_dimension(),
                                       dtype=space.dtype,
                                       sparse=space.sparse)
                pieces.append(component._format_as(is_numeric,
                                                   input_piece,
                                                   subspace))
            # Pieces should all have the same dtype, before we concatenate them
            if len(pieces) > 0:
                for piece in pieces[1:]:
                    if pieces[0].dtype != piece.dtype:
                        assert space.dtype is None
                        raise TypeError("Tried to format components with "
                                        "differing dtypes into a VectorSpace "
                                        "with no dtype of its own. "
                                        "dtypes: %s" %
                                        str(tuple(str(p.dtype)
                                                  for p in pieces)))
            if is_symbolic_batch(batch):
                if space.sparse:
                    return theano.sparse.hstack(pieces)
                else:
                    return tensor.concatenate(pieces, axis=1)
            else:
                if space.sparse:
                    return scipy.sparse.hstack(pieces)
                else:
                    return np.concatenate(pieces, axis=1)
        if isinstance(space, CompositeSpace):
            def recursive_format_as(orig_space, batch, dest_space):
                if not (isinstance(orig_space, CompositeSpace) ==
                        isinstance(dest_space, CompositeSpace)):
                    raise TypeError("Can't convert between CompositeSpaces "
                                    "with different tree structures")
                # No need to check batch's tree structure. Space._format_as()
                # already did that by calling _validate(), before calling this
                # method.
                if isinstance(orig_space, CompositeSpace):
                    return tuple(recursive_format_as(os, bt, ds)
                                 for os, bt, ds
                                 in safe_zip(orig_space.components,
                                             batch,
                                             dest_space.components))
                else:
                    return orig_space._format_as(is_numeric, batch, dest_space)
            return recursive_format_as(self, batch, space)
        raise NotImplementedError(str(self) +
                                  " does not know how to format as " +
                                  str(space))

    @functools.wraps(Space._undo_format_as_impl)
    def _undo_format_as_impl(self, batch, space):
        """
        Undoes the formatting to a single VectorSpace, or to a CompositeSpace.
        CompositeSpace->VectorSpace:
        Traverses the nested components in depth-first order, serializing the
        leaf nodes (i.e. the non-composite subspaces) into the VectorSpace.
        CompositeSpace->CompositeSpace:
        Only works for two CompositeSpaces that have the same nested
        structure. Traverses both CompositeSpaces' nested components in
        parallel, converting between corresponding non-composite components
        in <self> and <space> as:
        `self_component._format_as(is_numeric,
                                   batch_component,
                                   space_component)`

        Parameters
        ----------
        batch : symbolic batch
            A batch previously produced by format_as.
        space : Space
            The space the batch was formatted to.

        Returns
        -------
        The original (tuple-structured) batch.
        """
        if isinstance(space, VectorSpace):
            # Undo join
            if space.sparse:
                owner = batch.owner
                assert owner is not None
                assert 'HStack' in str(owner.op)
                batch = owner.inputs
            else:
                owner = batch.owner
                assert owner is not None
                assert str(owner.op) == 'Join'
                # First component is join axis
                batch = owner.inputs[1:]

            def extract_dtype(dtype):
                # For a (possibly nested) tuple dtype, descend to the first
                # leaf dtype string.
                if isinstance(dtype, tuple):
                    return extract_dtype(dtype[0])
                else:
                    return dtype

            def compose_batch(composite_space, batch_list):
                # Undo each component's own formatting into its VectorSpace
                # slice. (A dead `if False` recursive branch was removed
                # here; nested composites are handled by each component's
                # undo_format_as call.)
                rval = ()
                for sp, bt in safe_zip(composite_space.components,
                                       batch_list):
                    sparse = getattr(sp, 'sparse', False)
                    dtype = extract_dtype(sp.dtype)
                    new_sp = VectorSpace(dim=sp.get_total_dimension(),
                                         dtype=dtype,
                                         sparse=sparse
                                         )
                    new_batch = sp.undo_format_as(bt,
                                                  new_sp)
                    rval += (new_batch,)
                return rval

            composed = compose_batch(self, batch)
            return composed
        if isinstance(space, CompositeSpace):
            def recursive_undo_format_as(orig_space, batch, dest_space):
                if not (isinstance(orig_space, CompositeSpace) ==
                        isinstance(dest_space, CompositeSpace)):
                    raise TypeError("Can't convert between CompositeSpaces "
                                    "with different tree structures")
                # No need to check batch's tree structure.
                # Space.undo_format_as() already did that
                # by calling _validate(), before calling this
                # method.
                if isinstance(orig_space, CompositeSpace):
                    return tuple(recursive_undo_format_as(os, bt, ds)
                                 for os, bt, ds
                                 in safe_zip(orig_space.components,
                                             batch,
                                             dest_space.components))
                else:
                    return orig_space.undo_format_as(batch,
                                                     dest_space)
            return recursive_undo_format_as(self, batch, space)
        raise NotImplementedError(str(self) +
                                  " does not know how to format as " +
                                  str(space))

    @functools.wraps(Space._validate_impl)
    def _validate_impl(self, is_numeric, batch):
        if not isinstance(batch, tuple):
            raise TypeError("The value of a CompositeSpace batch should be a "
                            "tuple, but is %s of type %s." %
                            (batch, type(batch)))
        if len(batch) != len(self.components):
            raise ValueError("Expected %d elements in batch, got %d"
                             % (len(self.components), len(batch)))
        for batch_elem, component in zip(batch, self.components):
            component._validate(is_numeric, batch_elem)

    def get_origin_batch(self, batch_size, dtype=None):
        """
        Calls get_origin_batch on all subspaces, and returns a (nested)
        tuple containing their return values.

        Parameters
        ----------
        batch_size : int
            Batch size.
        dtype : str
            the dtype to use for all the get_origin_batch() calls on
            subspaces. If dtype is None, or a single dtype string, that will
            be used for all calls. If dtype is a (nested) tuple, it must
            mirror the tree structure of this CompositeSpace.
        """
        dtype = self._clean_dtype_arg(dtype)
        return tuple(component.get_origin_batch(batch_size, dt)
                     for component, dt
                     in safe_zip(self.components, dtype))

    @functools.wraps(Space.make_theano_batch)
    def make_theano_batch(self,
                          name=None,
                          dtype=None,
                          batch_size=None):
        """
        Calls make_theano_batch on all subspaces, and returns a (nested)
        tuple containing their return values.

        Parameters
        ----------
        name : str
            Name of the symbolic variable
        dtype : str
            The dtype of the returned batch.
            If dtype is a string, it will be applied to all components.
            If dtype is None, C.dtype will be used for each component C.
            If dtype is a nested tuple, its elements will be applied to
            corresponding elements in the components.
        batch_size : int
            Batch size.
        """
        if name is None:
            name = [None] * len(self.components)
        elif not isinstance(name, (list, tuple)):
            name = ['%s[%i]' % (name, i) for i in xrange(len(self.components))]
        dtype = self._clean_dtype_arg(dtype)
        assert isinstance(name, (list, tuple))
        assert isinstance(dtype, (list, tuple))
        rval = tuple([x.make_theano_batch(name=n,
                                          dtype=d,
                                          batch_size=batch_size)
                      for x, n, d in safe_zip(self.components,
                                              name,
                                              dtype)])
        return rval

    @functools.wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        def has_no_data(space):
            """
            Returns True if space can contain no data.
            """
            # Bug fix: this used to reference the enclosing loop variable
            # `subspace` instead of the `space` parameter. It worked only
            # because every call site happened to pass that same variable.
            return (isinstance(space, NullSpace) or
                    (isinstance(space, CompositeSpace) and
                     len(space.components) == 0))
        if is_symbolic_batch(batch):
            # Use the first data-carrying component's batch size.
            for subspace, subbatch in safe_zip(self.components, batch):
                if not has_no_data(subspace):
                    return subspace._batch_size(is_numeric, subbatch)
            return 0  # TODO: shouldn't this line return a Theano object?
        else:
            # Numeric case: check that all data-carrying components agree.
            result = None
            for subspace, subbatch in safe_zip(self.components, batch):
                batch_size = subspace._batch_size(is_numeric, subbatch)
                if has_no_data(subspace):
                    assert batch_size == 0
                else:
                    if result is None:
                        result = batch_size
                    elif batch_size != result:
                        raise ValueError("All non-empty components of a "
                                         "CompositeSpace should have the same "
                                         "batch size, but we encountered "
                                         "components with size %s, then %s." %
                                         (result, batch_size))
            return 0 if result is None else result

    def _clean_dtype_arg(self, dtype):
        """
        If dtype is None or a string, this returns a nested tuple that mirrors
        the tree structure of this CompositeSpace, with dtype at the leaves.
        If dtype is a nested tuple, this checks that it has the same tree
        structure as this CompositeSpace.
        """
        super_self = super(CompositeSpace, self)

        def make_dtype_tree(dtype, space):
            """
            Creates a nested tuple tree that mirrors the tree structure of
            <space>, populating the leaves with <dtype>.
            """
            if isinstance(space, CompositeSpace):
                return tuple(make_dtype_tree(dtype, component)
                             for component in space.components)
            else:
                return super_self._clean_dtype_arg(dtype)

        def check_dtype_tree(dtype, space):
            """
            Verifies that a dtype tree mirrors the tree structure of <space>,
            calling Space._clean_dtype_arg on the leaves.
            """
            if isinstance(space, CompositeSpace):
                if not isinstance(dtype, tuple):
                    raise TypeError("Tree structure mismatch.")
                return tuple(check_dtype_tree(dt, c)
                             for dt, c in safe_zip(dtype, space.components))
            else:
                if not (dtype is None or isinstance(dtype, str)):
                    raise TypeError("Tree structure mismatch.")
                return super_self._clean_dtype_arg(dtype)

        if dtype is None or isinstance(dtype, str):
            dtype = super_self._clean_dtype_arg(dtype)
            return make_dtype_tree(dtype, self)
        else:
            return check_dtype_tree(dtype, self)
class NullSpace(Space):
    """
    A space that contains no data. As such, it has the following quirks:
    * Its validate()/np_validate() methods only accept None.
    * Its dtype string is "Nullspace's dtype".
    * The source name associated to this Space is the empty string ('').
    """

    # NullSpaces don't support validation callbacks, since they only take None
    # as data batches.
    def __init__(self):
        super(NullSpace, self).__init__()

    def __str__(self):
        """Always renders as the bare class name."""
        return "NullSpace"

    def __eq__(self, other):
        """All instances of the same exact class are interchangeable."""
        return type(other) == type(self)

    def __hash__(self):
        """Hash on the class only, consistently with __eq__."""
        return hash(type(self))

    @property
    def dtype(self):
        """
        A bogus placeholder dtype string; a NullSpace holds no data,
        so it has no real dtype.
        """
        return "%s's dtype" % self.__class__.__name__

    @dtype.setter
    def dtype(self, new_dtype):
        """Only the bogus placeholder dtype string may be assigned back."""
        if new_dtype == self.dtype:
            # Re-assigning the placeholder is a no-op.
            return
        raise TypeError('%s can only take the bogus dtype "%s"' %
                        (self.__class__.__name__,
                         self.dtype))

    @functools.wraps(Space.make_theano_batch)
    def make_theano_batch(self, name=None, dtype=None):
        # The only valid batch for a NullSpace is None.
        return None

    @functools.wraps(Space._validate_impl)
    def _validate_impl(self, is_numeric, batch):
        if batch is None:
            return
        raise TypeError('NullSpace only accepts None, as a dummy data '
                        'batch. Instead, got %s of type %s'
                        % (batch, type(batch)))

    @functools.wraps(Space._format_as_impl)
    def _format_as_impl(self, is_numeric, batch, space):
        # A NullSpace batch can only be "formatted" as another NullSpace.
        assert isinstance(space, NullSpace)
        return None

    @functools.wraps(Space._batch_size_impl)
    def _batch_size_impl(self, is_numeric, batch):
        # There is no way to know how many examples would actually
        # have been in the batch, since it is empty. We return 0.
        self._validate(is_numeric, batch)
        return 0
| bsd-3-clause | 4f4914929d8a60fdf57a0d2f988e2839 | 36.590977 | 79 | 0.528532 | 4.694681 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py | 44 | 23194 | """
GPU (CUDA) implementations of the unshared ("locally connected")
convolution ops defined in `unshared_conv`.
"""
from __future__ import print_function
import inspect
import os
import StringIO
import theano
from theano.sandbox.cuda import CudaNdarrayType
from theano.gof import local_optimizer
from theano.sandbox.cuda.opt import register_opt
from theano.sandbox.cuda import gpu_from_host, host_from_gpu
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from .unshared_conv import FilterActs
from .unshared_conv import WeightActs
from .unshared_conv import ImgActs
# Directory containing this module; used to locate the bundled .cu sources.
_this_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
# XXX: move to cuda.opt and refactor there
def any_from_gpu(*vv):
    """
    Return True if any of the given variables was produced by a
    `host_from_gpu` transfer, False otherwise.
    """
    return any(bool(v.owner) and v.owner.op == host_from_gpu
               for v in vv)
# XXX: move to cuda.opt and refactor there
def any_gpu_client(*vv):
"""
.. todo::
WRITEME
"""
for v in vv:
for (cl, pos) in v.clients:
if cl.op == gpu_from_host:
return True
return False
class Base(theano.Op):
    """
    Common base class for the GPU unshared-convolution ops.

    Parameters
    ----------
    module_stride : int
        Stride parameter passed through to the CUDA kernels.
    partial_sum : int
        Tuning parameter passed through to the CUDA kernels.
    """
    def __init__(self, module_stride, partial_sum):
        self.module_stride = module_stride
        self.partial_sum = partial_sum

    def _attributes(self):
        """
        Return the tuple of attributes that define this op's identity.
        """
        return (self.module_stride, self.partial_sum)

    def __eq__(self, other):
        """
        Ops are equal when they are the same class with equal attributes.
        """
        if type(self) != type(other):
            return False
        return self._attributes() == other._attributes()

    def __hash__(self):
        """
        Hash on class and identifying attributes, consistently with __eq__.
        """
        return hash((type(self), self._attributes()))

    def __str__(self):
        """
        Render the op name together with its identifying attributes.
        """
        return '%s{module_stride=%i,partial_sum=%i}' % (
            self.__class__.__name__,
            self.module_stride,
            self.partial_sum,
        )
class GpuFilterActs(Base):
    """
    GPU op computing the forward pass of an unshared ("locally connected")
    convolution: applies per-module filters to image batches via the CUDA
    kernel in `filter_acts.cu`.
    """
    def make_node(self, images, hidacts):
        """
        Build an Apply node for images (5-D CudaNdarray) and filters
        (7-D CudaNdarray), producing a 5-D CudaNdarray of responses.
        Raises TypeError if either input is not a CudaNdarray.
        """
        ibcast = images.broadcastable
        fbcast = filters.broadcastable
        # NOTE: these names denote *broadcastable flags* for each axis,
        # named after the axis semantics, not the axis lengths.
        igroups, icolors_per_group, irows, icols, icount = ibcast
        fmodulesR, fmodulesC, fcolors, frows, fcols = fbcast[:-2]
        fgroups, filters_per_group = fbcast[-2:]
        hbcast = (fgroups, filters_per_group, fmodulesR, fmodulesC, icount)
        if not isinstance(images.type, CudaNdarrayType):
            raise TypeError('gpu_filter_acts requires CudaNdarray images',
                            images)
        if not isinstance(filters.type, CudaNdarrayType):
            raise TypeError('gpu_filter_acts requires CudaNdarray filters',
                            filters)
        htype = CudaNdarrayType(broadcastable=hbcast)
        return theano.gof.Apply(self,
                                [images, filters],
                                [htype()])
    def c_support_code(self):
        """
        Return the CUDA kernel source bundled next to this module.
        """
        cufile = open(os.path.join(_this_dir, 'filter_acts.cu'))
        return cufile.read()
    def c_code_cache_version(self):
        """
        Empty tuple: disables theano's C-code caching for this op.
        """
        return ()
    def c_code(self, node, nodename, inputs, outputs, sub):
        """
        Generate the C wrapper that validates the inputs, allocates the
        output CudaNdarray, and calls _filterActs from filter_acts.cu.
        """
        images, filters, = inputs
        responses, = outputs
        fail = sub['fail']
        moduleStride = str(self.module_stride)
        sio = StringIO.StringIO()
        print("""
        //XXX: actually the rightmost images dimension can be strided
        if (!CudaNdarray_is_c_contiguous(%(images)s))
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "images not c contiguous");
            %(fail)s;
        }
        if (!CudaNdarray_is_c_contiguous(%(filters)s))
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "filters not c contiguous");
            %(fail)s;
        }
        if (%(images)s->nd != 5)
        {
            PyErr_Format(PyExc_TypeError,
                         "images ndim (%%i) must be 5",
                         %(images)s->nd);
            %(fail)s;
        }
        if (%(filters)s->nd != 7)
        {
            PyErr_Format(PyExc_TypeError,
                         "filters ndim (%%i) must be 7",
                         %(filters)s->nd);
            %(fail)s;
        }
        //fprintf(stderr, "really running on GPU\\n");
        { // new scope, new vars
            int igroups = CudaNdarray_HOST_DIMS(%(images)s)[0];
            int icolors_per_group = CudaNdarray_HOST_DIMS(%(images)s)[1];
            int irows = CudaNdarray_HOST_DIMS(%(images)s)[2];
            int icols = CudaNdarray_HOST_DIMS(%(images)s)[3];
            int icount = CudaNdarray_HOST_DIMS(%(images)s)[4];
            int fmodulesR = CudaNdarray_HOST_DIMS(%(filters)s)[0];
            int fmodulesC = CudaNdarray_HOST_DIMS(%(filters)s)[1];
            int fcolors = CudaNdarray_HOST_DIMS(%(filters)s)[2];
            int frows = CudaNdarray_HOST_DIMS(%(filters)s)[3];
            int fcols = CudaNdarray_HOST_DIMS(%(filters)s)[4];
            int fgroups = CudaNdarray_HOST_DIMS(%(filters)s)[5];
            int filters_per_group = CudaNdarray_HOST_DIMS(%(filters)s)[6];
            // XXX: use this parameter properly
            int paddingStart = 0;
            int imgStride = icount;
            float scaleTargets = 0.0;
            float scaleOutput = 1.0;
            bool conv = false;
            if (igroups != fgroups)
            {
                PyErr_Format(PyExc_ValueError,
                             "igroups != fgroups (%%i != %%i)",
                             igroups, fgroups);
                %(fail)s;
            }
            if (icolors_per_group != fcolors)
            {
                PyErr_Format(PyExc_ValueError,
                             "icolors_per_group != fcolors (%%i != %%i)",
                             icolors_per_group,
                             fcolors);
                %(fail)s;
            }
            if (!%(responses)s)
            {
                Py_XDECREF(%(responses)s);
                int dims[5];
                dims[0] = fgroups;
                dims[1] = filters_per_group;
                dims[2] = fmodulesR;
                dims[3] = fmodulesC;
                dims[4] = icount;
                %(responses)s = (CudaNdarray*)CudaNdarray_NewDims(5, dims);
                if (!%(responses)s)
                {
                    %(fail)s;
                }
            }
            assert(CudaNdarray_is_c_contiguous(%(responses)s));
            if (_filterActs(
                    igroups,
                    icolors_per_group,
                    irows,
                    icols,
                    icount,
                    fmodulesR,
                    fmodulesC,
                    frows,
                    fcols,
                    filters_per_group,
                    CudaNdarray_DEV_DATA(%(images)s),
                    CudaNdarray_DEV_DATA(%(filters)s),
                    CudaNdarray_DEV_DATA(%(responses)s),
                    paddingStart,
                    %(moduleStride)s,
                    imgStride,
                    scaleTargets,
                    scaleOutput,
                    conv))
            {
                %(fail)s;
            }
        } // end bogus scope used for vars
        """, file=sio)
        return sio.getvalue() % locals()
@register_opt()
@local_optimizer([FilterActs])
def insert_gpu_filter_acts(node):
    """
    Local optimizer: replace a CPU FilterActs node with GpuFilterActs
    when its inputs come from the GPU or its outputs are consumed there.
    Returns None (no replacement) otherwise.
    """
    if not isinstance(node.op, FilterActs):
        return
    images, filters = node.inputs
    if not (any_from_gpu(images, filters) or any_gpu_client(*node.outputs)):
        return
    gpu_op = GpuFilterActs(module_stride=node.op.module_stride,
                           partial_sum=1)
    response = gpu_op(gpu_from_host(images), gpu_from_host(filters))
    return [host_from_gpu(response)]
class GpuWeightActs(Base):
    """
    GPU op computing the gradient of an unshared convolution with respect
    to the filters (weights), via the CUDA kernel in `weight_acts.cu`.
    """
    def make_node(self, images, hidacts, frows, fcols):
        """
        Build an Apply node for images and hidden activations (both 5-D),
        plus scalar integer filter-row/column counts, producing the 7-D
        weight-gradient tensor. Raises TypeError on non-scalar or
        non-integer frows/fcols; NotImplementedError if partial_sum != 1.
        """
        if self.partial_sum != 1:
            # this corresponds to grad when doing convolution
            raise NotImplementedError('partial sum')
        frows = theano.tensor.as_tensor_variable(frows)
        fcols = theano.tensor.as_tensor_variable(fcols)
        if frows.dtype[:3] not in ('int', 'uin'):
            raise TypeError(frows)
        if fcols.dtype[:3] not in ('int', 'uin'):
            # Bug fix: this used to raise TypeError(frows), hiding which
            # argument was actually at fault.
            raise TypeError(fcols)
        if frows.ndim:
            raise TypeError('frows should be scalar', frows)
        if fcols.ndim:
            raise TypeError('fcols should be scalar', fcols)
        igroups, icolors, irows, icols, icount = images.type.broadcastable
        hgroups, hcolors, hrows, hcols, hcount = hidacts.type.broadcastable
        otype = theano.sandbox.cuda.CudaNdarrayType(
            broadcastable=(hrows, hcols, icolors,
                           False, False, hgroups, hcolors))
        return theano.Apply(self,
                            [images, hidacts, frows, fcols],
                            [otype()])
    def c_support_code(self):
        """
        Return the CUDA kernel source bundled next to this module.
        """
        cufile = open(os.path.join(_this_dir, 'weight_acts.cu'))
        return cufile.read()
    def c_code_cache_version(self):
        """
        Empty tuple: disables theano's C-code caching for this op.
        """
        return ()
    def c_code(self, node, nodename, inames, onames, sub):
        """
        Generate the C wrapper that validates the inputs, allocates the
        output CudaNdarray, and calls _weightActs from weight_acts.cu.
        """
        images, hidacts, frows, fcols = inames
        dweights, = onames
        fail = sub['fail']
        moduleStride = str(self.module_stride)
        sio = StringIO.StringIO()
        print("""
        if (!CudaNdarray_is_c_contiguous(%(images)s))
        {
            //XXX: Alex's code actually supports the rightmost images
            //     dimension strided
            PyErr_Format(PyExc_NotImplementedError,
                         "images not c contiguous");
            %(fail)s;
        }
        if (!CudaNdarray_is_c_contiguous(%(hidacts)s))
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "hidacts not c contiguous");
            %(fail)s;
        }
        if (%(images)s->nd != 5)
        {
            PyErr_Format(PyExc_TypeError,
                         "images ndim (%%i) must be 5",
                         %(images)s->nd);
            %(fail)s;
        }
        if (%(hidacts)s->nd != 5)
        {
            PyErr_Format(PyExc_TypeError,
                         "hidacts ndim (%%i) must be 5",
                         %(images)s->nd);
            %(fail)s;
        }
        if (PyArray_NDIM(%(frows)s) != 0)
        {
            PyErr_Format(PyExc_TypeError,
                         "frows ndim (%%i) must be 0",
                         PyArray_NDIM(%(frows)s));
            %(fail)s;
        }
        if (PyArray_NDIM(%(fcols)s) != 0)
        {
            PyErr_Format(PyExc_TypeError,
                         "fcols ndim (%%i) must be 0",
                         PyArray_NDIM(%(fcols)s));
            %(fail)s;
        }
        { // new scope, new vars
            int igroups = CudaNdarray_HOST_DIMS(%(images)s)[0];
            int icolors_per_group = CudaNdarray_HOST_DIMS(%(images)s)[1];
            int irows = CudaNdarray_HOST_DIMS(%(images)s)[2];
            int icols = CudaNdarray_HOST_DIMS(%(images)s)[3];
            int icount = CudaNdarray_HOST_DIMS(%(images)s)[4];
            int hgroups = CudaNdarray_HOST_DIMS(%(hidacts)s)[0];
            int hcolors_per_group = CudaNdarray_HOST_DIMS(%(hidacts)s)[1];
            int hrows = CudaNdarray_HOST_DIMS(%(hidacts)s)[2];
            int hcols = CudaNdarray_HOST_DIMS(%(hidacts)s)[3];
            int hcount = CudaNdarray_HOST_DIMS(%(hidacts)s)[4];
            int fmodulesR = hrows;
            int fmodulesC = hcols;
            int fcolors = icolors_per_group;
            int frows = ((dtype_%(frows)s *) PyArray_DATA(%(frows)s))[0];
            int fcols = ((dtype_%(fcols)s *) PyArray_DATA(%(fcols)s))[0];
            int fgroups = hgroups;
            int filters_per_group = hcolors_per_group;
            // XXX: use this parameter properly
            int paddingStart = 0;
            int imgStride = icount;
            float scaleTargets = 0.0;
            float scaleOutput = 1.0;
            int moduleStride = %(moduleStride)s;
            int partialSum = 1; // set to 0 for convolution.
            if (igroups != hgroups)
            {
                PyErr_Format(PyExc_ValueError,
                             "igroups != hgroups (%%i != %%i)",
                             igroups, hgroups);
                %(fail)s;
            }
            if (icolors_per_group != fcolors)
            {
                PyErr_Format(PyExc_ValueError,
                             "icolors_per_group != fcolors (%%i != %%i)",
                             icolors_per_group,
                             fcolors);
                %(fail)s;
            }
            if (icount != hcount)
            {
                PyErr_Format(PyExc_ValueError,
                             "icount != hcount (%%i != %%i)",
                             icount,
                             hcount);
                %(fail)s;
            }
            // XXX: CHECK SHAPE IS CORRECT
            if (!%(dweights)s)
            {
                Py_XDECREF(%(dweights)s);
                int dims[7];
                dims[0] = fmodulesR;
                dims[1] = fmodulesC;
                dims[2] = fcolors;
                dims[3] = frows;
                dims[4] = fcols;
                dims[5] = fgroups;
                dims[6] = filters_per_group;
                %(dweights)s = (CudaNdarray*)CudaNdarray_NewDims(7, dims);
                if (!%(dweights)s)
                {
                    %(fail)s;
                }
            }
            assert(CudaNdarray_is_c_contiguous(%(dweights)s));
            if (_weightActs(
                    igroups,
                    icolors_per_group,
                    irows,
                    icols,
                    icount,
                    fmodulesR,
                    fmodulesC,
                    frows,
                    fcols,
                    filters_per_group,
                    CudaNdarray_DEV_DATA(%(images)s),
                    CudaNdarray_DEV_DATA(%(hidacts)s),
                    CudaNdarray_DEV_DATA(%(dweights)s),
                    paddingStart,
                    moduleStride,
                    imgStride,
                    scaleTargets,
                    scaleOutput,
                    partialSum))
            {
                %(fail)s;
            }
        } // end bogus scope used for vars
        """, file=sio)
        return sio.getvalue() % locals()
@register_opt()
@local_optimizer([WeightActs])
def insert_gpu_weight_acts(node):
    """
    Local optimizer: replace a CPU WeightActs node with GpuWeightActs
    when its inputs come from the GPU or its outputs are consumed there.
    Returns None (no replacement) otherwise.
    """
    # (A stray no-op docstring statement that sat inside this `if` body
    # was removed.)
    if isinstance(node.op, WeightActs):
        images, hidacts, frows, fcols = node.inputs
        if any_from_gpu(images, hidacts) or any_gpu_client(*node.outputs):
            gpu_weight_acts = GpuWeightActs(
                    module_stride=node.op.module_stride,
                    partial_sum=1)
            return [host_from_gpu(gpu_weight_acts(
                gpu_from_host(images),
                gpu_contiguous(hidacts),
                frows,
                fcols,
                ))]
class GpuImgActs(Base):
    """
    GPU op wrapping cuda-convnet's ``_imgActs`` kernel: given a bank of
    local filters and hidden-unit activations, it accumulates the
    corresponding image-space activations.

    Layouts are channel-major: ``filters`` is a 7-tensor
    (moduleR, moduleC, colors, frows, fcols, groups, filters_per_group)
    and ``hidacts`` a 5-tensor (groups, colors_per_group, rows, cols,
    count), as checked by the generated C code below.
    """

    def make_node(self, filters, hidacts, irows, icols):
        """
        Build the Apply node for this op.

        Parameters
        ----------
        filters : CudaNdarray variable
            7-dimensional filter bank.
        hidacts : CudaNdarray variable
            5-dimensional hidden activations; the output shares its type.
        irows, icols : integer scalar variables
            Spatial size (rows, cols) of the image output to produce.

        Raises
        ------
        TypeError
            If `irows` / `icols` are not integer-typed scalars.
        """
        irows = theano.tensor.as_tensor_variable(irows)
        icols = theano.tensor.as_tensor_variable(icols)
        if irows.dtype[:3] not in ('int', 'uin'):
            raise TypeError(irows)
        if icols.dtype[:3] not in ('int', 'uin'):
            # Bug fix: previously reported `irows` when `icols` was bad.
            raise TypeError(icols)
        if irows.ndim:
            raise TypeError('irows should be scalar', irows)
        if icols.ndim:
            raise TypeError('icols should be scalar', icols)
        return theano.gof.Apply(self,
                                [filters, hidacts, irows, icols],
                                [hidacts.type()])

    def c_support_code(self):
        """
        Return the raw CUDA support code shipped alongside this module.
        """
        # Use a context manager so the file handle is always closed
        # (the previous implementation leaked it).
        with open(os.path.join(_this_dir, 'raw_img_acts.cu')) as cufile:
            return cufile.read()

    def c_code_cache_version(self):
        """
        Return the C code cache version.

        An empty tuple tells Theano not to cache the compiled C code.
        """
        return ()

    def c_code(self, node, nodename, inames, onames, sub):
        """
        Generate the C implementation: shape/contiguity checks, output
        allocation, then a call into cuda-convnet's ``_imgActs``.

        The ``%(name)s`` placeholders in the template are substituted
        from ``locals()`` at the end; ``%%`` escapes a literal percent
        sign for ``PyErr_Format``.
        """
        filters, hidacts, irows, icols = inames
        dimages, = onames
        fail = sub['fail']
        moduleStride = str(self.module_stride)

        sio = StringIO.StringIO()

        print("""

        if (!CudaNdarray_is_c_contiguous(%(filters)s))
        {
            //XXX: Alex's code actually supports the rightmost images
            //     dimension strided
            PyErr_Format(PyExc_NotImplementedError,
                         "images not c contiguous");
            %(fail)s;
        }

        if (!CudaNdarray_is_c_contiguous(%(hidacts)s))
        {
            PyErr_Format(PyExc_NotImplementedError,
                         "hidacts not c contiguous");
            %(fail)s;
        }

        if (%(filters)s->nd != 7)
        {
            PyErr_Format(PyExc_TypeError,
                         "images ndim (%%i) must be 7",
                         %(filters)s->nd);
            %(fail)s;
        }

        if (%(hidacts)s->nd != 5)
        {
            PyErr_Format(PyExc_TypeError,
                         "hidacts ndim (%%i) must be 5",
                         %(hidacts)s->nd);
            %(fail)s;
        }

        if (PyArray_NDIM(%(irows)s) != 0)
        {
            PyErr_Format(PyExc_TypeError,
                         "frows ndim (%%i) must be 0",
                         PyArray_NDIM(%(irows)s));
            %(fail)s;
        }

        if (PyArray_NDIM(%(icols)s) != 0)
        {
            PyErr_Format(PyExc_TypeError,
                         "fcols ndim (%%i) must be 0",
                         PyArray_NDIM(%(icols)s));
            %(fail)s;
        }

        { // new scope, new vars

            int fmodulesR = CudaNdarray_HOST_DIMS(%(filters)s)[0];
            int fmodulesC = CudaNdarray_HOST_DIMS(%(filters)s)[1];
            int fcolors = CudaNdarray_HOST_DIMS(%(filters)s)[2];
            int frows = CudaNdarray_HOST_DIMS(%(filters)s)[3];
            int fcols = CudaNdarray_HOST_DIMS(%(filters)s)[4];
            int fgroups = CudaNdarray_HOST_DIMS(%(filters)s)[5];
            int filters_per_group = CudaNdarray_HOST_DIMS(%(filters)s)[6];

            int hgroups = CudaNdarray_HOST_DIMS(%(hidacts)s)[0];
            int hcolors_per_group = CudaNdarray_HOST_DIMS(%(hidacts)s)[1];
            int hrows = CudaNdarray_HOST_DIMS(%(hidacts)s)[2];
            int hcols = CudaNdarray_HOST_DIMS(%(hidacts)s)[3];
            int hcount = CudaNdarray_HOST_DIMS(%(hidacts)s)[4];

            int igroups = fgroups;
            int icolors_per_group = fcolors;
            int irows = ((dtype_%(irows)s *) PyArray_DATA(%(irows)s))[0];
            int icols = ((dtype_%(icols)s *) PyArray_DATA(%(icols)s))[0];
            int icount = hcount;

            // TODO: use this parameter properly
            int paddingStart = 0;
            float scaleTargets = 0.0;
            float scaleOutput = 1.0;
            int moduleStride = %(moduleStride)s;
            bool conv = 0;

            if (hgroups != fgroups)
            {
                PyErr_Format(PyExc_ValueError,
                             "hgroups != fgroups (%%i != %%i)",
                             hgroups, fgroups);
                %(fail)s;
            }

            if (hcolors_per_group != filters_per_group)
            {
                PyErr_Format(PyExc_ValueError,
                             "hcolors_per_group != filters_per_group (%%i != %%i)",
                             hcolors_per_group,
                             filters_per_group);
                %(fail)s;
            }

            // XXX: CHECK SHAPE IS CORRECT

            if (!%(dimages)s)
            {
                Py_XDECREF(%(dimages)s);
                int dims[5];
                dims[0] = igroups;
                dims[1] = icolors_per_group;
                dims[2] = irows;
                dims[3] = icols;
                dims[4] = icount;
                %(dimages)s = (CudaNdarray*)CudaNdarray_NewDims(5, dims);
                if (!%(dimages)s)
                {
                    %(fail)s;
                }
            }
            assert(CudaNdarray_is_c_contiguous(%(dimages)s));

            if (paddingStart + (fmodulesR - 1) * moduleStride + frows < irows)
            {
                PyErr_Format(PyExc_ValueError,
                             "uhoh123: %%i %%i %%i %%i %%i",
                             paddingStart,
                             fmodulesR,
                             moduleStride,
                             frows,
                             irows);
                %(fail)s;
            }

            if (_imgActs(
                fgroups,
                filters_per_group,
                fcolors,
                hcount,
                fmodulesR,
                fmodulesC,
                frows,
                fcols,
                irows,
                icols,
                CudaNdarray_DEV_DATA(%(filters)s),
                CudaNdarray_DEV_DATA(%(hidacts)s),
                CudaNdarray_DEV_DATA(%(dimages)s),
                paddingStart,
                moduleStride,
                scaleTargets,
                scaleOutput,
                conv))
            {
                %(fail)s;
            }
        } // end bogus scope used for vars

        """, file=sio)

        return sio.getvalue() % locals()
@register_opt()
@local_optimizer([ImgActs])
def insert_gpu_img_acts(node):
    """
    Graph optimizer: swap a host ``ImgActs`` apply node for ``GpuImgActs``
    whenever the surrounding graph already lives on the GPU.

    Returns a one-element list holding the replacement output, or ``None``
    (implicitly) when the rewrite does not apply.
    """
    if not isinstance(node.op, ImgActs):
        return
    filters, hidacts, irows, icols = node.inputs
    # Only rewrite when data is already flowing from/to the GPU.
    if not (any_from_gpu(filters, hidacts) or any_gpu_client(*node.outputs)):
        return
    gpu_op = GpuImgActs(module_stride=node.op.module_stride,
                        partial_sum=1)
    replacement = gpu_op(gpu_from_host(filters),
                         gpu_contiguous(hidacts),
                         irows,
                         icols)
    return [host_from_gpu(replacement)]
| bsd-3-clause | 9eb51f198fab1c73c61cd311e77f85e8 | 28.248424 | 85 | 0.454902 | 4.087769 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/datasets/cos_dataset.py | 45 | 3759 | """
.. todo::
WRITEME
"""
import numpy as N
import copy
from theano import config
import theano.tensor as T
from pylearn2.utils.rng import make_np_rng
class CosDataset(object):
    """
    A dataset that streams an unlimited supply of randomly generated 2D
    examples.  The first coordinate is drawn uniformly from
    ``[min_x, max_x]``; the second is the cosine of the first plus
    zero-mean gaussian noise with standard deviation ``std``.
    """

    def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=None):
        """
        Parameters
        ----------
        min_x, max_x : float
            Range of the uniform distribution on the first coordinate.
        std : float
            Standard deviation of the gaussian noise on the second one.
        rng : RandomState or seed, optional
            Source of randomness for the example stream.
        """
        self.min_x = min_x
        self.max_x = max_x
        self.std = std
        rng = make_np_rng(rng, [17, 2, 946], which_method=['uniform', 'randn'])
        # Keep a pristine copy so the stream can be restarted later.
        self.default_rng = copy.copy(rng)
        self.rng = rng

    def energy(self, mat):
        """
        Numpy energy of each row of `mat`: squared deviation of y from
        cos(x), scaled by the noise variance.
        """
        x = mat[:, 0]
        y = mat[:, 1]
        deviation = y - N.cos(x)
        return deviation ** 2. / (2. * (self.std ** 2.))

    def pdf_func(self, mat):
        """
        Numpy pdf of the generating distribution, evaluated at each row
        of `mat`.  (The dataset can generate infinitely many examples;
        this is the density they are drawn from.)
        """
        x = mat[:, 0]
        y = mat[:, 1]
        # Gaussian density of y around cos(x) ...
        density = N.exp(-(y - N.cos(x)) ** 2. / (2. * (self.std ** 2.)))
        density /= N.sqrt(2.0 * N.pi * (self.std ** 2.))
        # ... times the uniform density of x, truncated to (min_x, max_x).
        density /= (self.max_x - self.min_x)
        density *= x < self.max_x
        density *= x > self.min_x
        return density

    def free_energy(self, X):
        """
        Theano energy for the generating distribution; points outside the
        x-range are assigned a huge (1e30) energy.
        """
        x = X[:, 0]
        y = X[:, 1]
        inside = (x < self.max_x) * (x > self.min_x)
        quad = T.sqr(y - T.cos(x)) / (2. * (self.std ** 2.))
        return inside * quad + (1 - inside) * 1e30

    def pdf(self, X):
        """
        Theano pdf of the generating distribution, evaluated at each row
        of `X`.
        """
        x = X[:, 0]
        y = X[:, 1]
        density = T.exp(-T.sqr(y - T.cos(x)) / (2. * (self.std ** 2.)))
        density /= N.sqrt(2.0 * N.pi * (self.std ** 2.))
        density /= (self.max_x - self.min_x)
        density *= x < self.max_x
        density *= x > self.min_x
        return density

    def get_stream_position(self):
        """Snapshot of the example stream (a copy of the current RNG)."""
        return copy.copy(self.rng)

    def set_stream_position(self, s):
        """Move the stream to a snapshot from ``get_stream_position``."""
        self.rng = copy.copy(s)

    def restart_stream(self):
        """Restart the example stream from its initial state."""
        self.reset_RNG()

    def reset_RNG(self):
        """Reset the RNG to the default seed, creating it if missing."""
        if not hasattr(self, 'default_rng'):
            self.default_rng = N.random.RandomState([17, 2, 946])
        self.rng = copy.copy(self.default_rng)

    def apply_preprocessor(self, preprocessor, can_fit=False):
        """Streaming datasets cannot be preprocessed in place."""
        raise NotImplementedError()

    def get_batch_design(self, batch_size):
        """
        Generate a fresh ``(batch_size, 2)`` design matrix of examples.
        """
        x = N.cast[config.floatX](self.rng.uniform(self.min_x, self.max_x,
                                                   (batch_size, 1)))
        noise = N.cast[config.floatX](self.rng.randn(*x.shape)) * self.std
        return N.hstack((x, N.cos(x) + noise))
| bsd-3-clause | b476ad697dd5c8e64f01fed8fd09de53 | 23.89404 | 79 | 0.478053 | 3.725471 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/distributions/uniform_hypersphere.py | 49 | 2116 | """
.. todo::
WRITEME
"""
# Module authorship metadata (standard pylearn2 header); the original
# line was garbled to "uthors__", which silently created a wrong name.
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as N
import theano.tensor as T
from theano import config
from scipy.special import gammaln
from pylearn2.utils.rng import make_theano_rng
class UniformHypersphere(object):
    """
    Uniform distribution over the surface of a hypersphere of a given
    radius, centered at the origin.

    Parameters
    ----------
    dim : int
        Dimensionality of the ambient space.
    radius : float
        Radius of the sphere the probability mass lives on.
    """

    def __init__(self, dim, radius):
        self.dim = dim
        self.radius = radius
        self.s_rng = make_theano_rng(None, 42, which_method='normal')
        # log of the unit-ball constant C = pi^(dim/2) / Gamma(1 + dim/2).
        log_C = ((float(self.dim) / 2.) * N.log(N.pi) -
                 gammaln(1. + float(self.dim) / 2.))
        # logZ = log surface area = log(dim * C * radius^(dim-1)); this is
        # the normalizing constant of the uniform density on the sphere.
        self.logZ = N.log(self.dim) + log_C + (self.dim - 1) * N.log(radius)
        assert not N.isnan(self.logZ)
        assert not N.isinf(self.logZ)

    def free_energy(self, X):
        """
        Energy of each example (zero everywhere on the support).

        Parameters
        ----------
        X : theano matrix (design-matrix format)
            Must contain only examples that lie on the hypersphere.
        """
        # Uniform on the support: every example has the same (zero) energy.
        return T.zeros_like(X[:, 0])

    def log_prob(self, X):
        """Log density of each row of `X` (constant -logZ on the support)."""
        return - self.free_energy(X) - self.logZ

    def random_design_matrix(self, m):
        """
        Draw `m` samples by projecting gaussian vectors onto the sphere.
        """
        gauss = self.s_rng.normal(size=(m, self.dim),
                                  avg=0., std=1., dtype=config.floatX)
        gauss.name = 'UH.rdm.Z'
        sq_norm = T.sum(T.sqr(gauss), axis=1)
        sq_norm.name = 'UH.rdm.sq_norm_Z'
        # Guard against numerically-zero vectors: replace them with the
        # all-ones vector, whose squared norm is `dim`.
        eps = 1e-6
        degenerate = sq_norm < eps
        degenerate.name = 'UH.rdm.mask'
        gauss = (gauss.T * (1. - degenerate) + degenerate).T
        gauss.name = 'UH.rdm.Z2'
        sq_norm = sq_norm * (1. - degenerate) + self.dim * degenerate
        sq_norm.name = 'UH.rdm.sq_norm_Z2'
        norm = T.sqrt(sq_norm)
        norm.name = 'UH.rdm.sq_norm_Z2'
        rval = self.radius * (gauss.T / norm).T
        rval.name = 'UH.rdm.rval'
        return rval
| bsd-3-clause | e28bcfe1033155a492539b831bbcdc8d | 25.45 | 76 | 0.512287 | 3.084548 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/optimization/linear_cg.py | 49 | 2914 | """
.. todo::
WRITEME
"""
import theano
from theano import tensor
from theano.ifelse import ifelse
def linear_cg(fn, params, tol=1e-3, max_iters=1000, floatX=None):
    """
    Minimizes a POSITIVE DEFINITE quadratic function via linear conjugate
    gradient using the R operator to avoid explicitly representing the Hessian.

    If you have several variables, this is cheaper than Newton's method, which
    would need to invert the Hessian. It is also cheaper than standard linear
    conjugate gradient, which works with an explicit representation of the
    Hessian. It is also cheaper than nonlinear conjugate gradient which does a
    line search by repeatedly evaluating f.

    For more information about linear conjugate gradient, you may look at
    http://en.wikipedia.org/wiki/Conjugate_gradient_method .
    (This reference describes linear CG but not converting it to use
    the R operator instead of an explicit representation of the Hessian)

    Parameters
    ----------
    fn : theano_like
        A theano expression which is quadratic with POSITIVE DEFINITE
        Hessian in `params`.
    params : theano shared variable, or list of them
        The variable(s) with respect to which `fn` is minimized.
    tol : float
        Iteration halts once the squared norm of the residual (the sum of
        squared residual entries) falls below `tol`.
    max_iters : int
        Maximum number of CG iterations performed by the scan loop.
    floatX : optional
        Unused. NOTE(review): appears to be dead; kept for interface
        compatibility -- confirm with callers before removing.

    Returns
    -------
    rval : theano_like
        The solution in form of a symbolic expression (or list of
        symbolic expressions, matching how `params` was provided).
    """
    # Normalize to list form; remember the original shape for the return.
    provided_as_list = True
    if not isinstance(params, (list,tuple)):
        params = [params]
        provided_as_list = False
    n_params = len(params)

    def loop(rsold, *args):
        # One CG iteration. `args` packs, in order: search directions (ps),
        # residuals (rs), and current iterates (xs), n_params of each.
        ps = args[:n_params]
        rs = args[n_params:2*n_params]
        xs = args[2*n_params:]
        # Hessian-vector products A*p via the R operator (no explicit A).
        Aps = []
        for param in params:
            rval = tensor.Rop(tensor.grad(fn,param), params, ps)
            if isinstance(rval, (list, tuple)):
                Aps.append(rval[0])
            else:
                Aps.append(rval)
        # Step length along the current search direction.
        alpha = rsold/ sum( (x*y).sum() for x,y in zip(Aps, ps) )
        xs = [ x - alpha*p for x,p in zip(xs,ps)]
        rs = [ r - alpha*Ap for r,Ap in zip(rs,Aps)]
        rsnew = sum( (r*r).sum() for r in rs)
        # New conjugate search direction (beta = rsnew / rsold).
        ps = [ r + rsnew/rsold*p for r,p in zip(rs,ps)]
        # Early exit once the squared residual norm drops below tol.
        return [rsnew]+ps+rs+xs, theano.scan_module.until(rsnew < tol)

    # Initial residual and search direction: the gradient at the start point.
    r0s = tensor.grad(fn, params)
    if not isinstance(r0s, (list,tuple)):
        r0s = [r0s]
    p0s = [x for x in r0s]
    x0s = params
    rsold = sum( (r*r).sum() for r in r0s)
    outs, updates = theano.scan( loop,
            outputs_info = [rsold] + p0s+r0s+x0s,
            n_steps = max_iters,
            name = 'linear_conjugate_gradient')
    # Keep only the iterate sequences (last n_params outputs), final step.
    fxs = outs[1+2*n_params:]
    # If already converged before the first iteration, return the inputs.
    fxs = [ifelse(rsold <tol, x0, x[-1]) for x0,x in zip(x0s, fxs)]
    if not provided_as_list:
        return fxs[0]
    else:
        return fxs
| bsd-3-clause | 83ce8550fcf4b2e7dda993e9d8b92c58 | 33.690476 | 79 | 0.604324 | 3.610905 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/expr/sampling.py | 49 | 1815 | """
.. todo::
WRITEME
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from theano.sandbox.rng_mrg import MRG_RandomStreams
from pylearn2.blocks import Block
from pylearn2.utils.rng import make_theano_rng
class SampleBernoulli(Block):
    """
    Block whose output is an elementwise Bernoulli sample of its input,
    interpreting each input entry as an "on" probability.

    Parameters
    ----------
    theano_rng : optional
        RNG to use for sampling; mutually exclusive with `seed`.
    seed : optional
        Seed used to build an RNG when `theano_rng` is not given.
    input_space : optional
        If given, inputs are validated against it before sampling.
    """

    def __init__(self, theano_rng=None, seed=None, input_space=None):
        super(SampleBernoulli, self).__init__()
        # At most one way of specifying the randomness may be used.
        assert theano_rng is None or seed is None
        theano_rng = make_theano_rng(theano_rng if theano_rng is not None else seed,
                                     2012+11+22, which_method='binomial')
        self.theano_rng = theano_rng
        self.seed = seed
        self.input_space = input_space

    def __call__(self, inputs):
        """
        Return a symbolic Bernoulli sample shaped and typed like `inputs`,
        with per-element success probability `inputs`.
        """
        if self.input_space:
            self.input_space.validate(inputs)
        return self.theano_rng.binomial(p=inputs, size=inputs.shape,
                                        dtype=inputs.dtype)

    def set_input_space(self, space):
        """Declare the Space this block's inputs will live in."""
        self.input_space = space

    def get_input_space(self):
        """Return the input space; raise if none was ever specified."""
        if self.input_space is None:
            raise ValueError("No input space was specified for this Block (%s). "
                             "You can call set_input_space to correct that." % str(self))
        return self.input_space

    def get_output_space(self):
        """Sampling is elementwise, so the output space equals the input's."""
        return self.get_input_space()
| bsd-3-clause | 8a72070624293b71489521890df95e36 | 23.527027 | 88 | 0.552617 | 3.937093 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/sandbox/cuda_convnet/stochastic_pool.py | 2 | 19249 | """
GPU op for Stochastic max pooling as defined in:
Stochastic Pooling for Regularization of Deep Convolutional Neural Networks
Matthew D. Zeiler, Rob Fergus, ICLR 2013
The code is written around Alex Krizhevsky's cuda-convnet
"""
__authors__ = "Mehdi Mirza"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["Mehdi Mirza", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "Mehdi Mirza"
__email__ = "mirzamom@iro"
import warnings
import numpy
from theano import shared
from theano.gof import Apply
from theano.sandbox.cuda import CudaNdarrayType
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda import GpuOp
from theano.tensor import get_scalar_constant_value, NotScalarConstantError, zeros_like
from pylearn2.sandbox.cuda_convnet.base_acts import UnimplementedError
from pylearn2.sandbox.cuda_convnet.convnet_compile import convnet_available
from pylearn2.sandbox.cuda_convnet.convnet_compile import cuda_convnet_loc
from pylearn2.sandbox.cuda_convnet.shared_code import this_dir
from pylearn2.sandbox.cuda_convnet.pool import MaxPoolGrad
def stochastic_max_pool_c01b(c01b, pool_shape, pool_stride, start=0, seed = 1234):
"""
.. todo::
WRITEME
"""
assert pool_shape[0] == pool_shape[1]
assert pool_stride[0] == pool_stride[1]
op = StochasticMaxPool(pool_shape[0], pool_stride[0], start, seed)
c01b = gpu_contiguous(c01b)
return op(c01b)
def weighted_max_pool_c01b(c01b, pool_shape, pool_stride, start=0):
"""
.. todo::
WRITEME
"""
assert pool_shape[0] == pool_shape[1]
assert pool_stride[0] == pool_stride[1]
op = WeightedMaxPool(pool_shape[0], pool_stride[0], start)
c01b = gpu_contiguous(c01b)
return op(c01b)
class StochasticMaxPool(GpuOp):
"""
Stochastic MaxPool op code on the GPU.
The input are in the order (channel, image rows, image cols, batch)
Works only on square images and the grad works only when
channel % 16 == 0.
Parameters
----------
ds : int
defines the size of the pooling region in the x (equivalently, y)
dimension. Squares of size (ds)2 get reduced to one value by this
layer. There are no restrictions on the value of this parameter. It's
fine for a pooling square to fall off the boundary of the image. Named
SizeX in Alex's code.
stride : int
defines the stride size between successive pooling squares. Setting
this parameter smaller than sizeX produces overlapping pools. Setting
it equal to sizeX gives the usual, non-overlapping pools. Values
greater than sizeX are not allowed.
start : int, optional
tells the net where in the input image to start the pooling (in x,y
coordinates). In principle, you can start anywhere you want. Setting
this to a positive number will cause the net to discard some pixels at
the top and at the left of the image. Setting this to a negative number
will cause it to include pixels that don't exist (which is fine).
start=0 is the usual setting.
outputs : int, optional
allows you to control how many output values in the x (equivalently, y)
dimension this operation will produce. This parameter is analogous to
the start parameter, in that it allows you to discard some portion of
the image by setting it to a value small enough to leave part of the
image uncovered. Setting it to zero instructs the net to produce as
many outputs as is necessary to ensure that the whole image is covered.
default 0
seed : WRITEME
"""
def __init__(self, ds, stride, start=0, outputs=0, seed = 1234):
self.ds = ds
self.stride = stride
self.start = start
self.copy_non_contiguous = 0
self.seed_state = shared(numpy.asarray(seed).astype('float32'))
self.seed_state.default_update = self.seed_state + 1
assert stride > 0 and stride <= ds, (stride, ds)
assert ds > 0, ds # We check in the code if ds <= imgSizeX
def __eq__(self, other):
"""
.. todo::
WRITEME
"""
#Dont put copy_non_contigous as this doesn't change the output
return (type(self) == type(other) and
self.ds == other.ds and
self.stride == other.stride and
self.start == other.start)
def __hash__(self):
"""
.. todo::
WRITEME
"""
#Dont put copy_non_contigous as this doesn't change the output
return (hash(type(self)) ^ hash(self.ds) ^
hash(self.stride) ^ hash(self.start))
def c_header_dirs(self):
"""
.. todo::
WRITEME
"""
return [this_dir]
def c_headers(self):
"""
.. todo::
WRITEME
"""
return ['nvmatrix.cuh', 'conv_util.cuh']
def c_lib_dirs(self):
"""
.. todo::
WRITEME
"""
return [cuda_convnet_loc]
def c_libraries(self):
"""
.. todo::
WRITEME
"""
return ['cuda_convnet']
def c_code_cache_version(self):
"""
.. todo::
WRITEME
"""
return (1,)
def _argument_contiguity_check(self, arg_name):
"""
.. todo::
WRITEME
"""
return """
if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))
{
if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {
PyErr_SetString(PyExc_ValueError,
"%(class)s: %(arg_name)s must be C contiguous");
%%(fail)s;
}
}
""" % {
'class': self.__class__.__name__,
'arg_name': arg_name,
'class_name_caps': self.__class__.__name__.upper(),
}
def make_node(self, images):
"""
.. todo::
WRITEME
"""
images = as_cuda_ndarray_variable(images)
assert images.ndim == 4
channels_broadcastable = images.type.broadcastable[0]
batch_broadcastable = images.type.broadcastable[3]
rows_broadcastable = False
cols_broadcastable = False
targets_broadcastable = (channels_broadcastable, rows_broadcastable,
cols_broadcastable, batch_broadcastable)
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
targets = targets_type()
seed = self.seed_state
seed = as_cuda_ndarray_variable(seed)
return Apply(self, [images, seed], [targets])
def c_code(self, node, name, inputs, outputs, sub):
"""
.. todo::
WRITEME
"""
images, seed = inputs
targets, = outputs
fail = sub['fail']
# The amount of braces that must be closed at the end
num_braces = 0
if self.copy_non_contiguous:
raise UnimplementedError()
else:
basic_setup = "#define STOCHASTICMAXPOOL_COPY_NON_CONTIGUOUS 0\n"
# Convert images in nv_images, an NVMatrix, for compatibility
# with the cuda-convnet functions
setup_nv_images = self._argument_contiguity_check("images") + """
if (%(images)s->nd != 4)
{
PyErr_Format(PyExc_ValueError,
"images must have nd=4, got nd=%%i", %(images)s->nd);
%(fail)s;
}
{ //setup_nv_images brace 1
const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);
const int img_channels = images_dims[0];
const int imgSizeY = images_dims[1];
const int imgSizeX = images_dims[2];
const int batch_size = images_dims[3];
if(imgSizeY != imgSizeX){
PyErr_Format(PyExc_ValueError,
"images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",
img_channels, imgSizeY, imgSizeX, batch_size);
%(fail)s;
}
if(%(ds)s > imgSizeY){
PyErr_Format(PyExc_ValueError,
"ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",
%(ds)s, imgSizeX, imgSizeY);
%(fail)s;
}
if(%(start)s >= imgSizeX){
PyErr_Format(PyExc_ValueError,
"start is %%d but must be smaller then the images size of %%d x %%d.",
%(start)s, imgSizeX, imgSizeY);
%(fail)s;
}
NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,
"MaxPool:nv_images");
//int * seed = CudaNdarray_HOST_DIMS%(seed)s;
float * seed = CudaNdarray_DEV_DATA(%(seed)s);
//int * seed = %(seed)s;
"""
num_braces += 1
setup_nv_targets = """
//int _outputsX = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;
int target_dims [] = {
img_channels,
_outputsX,
_outputsX,
batch_size };
if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))
{
%(fail)s;
}
{ // setup_nv_target brace # 1
NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],
target_dims[3], "MaxPool:nv_targets");
"""
num_braces += 1
do_pool = """
convLocalStochasticMaxPool(nv_images, nv_targets, img_channels, %(ds)s,
%(start)s, %(stride)s, _outputsX, MaxPooler(), seed);
"""
braces = '}' * num_braces
rval = (basic_setup +
setup_nv_images +
setup_nv_targets +
do_pool +
braces)
start = self.start
stride = self.stride
ds = self.ds
rval = rval % locals()
return rval
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
x, seed = inp
gz, = grads
gz = gpu_contiguous(gz)
maxout = self(x)
return [MaxPoolGrad(self.ds, self.stride, self.start)(x, maxout, gz), zeros_like(seed)]
# Make sure the cuda_convnet library is compiled and up-to-date
def make_thunk(self, *args, **kwargs):
"""
.. todo::
WRITEME
"""
if not convnet_available():
raise RuntimeError('Could not compile cuda_convnet')
return super(StochasticMaxPool, self).make_thunk(*args, **kwargs)
class WeightedMaxPool(GpuOp):
"""
This op wrap Alex's MaxPool code on the GPU.
The input are in the order (channel, image rows, image cols, batch)
Works only on square images and the grad works only when
channel % 16 == 0.
Parameters
----------
ds : int
defines the size of the pooling region in the x (equivalently, y)
dimension. Squares of size (ds)2 get reduced to one value by this
layer. There are no restrictions on the value of this parameter. It's
fine for a pooling square to fall off the boundary of the image. Named
SizeX in Alex's code.
stride : int
defines the stride size between successive pooling squares. Setting
this parameter smaller than sizeX produces overlapping pools. Setting
it equal to sizeX gives the usual, non-overlapping pools. Values
greater than sizeX are not allowed.
start : int, optional
tells the net where in the input image to start the pooling (in x,y
coordinates). In principle, you can start anywhere you want. Setting
this to a positive number will cause the net to discard some pixels at
the top and at the left of the image. Setting this to a negative number
will cause it to include pixels that don't exist (which is fine).
start=0 is the usual setting.
outputs : int, optional
allows you to control how many output values in the x (equivalently, y)
dimension this operation will produce. This parameter is analogous to
the start parameter, in that it allows you to discard some portion of
the image by setting it to a value small enough to leave part of the
image uncovered. Setting it to zero instructs the net to produce as
many outputs as is necessary to ensure that the whole image is covered.
default 0
"""
def __init__(self, ds, stride, start=0, outputs=0):
self.ds = ds
self.stride = stride
self.start = start
self.copy_non_contiguous = 0
assert stride > 0 and stride <= ds, (stride, ds)
assert ds > 0, ds # We check in the code if ds <= imgSizeX
def __eq__(self, other):
"""
.. todo::
WRITEME
"""
#Dont put copy_non_contigous as this doesn't change the output
return (type(self) == type(other) and
self.ds == other.ds and
self.stride == other.stride and
self.start == other.start)
def __hash__(self):
"""
.. todo::
WRITEME
"""
#Dont put copy_non_contigous as this doesn't change the output
return (hash(type(self)) ^ hash(self.ds) ^
hash(self.stride) ^ hash(self.start))
def c_header_dirs(self):
"""
.. todo::
WRITEME
"""
return [this_dir]
def c_headers(self):
"""
.. todo::
WRITEME
"""
return ['nvmatrix.cuh', 'conv_util.cuh']
def c_lib_dirs(self):
"""
.. todo::
WRITEME
"""
return [cuda_convnet_loc]
def c_libraries(self):
"""
.. todo::
WRITEME
"""
return ['cuda_convnet']
def c_code_cache_version(self):
"""
.. todo::
WRITEME
"""
return (1,)
def _argument_contiguity_check(self, arg_name):
"""
.. todo::
WRITEME
"""
return """
if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))
{
if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {
PyErr_SetString(PyExc_ValueError,
"%(class)s: %(arg_name)s must be C contiguous");
%%(fail)s;
}
}
""" % {
'class': self.__class__.__name__,
'arg_name': arg_name,
'class_name_caps': self.__class__.__name__.upper(),
}
def make_node(self, images):
"""
.. todo::
WRITEME
"""
images = as_cuda_ndarray_variable(images)
assert images.ndim == 4
channels_broadcastable = images.type.broadcastable[0]
batch_broadcastable = images.type.broadcastable[3]
rows_broadcastable = False
cols_broadcastable = False
targets_broadcastable = (channels_broadcastable, rows_broadcastable,
cols_broadcastable, batch_broadcastable)
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
targets = targets_type()
return Apply(self, [images], [targets])
def c_code(self, node, name, inputs, outputs, sub):
"""
.. todo::
WRITEME
"""
images, = inputs
targets, = outputs
fail = sub['fail']
# The amount of braces that must be closed at the end
num_braces = 0
if self.copy_non_contiguous:
raise UnimplementedError()
else:
basic_setup = "#define WEIGHTEDMAXPOOL_COPY_NON_CONTIGUOUS 0\n"
# Convert images in nv_images, an NVMatrix, for compatibility
# with the cuda-convnet functions
setup_nv_images = self._argument_contiguity_check("images") + """
if (%(images)s->nd != 4)
{
PyErr_Format(PyExc_ValueError,
"images must have nd=4, got nd=%%i", %(images)s->nd);
%(fail)s;
}
{ //setup_nv_images brace 1
const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);
const int img_channels = images_dims[0];
const int imgSizeY = images_dims[1];
const int imgSizeX = images_dims[2];
const int batch_size = images_dims[3];
if(imgSizeY != imgSizeX){
PyErr_Format(PyExc_ValueError,
"images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",
img_channels, imgSizeY, imgSizeX, batch_size);
%(fail)s;
}
if(%(ds)s > imgSizeY){
PyErr_Format(PyExc_ValueError,
"ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",
%(ds)s, imgSizeX, imgSizeY);
%(fail)s;
}
if(%(start)s >= imgSizeX){
PyErr_Format(PyExc_ValueError,
"start is %%d but must be smaller then the images size of %%d x %%d.",
%(start)s, imgSizeX, imgSizeY);
%(fail)s;
}
NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,
"MaxPool:nv_images");
"""
num_braces += 1
setup_nv_targets = """
//int _outputsX = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1;
int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;
int target_dims [] = {
img_channels,
_outputsX,
_outputsX,
batch_size };
if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))
{
%(fail)s;
}
{ // setup_nv_target brace # 1
NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],
target_dims[3], "MaxPool:nv_targets");
"""
num_braces += 1
do_pool = """
convLocalWeightedPool(nv_images, nv_targets, img_channels, %(ds)s,
%(start)s, %(stride)s, _outputsX, MaxPooler());
"""
braces = '}' * num_braces
rval = (basic_setup +
setup_nv_images +
setup_nv_targets +
do_pool +
braces)
start = self.start
stride = self.stride
ds = self.ds
rval = rval % locals()
return rval
def grad(self, inp, grads):
"""
.. todo::
WRITEME
"""
raise NotImplementedError()
# Make sure the cuda_convnet library is compiled and up-to-date
def make_thunk(self, node, storage_map, compute_map, no_recycling):
"""
.. todo::
WRITEME
"""
if not convnet_available():
raise RuntimeError('Could not compile cuda_convnet')
return super(WeightedMaxPool, self).make_thunk(
node, storage_map, compute_map, no_recycling)
| bsd-3-clause | 62810aac50316353f43dcc8b54442bee | 30.197731 | 111 | 0.55312 | 3.926765 | false | false | false | false |
lisa-lab/pylearn2 | pylearn2/scripts/jobman/tester.py | 44 | 3126 | """
This an example script inserting a pylearn2 yaml code into a jobman database.
The code below defines a yaml template string in state.yaml_template,
and the values of its hyper-parameters in state.hyper_parameters, and
run the code that is located in state.extract_results on this model
using jobman.
Actually, we add the job here and it can be launched later as usual
(please check how to start jobs using jobman from the jobman tutorial
website)
"""
from nose.plugins.skip import SkipTest
try:
from jobman.tools import DD, flatten
from jobman import api0, sql
except ImportError:
raise SkipTest()
from pylearn2.scripts.jobman import experiment
def result_extractor(train_obj):
    """
    Jobman results hook: summarize a finished pylearn2 training run.

    Looks up the SGD cost channel on the model's monitor, finds the epoch
    with the lowest recorded training cost, and returns a dict (stored by
    jobman in state.results) with that epoch's index, cost and batch count.
    """
    import numpy

    cost_channel = train_obj.model.monitor.channels[
        'sgd_cost(ExhaustiveSGD[X])']
    # Index of the epoch with the smallest recorded training cost.
    best = numpy.argmin(cost_channel.val_record)
    return dict(
        best_epoch=best,
        train_rec_error=cost_channel.val_record[best],
        batch_num=cost_channel.batch_record[best])
if __name__ == '__main__':
    # Open (creating if necessary) the sqlite-backed jobman table that
    # will hold this example experiment.
    db = api0.open_db('sqlite:///test.db?table=test_jobman_pylearn2')
    state = DD()
    # Pylearn2 YAML experiment template.  The %(...)s / %(...)d / %(...)f
    # placeholders are filled in from state.hyper_parameters by the jobman
    # experiment driver before training.
    state.yaml_template = '''
    !obj:pylearn2.train.Train {
        "dataset": !obj:pylearn2.datasets.npy_npz.NpyDataset &dataset {
            "file" : "%(file)s"
        },
        "model": !obj:pylearn2.autoencoder.ContractiveAutoencoder {
            "nvis" : %(nvis)d,
            "nhid" : %(nhid)d,
            "irange" : 0.05,
            "act_enc": "sigmoid", #for some reason only sigmoid function works
            "act_dec": "sigmoid",
        },
        "algorithm": !obj:pylearn2.training_algorithms.sgd.SGD {
            "learning_rate" : %(learning_rate)f,
            "batch_size" : %(batch_size)d,
            "monitoring_batches" : 5,
            "monitoring_dataset" : *dataset,
            "cost" : !obj:pylearn2.costs.cost.SumOfCosts {
                "costs": [
                    [1.0, !obj:pylearn2.costs.autoencoder.MeanBinaryCrossEntropy {} ],
                    [%(coefficient)f, !obj:pylearn2.costs.cost.MethodCost { method: 'contraction_penalty' } ]
                ]
            },
            "termination_criterion" : %(term_crit)s,
        }
    }
    '''
    # Concrete values for the placeholders above.  term_crit is a nested
    # dict that jobman expands into a constructor call via __builder__.
    state.hyper_parameters = {
        "file": "${PYLEARN2_DATA_PATH}/UTLC/pca/sylvester_train_x_pca32.npy",
        "nvis": 32,
        "nhid": 6,
        "learning_rate": 0.1,
        "batch_size": 10,
        "coefficient": 0.5,
        "term_crit": {
            "__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
            "max_epochs": 2
        }
    }
    # Dotted path to the results hook (result_extractor defined above).
    state.extract_results = "pylearn2.scripts.jobman.tester.result_extractor"
    # Register the job in the database; it can be launched later with the
    # usual jobman commands.  force_dup allows inserting duplicates.
    sql.insert_job(
        experiment.train_experiment,
        flatten(state),
        db,
        force_dup=True)
| bsd-3-clause | 07eb2e770d37237007527101bed2d2da | 31.226804 | 109 | 0.590211 | 3.647608 | false | true | false | false |
lisa-lab/pylearn2 | pylearn2/models/tests/test_dbm.py | 44 | 37752 | from __future__ import print_function
from pylearn2.models.dbm.dbm import DBM
from pylearn2.models.dbm.layer import BinaryVector, BinaryVectorMaxPool, Softmax, GaussianVisLayer
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "Devon Hjelm"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
import numpy as np
import random
assert hasattr(np, 'exp')
from theano.compat.six.moves import xrange
from theano import config
from theano import function
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano import tensor as T
from pylearn2.expr.basic import is_binary
from pylearn2.expr.nnet import inverse_sigmoid_numpy
from pylearn2.costs.dbm import VariationalCD
import pylearn2.testing.datasets as datasets
from pylearn2.space import VectorSpace
from pylearn2.utils import sharedX
from pylearn2.utils import safe_zip
from pylearn2.utils.data_specs import DataSpecsMapping
class DummyLayer(object):
    """
    A minimal stand-in for a DBM layer above the one under test: it forwards
    the raw state unchanged as both its downward state and its downward
    message, so a visible layer's `sample` can be driven without building a
    real hidden layer.
    """

    def downward_state(self, state):
        # Real layers may transform the state (e.g. select the pooled part);
        # the dummy passes it through untouched.
        return state

    def downward_message(self, state):
        # Likewise, the "message" sent to the layer below is the state itself.
        return state
class DummyDBM(object):
    """
    A minimal stand-in for a DBM model: layers under test only need access to
    the model's numpy RandomState via ``self.rng``.
    """

    def __init__(self, rng):
        # rng: numpy.random.RandomState used by layers for initialization.
        self.rng = rng
class TestBinaryVector:
    """
    Testing class for DBM BinaryVector.
    """

    def setUp(self):
        # No shared fixtures needed; each test builds its own layer.
        pass

    @staticmethod
    def check_samples(value, expected_shape, expected_mean, tol):
        """
        Tests that a matrix of binary samples (observations in rows, variables
        in columns)

        1) Has the right shape
        2) Is binary
        3) Converges to the right mean
        """
        assert value.shape == expected_shape
        assert is_binary(value)
        mean = value.mean(axis=0)
        max_error = np.abs(mean - expected_mean).max()
        print('Actual mean:')
        print(mean)
        print('Expected mean:')
        print(expected_mean)
        print('Maximal error:', max_error)
        if max_error > tol:
            raise ValueError("Samples don't seem to have the right mean.")

    def test_make_state(self):
        """
        Verifies that BinaryVector.make_state creates a shared variable whose
        value passes check_samples.
        """
        n = 5
        num_samples = 1000
        tol = .04

        layer = BinaryVector(nvis=n)

        # Pick a random target mean in (0, 1) and set the biases so the
        # layer's marginals match it: sigmoid(bias) == mean.
        rng = np.random.RandomState([2012, 11, 1])
        mean = rng.uniform(1e-6, 1. - 1e-6, (n,))
        z = inverse_sigmoid_numpy(mean)

        layer.set_biases(z.astype(config.floatX))

        init_state = layer.make_state(num_examples=num_samples,
                                      numpy_rng=rng)

        value = init_state.get_value()

        TestBinaryVector.check_samples(value, (num_samples, n), mean, tol)

    def test_sample(self):
        """
        Verifies that BinaryVector.sample returns an expression whose value
        passes check_samples.
        """
        assert hasattr(np, 'exp')

        n = 5
        num_samples = 1000
        tol = .04

        vis = BinaryVector(nvis=n)
        hid = DummyLayer()

        rng = np.random.RandomState([2012, 11, 1, 259])
        mean = rng.uniform(1e-6, 1. - 1e-6, (n,))
        ofs = rng.randn(n)

        vis.set_biases(ofs.astype(config.floatX))

        # The pre-sigmoid input is (state_above + bias); subtract the bias so
        # the resulting sample mean is exactly `mean`.
        z = inverse_sigmoid_numpy(mean) - ofs

        z_var = sharedX(np.zeros((num_samples, n)) + z)

        theano_rng = MRG_RandomStreams(2012 + 11 + 1)

        sample = vis.sample(state_above=z_var, layer_above=hid,
                            theano_rng=theano_rng)

        sample = sample.eval()

        TestBinaryVector.check_samples(sample, (num_samples, n), mean, tol)
def check_gaussian_samples(value, nsamples, nvis, rows, cols, channels, expected_mean, tol):
    """
    Sanity-check a matrix of Gaussian samples (observations in rows,
    variables in columns):

    1) it has the right shape — (nsamples, nvis) for a VectorSpace layer,
       (nsamples, rows, cols, channels) for a Conv2DSpace layer
    2) it is not binary
    3) its empirical mean is within `tol` of `expected_mean`
    """
    shape_wanted = (nsamples, nvis) if nvis else (nsamples, rows, cols, channels)
    assert value.shape == shape_wanted
    assert not is_binary(value)

    empirical_mean = value.mean(axis=0)
    worst = np.abs(empirical_mean - expected_mean).max()
    print('Actual mean:')
    print(empirical_mean)
    print('Expected mean:')
    print(expected_mean)
    print('Maximal error:', worst)
    print('Tolerable variance:', tol)
    if worst > tol:
        raise ValueError("Samples don't seem to have the right mean.")
    print('Mean is within expected range')
def test_gaussian_vis_layer_make_state():
    """
    Verifies that GaussianVisLayer.make_state creates a shared variable whose
    value passes check_gaussian_samples.

    In this case the layer lives in a VectorSpace.
    """
    n = 5
    rows = None
    cols = None
    channels = None
    num_samples = 1000
    tol = .042  # tolerated variance
    beta = 1 / tol  # precision parameter

    layer = GaussianVisLayer(nvis=n, init_beta=beta)

    # Pick a random target mean; no sigmoid inversion is needed because the
    # Gaussian layer has a linear activation.
    rng = np.random.RandomState([2012, 11, 1])
    mean = rng.uniform(1e-6, 1. - 1e-6, (n,))
    z = mean

    layer.set_biases(z.astype(config.floatX))

    init_state = layer.make_state(num_examples=num_samples,
                                  numpy_rng=rng)

    value = init_state.get_value()

    check_gaussian_samples(value, num_samples, n, rows, cols, channels, mean, tol)
def test_gaussian_vis_layer_make_state_conv():
    """
    Verifies that GaussianVisLayer.make_state creates a shared variable whose
    value passes check_gaussian_samples.

    In this case the layer lives in a Conv2DSpace.
    """
    n = None
    rows = 3
    cols = 3
    channels = 3
    num_samples = 1000
    tol = .042  # tolerated variance
    beta = 1 / tol  # precision parameter

    # axes for batch, rows, cols, channels, can be given in any order;
    # shuffling checks that make_state works for every axis layout.
    axes = ['b', 0, 1, 'c']
    random.shuffle(axes)
    axes = tuple(axes)
    print('axes:', axes)

    layer = GaussianVisLayer(rows=rows, cols=cols, channels=channels, init_beta=beta, axes=axes)

    # rng = np.random.RandomState([2012,11,1])
    rng = np.random.RandomState()
    mean = rng.uniform(1e-6, 1. - 1e-6, (rows, cols, channels))
    # z = inverse_sigmoid_numpy(mean)  # not needed: linear activation
    z = mean

    layer.set_biases(z.astype(config.floatX))

    init_state = layer.make_state(num_examples=num_samples,
                                  numpy_rng=rng)

    value = init_state.get_value()

    check_gaussian_samples(value, num_samples, n, rows, cols, channels, mean, tol)
def test_gaussian_vis_layer_sample():
    """
    Verifies that GaussianVisLayer.sample returns an expression whose value
    passes check_gaussian_samples.

    In this case the layer lives in a VectorSpace.
    """
    assert hasattr(np, 'exp')

    n = 5
    num_samples = 1000
    tol = .042  # tolerated variance
    beta = 1 / tol  # precision parameter
    rows = None
    cols = None
    channels = None

    class DummyLayer(object):
        """
        A layer that we build for the test that just uses a state
        as its downward message.
        """

        def downward_state(self, state):
            return state

        def downward_message(self, state):
            return state

    vis = GaussianVisLayer(nvis=n, init_beta=beta)
    hid = DummyLayer()

    rng = np.random.RandomState([2012, 11, 1, 259])
    mean = rng.uniform(1e-6, 1. - 1e-6, (n,))
    ofs = rng.randn(n)

    vis.set_biases(ofs.astype(config.floatX))

    # z = inverse_sigmoid_numpy(mean) - ofs
    z = mean - ofs  # linear activation function

    z_var = sharedX(np.zeros((num_samples, n)) + z)
    # mean will be z_var + mu

    theano_rng = MRG_RandomStreams(2012 + 11 + 1)

    sample = vis.sample(state_above=z_var, layer_above=hid,
                        theano_rng=theano_rng)

    sample = sample.eval()

    check_gaussian_samples(sample, num_samples, n, rows, cols, channels, mean, tol)
def test_gaussian_vis_layer_sample_conv():
    """
    Verifies that GaussianVisLayer.sample returns an expression whose value
    passes check_gaussian_samples.

    In this case the layer lives in a Conv2DSpace.
    """
    assert hasattr(np, 'exp')

    n = None
    num_samples = 1000
    tol = .042  # tolerated variance
    beta = 1 / tol  # precision parameter
    rows = 3
    cols = 3
    channels = 3

    # axes for batch, rows, cols, channels, can be given in any order;
    # shuffling checks that sampling works for every axis layout.
    axes = ['b', 0, 1, 'c']
    random.shuffle(axes)
    axes = tuple(axes)
    print('axes:', axes)

    class DummyLayer(object):
        """
        A layer that we build for the test that just uses a state
        as its downward message.
        """

        def downward_state(self, state):
            return state

        def downward_message(self, state):
            return state

    vis = GaussianVisLayer(nvis=None, rows=rows, cols=cols, channels=channels, init_beta=beta, axes=axes)
    hid = DummyLayer()

    rng = np.random.RandomState([2012, 11, 1, 259])
    mean = rng.uniform(1e-6, 1. - 1e-6, (rows, cols, channels))
    ofs = rng.randn(rows, cols, channels)

    vis.set_biases(ofs.astype(config.floatX))

    # z = inverse_sigmoid_numpy(mean) - ofs
    z = mean - ofs  # linear activation function

    z_var = sharedX(np.zeros((num_samples, rows, cols, channels)) + z)

    theano_rng = MRG_RandomStreams(2012 + 11 + 1)

    sample = vis.sample(state_above=z_var, layer_above=hid,
                        theano_rng=theano_rng)

    sample = sample.eval()

    check_gaussian_samples(sample, num_samples, n, rows, cols, channels, mean, tol)
def check_bvmp_samples(value, num_samples, n, pool_size, mean, tol):
    """
    Validate samples drawn from a BinaryVectorMaxPool (bvmp) layer.

    Parameters
    ----------
    value : tuple
        (pooled batch, detector batch), both made with the same parameters.
    num_samples : int
        Number of samples expected in the batch.
    n : int
        Detector layer dimension.
    pool_size : int
        Size of each pool region.
    mean : tuple
        (expected value of pool units, expected value of detector units).
    tol : float
        Largest tolerated deviation of the empirical mean from `mean`.

    Checks that:
    1) all values are binary
    2) detector layer units within a pool are mutually exclusive
    3) the pooled unit is the max of its detector units
    4) the correct number of samples is present
    5) variables are of the right shapes
    6) samples converge to the right expected value
    """
    pv, hv = value
    assert n % pool_size == 0
    num_pools = n // pool_size

    # Shape and binariness checks.
    assert pv.ndim == 2
    assert pv.shape[0] == num_samples
    assert pv.shape[1] == num_pools
    assert hv.ndim == 2
    assert hv.shape[0] == num_samples
    assert hv.shape[1] == n
    assert is_binary(pv)
    assert is_binary(hv)

    for i in xrange(num_pools):
        sub_p = pv[:, i]
        assert sub_p.shape == (num_samples,)
        sub_h = hv[:, i * pool_size:(i + 1) * pool_size]
        assert sub_h.shape == (num_samples, pool_size)
        if not np.all(sub_p == sub_h.max(axis=1)):
            # Report the first offending sample, then fail.
            for j in xrange(num_samples):
                print(sub_p[j], sub_h[j, :])
                # BUG FIX: the original asserted `sub_p[j] == sub_h[j, :]`,
                # comparing a scalar with the whole detector row, which
                # raises a ValueError for pool_size > 1 instead of
                # pinpointing the bad sample.  Compare against the row max.
                assert sub_p[j] == sub_h[j, :].max()
            assert False
        # Mutual exclusivity: at most one detector unit active per pool.
        assert np.max(sub_h.sum(axis=1)) == 1

    p, h = mean
    assert p.ndim == 1
    assert h.ndim == 1
    emp_p = pv.mean(axis=0)
    emp_h = hv.mean(axis=0)

    max_diff = np.abs(p - emp_p).max()
    if max_diff > tol:
        print('expected value of pooling units: ', p)
        print('empirical expectation: ', emp_p)
        print('maximum difference: ', max_diff)
        raise ValueError("Pooling unit samples have an unlikely mean.")

    max_diff = np.abs(h - emp_h).max()
    if max_diff > tol:
        # Report the failure the same way as the pooling-unit branch
        # (the original was a bare `assert False` with no diagnostics).
        print('expected value of detector units: ', h)
        print('empirical expectation: ', emp_h)
        print('maximum difference: ', max_diff)
        raise ValueError("Detector unit samples have an unlikely mean.")
def test_bvmp_make_state():
    """
    Verifies that BinaryVectorMaxPool.make_state creates shared variables
    whose values pass check_bvmp_samples.
    """
    num_pools = 3
    num_samples = 1000
    tol = .04
    rng = np.random.RandomState([2012, 11, 1, 9])

    # pool_size=1 is an important corner case
    for pool_size in [1, 2, 5]:

        n = num_pools * pool_size

        layer = BinaryVectorMaxPool(
            detector_layer_dim=n,
            layer_name='h',
            irange=1.,
            pool_size=pool_size)

        # This is just to placate mf_update below
        input_space = VectorSpace(1)

        class DummyDBM(object):
            # Minimal model stub: the layer only needs an rng.
            def __init__(self):
                self.rng = rng

        layer.set_dbm(DummyDBM())
        layer.set_input_space(input_space)

        layer.set_biases(rng.uniform(-pool_size, 1., (n,)).astype(config.floatX))

        # To find the mean of the samples, we use mean field with an input of 0
        mean = layer.mf_update(
            state_below=T.alloc(0., 1, 1),
            state_above=None,
            layer_above=None)

        mean = function([], mean)()

        mean = [mn[0, :] for mn in mean]

        state = layer.make_state(num_examples=num_samples,
                                 numpy_rng=rng)

        value = [elem.get_value() for elem in state]

        check_bvmp_samples(value, num_samples, n, pool_size, mean, tol)
def make_random_basic_binary_dbm(
        rng,
        pool_size_1,
        num_vis = None,
        num_pool_1 = None,
        num_pool_2 = None,
        pool_size_2 = None,
        center = False
        ):
    """
    Makes a DBM with BinaryVector for the visible layer, and two hidden
    layers of type BinaryVectorMaxPool.  The weights and biases are
    initialized randomly with somewhat large values (i.e., not what you'd
    want to use for learning).

    Parameters
    ----------
    rng : numpy.random.RandomState
        Source of randomness for sizes, weights and biases.
    pool_size_1 : int
        The size of the pools to use in the first hidden layer.
    num_vis : int, optional
        Number of visible units; drawn uniformly from [1, 10] if omitted.
    num_pool_1 : int, optional
        Number of pools in the first hidden layer; random if omitted.
    num_pool_2 : int, optional
        Number of pools in the second hidden layer; random if omitted.
    pool_size_2 : int, optional
        Pool size of the second hidden layer; drawn from [1, 5] if omitted.
    center : bool, optional
        If True, enable centering in every layer.

    Returns
    -------
    DBM
        A model with layers (v, h1, h2), batch_size 1 and 50 mean-field
        iterations.
    """

    if num_vis is None:
        num_vis = rng.randint(1, 11)
    if num_pool_1 is None:
        num_pool_1 = rng.randint(1, 11)
    if num_pool_2 is None:
        num_pool_2 = rng.randint(1, 11)
    if pool_size_2 is None:
        pool_size_2 = rng.randint(1, 6)

    num_h1 = num_pool_1 * pool_size_1
    num_h2 = num_pool_2 * pool_size_2

    v = BinaryVector(num_vis, center=center)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX), recenter=center)

    h1 = BinaryVectorMaxPool(
        detector_layer_dim = num_h1,
        pool_size = pool_size_1,
        layer_name = 'h1',
        center = center,
        irange = 1.)
    h1.set_biases(rng.uniform(-1., 1., (num_h1,)).astype(config.floatX), recenter=center)

    h2 = BinaryVectorMaxPool(
        center = center,
        detector_layer_dim = num_h2,
        pool_size = pool_size_2,
        layer_name = 'h2',
        irange = 1.)
    h2.set_biases(rng.uniform(-1., 1., (num_h2,)).astype(config.floatX), recenter=center)

    dbm = DBM(visible_layer = v,
              hidden_layers = [h1, h2],
              batch_size = 1,
              niter = 50)

    return dbm
def test_bvmp_mf_energy_consistent():
    """
    A test of the BinaryVectorMaxPool class.

    Verifies that the mean field update is consistent with the energy
    function.  In a DBM consisting of (v, h1, h2), the lack of intra-layer
    connections means that P(h1 | v, h2) is factorial, so mf_update gives
    the true conditional.  We also know

        P(h1[i] | h1[-i], v) = P(h, v) / P(h[-i], v)
                             = P(h, v) / sum_h[i] P(h, v)
                             = exp(-E(h, v)) / sum_h[i] exp(-E(h, v))

    so we can check that computing P(h[i] | v) with both methods agrees.
    """

    rng = np.random.RandomState([2012, 11, 1, 613])

    def do_test(pool_size_1):
        # NOTE: this inner function previously read the *enclosing* loop
        # variable `pool_size` in three places, which only worked because
        # do_test happened to be called from that loop with the same value.
        # It now uses its own `pool_size_1` parameter throughout (as the
        # centered variant of this test already did).

        # Make DBM and read out its pieces
        dbm = make_random_basic_binary_dbm(
            rng=rng,
            pool_size_1=pool_size_1,
        )

        v = dbm.visible_layer
        h1, h2 = dbm.hidden_layers

        num_p = h1.get_output_space().dim

        # Choose which unit we will test
        p_idx = rng.randint(num_p)

        # Randomly pick a v, h1[-p_idx], and h2 to condition on
        # (Random numbers are generated via dbm.rng)
        layer_to_state = dbm.make_layer_to_state(1)
        v_state = layer_to_state[v]
        h1_state = layer_to_state[h1]
        h2_state = layer_to_state[h2]

        # Debugging checks
        num_h = h1.detector_layer_dim
        assert num_p * pool_size_1 == num_h
        pv, hv = h1_state
        assert pv.get_value().shape == (1, num_p)
        assert hv.get_value().shape == (1, num_h)

        # Infer P(h1[i] | h2, v) using mean field
        expected_p, expected_h = h1.mf_update(
            state_below=v.upward_state(v_state),
            state_above=h2.downward_state(h2_state),
            layer_above=h2)

        expected_p = expected_p[0, p_idx]
        expected_h = expected_h[0, p_idx * pool_size_1:(p_idx + 1) * pool_size_1]

        expected_p, expected_h = function([], [expected_p, expected_h])()

        # Infer P(h1[i] | h2, v) using the energy function
        energy = dbm.energy(V=v_state,
                            hidden=[h1_state, h2_state])
        unnormalized_prob = T.exp(-energy)
        assert unnormalized_prob.ndim == 1
        unnormalized_prob = unnormalized_prob[0]
        unnormalized_prob = function([], unnormalized_prob)

        p_state, h_state = h1_state

        def compute_unnormalized_prob(which_detector):
            # Clamp the tested pool to the configuration `which_detector`
            # (None = all detectors off) and evaluate exp(-E).
            write_h = np.zeros((pool_size_1,))
            if which_detector is None:
                write_p = 0.
            else:
                write_p = 1.
                write_h[which_detector] = 1.

            h_value = h_state.get_value()
            p_value = p_state.get_value()

            h_value[0, p_idx * pool_size_1:(p_idx + 1) * pool_size_1] = write_h
            p_value[0, p_idx] = write_p

            h_state.set_value(h_value)
            p_state.set_value(p_value)

            return unnormalized_prob()

        off_prob = compute_unnormalized_prob(None)
        on_probs = [compute_unnormalized_prob(idx) for idx in xrange(pool_size_1)]
        denom = off_prob + sum(on_probs)
        off_prob /= denom
        on_probs = [on_prob / denom for on_prob in on_probs]
        assert np.allclose(1., off_prob + sum(on_probs))

        # np.asarray(on_probs) doesn't make a numpy vector, so I do it manually
        wtf_numpy = np.zeros((pool_size_1,))
        for i in xrange(pool_size_1):
            wtf_numpy[i] = on_probs[i]
        on_probs = wtf_numpy

        # Check that they match
        if not np.allclose(expected_p, 1. - off_prob):
            print('mean field expectation of p:', expected_p)
            print('expectation of p based on enumerating energy function values:', 1. - off_prob)
            print('pool_size_1:', pool_size_1)
            assert False
        if not np.allclose(expected_h, on_probs):
            print('mean field expectation of h:', expected_h)
            print('expectation of h based on enumerating energy function values:', on_probs)
            assert False

    # 1 is an important corner case
    # We must also run with a larger number to test the general case
    for pool_size in [1, 2, 5]:
        do_test(pool_size)
def test_bvmp_mf_energy_consistent_center():
    """
    A test of the BinaryVectorMaxPool class.

    Verifies that the mean field update is consistent with
    the energy function when using Gregoire Montavon's centering
    trick.

    Specifically, in a DBM consisting of (v, h1, h2), the
    lack of intra-layer connections means that
    P(h1|v, h2) is factorial so mf_update tells us the true
    conditional.
    We also know P(h1[i] | h1[-i], v)
    =  P(h, v) / P(h[-i], v)
    = P(h, v) / sum_h[i] P(h, v)
    = exp(-E(h, v)) / sum_h[i] exp(-E(h, v))
    So we can check that computing P(h[i] | v) with both
    methods works the same way

    :return:
    """

    rng = np.random.RandomState([2012, 11, 1, 613])

    def do_test(pool_size_1):
        """Run the consistency check for one first-layer pool size."""

        # Make DBM and read out its pieces
        dbm = make_random_basic_binary_dbm(
            rng=rng,
            pool_size_1=pool_size_1,
            pool_size_2=1,  # centering is only updated for pool size 1
            center=True
        )

        v = dbm.visible_layer
        h1, h2 = dbm.hidden_layers

        num_p = h1.get_output_space().dim

        # Choose which unit we will test
        p_idx = rng.randint(num_p)

        # Randomly pick a v, h1[-p_idx], and h2 to condition on
        # (Random numbers are generated via dbm.rng)
        layer_to_state = dbm.make_layer_to_state(1)
        v_state = layer_to_state[v]
        h1_state = layer_to_state[h1]
        h2_state = layer_to_state[h2]

        # Debugging checks
        num_h = h1.detector_layer_dim
        assert num_p * pool_size_1 == num_h
        pv, hv = h1_state
        assert pv.get_value().shape == (1, num_p)
        assert hv.get_value().shape == (1, num_h)

        # Infer P(h1[i] | h2, v) using mean field
        expected_p, expected_h = h1.mf_update(
            state_below=v.upward_state(v_state),
            state_above=h2.downward_state(h2_state),
            layer_above=h2)

        expected_p = expected_p[0, p_idx]
        expected_h = expected_h[0, p_idx * pool_size_1:(p_idx + 1) * pool_size_1]

        expected_p, expected_h = function([], [expected_p, expected_h])()

        # Infer P(h1[i] | h2, v) using the energy function
        energy = dbm.energy(V=v_state,
                            hidden=[h1_state, h2_state])
        unnormalized_prob = T.exp(-energy)
        assert unnormalized_prob.ndim == 1
        unnormalized_prob = unnormalized_prob[0]
        unnormalized_prob = function([], unnormalized_prob)

        p_state, h_state = h1_state

        def compute_unnormalized_prob(which_detector):
            # Clamp the tested pool to the configuration `which_detector`
            # (None = all detectors off) and evaluate exp(-E).
            write_h = np.zeros((pool_size_1,))
            if which_detector is None:
                write_p = 0.
            else:
                write_p = 1.
                write_h[which_detector] = 1.

            h_value = h_state.get_value()
            p_value = p_state.get_value()

            h_value[0, p_idx * pool_size_1:(p_idx + 1) * pool_size_1] = write_h
            p_value[0, p_idx] = write_p

            h_state.set_value(h_value)
            p_state.set_value(p_value)

            return unnormalized_prob()

        off_prob = compute_unnormalized_prob(None)
        on_probs = [compute_unnormalized_prob(idx) for idx in xrange(pool_size_1)]
        denom = off_prob + sum(on_probs)
        off_prob /= denom
        on_probs = [on_prob / denom for on_prob in on_probs]
        assert np.allclose(1., off_prob + sum(on_probs))

        # np.asarray(on_probs) doesn't make a numpy vector, so I do it manually
        wtf_numpy = np.zeros((pool_size_1,))
        for i in xrange(pool_size_1):
            wtf_numpy[i] = on_probs[i]
        on_probs = wtf_numpy

        # Check that they match
        if not np.allclose(expected_p, 1. - off_prob):
            print('mean field expectation of p:', expected_p)
            print('expectation of p based on enumerating energy function values:', 1. - off_prob)
            print('pool_size_1:', pool_size_1)
            assert False
        if not np.allclose(expected_h, on_probs):
            print('mean field expectation of h:', expected_h)
            print('expectation of h based on enumerating energy function values:', on_probs)
            assert False

    # 1 is the only pool size for which centering is implemented
    do_test(1)
def test_bvmp_mf_sample_consistent():
    """
    A test of the BinaryVectorMaxPool class.

    Verifies that the mean field update is consistent with the sampling
    function.  In a DBM consisting of (v, h1, h2), P(h1 | v, h2) is
    factorial, so mf_update gives the exact expected value of a sample of
    h1 given v and h2.  Samples drawn with the layer's `sample` method must
    therefore converge to that value.
    """

    rng = np.random.RandomState([2012, 11, 1, 1016])
    theano_rng = MRG_RandomStreams(2012 + 11 + 1 + 1036)
    num_samples = 1000
    tol = .042

    def do_test(pool_size_1):
        # NOTE: the final check_bvmp_samples call previously read the
        # *enclosing* loop variable `pool_size`, which only worked because
        # do_test happened to be called from that loop with the same value.
        # It now uses its own `pool_size_1` parameter.

        # Make DBM and read out its pieces
        dbm = make_random_basic_binary_dbm(
            rng=rng,
            pool_size_1=pool_size_1,
        )

        v = dbm.visible_layer
        h1, h2 = dbm.hidden_layers

        num_p = h1.get_output_space().dim

        # Choose which unit we will test
        p_idx = rng.randint(num_p)

        # Randomly pick a v, h1[-p_idx], and h2 to condition on
        # (Random numbers are generated via dbm.rng)
        layer_to_state = dbm.make_layer_to_state(1)
        v_state = layer_to_state[v]
        h1_state = layer_to_state[h1]
        h2_state = layer_to_state[h2]

        # Debugging checks
        num_h = h1.detector_layer_dim
        assert num_p * pool_size_1 == num_h
        pv, hv = h1_state
        assert pv.get_value().shape == (1, num_p)
        assert hv.get_value().shape == (1, num_h)

        # Infer P(h1[i] | h2, v) using mean field
        expected_p, expected_h = h1.mf_update(
            state_below=v.upward_state(v_state),
            state_above=h2.downward_state(h2_state),
            layer_above=h2)

        expected_p = expected_p[0, :]
        expected_h = expected_h[0, :]

        expected_p, expected_h = function([], [expected_p, expected_h])()

        # copy all the states out into a batch size of num_samples
        cause_copy = sharedX(np.zeros((num_samples,))).dimshuffle(0, 'x')
        v_state = v_state[0, :] + cause_copy
        p, h = h1_state
        h1_state = (p[0, :] + cause_copy, h[0, :] + cause_copy)
        p, h = h2_state
        h2_state = (p[0, :] + cause_copy, h[0, :] + cause_copy)

        h1_samples = h1.sample(state_below=v.upward_state(v_state),
                               state_above=h2.downward_state(h2_state),
                               layer_above=h2, theano_rng=theano_rng)

        h1_samples = function([], h1_samples)()

        check_bvmp_samples(h1_samples, num_samples, num_h, pool_size_1,
                           (expected_p, expected_h), tol)

    # 1 is an important corner case
    # We must also run with a larger number to test the general case
    for pool_size in [1, 2, 5]:
        do_test(pool_size)
def check_multinomial_samples(value, expected_shape, expected_mean, tol):
    """
    Sanity-check a matrix of multinomial samples (observations in rows,
    variables in columns):

    1) it has the right shape
    2) it is binary
    3) each row contains exactly one 1
    4) the empirical mean is within `tol` of `expected_mean`
    """
    assert value.shape == expected_shape
    assert is_binary(value)
    assert np.all(value.sum(axis=1) == 1)

    empirical_mean = value.mean(axis=0)
    worst = np.abs(empirical_mean - expected_mean).max()
    if worst <= tol:
        return
    # Only print diagnostics on failure.
    print('Actual mean:')
    print(empirical_mean)
    print('Expected mean:')
    print(expected_mean)
    print('Maximal error:', worst)
    raise ValueError("Samples don't seem to have the right mean.")
def test_softmax_make_state():
    """
    Verifies that Softmax.make_state creates a shared variable whose value
    passes check_multinomial_samples.
    """
    n = 5
    num_samples = 1000
    tol = .04

    layer = Softmax(n_classes=n, layer_name='y')

    rng = np.random.RandomState([2012, 11, 1, 11])

    # Random logits; the expected class distribution is their softmax.
    z = 3 * rng.randn(n)

    mean = np.exp(z)
    mean /= mean.sum()

    layer.set_biases(z.astype(config.floatX))

    state = layer.make_state(num_examples=num_samples,
                             numpy_rng=rng)

    value = state.get_value()

    check_multinomial_samples(value, (num_samples, n), mean, tol)
def test_softmax_mf_energy_consistent():
    """
    A test of the Softmax class.

    Verifies that the mean field update is consistent with the energy
    function.  Since a Softmax layer contains only one random variable
    (with n_classes possible values) the mean field assumption does not
    impose any restriction, so mf_update simply gives the true expected
    value of h given v.  We also know

        P(h | v) = P(h, v) / P(v)
                 = P(h, v) / sum_h P(h, v)
                 = exp(-E(h, v)) / sum_h exp(-E(h, v))

    so we can check that computing P(h | v) with both methods agrees.
    """

    rng = np.random.RandomState([2012, 11, 1, 1131])

    # Make DBM
    num_vis = rng.randint(1, 11)
    n_classes = rng.randint(1, 11)

    v = BinaryVector(num_vis)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX))

    y = Softmax(
        n_classes=n_classes,
        layer_name='y',
        irange=1.)
    y.set_biases(rng.uniform(-1., 1., (n_classes,)).astype(config.floatX))

    dbm = DBM(visible_layer=v,
              hidden_layers=[y],
              batch_size=1,
              niter=50)

    # Randomly pick a v to condition on
    # (Random numbers are generated via dbm.rng)
    layer_to_state = dbm.make_layer_to_state(1)
    v_state = layer_to_state[v]
    y_state = layer_to_state[y]

    # Infer P(y | v) using mean field
    expected_y = y.mf_update(
        state_below=v.upward_state(v_state))

    expected_y = expected_y[0, :]

    expected_y = expected_y.eval()

    # Infer P(y | v) using the energy function
    energy = dbm.energy(V=v_state,
                        hidden=[y_state])
    unnormalized_prob = T.exp(-energy)
    assert unnormalized_prob.ndim == 1
    unnormalized_prob = unnormalized_prob[0]
    unnormalized_prob = function([], unnormalized_prob)

    def compute_unnormalized_prob(which):
        # Clamp y to the one-hot vector for class `which` and evaluate exp(-E).
        write_y = np.zeros((n_classes,))
        write_y[which] = 1.

        y_value = y_state.get_value()

        y_value[0, :] = write_y

        y_state.set_value(y_value)

        return unnormalized_prob()

    probs = [compute_unnormalized_prob(idx) for idx in xrange(n_classes)]
    denom = sum(probs)
    probs = [on_prob / denom for on_prob in probs]

    # np.asarray(probs) doesn't make a numpy vector, so I do it manually
    wtf_numpy = np.zeros((n_classes,))
    for i in xrange(n_classes):
        wtf_numpy[i] = probs[i]
    probs = wtf_numpy

    if not np.allclose(expected_y, probs):
        print('mean field expectation of h:', expected_y)
        print('expectation of h based on enumerating energy function values:', probs)
        assert False
def test_softmax_mf_energy_consistent_centering():
    """
    A test of the Softmax class.

    Same consistency check as test_softmax_mf_energy_consistent, but with
    the centering trick enabled on both layers.  Since a Softmax layer
    contains only one random variable (with n_classes possible values) the
    mean field assumption does not impose any restriction, so mf_update
    simply gives the true expected value of h given v, and

        P(h | v) = exp(-E(h, v)) / sum_h exp(-E(h, v))

    must match it.
    """

    rng = np.random.RandomState([2012, 11, 1, 1131])

    # Make DBM (both layers centered)
    num_vis = rng.randint(1, 11)
    n_classes = rng.randint(1, 11)

    v = BinaryVector(num_vis, center=True)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX), recenter=True)

    y = Softmax(
        n_classes=n_classes,
        layer_name='y',
        irange=1., center=True)
    y.set_biases(rng.uniform(-1., 1., (n_classes,)).astype(config.floatX), recenter=True)

    dbm = DBM(visible_layer=v,
              hidden_layers=[y],
              batch_size=1,
              niter=50)

    # Randomly pick a v to condition on
    # (Random numbers are generated via dbm.rng)
    layer_to_state = dbm.make_layer_to_state(1)
    v_state = layer_to_state[v]
    y_state = layer_to_state[y]

    # Infer P(y | v) using mean field
    expected_y = y.mf_update(
        state_below=v.upward_state(v_state))

    expected_y = expected_y[0, :]

    expected_y = expected_y.eval()

    # Infer P(y | v) using the energy function
    energy = dbm.energy(V=v_state,
                        hidden=[y_state])
    unnormalized_prob = T.exp(-energy)
    assert unnormalized_prob.ndim == 1
    unnormalized_prob = unnormalized_prob[0]
    unnormalized_prob = function([], unnormalized_prob)

    def compute_unnormalized_prob(which):
        # Clamp y to the one-hot vector for class `which` and evaluate exp(-E).
        write_y = np.zeros((n_classes,))
        write_y[which] = 1.

        y_value = y_state.get_value()

        y_value[0, :] = write_y

        y_state.set_value(y_value)

        return unnormalized_prob()

    probs = [compute_unnormalized_prob(idx) for idx in xrange(n_classes)]
    denom = sum(probs)
    probs = [on_prob / denom for on_prob in probs]

    # np.asarray(probs) doesn't make a numpy vector, so I do it manually
    wtf_numpy = np.zeros((n_classes,))
    for i in xrange(n_classes):
        wtf_numpy[i] = probs[i]
    probs = wtf_numpy

    if not np.allclose(expected_y, probs):
        print('mean field expectation of h:', expected_y)
        print('expectation of h based on enumerating energy function values:', probs)
        assert False
def test_softmax_mf_sample_consistent():
    """
    A test of the Softmax class.

    Verifies that the mean field update is consistent with the sampling
    function: mf_update gives the exact expected value of a sample of y
    given v, so samples drawn with the layer's `sample` method must
    converge to that value.
    """

    rng = np.random.RandomState([2012, 11, 1, 1154])
    theano_rng = MRG_RandomStreams(2012 + 11 + 1 + 1154)
    num_samples = 1000
    tol = .042

    # Make DBM
    num_vis = rng.randint(1, 11)
    n_classes = rng.randint(1, 11)

    v = BinaryVector(num_vis)
    v.set_biases(rng.uniform(-1., 1., (num_vis,)).astype(config.floatX))

    y = Softmax(
        n_classes=n_classes,
        layer_name='y',
        irange=1.)
    y.set_biases(rng.uniform(-1., 1., (n_classes,)).astype(config.floatX))

    dbm = DBM(visible_layer=v,
              hidden_layers=[y],
              batch_size=1,
              niter=50)

    # Randomly pick a v to condition on
    # (Random numbers are generated via dbm.rng)
    layer_to_state = dbm.make_layer_to_state(1)
    v_state = layer_to_state[v]
    y_state = layer_to_state[y]

    # Infer P(y | v) using mean field
    expected_y = y.mf_update(
        state_below=v.upward_state(v_state))

    expected_y = expected_y[0, :]

    expected_y = expected_y.eval()

    # copy all the states out into a batch size of num_samples
    cause_copy = sharedX(np.zeros((num_samples,))).dimshuffle(0, 'x')
    v_state = v_state[0, :] + cause_copy
    y_state = y_state[0, :] + cause_copy

    y_samples = y.sample(state_below=v.upward_state(v_state), theano_rng=theano_rng)

    y_samples = function([], y_samples)()

    check_multinomial_samples(y_samples, (num_samples, n_classes), expected_y, tol)
def test_make_symbolic_state():
    """
    Tests whether the returned visible sample, p_sample, h_sample and
    softmax sample have the right dimensions.
    """
    num_examples = 40
    theano_rng = MRG_RandomStreams(2012 + 11 + 1)

    visible_layer = BinaryVector(nvis=100)
    rval = visible_layer.make_symbolic_state(num_examples=num_examples,
                                             theano_rng=theano_rng)

    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.05,
                                       init_bias=-2.0)
    # With pool_size=1 the pooled and detector samples have the same shape.
    p_sample, h_sample = hidden_layer.make_symbolic_state(num_examples=num_examples,
                                                          theano_rng=theano_rng)

    softmax_layer = Softmax(n_classes=10, layer_name='s', irange=0.05)
    h_sample_s = softmax_layer.make_symbolic_state(num_examples=num_examples,
                                                   theano_rng=theano_rng)

    required_shapes = [(40, 100), (40, 500), (40, 500), (40, 10)]
    f = function(inputs=[],
                 outputs=[rval, p_sample, h_sample, h_sample_s])

    for s, r in zip(f(), required_shapes):
        assert s.shape == r
def test_variational_cd():
    """
    Verifies that VariationalCD works well with make_layer_to_symbolic_state:
    building the cost's gradients on a one-hidden-layer DBM must not raise.
    """

    visible_layer = BinaryVector(nvis=100)
    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.05,
                                       init_bias=-2.0)
    model = DBM(visible_layer=visible_layer,
                hidden_layers=[hidden_layer],
                batch_size=100,
                niter=1)
    cost = VariationalCD(num_chains=100, num_gibbs_steps=2)

    # Build one symbolic input per (space, source) pair of the cost's
    # data specs, then nest them back into the structure the cost expects.
    data_specs = cost.get_data_specs(model)
    mapping = DataSpecsMapping(data_specs)
    space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
    source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

    theano_args = []
    for space, source in safe_zip(space_tuple, source_tuple):
        name = '%s' % (source)
        arg = space.make_theano_batch(name=name)
        theano_args.append(arg)
    theano_args = tuple(theano_args)

    nested_args = mapping.nest(theano_args)

    # The test passes if gradient construction completes without error.
    grads, updates = cost.get_gradients(model, nested_args)
def test_extra():
    """
    Test functionality that remains private, if available.

    The galatea repository is private; when it cannot be imported this test
    is a silent no-op.
    """
    try:
        import galatea
    except ImportError:
        return
    from galatea.dbm.pylearn2_bridge import run_unit_tests
    run_unit_tests()
| bsd-3-clause | 2790db943f51af73e98a42f002ad8531 | 30.2 | 104 | 0.592896 | 3.465712 | false | false | false | false |
jupyterhub/oauthenticator | oauthenticator/tests/test_github.py | 1 | 7568 | import functools
import json
import logging
import re
from io import BytesIO
from urllib.parse import parse_qs, urlparse
from pytest import fixture, mark
from tornado.httpclient import HTTPResponse
from tornado.httputil import HTTPHeaders
from traitlets.config import Config
from ..github import GitHubOAuthenticator
from .mocks import setup_oauth_mock
def user_model(username):
    """Build the fixed GitHub user payload served by the mocked API,
    with *username* as the ``login`` field."""
    model = dict(
        email='dinosaurs@space',
        id=5,
        login=username,
        name='Hoban Washburn',
    )
    return model
@fixture
def github_client(client):
    """Pytest fixture: configure the shared mock HTTP client to emulate
    GitHub's OAuth endpoints (token exchange and /user lookup)."""
    setup_oauth_mock(
        client,
        host=['github.com', 'api.github.com'],
        access_token_path='/login/oauth/access_token',
        user_path='/user',
        token_type='token',
    )
    return client
async def test_github(github_client):
    """End-to-end authenticate() against the mocked GitHub API: the returned
    username and auth_state must reflect the mock user model."""
    authenticator = GitHubOAuthenticator()
    handler = github_client.handler_for_user(user_model('wash'))
    user_info = await authenticator.authenticate(handler)
    name = user_info['name']
    assert name == 'wash'
    auth_state = user_info['auth_state']
    assert 'access_token' in auth_state
    assert 'github_user' in auth_state
    # The raw GitHub /user response is preserved in auth_state.
    assert auth_state["github_user"] == {
        'email': 'dinosaurs@space',
        'id': 5,
        'login': name,
        'name': 'Hoban Washburn',
    }
def make_link_header(urlinfo, page):
    """Fabricate a GitHub-style pagination ``Link`` header pointing at *page*.

    *urlinfo* is a :func:`urllib.parse.urlparse` result; the returned dict
    holds a single ``Link`` entry with ``rel="next"``.
    """
    target = f'{urlinfo.scheme}://{urlinfo.netloc}{urlinfo.path}?page={page}'
    return {'Link': f'<{target}>;rel="next"'}
async def test_allowed_org_membership(github_client):
    """Exercise org- and team-based authorization, with and without pagination."""
    client = github_client
    authenticator = GitHubOAuthenticator()

    ## Mock Github API

    # Organization -> member logins.
    orgs = {
        'red': ['grif', 'simmons', 'donut', 'sarge', 'lopez'],
        'blue': ['tucker', 'caboose', 'burns', 'sheila', 'texas'],
    }
    # Organization -> team -> member logins.
    org_teams = {'blue': {'alpha': ['tucker', 'caboose', 'burns']}}

    member_regex = re.compile(r'/orgs/(.*)/members')

    def org_members(paginate, request):
        # Handler for GET /orgs/<org>/members. When paginating, serve one
        # member per page so the authenticator's Link-header walking is hit.
        urlinfo = urlparse(request.url)
        org = member_regex.match(urlinfo.path).group(1)
        if org not in orgs:
            return HTTPResponse(request, 404)
        if not paginate:
            return [user_model(m) for m in orgs[org]]
        else:
            page = parse_qs(urlinfo.query).get('page', ['1'])
            page = int(page[0])
            return org_members_paginated(
                org, page, urlinfo, functools.partial(HTTPResponse, request)
            )

    def org_members_paginated(org, page, urlinfo, response):
        # One member per page; emit a "next" Link header until the last page.
        if page < len(orgs[org]):
            headers = make_link_header(urlinfo, page + 1)
        elif page == len(orgs[org]):
            headers = {}
        else:
            return response(400)
        headers.update({'Content-Type': 'application/json'})
        ret = [user_model(orgs[org][page - 1])]
        return response(
            200,
            headers=HTTPHeaders(headers),
            buffer=BytesIO(json.dumps(ret).encode('utf-8')),
        )

    org_membership_regex = re.compile(r'/orgs/(.*)/members/(.*)')

    def org_membership(request):
        # Handler for GET /orgs/<org>/members/<username>: 204 when the user
        # is a member, 404 otherwise (mirrors the real GitHub API).
        urlinfo = urlparse(request.url)
        urlmatch = org_membership_regex.match(urlinfo.path)
        org = urlmatch.group(1)
        username = urlmatch.group(2)
        print('Request org = %s, username = %s' % (org, username))
        if org not in orgs:
            print('Org not found: org = %s' % (org))
            return HTTPResponse(request, 404)
        if username not in orgs[org]:
            print('Member not found: org = %s, username = %s' % (org, username))
            return HTTPResponse(request, 404)
        return HTTPResponse(request, 204)

    team_membership_regex = re.compile(r'/orgs/(.*)/teams/(.*)/members/(.*)')

    def team_membership(request):
        # Handler for GET /orgs/<org>/teams/<team>/members/<username>.
        urlinfo = urlparse(request.url)
        urlmatch = team_membership_regex.match(urlinfo.path)
        org = urlmatch.group(1)
        team = urlmatch.group(2)
        username = urlmatch.group(3)
        print('Request org = %s, team = %s username = %s' % (org, team, username))
        if org not in orgs:
            print('Org not found: org = %s' % (org))
            return HTTPResponse(request, 404)
        if team not in org_teams[org]:
            print('Team not found in org: team = %s, org = %s' % (team, org))
            return HTTPResponse(request, 404)
        if username not in org_teams[org][team]:
            print(
                'Member not found: org = %s, team = %s, username = %s'
                % (org, team, username)
            )
            return HTTPResponse(request, 404)
        return HTTPResponse(request, 204)

    ## Perform tests

    for paginate in (False, True):
        client_hosts = client.hosts['api.github.com']
        client_hosts.append((team_membership_regex, team_membership))
        client_hosts.append((org_membership_regex, org_membership))
        client_hosts.append((member_regex, functools.partial(org_members, paginate)))

        authenticator.allowed_organizations = ['blue']

        handler = client.handler_for_user(user_model('caboose'))
        user = await authenticator.authenticate(handler)
        assert user['name'] == 'caboose'

        handler = client.handler_for_user(user_model('donut'))
        user = await authenticator.authenticate(handler)
        assert user is None

        # reverse it, just to be safe
        authenticator.allowed_organizations = ['red']

        handler = client.handler_for_user(user_model('caboose'))
        user = await authenticator.authenticate(handler)
        assert user is None

        handler = client.handler_for_user(user_model('donut'))
        user = await authenticator.authenticate(handler)
        assert user['name'] == 'donut'

        # test team membership
        authenticator.allowed_organizations = ['blue:alpha', 'red']

        handler = client.handler_for_user(user_model('tucker'))
        user = await authenticator.authenticate(handler)
        assert user['name'] == 'tucker'

        handler = client.handler_for_user(user_model('grif'))
        user = await authenticator.authenticate(handler)
        assert user['name'] == 'grif'

        handler = client.handler_for_user(user_model('texas'))
        user = await authenticator.authenticate(handler)
        assert user is None

        # NOTE(review): three handlers are appended above but only two are
        # popped here, so the team_membership entry accumulates across
        # iterations — presumably harmless because re-registered handlers
        # are identical, but verify against the mock's dispatch order.
        client_hosts.pop()
        client_hosts.pop()
@mark.parametrize(
    "org, username, expected",
    [
        ("blue", "texas", "https://api.github.com/orgs/blue/members/texas"),
        (
            "blue:alpha",
            "tucker",
            "https://api.github.com/orgs/blue/teams/alpha/members/tucker",
        ),
        ("red", "grif", "https://api.github.com/orgs/red/members/grif"),
    ],
)
def test_build_check_membership_url(org, username, expected):
    # An "org:team" spec selects the team-membership endpoint; a bare org
    # name selects the org-membership endpoint.
    output = GitHubOAuthenticator()._build_check_membership_url(org, username)

    assert output == expected
def test_deprecated_config(caplog):
    """Deprecated config names must still be honored and warn about renaming."""
    cfg = Config()
    # Old (deprecated) names for allowed_organizations / allowed_users.
    cfg.GitHubOAuthenticator.github_organization_whitelist = ["jupy"]
    cfg.Authenticator.whitelist = {"user1"}

    log = logging.getLogger("testlog")
    authenticator = GitHubOAuthenticator(config=cfg, log=log)

    # A deprecation warning must have been logged...
    assert (
        log.name,
        logging.WARNING,
        'GitHubOAuthenticator.github_organization_whitelist is deprecated in GitHubOAuthenticator 0.12.0, use '
        'GitHubOAuthenticator.allowed_organizations instead',
    ) in caplog.record_tuples
    # ...and the values carried over to the new trait names.
    assert authenticator.allowed_organizations == {"jupy"}
    assert authenticator.allowed_users == {"user1"}
| bsd-3-clause | 6c6986946a81f239131e8589e3de9262 | 31.62069 | 111 | 0.607294 | 3.782109 | false | false | false | false |
jupyterhub/oauthenticator | oauthenticator/globus.py | 1 | 14263 | """
Custom Authenticator to use Globus OAuth2 with JupyterHub
"""
import base64
import os
import pickle
import urllib
from jupyterhub.auth import LocalAuthenticator
from tornado.httpclient import HTTPRequest
from tornado.web import HTTPError
from traitlets import Bool, List, Set, Unicode, default
from .oauth2 import OAuthenticator, OAuthLogoutHandler
class GlobusLogoutHandler(OAuthLogoutHandler):
    """
    Handle custom logout URLs and token revocation. If a custom logout url
    is specified, the 'logout' button will log the user out of that identity
    provider in addition to clearing the session with Jupyterhub, otherwise
    only the Jupyterhub session is cleared.
    """

    async def get(self):
        # Ensure self.handle_logout() is called before self.default_handle_logout()
        # If default_handle_logout() is called first, the user session is popped and
        # it's no longer possible to call get_auth_state() to revoke tokens.
        # See https://github.com/jupyterhub/jupyterhub/blob/HEAD/jupyterhub/handlers/login.py # noqa
        await self.handle_logout()
        await self.default_handle_logout()
        if self.authenticator.logout_redirect_url:
            # super().get() will attempt to render a logout page. Make sure we
            # return after the redirect to avoid exceptions.
            self.redirect(self.authenticator.logout_redirect_url)
            return
        await super().get()

    async def handle_logout(self):
        """Overridden method for custom logout functionality. Should be called by
        Jupyterhub on logout just before destroying the users session to log them out."""
        await super().handle_logout()
        # Revoke only when configured to, and only for an authenticated session.
        if self.current_user and self.authenticator.revoke_tokens_on_logout:
            await self.clear_tokens(self.current_user)

    async def clear_tokens(self, user):
        """Revoke and clear user tokens from the database"""
        state = await user.get_auth_state()
        if state:
            await self.authenticator.revoke_service_tokens(state.get('tokens'))
            self.log.info(
                'Logout: Revoked tokens for user "{}" services: {}'.format(
                    user.name, ','.join(state['tokens'].keys())
                )
            )
            # Drop the now-revoked tokens from the persisted auth state.
            state['tokens'] = {}
            await user.save_auth_state(state)
class GlobusOAuthenticator(OAuthenticator):
    """The Globus OAuthenticator handles both authorization and passing
    transfer tokens to the spawner."""

    login_service = 'Globus'
    # Custom handler so logging out can also revoke live Globus tokens.
    logout_handler = GlobusLogoutHandler

    @default("user_auth_state_key")
    def _user_auth_state_key_default(self):
        return "globus_user"

    @default("userdata_url")
    def _userdata_url_default(self):
        return "https://auth.globus.org/v2/oauth2/userinfo"

    @default("authorize_url")
    def _authorize_url_default(self):
        return "https://auth.globus.org/v2/oauth2/authorize"

    @default("revocation_url")
    def _revocation_url_default(self):
        return "https://auth.globus.org/v2/oauth2/token/revoke"

    revocation_url = Unicode(help="Globus URL to revoke live tokens.").tag(config=True)

    @default("token_url")
    def _token_url_default(self):
        return "https://auth.globus.org/v2/oauth2/token"

    globus_groups_url = Unicode(help="Globus URL to get list of user's Groups.").tag(
        config=True
    )

    @default("globus_groups_url")
    def _globus_groups_url_default(self):
        return "https://groups.api.globus.org/v2/groups/my_groups"

    identity_provider = Unicode(
        help="""Restrict which institution a user
    can use to login (GlobusID, University of Hogwarts, etc.). This should
    be set in the app at developers.globus.org, but this acts as an additional
    check to prevent unnecessary account creation."""
    ).tag(config=True)

    def _identity_provider_default(self):
        # Dynamic default via traitlets' _<name>_default naming convention.
        return os.getenv('IDENTITY_PROVIDER', '')

    username_from_email = Bool(
        help="""Create username from email address, not preferred username. If
        an identity provider is specified, email address must be from the same
        domain. Email scope will be set automatically."""
    ).tag(config=True)

    @default("username_from_email")
    def _username_from_email_default(self):
        return False

    @default("username_claim")
    def _username_claim_default(self):
        # Choose the userinfo claim the username is derived from.
        if self.username_from_email:
            return "email"
        return "preferred_username"

    exclude_tokens = List(
        help="""Exclude tokens from being passed into user environments
        when they start notebooks, Terminals, etc."""
    ).tag(config=True)

    def _exclude_tokens_default(self):
        return ['auth.globus.org', 'groups.api.globus.org']

    def _scope_default(self):
        scopes = [
            'openid',
            'profile',
            'urn:globus:auth:scope:transfer.api.globus.org:all',
        ]
        if self.allowed_globus_groups or self.admin_globus_groups:
            # Group-based authorization needs permission to read memberships.
            scopes.append(
                'urn:globus:auth:scope:groups.api.globus.org:view_my_groups_and_memberships'
            )
        if self.username_from_email:
            scopes.append('email')
        return scopes

    globus_local_endpoint = Unicode(
        help="""If Jupyterhub is also a Globus
    endpoint, its endpoint id can be specified here."""
    ).tag(config=True)

    def _globus_local_endpoint_default(self):
        return os.getenv('GLOBUS_LOCAL_ENDPOINT', '')

    revoke_tokens_on_logout = Bool(
        help="""Revoke tokens so they cannot be used again. Single-user servers
        MUST be restarted after logout in order to get a fresh working set of
        tokens."""
    ).tag(config=True)

    def _revoke_tokens_on_logout_default(self):
        return False

    allowed_globus_groups = Set(
        help="""Allow members of defined Globus Groups to access JupyterHub. Users in an
        admin Globus Group are also automatically allowed. Groups are specified with their UUIDs. Setting this will
        add the Globus Groups scope."""
    ).tag(config=True)

    admin_globus_groups = Set(
        help="""Set members of defined Globus Groups as JupyterHub admin users.
        These users are automatically allowed to login to JupyterHub. Groups are specified with
        their UUIDs. Setting this will add the Globus Groups scope."""
    ).tag(config=True)

    @staticmethod
    def check_user_in_groups(member_groups, allowed_groups):
        # True when the user belongs to at least one of the allowed groups.
        return bool(set(member_groups) & set(allowed_groups))

    async def pre_spawn_start(self, user, spawner):
        """Add tokens to the spawner whenever the spawner starts a notebook.
        This will allow users to create a transfer client:
        globus-sdk-python.readthedocs.io/en/stable/tutorial/#tutorial-step4
        """
        spawner.environment['GLOBUS_LOCAL_ENDPOINT'] = self.globus_local_endpoint
        state = await user.get_auth_state()
        if state:
            # Pickle + base64 so the whole auth state fits in one env var.
            globus_data = base64.b64encode(pickle.dumps(state))
            spawner.environment['GLOBUS_DATA'] = globus_data.decode('utf-8')

    def get_globus_tokens(self, token_info):
        """Normalize the Globus token response into a flat list of token dicts."""
        # Each token should have these attributes. Resource server is optional,
        # and likely won't be present.
        token_attrs = [
            'expires_in',
            'resource_server',
            'scope',
            'token_type',
            'refresh_token',
            'access_token',
        ]
        # The Auth Token is a bit special, it comes back at the top level with the
        # id token. The id token has some useful information in it, but nothing that
        # can't be retrieved with an Auth token.
        # Repackage the Auth token into a dict that looks like the other tokens
        auth_token_dict = {
            attr_name: token_info.get(attr_name) for attr_name in token_attrs
        }
        # Make sure only the essentials make it into tokens. Other items, such as 'state' are
        # not needed after authentication and can be discarded.
        other_tokens = [
            {attr_name: token_dict.get(attr_name) for attr_name in token_attrs}
            for token_dict in token_info['other_tokens']
        ]
        return other_tokens + [auth_token_dict]

    def build_auth_state_dict(self, token_info, user_info):
        """
        Usernames (and therefore Jupyterhub
        accounts) will correspond to a Globus User ID, so foouser@globusid.org
        will have the 'foouser' account in Jupyterhub.
        """
        tokens = self.get_globus_tokens(token_info)
        # historically, tokens have been organized by resource server for convenience.
        # If multiple scopes are requested from the same resource server, they will be
        # combined into a single token from Globus Auth.
        by_resource_server = {
            token_dict['resource_server']: token_dict
            for token_dict in tokens
            if token_dict['resource_server'] not in self.exclude_tokens
        }
        return {
            'client_id': self.client_id,
            'tokens': by_resource_server,
            'token_response': token_info,
            self.user_auth_state_key: user_info,
        }

    async def get_users_groups_ids(self, tokens):
        """Return the set of Globus Group IDs the user belongs to."""
        user_group_ids = set()
        # Get Groups access token, may not be in dict headed to auth state
        for token_dict in tokens:
            if token_dict['resource_server'] == 'groups.api.globus.org':
                groups_token = token_dict['access_token']
        # NOTE(review): if no groups.api.globus.org token is present,
        # `groups_token` is unbound and the next lines raise NameError —
        # verify callers always request the Groups scope before calling this.
        # Get list of user's Groups
        groups_headers = self.get_default_headers()
        groups_headers['Authorization'] = 'Bearer {}'.format(groups_token)
        req = HTTPRequest(self.globus_groups_url, method='GET', headers=groups_headers)
        groups_resp = await self.fetch(req)
        # Build set of Group IDs
        for group in groups_resp:
            user_group_ids.add(group['id'])
        return user_group_ids

    async def user_is_authorized(self, auth_model):
        # Authorized unless group restrictions are configured and the user is
        # in neither an allowed nor an admin Globus Group.
        tokens = self.get_globus_tokens(auth_model["auth_state"]["token_response"])
        if self.allowed_globus_groups or self.admin_globus_groups:
            # If any of these configurations are set, user must be in the allowed or admin Globus Group
            user_group_ids = await self.get_users_groups_ids(tokens)
            if not self.check_user_in_groups(
                user_group_ids, self.allowed_globus_groups
            ):
                if not self.check_user_in_groups(
                    user_group_ids, self.admin_globus_groups
                ):
                    self.log.warning(
                        '{} not in an allowed Globus Group'.format(
                            self.user_info_to_username(
                                auth_model["auth_state"][self.user_auth_state_key]
                            )
                        )
                    )
                    return False
        return True

    async def update_auth_model(self, auth_model):
        # NOTE(review): `username` is computed but never used below —
        # presumably leftover; confirm before removing.
        username = self.user_info_to_username(
            auth_model["auth_state"][self.user_auth_state_key]
        )
        tokens = self.get_globus_tokens(auth_model["auth_state"]["token_response"])
        if self.admin_globus_groups:
            # If any of these configurations are set, user must be in the allowed or admin Globus Group
            user_group_ids = await self.get_users_groups_ids(tokens)
            # Admin users are being managed via Globus Groups
            # Default to False
            auth_model['admin'] = False
            if self.check_user_in_groups(user_group_ids, self.admin_globus_groups):
                auth_model['admin'] = True
        return auth_model

    def user_info_to_username(self, user_info):
        """
        Usernames (and therefore Jupyterhub
        accounts) will correspond to a Globus User ID, so foouser@globusid.org
        will have the 'foouser' account in Jupyterhub.

        Raises HTTPError(403) when identity_provider is set and the user's
        domain does not match it.
        """
        # It's possible for identity provider domains to be namespaced
        # https://docs.globus.org/api/auth/specification/#identity_provider_namespaces # noqa
        username, domain = user_info.get(self.username_claim).split('@', 1)
        if self.identity_provider and domain != self.identity_provider:
            raise HTTPError(
                403,
                'This site is restricted to {} accounts. Please link your {}'
                ' account at {}.'.format(
                    self.identity_provider,
                    self.identity_provider,
                    'globus.org/app/account',
                ),
            )
        return username

    def get_default_headers(self):
        # Common headers for Globus API requests.
        return {"Accept": "application/json", "User-Agent": "JupyterHub"}

    def get_client_credential_headers(self):
        # HTTP Basic auth with the OAuth client id/secret (RFC 6749 style).
        headers = self.get_default_headers()
        b64key = base64.b64encode(
            bytes("{}:{}".format(self.client_id, self.client_secret), "utf8")
        )
        headers["Authorization"] = "Basic {}".format(b64key.decode("utf8"))
        return headers

    async def revoke_service_tokens(self, services):
        """Revoke live Globus access and refresh tokens. Revoking inert or
        non-existent tokens does nothing. Services are defined by dicts
        returned by tokens.by_resource_server, for example:
        services = { 'transfer.api.globus.org': {'access_token': 'token'}, ...
            <Additional services>...
        }
        """
        access_tokens = [
            token_dict.get('access_token') for token_dict in services.values()
        ]
        refresh_tokens = [
            token_dict.get('refresh_token') for token_dict in services.values()
        ]
        all_tokens = [tok for tok in access_tokens + refresh_tokens if tok is not None]
        for token in all_tokens:
            # One revocation request per token, authenticated as the client.
            req = HTTPRequest(
                self.revocation_url,
                method="POST",
                headers=self.get_client_credential_headers(),
                body=urllib.parse.urlencode({'token': token}),
            )
            await self.fetch(req)
class LocalGlobusOAuthenticator(LocalAuthenticator, GlobusOAuthenticator):
    """Globus authenticator that also creates local system users on login."""
| bsd-3-clause | ba777f7c10e3b84ae32c3c1ee9c05ce4 | 38.729805 | 115 | 0.62294 | 4.070491 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.