repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
unseenlaser/python-for-android | python-modules/twisted/twisted/scripts/test/test_mktap.py | 60 | 3509 | # Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.scripts.mktap}.
"""
import sys
try:
import pwd, grp
except ImportError:
pwd = None
from twisted.trial.unittest import TestCase
from twisted.scripts.mktap import run, getid, loadPlugins
from twisted.application.service import IProcess, loadApplication
from twisted.test.test_twistd import patchUserDatabase
from twisted.plugins.twisted_ftp import TwistedFTP
class RunTests(TestCase):
    """
    Tests for L{twisted.scripts.mktap.run}.
    """
    def setUp(self):
        """
        Save the original value of L{sys.argv} so that tests can change it
        as necessary.
        """
        self.argv = sys.argv[:]

    def tearDown(self):
        """
        Restore the original value of L{sys.argv}.
        """
        sys.argv[:] = self.argv

    def _saveConfiguredIDTest(self, argv, uid, gid):
        """
        Test that when L{run} is invoked and L{sys.argv} has the given
        value, the resulting application has the specified UID and GID.

        @type argv: C{list} of C{str}
        @param argv: The value to which to set L{sys.argv} before calling L{run}.

        @type uid: C{int}
        @param uid: The expected value for the resulting application's
            L{IProcess.uid}.

        @type gid: C{int}
        @param gid: The expected value for the resulting application's
            L{IProcess.gid}.
        """
        sys.argv = argv
        run()
        # run() persists the configured application to "ftp.tap"; load it
        # back and inspect the uid/gid recorded on its IProcess adapter.
        app = loadApplication("ftp.tap", "pickle", None)
        process = IProcess(app)
        self.assertEqual(process.uid, uid)
        self.assertEqual(process.gid, gid)

    def test_getNumericID(self):
        """
        L{run} extracts numeric UID and GID information from the command
        line and persists it with the application object.
        """
        uid = 1234
        gid = 4321
        self._saveConfiguredIDTest(
            ["mktap", "--uid", str(uid), "--gid", str(gid), "ftp"],
            uid, gid)

    def test_getNameID(self):
        """
        L{run} extracts name UID and GID information from the command
        line and persists it with the application object.
        """
        user = "foo"
        uid = 1234
        group = "bar"
        gid = 4321
        # Patch the pwd/grp user database so the name lookups resolve to
        # deterministic numeric IDs.
        patchUserDatabase(self.patch, user, uid, group, gid)
        self._saveConfiguredIDTest(
            ["mktap", "--uid", user, "--gid", group, "ftp"],
            uid, gid)
    # Name-based lookups need the POSIX-only pwd/grp modules (see the
    # guarded import at the top of this file).
    if pwd is None:
        test_getNameID.skip = (
            "Username/UID Group name/GID translation requires pwd and grp "
            "modules.")
class HelperTests(TestCase):
    """
    Tests for miscellaneous utility functions related to mktap.
    """
    def test_getid(self):
        """
        L{getid} returns a two-tuple of integers giving the numeric values of
        the strings it is passed.
        """
        uid = 1234
        gid = 4321
        self.assertEqual(getid(str(uid), str(gid)), (uid, gid))

    def test_loadPlugins(self):
        """
        L{loadPlugins} returns a C{dict} mapping tap names to tap plugins.
        """
        plugins = loadPlugins()
        self.assertTrue(plugins, "There should be at least one plugin.")
        # Make sure the mapping is set up properly.  Use items() rather than
        # the Python-2-only iteritems() so this also works on Python 3; the
        # behavior here is identical on both.
        for k, v in plugins.items():
            self.assertEqual(k, v.tapname)
        # Make sure one of the always-available builtin plugins is there.
        self.assertIdentical(plugins['ftp'], TwistedFTP)
| apache-2.0 |
skython/eXe | exe/webui/multiselectblock.py | 3 | 5253 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
# Copyright 2006-2007 eXe Project, New Zealand Tertiary Education Commission
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
MultiSelectBlock can render and process MultiSelectIdevices as XHTML
"""
import logging
from exe.webui.block import Block
from exe.webui.element import SelectquestionElement
from exe.webui import common
log = logging.getLogger(__name__)
# ===========================================================================
class MultiSelectBlock(Block):
    """
    MultiSelectBlock can render and process MultiSelectIdevices as XHTML
    """
    def __init__(self, parent, idevice):
        """
        Initialize a new Block object
        """
        Block.__init__(self, parent, idevice)
        self.idevice = idevice
        self.questionElements = []
        # Idevices persisted by older versions may lack the "undo" flag;
        # default it to enabled.
        if not hasattr(self.idevice,'undo'):
            self.idevice.undo = True
        # One rendering element per question on the idevice.
        for question in idevice.questions:
            self.questionElements.append(SelectquestionElement(question))

    def process(self, request):
        """
        Process the request arguments from the web server
        """
        Block.process(self, request)
        is_cancel = common.requestHasCancel(request)
        if ("addQuestion"+self.id) in request.args:
            self.idevice.addQuestion()
            self.idevice.edit = True
            # disable Undo once a question has been added:
            self.idevice.undo = False
        # Let each question element consume its own form fields.
        for element in self.questionElements:
            element.process(request)
        if "title"+self.id in request.args \
        and not is_cancel:
            self.idevice.title = request.args["title"+self.id][0]
        if ("action" in request.args and request.args["action"][0] == "done"
        or not self.idevice.edit):
            # remove the undo flag in order to reenable it next time:
            if hasattr(self.idevice,'undo'):
                del self.idevice.undo

    def renderEdit(self, style):
        """
        Returns an XHTML string with the form element for editing this block
        """
        html  = "<div class=\"iDevice\">\n"
        html += common.textInput("title"+self.id, self.idevice.title)
        html += u"<br/><br/>\n"
        for element in self.questionElements:
            html += element.renderEdit()
        value = _("Add another Question")
        html += "<br/>"
        html += common.submitButton("addQuestion"+self.id, value)
        html += "<br/><br/>"
        # Undo is disabled while a newly added question is pending (see
        # process() above).
        html += self.renderEditButtons(undo=self.idevice.undo)
        html += "</div>\n"
        return html

    def renderPreview(self, style):
        """
        Returns an XHTML string for previewing this block
        """
        html  = u"<div class=\"iDevice "
        html += u"emphasis"+unicode(self.idevice.emphasis)+"\" "
        html += u"ondblclick=\"submitLink('edit',"+self.id+", 0);\">\n"
        html += u'<img alt="" class="iDevice_icon" '
        html += u"src=\"/style/"+style+"/icon_"+self.idevice.icon
        html += ".gif\" />\n"
        html += u"<span class=\"iDeviceTitle\">"
        html += self.idevice.title+"</span>\n"
        html += u'<div class="iDevice_inner">\n'
        for element in self.questionElements:
            html += element.renderPreview() + "<br/>"
        html += u"</div>\n"
        html += self.renderViewButtons()
        html += u"</div>\n"
        return html

    def renderView(self, style):
        """
        Returns an XHTML string for viewing this block
        """
        html  = u'<div class="iDevice '
        html += u'emphasis'+unicode(self.idevice.emphasis)+'">\n'
        html += u'<img alt="" class="iDevice_icon" '
        html += u'src="icon_'+self.idevice.icon+'.gif" />\n'
        html += u'<span class="iDeviceTitle">'
        html += self.idevice.title+'</span>\n'
        html += u'<div class="iDevice_inner">\n'
        for element in self.questionElements:
            html += element.renderView() + "<br/>"
        html += "</div></div>\n"
        return html
# Register this block class with the factory so a MultiSelectBlock is built
# for every MultiSelectIdevice.  NOTE(review): the imports are placed here at
# the bottom of the module, presumably to avoid a circular import -- confirm.
from exe.engine.multiselectidevice import MultiSelectIdevice
from exe.webui.blockfactory import g_blockFactory
g_blockFactory.registerBlockType(MultiSelectBlock, MultiSelectIdevice)
# ===========================================================================
| gpl-2.0 |
hyperoslo/django-pipeline | pipeline/compilers/common_css.py | 1 | 5354 | import re
from os.path import dirname, normpath
from datetime import datetime
from pipeline.compilers import SubProcessCompiler
class BaseFileTree(object):
    """
    The BaseFileTree represents a node and its children in a tree of @imported
    CSS files. It handles the parsing of them, in order to be able to check
    the modification date of the node itself and all its children.
    """
    # Cache of already-seen nodes, keyed by normalized path.
    # NOTE(review): defined on BaseFileTree, so all subclasses share this one
    # dict unless they override the attribute -- confirm that is intended.
    files_info = {}
    # Matches @import "..." / '...' / `...` and captures the quoted path.
    import_exp = re.compile('@import\\s+("((?:[^"\r\n]|\\.)*)"|\'((?:[^\'\r\n]'
                            + '|\\.)*)\'|`((?:[^`]|\\.)*)`)')
    # Whether plain ".css" imports should also be followed (subclasses for
    # preprocessors may enable this).
    import_css = False
    # File extensions this tree type recognizes.
    extensions = ('.css',)

    def __init__(self, storage, name, searchpath=None):
        if searchpath is None:
            searchpath = ['.']
        self.storage = storage
        self.name = normpath(name)
        self.searchpath = searchpath
        # Epoch timestamp until update_info() is first called.
        self.mtime = datetime.fromtimestamp(0.0)
        # None means "never parsed"; an empty list means "no imports".
        self.children = None
        self.basepath = dirname(self.name)
        # Register this node in the class-level cache.
        self.files_info[self.name] = self

    def flatlist(self, target_time, callstack=None):
        """
        Will return a flat up-to-date list containing the complete list of
        files that might be included by the current node, plus the current node
        itself.

        The information about those files is cached into memory in order to
        avoid a complete parse at every call.
        """
        if callstack is None:
            callstack = []
        ret = [self]
        # Guard against @import cycles and files that have disappeared.
        if self in callstack or not self.storage.exists(self.name):
            return []
        self.update_info()
        # Re-parse children if this file changed or was never parsed.
        if self.mtime > target_time or self.children is None:
            self.update_children()
        for child in self.children:
            ret += child.flatlist(target_time, callstack + [self])
        return ret

    def update_info(self):
        """
        Updates the current node's modification date.
        """
        self.mtime = self.storage.modified_time(self.name)

    def update_children(self):
        """
        Triggers a re-population of the children list, by calling a parse of
        the file and then processing the result.
        """
        ret = []
        try:
            fhdl = self.storage.open(self.name, 'r')
            subfiles = self.parse_imports(fhdl)
            for subfile in subfiles:
                # Resolve each import against the search path; first hit wins.
                for path in self.searchpath:
                    candidate = "%s/%s/%s" % (self.basepath, path, subfile)
                    if self.storage.exists(candidate):
                        ret.append(get_by_name(
                            self.__class__,
                            self.storage,
                            candidate,
                            self.searchpath
                        ))
                        break
        except IOError:
            # Unreadable file: treat it as having no children.
            pass
        self.children = ret

    def parse_imports(self, fhdl):
        """
        Parses a file looking for @import directives.
        """
        ret = []
        for line in fhdl:
            matches = self.import_exp.findall(line)
            for match in matches:
                filename = ""
                # Pick the capture group matching the quote style that was
                # actually used (match[0] includes the quote characters).
                if match[0][0] == '"':
                    filename = match[1]
                elif match[0][0] == "'":
                    filename = match[2]
                elif match[0][0] == "`":
                    filename = match[3]
                if filename == '':
                    continue
                if self.import_css or not filename.endswith(".css"):
                    # If the filename lacks a known extension, record every
                    # "<filename><ext>" candidate instead of the bare name.
                    found_ext = False
                    possible = []
                    for ext in self.extensions:
                        possible.append("%s%s" % (filename, ext))
                        if filename.endswith(ext):
                            found_ext = True
                            break
                    if found_ext:
                        ret.append(filename)
                    else:
                        ret += possible
        return ret
class CssCompiler(SubProcessCompiler):
    """
    The CSS Compiler class helps to write CSS-derived file compiler by handling
    the most common tasks that are almost identical for all the compilers.
    """
    # Subclasses point this at the FileTree type that knows how to parse
    # their @import syntax and extensions.
    tree_object = BaseFileTree
    output_extension = 'css'

    def match_file(self, filename):
        # str.endswith accepts the tuple of extensions directly.
        return filename.endswith(self.tree_object.extensions)

    @staticmethod
    def get_search_path():
        """
        Returns the search path for this compiler. The @imported files will be
        looked for in this search path.
        """
        return ['.']

    def is_outdated(self, infile, outfile):
        """
        Returns True if any file in infile's @import tree is newer than
        outfile, i.e. the output needs to be recompiled.
        """
        tree = get_by_name(
            self.tree_object,
            self.storage,
            infile,
            self.get_search_path()
        )
        target_time = self.storage.modified_time(outfile)
        for node in tree.flatlist(target_time):
            if node.mtime > target_time:
                return True
        return False
def get_by_name(actual_class, storage, name, searchpath=None):
    """
    Return the file node of ``actual_class`` registered under ``name``.

    The lookup key is the normalized path.  When no node is cached yet, a
    fresh ``actual_class`` instance is created (its constructor registers
    itself in ``files_info``).
    """
    if searchpath is None:
        searchpath = ['.']
    key = normpath(name)
    cache = actual_class.files_info
    return cache[key] if key in cache else actual_class(storage, key, searchpath)
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/distutils/fcompiler/vast.py | 184 | 1775 | from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler
compilers = ['VastFCompiler']
class VastFCompiler(GnuFCompiler):
    """
    numpy.distutils compiler definition for the Pacific-Sierra Research
    VAST/f90 Fortran compiler, which reuses the GNU (g77) back end.
    """
    compiler_type = 'vast'
    compiler_aliases = ()
    description = 'Pacific-Sierra Research Fortran 90 Compiler'
    version_pattern = r'\s*Pacific-Sierra Research vf90 '\
                      '(Personal|Professional)\s+(?P<version>[^\s]*)'

    # VAST f90 does not support -o with -c. So, object files are created
    # to the current directory and then moved to build directory
    object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '

    executables = {
        'version_cmd'  : ["vf90", "-v"],
        'compiler_f77' : ["g77"],
        'compiler_fix' : ["f90", "-Wv,-ya"],
        'compiler_f90' : ["f90"],
        'linker_so'    : ["<F90>"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
    }
    module_dir_switch = None      #XXX Fix me
    module_include_switch = None  #XXX Fix me

    def find_executables(self):
        # Intentionally a no-op: the executables dict above is used as-is.
        pass

    def get_version_cmd(self):
        # The version command is "v" + the f90 compiler's basename, in the
        # same directory (e.g. /path/f90 -> /path/vf90).
        f90 = self.compiler_f90[0]
        d, b = os.path.split(f90)
        vf90 = os.path.join(d, 'v'+b)
        return vf90

    def get_flags_arch(self):
        # Temporarily masquerade as the underlying GNU compiler to reuse its
        # architecture flags, then restore the VAST version string.
        vast_version = self.get_version()
        gnu = GnuFCompiler()
        gnu.customize(None)
        self.version = gnu.get_version()
        opt = GnuFCompiler.get_flags_arch(self)
        self.version = vast_version
        return opt
if __name__ == '__main__':
    # Manual smoke test: build a VAST compiler instance and report the
    # version it detects.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='vast')
    compiler.customize()
    print(compiler.get_version())
| mit |
w1ll1am23/home-assistant | homeassistant/components/slide/__init__.py | 21 | 5503 | """Component for the Slide API."""
from datetime import timedelta
import logging
from goslideapi import GoSlideCloud, goslideapi
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.event import async_call_later, async_track_time_interval
from .const import (
API,
COMPONENT,
CONF_INVERT_POSITION,
DEFAULT_OFFSET,
DEFAULT_RETRY,
DOMAIN,
SLIDES,
)
_LOGGER = logging.getLogger(__name__)

# Default polling interval for the Slide API when the user does not set
# scan_interval in their configuration.
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)

# YAML configuration schema: username/password are required, the polling
# interval and position inversion are optional.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
                vol.Optional(
                    CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
                ): cv.time_period,
                vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the Slide platform."""

    async def update_slides(now=None):
        """Update slide information."""
        result = await hass.data[DOMAIN][API].slides_overview()

        if result is None:
            _LOGGER.error("Slide API does not work or returned an error")
            return

        if result:
            _LOGGER.debug("Slide API returned %d slide(s)", len(result))
        else:
            _LOGGER.warning("Slide API returned 0 slides")

        for slide in result:
            if "device_id" not in slide:
                _LOGGER.error(
                    "Found invalid Slide entry, device_id is missing. Entry=%s", slide
                )
                continue

            uid = slide["device_id"].replace("slide_", "")

            # Merge the fresh API data into the cached entry for this slide
            # (the entry is created on first sight).
            slidenew = hass.data[DOMAIN][SLIDES].setdefault(uid, {})
            slidenew["mac"] = uid
            slidenew["id"] = slide["id"]
            slidenew["name"] = slide["device_name"]
            slidenew["state"] = None
            oldpos = slidenew.get("pos")
            slidenew["pos"] = None
            slidenew["online"] = False
            slidenew["invert"] = config[DOMAIN][CONF_INVERT_POSITION]

            if "device_info" not in slide:
                _LOGGER.error(
                    "Slide %s (%s) has no device_info Entry=%s",
                    slide["id"],
                    slidenew["mac"],
                    slide,
                )
                continue

            # Check if we have pos (OK) or code (NOK)
            if "pos" in slide["device_info"]:
                slidenew["online"] = True
                slidenew["pos"] = slide["device_info"]["pos"]
                # Clamp the reported position into the [0, 1] range.
                slidenew["pos"] = max(0, min(1, slidenew["pos"]))
                # Derive the cover state by comparing old and new positions;
                # DEFAULT_OFFSET provides a dead zone near each end so small
                # jitter does not flip the state.
                if oldpos is None or oldpos == slidenew["pos"]:
                    slidenew["state"] = (
                        STATE_CLOSED
                        if slidenew["pos"] > (1 - DEFAULT_OFFSET)
                        else STATE_OPEN
                    )
                elif oldpos < slidenew["pos"]:
                    # Position is increasing -> moving towards closed.
                    slidenew["state"] = (
                        STATE_CLOSED
                        if slidenew["pos"] >= (1 - DEFAULT_OFFSET)
                        else STATE_CLOSING
                    )
                else:
                    # Position is decreasing -> moving towards open.
                    slidenew["state"] = (
                        STATE_OPEN
                        if slidenew["pos"] <= DEFAULT_OFFSET
                        else STATE_OPENING
                    )
            elif "code" in slide["device_info"]:
                _LOGGER.warning(
                    "Slide %s (%s) is offline with code=%s",
                    slide["id"],
                    slidenew["mac"],
                    slide["device_info"]["code"],
                )
            else:
                _LOGGER.error(
                    "Slide %s (%s) has invalid device_info %s",
                    slide["id"],
                    slidenew["mac"],
                    slide["device_info"],
                )

            _LOGGER.debug("Updated entry=%s", slidenew)

    async def retry_setup(now):
        """Retry setup if a connection/timeout happens on Slide API."""
        await async_setup(hass, config)

    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][SLIDES] = {}

    username = config[DOMAIN][CONF_USERNAME]
    password = config[DOMAIN][CONF_PASSWORD]
    scaninterval = config[DOMAIN][CONF_SCAN_INTERVAL]

    hass.data[DOMAIN][API] = GoSlideCloud(username, password)

    try:
        result = await hass.data[DOMAIN][API].login()
    except (goslideapi.ClientConnectionError, goslideapi.ClientTimeoutError) as err:
        # Transient connectivity problem: schedule a full setup retry and
        # report success so Home Assistant does not mark the integration
        # as failed.
        _LOGGER.error(
            "Error connecting to Slide Cloud: %s, going to retry in %s second(s)",
            err,
            DEFAULT_RETRY,
        )
        async_call_later(hass, DEFAULT_RETRY, retry_setup)
        return True

    if not result:
        # Authentication rejected (bad credentials): give up for good.
        _LOGGER.error("Slide API returned unknown error during authentication")
        return False

    _LOGGER.debug("Slide API successfully authenticated")

    # Prime the slide cache before loading the cover platform, then keep it
    # fresh on the configured interval.
    await update_slides()

    hass.async_create_task(async_load_platform(hass, COMPONENT, DOMAIN, {}, config))
    async_track_time_interval(hass, update_slides, scaninterval)

    return True
| apache-2.0 |
jianglu/mojo | third_party/markupsafe/__init__.py | 371 | 8205 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from markupsafe._compat import text_type, string_types, int_types, \
unichr, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
# Matches HTML comments and tags, used by Markup.striptags().
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
# Matches a single HTML entity such as "&amp;", used by Markup.unescape().
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.  This implements the `__html__` interface a couple
    of frameworks and web applications use.  :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.

    The `escape` function returns markup objects so that double escaping can't
    happen.

    The constructor of the :class:`Markup` class can be used for three
    different things: When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:

    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...     def __html__(self):
    ...         return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')

    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:

    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')

    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:

    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    # No per-instance dict: Markup behaves exactly like an immutable string.
    __slots__ = ()

    def __new__(cls, base=u'', encoding=None, errors='strict'):
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)

    def __html__(self):
        return self

    def __add__(self, other):
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented

    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented

    def __mul__(self, num):
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__

    def __mod__(self, arg):
        # Wrap the %-format argument(s) so that any string conversion done
        # during formatting goes through escape() first.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )

    def join(self, seq):
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__

    def split(self, *args, **kwargs):
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__

    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__

    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__

    def unescape(self):
        r"""Unescape markup again into an text_type string.  This also resolves
        known HTML4 and XHTML entities:

        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            # Unknown or malformed entity: drop it entirely.
            return u''
        return _entity_re.sub(handle_match, text_type(self))

    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags.  This
        also resolves known HTML4 and XHTML entities.  Whitespace is
        normalized to one:

        >>> Markup("Main &raquo;  <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()

    @classmethod
    def escape(cls, s):
        """Escape the string.  Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv

    def make_wrapper(name):
        # Build a Markup-aware version of the text_type method `name`: every
        # positional string argument is escaped and the result is re-wrapped
        # as Markup.
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            #_escape_argspec(kwargs, kwargs.iteritems(), None)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func

    # Install escaping wrappers for all the plain string methods.  This runs
    # at class-creation time, so locals() here is the class namespace.
    for method in '__getitem__', 'capitalize', \
                  'title', 'lower', 'upper', 'replace', 'ljust', \
                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
                  'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_wrapper(method)

    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))

    # new in python 2.6
    if hasattr(text_type, 'format'):
        format = make_wrapper('format')

    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_wrapper('__getslice__')

    # Clean the helper machinery out of the class namespace.
    del method, make_wrapper
def _escape_argspec(obj, iterable, escape):
    """Escape string-like entries of *obj* in place and return it.

    *iterable* yields ``(key, value)`` pairs addressing positions in *obj*;
    only values that are strings or expose ``__html__`` are escaped.
    """
    for key, value in iterable:
        escapable = isinstance(value, string_types) or hasattr(value, '__html__')
        if escapable:
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Helper for Markup.__mod__: wraps a %-format argument so that any
    string conversion applied during formatting is escaped first."""

    def __init__(self, obj, escape):
        self.obj = obj
        self.escape = escape

    # Delegate the conversions %-formatting may perform: string results are
    # escaped, numeric conversions pass through unchanged.
    __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
    __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
    __repr__ = lambda s: str(s.escape(repr(s.obj)))
    __int__ = lambda s: int(s.obj)
    __float__ = lambda s: float(s.obj)
# These imports must stay at the bottom of the module: both the C speedups
# and the pure-Python fallback import the Markup type defined above, so a
# top-of-file import would be circular.
try:
    from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
    # C extension not available; fall back to the native implementation.
    from markupsafe._native import escape, escape_silent, soft_unicode

if not PY2:
    # Expose the Python-3-friendly alias for soft_unicode.
    soft_str = soft_unicode
    __all__.append('soft_str')
| bsd-3-clause |
jmcorgan/gnuradio | gr-qtgui/examples/pyqt_example_f.py | 58 | 6185 | #!/usr/bin/env python
#
# Copyright 2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
    """Top-level window placing the spectrum display and the control panel
    side by side."""
    def __init__(self, display, control):
        QtGui.QWidget.__init__(self, None)
        self.setWindowTitle('PyQt Test GUI')

        # Give the display widget the stretch factor so it takes the space.
        self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
        self.boxlayout.addWidget(display, 1)
        self.boxlayout.addWidget(control)

        self.resize(800, 500)
class control_box(QtGui.QWidget):
    """Form of line edits controlling frequency and amplitude of the two
    attached signal sources."""
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle('Control Panel')

        self.setToolTip('Control the signals')
        QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))

        self.layout = QtGui.QFormLayout(self)

        # Control the first signal
        self.freq1Edit = QtGui.QLineEdit(self)
        self.freq1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
        self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq1EditText)

        self.amp1Edit = QtGui.QLineEdit(self)
        self.amp1Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
        self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp1EditText)

        # Control the second signal
        self.freq2Edit = QtGui.QLineEdit(self)
        self.freq2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
        self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.freq2EditText)

        self.amp2Edit = QtGui.QLineEdit(self)
        self.amp2Edit.setMinimumWidth(100)
        self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
        self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
                     self.amp2EditText)

        self.quit = QtGui.QPushButton('Close', self)
        self.quit.setMinimumWidth(100)
        self.layout.addWidget(self.quit)
        self.connect(self.quit, QtCore.SIGNAL('clicked()'),
                     QtGui.qApp, QtCore.SLOT('quit()'))

    def attach_signal1(self, signal):
        # Keep a handle on the source block and show its current settings.
        self.signal1 = signal
        self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
        self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))

    def attach_signal2(self, signal):
        self.signal2 = signal
        self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
        self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))

    def freq1EditText(self):
        # Ignore non-numeric input instead of raising into the Qt loop.
        try:
            newfreq = float(self.freq1Edit.text())
            self.signal1.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp1EditText(self):
        try:
            newamp = float(self.amp1Edit.text())
            self.signal1.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"

    def freq2EditText(self):
        try:
            newfreq = float(self.freq2Edit.text())
            self.signal2.set_frequency(newfreq)
        except ValueError:
            print "Bad frequency value entered"

    def amp2EditText(self):
        try:
            newamp = float(self.amp2Edit.text())
            self.signal2.set_amplitude(newamp)
        except ValueError:
            print "Bad amplitude value entered"
class my_top_block(gr.top_block):
    """Flowgraph: two sine sources plus Gaussian noise, summed and fed into
    a qtgui frequency sink embedded in the dialog_box window."""
    def __init__(self):
        gr.top_block.__init__(self)

        Rs = 8000
        f1 = 1000
        f2 = 2000

        fftsize = 2048

        self.qapp = QtGui.QApplication(sys.argv)

        src1 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
        src2 = analog.sig_source_f(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
        src = blocks.add_ff()
        # Throttle keeps the flowgraph from spinning at full CPU speed since
        # there is no hardware clock in this graph.
        thr = blocks.throttle(gr.sizeof_float, 100*fftsize)
        noise = analog.noise_source_f(analog.GR_GAUSSIAN, 0.001)
        add = blocks.add_ff()
        self.snk1 = qtgui.sink_f(fftsize, filter.firdes.WIN_BLACKMAN_hARRIS,
                                 0, Rs,
                                 "Float Signal Example",
                                 True, True, True, False)

        self.connect(src1, (src,0))
        self.connect(src2, (src,1))
        self.connect(src, thr, (add,0))
        self.connect(noise, (add,1))
        self.connect(add, self.snk1)

        self.ctrl_win = control_box()
        self.ctrl_win.attach_signal1(src1)
        self.ctrl_win.attach_signal2(src2)

        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.pyqwidget()

        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt4.QtGui.QWidget
        pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)

        self.main_box = dialog_box(pyWin, self.ctrl_win)
        self.main_box.show()
if __name__ == "__main__":
tb = my_top_block();
tb.start()
tb.qapp.exec_()
tb.stop()
| gpl-3.0 |
harry-7/addons-server | src/olympia/amo/tests/test_readonly.py | 2 | 1197 | from django.db import models
import MySQLdb as mysql
import pytest
from pyquery import PyQuery as pq
from olympia.addons.models import Addon
@pytest.yield_fixture
def read_only_mode(client, settings, db):
    """Fixture putting the app into read-only mode for the duration of a
    test: every model save/delete raises OperationalError, and the
    read-only settings are applied before middleware is reloaded."""
    def _db_error(*args, **kwargs):
        raise mysql.OperationalError("You can't do this in read-only mode.")

    settings.SLAVE_DATABASES = ['default']
    models.signals.pre_save.connect(_db_error)
    models.signals.pre_delete.connect(_db_error)

    # NOTE(review): imported here rather than at module level, presumably so
    # the (patched) test settings are already in place -- confirm.
    from olympia.lib.settings_base import read_only_mode
    env = {key: getattr(settings, key) for key in settings._explicit_settings}
    read_only_mode(env)
    for key, value in env.items():
        setattr(settings, key, value)

    # Re-build the middleware stack so it picks up the new settings.
    client.handler.load_middleware()

    yield

    # Teardown: reconnect normal DB behavior.
    models.signals.pre_save.disconnect(_db_error)
    models.signals.pre_delete.disconnect(_db_error)
def test_db_error(read_only_mode):
    """Saving a model while in read-only mode raises OperationalError."""
    with pytest.raises(mysql.OperationalError):
        Addon.objects.create(id=12)
def test_bail_on_post(read_only_mode, client):
    """POST requests in read-only mode get a 503 maintenance response."""
    r = client.post('/en-US/firefox/')
    assert r.status_code == 503
    title = pq(r.content)('title').text()
    assert title.startswith('Maintenance in progress'), title
| bsd-3-clause |
olologin/scikit-learn | build_tools/circle/check_build_doc.py | 49 | 2478 | """Check whether or not we should build the documentation
If the last commit message has a "[doc skip]" marker, do not build
the doc. On the contrary if a "[doc build]" marker is found, build the doc
instead of relying on the subsequent rules.
We always build the documentation for jobs that are not related to a specific
PR (e.g. a merge to master or a maintenance branch).
If this is a PR, check whether it modifies any files under the "doc/" or
"examples/" folders; if it does not, skip the doc build.
If the introspection of the current commit fails for any reason, the default
behavior is to build the documentation.
"""
import sys
import os
from subprocess import check_output, CalledProcessError
def exit(msg="", skip=False):
    """Print a SKIP/BUILD verdict line and terminate with exit status 0."""
    verdict = "SKIP" if skip else "BUILD"
    print("{}: {}".format(verdict, msg))
    sys.exit(0)
# Introspect the message for the commit that triggered the build
commit = os.environ.get('CIRCLE_SHA1')
if not commit:
    exit("undefined CIRCLE_SHA1 variable")
try:
    commit_msg = check_output("git log --format=%B -n 1".split() + [commit])
    commit_msg = commit_msg.decode('utf-8')
except CalledProcessError:
    exit("failed to introspect commit message for %s" % commit)
# Explicit markers in the commit message override every other rule.
if "[doc skip]" in commit_msg:
    exit("[doc skip] marker found", skip=True)
elif "[doc build]" in commit_msg:
    exit("[doc build] marker found")
# Check whether this commit is part of a pull request or not
pr_url = os.environ.get('CI_PULL_REQUEST')
if not pr_url:
    # The documentation should be always built when executed from one of the
    # main branches
    exit("not a pull request")
# Introspect the list of files changed by all the commits in this PR.
# Hardcode the assumption that this is a PR to origin/master of this repo
# as apparently there is no way to reliably get the target of a PR with
# circle ci
git_range = "origin/master...%s" % commit
try:
    check_output("git fetch origin master".split())
    filenames = check_output("git diff --name-only".split() + [git_range])
except CalledProcessError:
    exit("git introspection failed.")
filenames = filenames.decode('utf-8').split()
for filename in filenames:
    if filename.startswith(u'doc/') or filename.startswith(u'examples/'):
        exit("detected doc impacting file modified by PR in range %s: %s"
             % (git_range, filename))
# This PR does not seem to have any documentation related file changed.
msg = "no doc impacting files detected:\n" + u"\n".join(filenames)
exit(msg, skip=True)
| bsd-3-clause |
azumimuo/family-xbmc-addon | plugin.video.sanctuary/lib/freeview/process.py | 1 | 10551 | import urllib2, urllib, xbmcgui, xbmcplugin, xbmc, re, sys, os, xbmcaddon, json, time
# Kodi add-on filesystem layout and shared module globals.
ADDON_PATH = xbmc.translatePath('special://home/addons/plugin.video.sanctuary/')
USERDATA_PATH = xbmc.translatePath('special://home/userdata/addon_data')
ADDON_DATA = USERDATA_PATH + '/Sanctuary/'
if not os.path.exists(ADDON_DATA):
    os.makedirs(ADDON_DATA)
ICON = ADDON_PATH + 'icon.png'
FANART = ADDON_PATH + 'fanart.jpg'
Dialog = xbmcgui.Dialog()
addon_id = 'plugin.video.sanctuary'
ADDON = xbmcaddon.Addon(id=addon_id)
PATH = 'Sanctuary'
VERSION = '0.0.1'
addon_resources = ADDON_PATH + 'scripts/'
favourites = ADDON_DATA + 'favourites'
# FAV is the raw JSON text of the favourites file (a str), or an empty list
# when the file does not exist yet -- callers only use it with `in`.
if os.path.exists(favourites) == True:
    FAV = open(favourites).read()
else:
    FAV = []
dp = xbmcgui.DialogProgress()
addon_handle = int(sys.argv[1])  # plugin invocation handle passed by Kodi
List = []
temp_file = ADDON_PATH + 'Temp.txt'
debug = ADDON.getSetting('debug')  # 'true'/'false' string from settings
def Menu(name, url, mode, iconimage, fanart, description, extra, showcontext=True, allinfo={}):
    """Add a folder (directory) entry to the Kodi listing.

    Falls back to the add-on's icon/fanart when none is supplied and wires
    up the add/remove-favourite context menu entries.  Returns the result
    of xbmcplugin.addDirectoryItem.
    """
    # Treat missing or blank-string artwork as "use the add-on defaults".
    if iconimage in ('', ' '):
        iconimage = ICON
    if fanart in ('', ' '):
        fanart = FANART
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&fanart="+urllib.quote_plus(fanart)+"&description="+urllib.quote_plus(description)
    liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": description})
    liz.setProperty("Fanart_Image", fanart)
    if showcontext:
        contextMenu = []
        if showcontext == 'fav':
            contextMenu.append(('Remove from Sanctuary Favorites','XBMC.RunPlugin(%s?mode=12&name=%s)'
                %(sys.argv[0], urllib.quote_plus(name))))
        if not name in FAV:
            contextMenu.append(('Add to Sanctuary Favorites','XBMC.RunPlugin(%s?mode=11&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=%s)'
                %(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart), mode)))
        liz.addContextMenuItems(contextMenu)
    # BUG FIX: an xbmcplugin.endOfDirectory() call after this return was
    # unreachable dead code and has been removed.
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
def Play(name, url, mode, iconimage, fanart, description, extra, showcontext=True, allinfo={}):
    """Add a playable (non-folder) entry to the Kodi listing.

    Mirrors Menu() but with isFolder=False.  Blank artwork falls back to
    the add-on defaults (now also treating ' ' as blank, consistent with
    Menu).  Returns the result of xbmcplugin.addDirectoryItem.
    """
    if iconimage in ('', ' '):
        iconimage = ICON
    if fanart in ('', ' '):
        fanart = FANART
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&fanart="+urllib.quote_plus(fanart)+"&description="+urllib.quote_plus(description)
    liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": description})
    liz.setProperty("Fanart_Image", fanart)
    if showcontext:
        contextMenu = []
        if showcontext == 'fav':
            contextMenu.append(('Remove from Sanctuary Favorites','XBMC.RunPlugin(%s?mode=12&name=%s)'
                %(sys.argv[0], urllib.quote_plus(name))))
        if not name in FAV:
            contextMenu.append(('Add to Sanctuary Favorites','XBMC.RunPlugin(%s?mode=11&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=%s)'
                %(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart), mode)))
        liz.addContextMenuItems(contextMenu)
    # BUG FIX: an xbmcplugin.endOfDirectory() call after this return was
    # unreachable dead code and has been removed.
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=False)
#===============================Favourites-----------Not sure whose code this is but credit due to them-------------------------------
def addon_log(string):
    """Write a debug line to the Kodi log when the add-on debug setting is on."""
    if debug == 'true':
        # BUG FIX: previously referenced an undefined name `addon_version`,
        # raising NameError whenever debug logging was enabled; use the
        # module-level VERSION constant instead.
        xbmc.log("[addon.live.Sanctuary-%s]: %s" % (VERSION, string))
def addFavorite(name, url, iconimage, fanart, mode, playlist=None, regexs=None):
    """Append one favourite record to the JSON favourites file.

    Creates the file on first use.  Records are stored as 7-tuples:
    (name, url, iconimage, fanart, mode, playlist, regexs).
    """
    try:
        # Kodi hands us unicode names; store them utf-8 encoded.
        name = name.encode('utf-8', 'ignore')
    except:
        pass
    if not os.path.exists(favourites):
        addon_log('Making Favorites File')
        data = [(name, url, iconimage, fanart, mode, playlist, regexs)]
    else:
        addon_log('Appending Favorites')
        data = json.loads(open(favourites).read())
        # BUG FIX: the append branch previously stored only a 5-tuple,
        # silently dropping playlist/regexs; keep the record shape uniform
        # (getFavorites already reads fields 5 and 6 defensively).
        data.append((name, url, iconimage, fanart, mode, playlist, regexs))
    out = open(favourites, "w")
    try:
        out.write(json.dumps(data))
    finally:
        out.close()
def getFavorites():
    """List saved favourites, creating a placeholder file on first run.

    Each stored record is (name, url, iconimage, fanart, mode[, playlist,
    regexs]); older files may lack the last two fields.
    """
    if not os.path.exists(favourites):
        addon_log('Making Favorites File')
        favList = [('Sanctuary Favourites Section', '', '', '', '', '', '')]
        a = open(favourites, "w")
        a.write(json.dumps(favList))
        a.close()
    else:
        items = json.loads(open(favourites).read())
        for i in items:
            name = i[0]
            url = i[1]
            iconimage = i[2]
            try:
                fanArt = i[3]
                if fanArt is None:
                    raise ValueError(fanArt)
            except:
                if ADDON.getSetting('use_thumb') == "true":
                    fanArt = iconimage
                else:
                    fanArt = FANART
            try:
                playlist = i[5]
            except:
                playlist = None
            try:
                regexs = i[6]
            except:
                regexs = None
            # BUG FIX: the computed fanArt was previously discarded and the
            # (possibly undefined) module-level `fanart` passed instead.
            mode = i[4] if i[4] != 0 else ''
            Menu(name, url, mode, iconimage, fanArt, '', '', 'fav')
        # BUG FIX: end the directory once after all items, not per item.
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
def rmFavorite(name):
    """Delete the first favourite whose stored name matches, then refresh."""
    entries = json.loads(open(favourites).read())
    for idx, entry in enumerate(entries):
        if entry[0] == name:
            del entries[idx]
            out = open(favourites, "w")
            out.write(json.dumps(entries))
            out.close()
            break
    xbmc.executebuiltin("XBMC.Container.Refresh")
############################## FAVOURITES END ###############################
def get_params():
    """Parse Kodi's plugin query string (sys.argv[2]) into a dict.

    Only `key=value` pairs that split into exactly two pieces are kept,
    matching the historical behaviour.  Returns an empty dict when there
    is no meaningful query string (previously an empty list -- both are
    falsy and every caller wraps access in try/except, so this is safe).

    Removed dead code: the original trimmed a trailing '/' off a variable
    it never used again (and with an off-by-one, dropping two characters).
    """
    param = {}
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        # Strip the leading '?' (and any stray ones) before splitting.
        cleanedparams = paramstring.replace('?', '')
        for pair in cleanedparams.split('&'):
            splitparams = pair.split('=')
            if len(splitparams) == 2:
                param[splitparams[0]] = splitparams[1]
    return param
params = get_params()
# Pull each recognised parameter out of the query dict; anything missing
# simply keeps its default below (the bare excepts swallow the KeyError).
url = None
name = None
iconimage = None
mode = None
description = None
extra = None
fav_mode = None
try:
    fav_mode = int(params["fav_mode"])
except:
    pass
try:
    extra = urllib.unquote_plus(params["extra"])
except:
    pass
try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    iconimage = urllib.unquote_plus(params["iconimage"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
    # NOTE(review): unlike the others, `fanart` has no default above, so the
    # name stays undefined when the parameter is absent -- confirm all users
    # of the global tolerate that.
    fanart = urllib.unquote_plus(params["fanart"])
except:
    pass
try:
    description = urllib.unquote_plus(params["description"])
except:
    pass
def Resolve(url):
    """Best-effort direct playback of an already-resolved media URL."""
    play = xbmc.Player()
    # Removed an unused function-local `import urlresolver` -- the module
    # was never referenced here.
    try:
        play.play(url)
    except:
        pass
def OPEN_URL(url):
    """Fetch a URL with a desktop-browser User-Agent.

    Returns the response body, or the sentinel string 'Opened' when the
    request failed or returned an empty body (callers treat any non-empty
    string as success; errors are never propagated).
    """
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = ''
    link = ''
    try:
        response = urllib2.urlopen(req)
        link = response.read()
        response.close()
    except:
        # Best-effort: swallow network errors and fall through to sentinel.
        pass
    if link != '':
        return link
    else:
        link = 'Opened'
        return link
def setView(content, viewType):
    """Tag the current listing with a content type (viewType is unused)."""
    if not content:
        return
    xbmcplugin.setContent(int(sys.argv[1]), content)
def resolve_playercore(url):
    """Play a URL with the user's preferred Kodi player core, then close the listing."""
    play = xbmc.Player(GetPlayerCore())
    import urlresolver  # NOTE(review): imported but never used here -- confirm whether needed for side effects
    try:
        play.play(url)
    except:
        pass
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def Big_Resolve(url):
    """Resolve a hosted media URL via urlresolver and play it, with a progress dialog.

    NOTE(review): the flow is convoluted -- play.play(url) is attempted on
    the raw URL before resolving, then again on the resolved URL; confirm
    the first attempt is intentional and not leftover code.
    """
    play = xbmc.Player(GetPlayerCore())
    import urlresolver
    try:
        play.play(url)
    except:
        pass
    from urlresolver import common
    dp = xbmcgui.DialogProgress()
    dp.create('LOADING', 'Opening %s Now' % (name))
    play = xbmc.Player(GetPlayerCore())
    url = urlresolver.HostedMediaFile(url).resolve()
    if dp.iscanceled():
        print "[COLORblue]STREAM CANCELLED[/COLOR]"  # need to get this part working
        dialog = xbmcgui.Dialog()
        if dialog.yesno("[B]CANCELLED[/B]", '[B]Was There A Problem[/B]', '', "", 'Yes', 'No'):
            dialog.ok("Message Send", "Your Message Has Been Sent")
        else:
            return
    else:
        try:
            play.play(url)
        except:
            pass
        try:
            ADDON.resolve_url(url)
        except:
            pass
    dp.close()
def GetPlayerCore():
    """Map the saved 'core-player' setting to a Kodi player-core constant.

    Falls back to PLAYER_CORE_AUTO when the setting is missing, unknown,
    or getSet() fails.  (Two unreachable statements after the original
    return -- `return True` and an endOfDirectory call -- were removed.)
    """
    try:
        # NOTE(review): getSet is not defined in this module; the bare
        # except below also masks a NameError if it is missing at runtime.
        PlayerMethod = getSet("core-player")
        if PlayerMethod == 'DVDPLAYER':
            PlayerMeth = xbmc.PLAYER_CORE_DVDPLAYER
        elif PlayerMethod == 'MPLAYER':
            PlayerMeth = xbmc.PLAYER_CORE_MPLAYER
        elif PlayerMethod == 'PAPLAYER':
            PlayerMeth = xbmc.PLAYER_CORE_PAPLAYER
        else:
            PlayerMeth = xbmc.PLAYER_CORE_AUTO
    except:
        PlayerMeth = xbmc.PLAYER_CORE_AUTO
    return PlayerMeth
# Mode dispatch: Kodi re-invokes this script with ?mode=N for each action.
# 10 = list favourites, 11 = add favourite, 12 = remove favourite.
if mode == 10:
    addon_log("getFavorites")
    getFavorites()
elif mode == 11:
    addon_log("addFavorite")
    # Normalise display names ("channel \ title" / "title - extra") before
    # saving; the bare excepts cover names without those separators.
    try:
        name = name.split('\\ ')[1]
    except:
        pass
    try:
        name = name.split(' - ')[0]
    except:
        pass
    addFavorite(name, url, iconimage, fanart, fav_mode)
elif mode == 12:
    addon_log("rmFavorite")
    # Same normalisation as mode 11 so lookups match the stored name.
    try:
        name = name.split('\\ ')[1]
    except:
        pass
    try:
        name = name.split(' - ')[0]
    except:
        pass
    rmFavorite(name)
| gpl-2.0 |
Bashar/django | tests/proxy_model_inheritance/tests.py | 34 | 1921 | from __future__ import absolute_import, unicode_literals
import os
from django.core.management import call_command
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_system_checks, extend_sys_path
from django.utils._os import upath
from .models import (ConcreteModel, ConcreteModelSubclass,
ConcreteModelSubclassProxy)
class ProxyModelInheritanceTests(TransactionTestCase):
    """
    Proxy model inheritance across apps can result in migrate not creating the table
    for the proxied model (as described in #12286). This test creates two dummy
    apps and calls migrate, then verifies that the table has been created.
    """
    available_apps = []

    # `auth` app is imported, but not installed in this test, so we need to
    # exclude checks registered by this app.
    @override_system_checks([])
    def test_table_exists(self):
        # Make the dummy app packages importable only for this test's scope.
        with extend_sys_path(os.path.dirname(os.path.abspath(upath(__file__)))):
            with self.modify_settings(INSTALLED_APPS={'append': ['app1', 'app2']}):
                call_command('migrate', verbosity=0)
                # Imports deferred until after migrate so the tables exist.
                from app1.models import ProxyModel
                from app2.models import NiceModel
                # Zero-count queries succeed only if both tables were created.
                self.assertEqual(NiceModel.objects.all().count(), 0)
                self.assertEqual(ProxyModel.objects.all().count(), 0)
class MultiTableInheritanceProxyTest(TestCase):

    def test_model_subclass_proxy(self):
        """
        Deleting an instance of a model proxying a multi-table inherited
        subclass should cascade delete down the whole inheritance chain (see
        #18083).
        """
        instance = ConcreteModelSubclassProxy.objects.create()
        instance.delete()
        # Every level of the chain must be gone, not just the proxied leaf.
        self.assertEqual(0, ConcreteModelSubclassProxy.objects.count())
        self.assertEqual(0, ConcreteModelSubclass.objects.count())
        self.assertEqual(0, ConcreteModel.objects.count())
| bsd-3-clause |
abloomston/sympy | sympy/plotting/pygletplot/plot_interval.py | 94 | 5432 | from __future__ import print_function, division
from sympy import Symbol, Integer, sympify
from sympy.core.compatibility import range
class PlotInterval(object):
    """
    Represents a plot interval (v, v_min, v_max, v_steps): a sympy Symbol
    plus numeric bounds and a positive integer step count.  Components may
    be filled in incrementally; methods decorated with require_all_args
    refuse to run until all four are set.
    """
    _v, _v_min, _v_max, _v_steps = None, None, None, None

    def require_all_args(f):
        # Decorator deliberately defined inside the class body: wraps
        # methods that need a fully specified interval.
        def check(self, *args, **kwargs):
            for g in [self._v, self._v_min, self._v_max, self._v_steps]:
                if g is None:
                    raise ValueError("PlotInterval is incomplete.")
            return f(self, *args, **kwargs)
        return check

    def __init__(self, *args):
        """Accept another PlotInterval, a string to evaluate, or a
        tuple/list of up to four items ([v,] v_min, v_max[, v_steps])."""
        if len(args) == 1:
            if isinstance(args[0], PlotInterval):
                self.fill_from(args[0])
                return
            elif isinstance(args[0], str):
                # SECURITY NOTE: eval of a caller-supplied string; only safe
                # for trusted (interactive plotting) input.  NOTE(review):
                # only TypeError is caught -- other eval failures propagate.
                try:
                    args = eval(args[0])
                except TypeError:
                    s_eval_error = "Could not interpret string %s."
                    raise ValueError(s_eval_error % (args[0]))
            elif isinstance(args[0], (tuple, list)):
                args = args[0]
            else:
                raise ValueError("Not an interval.")
        if not isinstance(args, (tuple, list)) or len(args) > 4:
            f_error = "PlotInterval must be a tuple or list of length 4 or less."
            raise ValueError(f_error)

        args = list(args)
        # A leading Symbol (or None placeholder) names the plot variable;
        # the remainder is (v_min, v_max[, v_steps]) or a lone v_steps.
        if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)):
            self.v = args.pop(0)
        if len(args) in [2, 3]:
            self.v_min = args.pop(0)
            self.v_max = args.pop(0)
            if len(args) == 1:
                self.v_steps = args.pop(0)
        elif len(args) == 1:
            self.v_steps = args.pop(0)

    def get_v(self):
        return self._v

    def set_v(self, v):
        # None clears the component; otherwise it must be a sympy Symbol.
        if v is None:
            self._v = None
            return
        if not isinstance(v, Symbol):
            raise ValueError("v must be a sympy Symbol.")
        self._v = v

    def get_v_min(self):
        return self._v_min

    def set_v_min(self, v_min):
        if v_min is None:
            self._v_min = None
            return
        try:
            # Sympify, then prove it evaluates to a real float.
            self._v_min = sympify(v_min)
            float(self._v_min.evalf())
        except TypeError:
            raise ValueError("v_min could not be interpreted as a number.")

    def get_v_max(self):
        return self._v_max

    def set_v_max(self, v_max):
        if v_max is None:
            self._v_max = None
            return
        try:
            self._v_max = sympify(v_max)
            float(self._v_max.evalf())
        except TypeError:
            raise ValueError("v_max could not be interpreted as a number.")

    def get_v_steps(self):
        return self._v_steps

    def set_v_steps(self, v_steps):
        if v_steps is None:
            self._v_steps = None
            return
        if isinstance(v_steps, int):
            v_steps = Integer(v_steps)
        elif not isinstance(v_steps, Integer):
            raise ValueError("v_steps must be an int or sympy Integer.")
        if v_steps <= Integer(0):
            raise ValueError("v_steps must be positive.")
        self._v_steps = v_steps

    @require_all_args
    def get_v_len(self):
        # v_steps subdivisions give v_steps + 1 sample points.
        return self.v_steps + 1

    v = property(get_v, set_v)
    v_min = property(get_v_min, set_v_min)
    v_max = property(get_v_max, set_v_max)
    v_steps = property(get_v_steps, set_v_steps)
    v_len = property(get_v_len)

    def fill_from(self, b):
        """Copy every component that is set on interval `b` onto self."""
        if b.v is not None:
            self.v = b.v
        if b.v_min is not None:
            self.v_min = b.v_min
        if b.v_max is not None:
            self.v_max = b.v_max
        if b.v_steps is not None:
            self.v_steps = b.v_steps

    @staticmethod
    def try_parse(*args):
        """
        Returns a PlotInterval if args can be interpreted
        as such, otherwise None.
        """
        if len(args) == 1 and isinstance(args[0], PlotInterval):
            return args[0]
        try:
            return PlotInterval(*args)
        except ValueError:
            return None

    def _str_base(self):
        # Shared comma-joined body used by both __repr__ and __str__.
        return ",".join([str(self.v), str(self.v_min),
                         str(self.v_max), str(self.v_steps)])

    def __repr__(self):
        """
        A string representing the interval in class constructor form.
        """
        return "PlotInterval(%s)" % (self._str_base())

    def __str__(self):
        """
        A string representing the interval in list form.
        """
        return "[%s]" % (self._str_base())

    @require_all_args
    def assert_complete(self):
        # All validation happens in the require_all_args decorator.
        pass

    @require_all_args
    def vrange(self):
        """
        Yields v_steps+1 sympy numbers ranging from
        v_min to v_max.
        """
        d = (self.v_max - self.v_min) / self.v_steps
        for i in range(self.v_steps + 1):
            a = self.v_min + (d * Integer(i))
            yield a

    @require_all_args
    def vrange2(self):
        """
        Yields v_steps pairs of sympy numbers ranging from
        (v_min, v_min + step) to (v_max - step, v_max).
        """
        d = (self.v_max - self.v_min) / self.v_steps
        a = self.v_min + (d * Integer(0))
        for i in range(self.v_steps):
            b = self.v_min + (d * Integer(i + 1))
            yield a, b
            a = b

    def frange(self):
        # Float variant of vrange(); inherits its completeness check.
        for i in self.vrange():
            yield float(i.evalf())
| bsd-3-clause |
dongweiming/code | vilya/models/sphinx_docs.py | 3 | 17656 | # coding: utf-8
import pickle
import tempfile
import os
import logging
# Module logger with short aliases used throughout this file.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
debug = logger.debug
warn = logger.warn
import subprocess as sp
import shutil
import datetime as dt
from urllib import quote
import sphinx # noqa Some pickles need this import
from vilya.libs.store import mc
from vilya.libs.gyt import GytRepoNotInited, GytError
from vilya.libs.permdir import get_tmpdir, get_repo_root
from vilya.models.project import CodeDoubanProject
from vilya.models.consts import (
DOC_EXT, SPHINX_BUILD_DOCTREES,
SPHINX_BUILDER_TYPES, SPHINX_DEFAULT_CHECKOUT_ROOT)
DEFAULT_BUILDER = 'pickle'
ENABLED_BUILDERS = SPHINX_BUILDER_TYPES
# Memcache key templates: the first takes project_id, the second takes
# (project_id, builder_name).
LAST_BUILD_MC_KEY = 'sphinx-docs:%s:last_build:v2'
LAST_TREE_HASH_MC_KEY = 'sphinx-docs:%s:%s:last_tree_hash:v1'
def _move_to_dir(fromd, tod):
shutil.rmtree(tod, ignore_errors=True)
shutil.copytree(fromd, tod)
shutil.rmtree(fromd)
def _export_docs_tree(project_id, tree_hash, temp_dir):
    """Check out git tree `tree_hash` of the project into `temp_dir`.

    Returns (True, '') on success, (False, reason) when the tree object
    cannot be unpacked; other git errors propagate.
    """
    _call = CodeDoubanProject.get(project_id).git.call
    debug('Exporting docs tree')
    # Reset the index before reading the requested tree into it.
    _call('read-tree --empty')
    try:
        _call('read-tree %s' % tree_hash)
    except GytError as e:
        if 'failed to unpack tree object' in e.args[2]:
            return False, 'failed to unpack tree object'
        else:
            raise
    # TODO use gyt repo to handle worktree
    _call('--work-tree %s checkout-index --force -a' % temp_dir)
    debug('Checked out the docs content work-tree')
    return True, ''
def _tree_hash(project_id, path):
    """Return the git tree hash of `path` at HEAD, or a falsy value on failure."""
    project = CodeDoubanProject.get(project_id)
    return project.git.call('rev-parse HEAD:%s' % path, _raise=False)
def _builder_conf(project_id, builder_name):
    """Return the docs config dict for one builder, with defaults filled in.

    When the project has no 'docs' config (or no section for this builder),
    a minimal {'dir': builder_name} default is returned, matching the
    original behaviour (no 'checkout_root' key in that case).
    """
    project = CodeDoubanProject.get(project_id)
    docs_conf = project.conf.get('docs')
    if not docs_conf:
        return {'dir': builder_name}
    conf = docs_conf.get(builder_name, False)
    if not conf:
        return {'dir': builder_name}
    conf.setdefault('dir', builder_name)  # When no explicit dir, use the name
    conf.setdefault('checkout_root', SPHINX_DEFAULT_CHECKOUT_ROOT)
    return conf
def _builders_list(project_id):
    """Return the configured builder names, ordered by their 'sort' value.

    Falls back to [DEFAULT_BUILDER] when the project has no docs config.
    """
    prj = CodeDoubanProject.get(project_id)
    # Consistency fix: siblings (_builder_conf, SphinxDocs.__init__) access
    # the docs config via .get(); the bare ['docs'] lookup here could raise
    # KeyError instead of falling back to the default builder.
    docs_conf = prj.conf.get('docs')
    if not docs_conf:
        return [DEFAULT_BUILDER]
    ordered = sorted((conf.get('sort'), name) for name, conf in docs_conf.items())
    return [name for _sort, name in ordered]
def _check_conf(conf):
    """Validate a project 'docs' config dict; raises AssertionError on problems.

    Callers (SphinxDocs.__init__) catch AssertionError and surface its
    message, so assertions are the expected failure channel here.
    """
    assert isinstance(conf, dict), "Docs config must be a dict"
    sections = conf.values()
    assert all(isinstance(section, dict) for section in sections), "All docs confs must be dicts"
    assert all(
        section.get('builder', DEFAULT_BUILDER) in ENABLED_BUILDERS
        for section in sections), (
        "All docs confs must have a builder chosen in %s (choose %s if unsure)"
        % (ENABLED_BUILDERS, DEFAULT_BUILDER))
def guess_builder_from_path(builders, proj, path):
    """Return (builder_name, explicit) for a request path.

    The first builder whose docs URL prefix matches `path` wins, with
    explicit=True; otherwise the first configured builder is the implicit
    default (explicit=False).
    """
    assert len(builders) > 0, "Need at least one builder"
    for candidate in builders:
        prefix = "/%s/docs/%s" % (proj, candidate)
        if path == prefix or path.startswith(prefix + "/"):
            return candidate, True
    return builders[0], False  # First default, and implicit
class SphinxDocs(object):  # TODO rename this is not sphinx-only
    """Facade over a project's documentation builders.

    Validates the project's 'docs' config, exposes the configured builders
    and drives full builds, recording progress snapshots in memcache.
    """
    builds_logs = []
    disabled_reason = 'Unknown reason'

    def __init__(self, project_name, allow_old_conf=True):
        project = CodeDoubanProject.get_by_name(project_name)
        assert project, "Need existing project"
        self.project_id = project.id
        # if not project or not is_git_dir(project.git_path):
        if not project:
            self.enabled = False
            self.disabled_reason = "No project +_+"
            return
        try:
            self.conf_new = project.conf.get('docs', None)
        except GytRepoNotInited:
            self.enabled = False
            self.disabled_reason = "No project +_+"
            return
        if self.conf_new:
            try:
                _check_conf(self.conf_new)
            except AssertionError, err:
                logging.warning("Docs config error: %s" % err)
                self.enabled = False
                self.disabled_reason = str(err)
                return
        self.enabled = True
        self.builders = _builders_list(project.id)
        # NOTE(review): every instantiation wipes the cached last-build
        # info; confirm that is intended and not only meant for build_all().
        mc.set(LAST_BUILD_MC_KEY % self.project_id, None)

    def last_build_info(self):
        """Return the memcached dict describing the latest build, if any."""
        return mc.get(LAST_BUILD_MC_KEY % self.project_id)

    def need_rebuild(self):
        """True when no build is recorded or any builder's source changed."""
        last_build = self.last_build_info()
        if not last_build or not last_build['builds']:
            return True
        for builder_name in self.builders:
            builder = self.get_builder(builder_name)
            if builder.need_rebuild():
                return True
        return False

    def _save_build_state(self, status, message=''):
        # Persist a progress snapshot so the UI can poll the build status.
        last_build = {
            'message': message,
            'date': dt.datetime.now(),
            'status': status,
            'builds': self.builds_logs,
        }
        mc.set(LAST_BUILD_MC_KEY % self.project_id, last_build)

    def build_all(self):
        """Run every configured builder in order; returns 'disabled' if off."""
        self.builds_logs = []
        self._save_build_state('started')
        if not self.enabled:
            self._save_build_state('disabled', self.disabled_reason)
            return 'disabled'
        builder_name = ''
        for builder_name in self.builders:
            self._build_one(builder_name)

    def _build_one(self, builder_name):
        """prepare -> build -> move_to_permdir for one builder, with cleanup."""
        debug("Starting full build for %s" % builder_name)
        self._save_build_state('exported')
        builder = None
        try:
            success = True
            builder = self.get_builder(builder_name)
            ok, status = builder.prepare()
            if not ok:
                self._save_build_state(status)
                return
            ok, build_status = builder.build()
            self.builds_logs.append(build_status)
            self._save_build_state('building')
            if not ok:
                success = False
                self._save_build_state('FAILED')
                warn("Warning! Unable to build %s" % builder_name)
            else:
                self._save_build_state('building')
            if success:
                builder.move_to_permdir()
                self._save_build_state('success')
            else:
                self._save_build_state('FAILED')
        finally:
            # Always drop the temporary checkout, even on failure.
            if builder:
                builder.clean_up()

    def get_builder(self, builder_name=None):
        """Instantiate the builder class configured for `builder_name`
        (the first/default builder when None is given)."""
        if not builder_name:
            builder_name = self.builders[0]  # First is default
        assert builder_name in self.builders
        # Resolve the builder *type* from config; missing entries default.
        if builder_name not in self.conf_new:
            builder_type = DEFAULT_BUILDER
        elif 'builder' not in self.conf_new[builder_name]:
            builder_type = DEFAULT_BUILDER
        else:
            builder_type = self.conf_new[builder_name]['builder']
        assert builder_type in ENABLED_BUILDERS, \
            "builder type %s unknown" % builder_type
        builders_map = {
            'pickle': SphinxDocBuilderPickle,
            'raw': DocBuilderRaw,
            'html': SphinxDocBuilderDefault,
        }
        assert set(builders_map.keys()) == set(ENABLED_BUILDERS)
        bc = builders_map[builder_type]
        retval = bc(builder_name, self.project_id)
        return retval

    def get_url_from_path(self, path):
        """Map a repo-relative docs path to its public (quoted) URL.

        NOTE(review): `bd` is only bound when some builder dir prefixes the
        path, and the loop does not break (last match wins); a path that
        matches no builder raises NameError -- confirm callers guarantee a
        match.
        """
        for builder_name in self.builders:
            builder = self.get_builder(builder_name)
            if path.startswith("%s/" % builder.dir):
                path = path.replace(builder.dir, "", 1)
                bd = builder
        return quote(bd.get_url_from_path(path))
def _search(build_path, q):
    """Query the sphinx search index; return a list of (filename, title)."""
    index = _unpickle_global(build_path, "searchindex.pickle")
    matching = _find_tokens(index['terms'].keys(), q)
    results = []
    for token in matching:
        refs = index['terms'][token]
        # Sphinx stores a single hit as a bare int rather than a list.
        if isinstance(refs, int):
            refs = [refs]
        for ref in refs:
            results.append((index['filenames'][ref], index['titles'][ref]))
    return results
class AbstractDocBuilder(object):
    """Base class for doc builders: checkout, build dirs, rebuild tracking.

    Subclasses implement build()/template_data()/raw_content(); this class
    handles exporting the project's git tree to a temp dir, moving the
    finished build into the permanent docs dir, and memcache bookkeeping.
    """
    needed_form_vars = []       # form fields template_data() expects
    template = False            # template name, or False for raw serving
    slash_urls = False
    static_prefixes = []        # URL prefixes served as static files
    redirects = {}
    masterdoc = 'index'

    def __init__(self, builder, project_id):
        self.builder = builder
        self.project_id = project_id
        prj = CodeDoubanProject.get(project_id)
        self.docs_dir = os.path.join(get_repo_root(),
                                     prj.name + DOC_EXT, builder)
        self.config = _builder_conf(project_id, builder)
        self.dir = self.config['dir']
        self.temp_dir = None        # set by prepare()
        self.temp_dir_root = None   # set by prepare()

    def file_is_static(self, path):
        """True when `path` should be served directly from disk."""
        return any(path.startswith(_) for _ in self.static_prefixes)

    def build(self):
        raise NotImplementedError()

    def template_data(self, path, form_vars):
        raise NotImplementedError()

    def raw_content(self, path, form_vars):
        raise NotImplementedError()

    def file_content(self, path):
        """Return the content of a built file, or False when it is missing."""
        fp = os.path.join(self._path(), path)
        if not os.path.exists(fp):
            debug("Path do not exist: %s" % fp)
            return False
        with open(fp) as f:
            content = f.read()
        return content

    def _path(self, tmp=False):
        """Build output dir -- temporary during build, permanent otherwise."""
        if tmp:
            assert self.temp_dir
            path = os.path.join(self.temp_dir, '.build', self.builder)
        else:
            path = os.path.join(self.docs_dir, '.build', self.builder)
        return path

    def prepare(self):
        """Export the docs tree into a fresh temp dir; return (ok, status)."""
        self.temp_dir_root = tempfile.mkdtemp(
            prefix='sphinx_docs_', dir=get_tmpdir())
        if self.config['checkout_root']:
            tree_hash = _tree_hash(self.project_id, '')
        else:
            tree_hash = _tree_hash(self.project_id, self.dir)
        if not tree_hash or not self.has_content():
            warn("No docs directory in project repo at HEAD for builder %s" %
                 self.builder)
            return False, 'no_doc_dir_found'
        # Remember what we built so need_rebuild() can compare later.
        mc.set(LAST_TREE_HASH_MC_KEY % (self.project_id,
                                        self.builder), tree_hash)
        if os.path.exists(self.docs_dir):
            shutil.rmtree(self.docs_dir, ignore_errors=True)
        try:
            os.makedirs(self.docs_dir)
        except OSError:
            pass
        ret, msg = _export_docs_tree(
            self.project_id, tree_hash, self.temp_dir_root)
        if not ret:
            return False, msg
        if self.config['checkout_root']:
            self.temp_dir = os.path.join(self.temp_dir_root, self.dir)
        else:
            self.temp_dir = self.temp_dir_root
        return True, 'success'

    def has_content(self):
        """True when the configured docs dir exists in the repo at HEAD."""
        return bool(_tree_hash(self.project_id, self.dir))

    def move_to_permdir(self):
        debug('Moving to permdir: %s -> %s' % (self.temp_dir, self.docs_dir))
        _move_to_dir(self.temp_dir, self.docs_dir)

    def clean_up(self):
        """Drop the temp checkout and invalidate the cached build markers."""
        if self.temp_dir_root and os.path.isdir(self.temp_dir_root):
            shutil.rmtree(self.temp_dir_root)
        # BUG FIX: the key templates were previously passed to mc.set
        # without interpolation, so the real per-project/per-builder cache
        # entries were never cleared (and a useless literal-template key
        # was written instead).
        mc.set(LAST_TREE_HASH_MC_KEY % (self.project_id, self.builder), None)
        mc.set(LAST_BUILD_MC_KEY % self.project_id, None)

    def need_rebuild(self):
        """True when the docs tree changed since the last recorded build."""
        last_tree_hash = mc.get(
            LAST_TREE_HASH_MC_KEY % (self.project_id, self.builder))
        if self.config['checkout_root']:
            current_tree_hash = _tree_hash(self.project_id, '')
        else:
            current_tree_hash = _tree_hash(self.project_id, self.dir)
        if not last_tree_hash:
            return True
        return last_tree_hash != current_tree_hash

    def with_comment(self):
        # Comments are enabled unless the project config disables them.
        return self.config.get('with_comment', True)
class SphinxDocBuilder(AbstractDocBuilder):
    """Shared logic for builders that shell out to sphinx-build."""
    # URL prefixes served directly from disk, bypassing templates.
    static_prefixes = ['_images', '_sources', '_static', '_downloads']

    def build(self):
        """Run sphinx-build as a subprocess; return (success, status_dict)."""
        debug('Starting SphinxDocBuilder build')
        cmd = self._make_cmd()
        # TODO lock this dir!
        debug('Building sphinx: %s' % ' '.join(cmd))
        process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        debug('Process Popened')
        out, err = process.communicate()
        debug('Process communicated')
        returncode = process.returncode
        status = {
            'builder': self.builder,
            'out': out,
            'error': err,
            'returncode': returncode,
            'command': cmd,
        }
        if returncode != 0:
            debug("Error building SphinxDocBuilder %s" % err)
        return returncode == 0, status

    def _make_cmd(self):
        """Assemble the sphinx-build command line from config plus defaults."""
        assert self.temp_dir
        sphinx_builder = self.config.get('builder', self.builder)  # noqa TODO rm self.builder when no more old config
        assert sphinx_builder in [
            'pickle', 'html'], "sphinx builder %s unknown" % sphinx_builder
        options = []
        prj = CodeDoubanProject.get(self.project_id)
        default_options = {
            'master_doc': self.masterdoc,
            'source_suffix': '.rst',
            'html_short_title': prj.name,
        }
        options_dict = default_options.copy()
        if self.config:
            # Per-project config values win over the defaults above.
            options_dict.update(self.config)
        for k, v in options_dict.items():
            options.extend(['-D', '%s=%s' % (k, v)])
        cmd = [
            'sphinx-build',
            '-a',  # -- write all files; default is to only write new and changed files
            '-E',  # -- don't use a saved environment, always read all files
            '-b', sphinx_builder,  # -- builder to use
            '-D', 'project=%s' % prj.name,
            # -- override a setting in configuration
        ]
        cmd += options
        # -- path for the cached environment and doctree files
        cmd += ['-d', os.path.join(self.temp_dir, SPHINX_BUILD_DOCTREES)]
        cmd += ['-q']
        if self._has_sphinx_conf():
            # -- path where configuration file (conf.py) is located
            cmd += ['-c', self.temp_dir]
        else:
            cmd += ['-C']  # No conf.py file
        cmd += [self.temp_dir, self._path(tmp=True)]
        return cmd

    def _has_sphinx_conf(self):
        """True when the checked-out docs tree ships its own conf.py."""
        sphinx_conf = os.path.join(self.temp_dir, 'conf.py')
        return os.path.exists(sphinx_conf)
class SphinxDocBuilderPickle(SphinxDocBuilder):
    """Sphinx 'pickle' builder: pages rendered through our own template."""
    template = 'sphinx_docs.html'
    needed_form_vars = ['q']  # search query consumed by template_data()
    slash_urls = True
    redirects = {'index': ''}

    def get_url_from_path(self, path):
        # 'builder/<doc>.<ext>' -> 'docs/<builder>/<doc>/'
        t = path.partition('/')[2]
        t = t.rpartition('.')[0]
        return 'docs/%s/%s/' % (self.builder, t)

    def template_data(self, path, form_vars):
        """Unpickle the page data; the 'search' page gets results attached."""
        tdt = _unpickled(self._path(), path)
        if path == 'search':
            q = form_vars['q']
            tdt['searchresult'] = _search(self._path(), q)
        return tdt
class SphinxDocBuilderDefault(SphinxDocBuilder):
    """Sphinx 'html' builder: serves the generated HTML files directly."""
    redirects = {'': 'index.html'}

    def raw_content(self, path, form_vars):
        """Return the built file's content, or False when it does not exist.

        NOTE(review): duplicates AbstractDocBuilder.file_content almost
        verbatim -- could delegate; kept as-is here.
        """
        fp = os.path.join(self._path(), path)
        if not os.path.exists(fp):
            debug("Path do not exist: %s" % fp)
            return False
        with open(fp) as f:
            content = f.read()
        return content

    def get_url_from_path(self, path):
        # 'builder/<doc>.<ext>' -> 'docs/<builder>/<doc>.html'
        t = path.partition('/')[2]
        t = t.rpartition('.')[0]
        return "docs/%s/%s.html" % (self.builder, t)
class DocBuilderRaw(AbstractDocBuilder):
    """Builder that copies the docs tree verbatim (no sphinx run)."""
    redirects = {'': 'index.html'}

    def build(self):
        """Copy the checked-out tree into the .build target dir."""
        debug('Starting DocBuilderRaw build')
        target_dir = self._path(tmp=True)
        # BUG FIX: ignore_patterns takes *patterns as separate string
        # arguments; the original passed a tuple (('.build',)), which makes
        # the ignore callable blow up inside fnmatch during copytree.
        ignore = shutil.ignore_patterns('.build')
        debug("Copying tree from %s to %s" % (self.temp_dir, target_dir))
        shutil.copytree(self.temp_dir, target_dir, ignore=ignore)
        status = {
            'builder': self.builder,
            'out': 'tree copied to .build/raw',
            'error': '',
            'returncode': None,
            'command': ['shutil.copytree(%s, %s)' % (
                self.temp_dir, target_dir)],
        }
        return True, status

    def file_is_static(self, path):
        # Raw builds have no templates; everything is served as-is.
        return True

    def raw_content(self, path, form_vars):
        """Return the file content under the build dir, or False if missing.

        NOTE(review): the file is deleted after the first read (os.remove
        below), so each raw doc can only be served once -- confirm this is
        intentional; the sibling html builder does not do this.
        """
        fp = os.path.join(self._path(), path)
        if not os.path.exists(fp):
            debug("Path do not exist: %s" % fp)
            return False
        with open(fp) as f:
            content = f.read()
        os.remove(fp)
        return content

    def get_url_from_path(self, path):
        # 'builder/<rest>' -> 'docs/<builder>/<rest>' (extension preserved)
        t = path.partition('/')[2]
        return "docs/%s/%s" % (self.builder, t)
def _find_tokens(keys, key):
key = key.lower().strip()
return [k for k in keys if k.startswith(key)]
def _unpickled(build_path, path):
    """Load the sphinx global context plus the pickled page at `path`.

    Returns a template-data dict; on failure the dict carries an 'error'
    message rather than raising.
    """
    gc = _unpickle_global(build_path, "globalcontext.pickle")
    if not gc:
        return {'error': 'Unable to find global context'}
    target = path or gc['master_doc']
    page = _unpickle_target(build_path, target, gc)
    if not page:
        # Hack: maybe `target` names a directory; retry with its index page.
        page = _unpickle_target(build_path, target + '/' + gc['master_doc'], gc)
    if not page:
        return {'error': 'Unable to find pickle', 'gc': gc}
    page['gc'] = gc
    page['error'] = False
    return page
def _unpickle_global(build_path, fn):
fp = os.path.join(build_path, fn)
if not os.path.exists(fp):
return False
with open(fp) as f:
p = pickle.load(f)
return p
def _unpickle_target(build_path, path, gc):
fp = os.path.join(build_path, path)
fp += gc['file_suffix']
if not os.path.exists(fp):
return False
with open(fp) as f:
p = pickle.load(f)
return p
| bsd-3-clause |
jinjin123/devops2.0 | devops/ops/views/assets/assets.py | 1 | 15231 | #!/usr/bin/env python
# _#_ coding:utf-8 _*_
import os,sys
from django.http import JsonResponse
from django.shortcuts import render
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from ops.models import *
from ops.views.ssh_settings import keyfile_dir
from django.db.models import Count
from ops.views.asb_model.ansibleApi import ANSRunner
from ops.views.tasks.tasks import recordAssets
from django.contrib.auth.decorators import permission_required
def getBaseAssets():
    """Collect the base asset dropdown data for the templates.

    Every lookup is best-effort: a failing query contributes an empty list
    instead of breaking the page.  Replaces six copy-pasted bare
    try/except blocks with one helper and narrows the bare excepts.
    """
    def _all_or_empty(model):
        try:
            return model.objects.all()
        except Exception:
            return []
    try:
        keyList = os.listdir(keyfile_dir)
    except Exception:
        keyList = []
    return {
        "business": _all_or_empty(Business_Assets),
        "service": _all_or_empty(Service_Assets),
        "idc": _all_or_empty(Idc_Assets),
        "line": _all_or_empty(Line_Assets),
        "raid": _all_or_empty(Raid_Assets),
        "keyfile": keyList,
    }
@login_required()
def assets_config(request):
    """Render the base-assets configuration page."""
    head_img = 'media/' + str(request.user.head_img)
    return render(request, 'assets_config.html',
                  {"user": request.user, "head": head_img,
                   "baseAssets": getBaseAssets()})
@login_required()
def assets_add(request):
    """Render the asset creation form (GET requests only)."""
    head_img = 'media/' + str(request.user.head_img)
    if request.method == "GET":
        return render(request, 'assets_add.html',
                      {"user": request.user, "head": head_img,
                       "baseAssets": getBaseAssets()})
@login_required()
def assets_list(request):
    """Render the asset list page with per-status and per-type counts."""
    head_img = 'media/' + str(request.user.head_img)
    assetsList = Assets.objects.all().order_by("-id")
    context = {
        "user": request.user,
        "head": head_img,
        "totalAssets": assetsList.count(),
        "assetOnline": Assets.objects.filter(status=0).count(),
        "assetOffline": Assets.objects.filter(status=1).count(),
        "assetMaintain": Assets.objects.filter(status=2).count(),
        "baseAssets": getBaseAssets(),
        "assetsList": assetsList,
        "assetsNumber": Assets.objects.values('assets_type').annotate(
            dcount=Count('assets_type')),
    }
    return render(request, 'assets_list.html', context)
@login_required()
@permission_required('ops.can_read_assets', login_url='/noperm/')
def assets_view(request, aid):
    """Render the read-only detail page for the asset with id *aid*.

    Server assets additionally show RAM/disk inventories and host info;
    other (network) assets show their network-asset record. Unknown ids
    render 404.html; missing related records render a reduced page.
    """
    head_img = 'http://' + request.META['HTTP_HOST'] + '/media/' + str(request.user.head_img)
    try:
        asset = Assets.objects.get(id=aid)
    except:
        return render(request, '404.html', {"user": request.user})
    bare_context = {"user": request.user, "head": head_img}
    if asset.assets_type == 'server':
        try:
            ram_rows = asset.ram_assets_set.all()
        except:
            ram_rows = []
        try:
            disk_rows = asset.disk_assets_set.all()
        except:
            disk_rows = []
        try:
            host_body = asset.hostinfo
        except:
            # Server without host info: render the bare view page.
            return render(request, 'assets_view.html', bare_context)
        full_context = dict(bare_context)
        full_context.update({
            "asset_type": asset.assets_type,
            "asset_main": asset,
            "asset_body": host_body,
            "asset_ram": ram_rows,
            "asset_disk": disk_rows,
            "baseAssets": getBaseAssets(),
        })
        return render(request, 'assets_view.html', full_context)
    try:
        net_body = asset.network_assets
    except:
        return render(request, 'assets_view.html', bare_context)
    full_context = dict(bare_context)
    full_context.update({
        "asset_type": asset.assets_type,
        "asset_main": asset,
        "asset_body": net_body,
        "baseAssets": getBaseAssets(),
    })
    return render(request, 'assets_view.html', full_context)
@login_required()
def assets_modf(request, aid):
    """Render the edit page for the asset with id *aid*.

    Server assets include RAM/disk inventories and host info in the
    context; network assets include their network-asset record. Unknown
    ids or missing related records render a reduced page (or 404.html).
    """
    img = 'http://' + request.META['HTTP_HOST'] + '/media/' + str(request.user.head_img)
    try:
        assets = Assets.objects.get(id=aid)
    except Exception:
        # Bug fix: render() requires the request as its first argument;
        # the original call passed the template name in its place.
        return render(request, 'assets_modf.html', {"user": request.user, "head": img})
    if assets.assets_type == 'server':
        try:
            asset_ram = assets.ram_assets_set.all()
        except Exception:
            asset_ram = []
        try:
            asset_disk = assets.disk_assets_set.all()
        except Exception:
            asset_disk = []
        try:
            asset_body = assets.hostinfo
        except Exception:
            return render(request, '404.html', {"user": request.user, "head": img})
        return render(request, 'assets_modf.html', {"user": request.user, "head": img,
                                                    "asset_type": assets.assets_type,
                                                    "asset_main": assets, "asset_body": asset_body,
                                                    "asset_ram": asset_ram, "asset_disk": asset_disk,
                                                    "assets_data": getBaseAssets()},
                      )
    else:
        try:
            asset_body = assets.network_assets
        except Exception:
            return render(request, 'assets_modf.html', {"user": request.user, "head": img})
        return render(request, 'assets_modf.html', {"user": request.user, "head": img,
                                                    "asset_type": assets.assets_type,
                                                    "asset_main": assets, "asset_body": asset_body,
                                                    "assets_data": getBaseAssets()},
                      )
@login_required()
def assets_facts(request, args=None):
    """Refresh a server's CMDB records via Ansible and report JSON status.

    POST parameters:
        server_id -- primary key of the ``HostInfo`` row to refresh.
        type      -- 'setup' pulls base system facts (CPU, RAM, disk totals,
                     kernel, ...); 'crawHw' pulls the per-slot RAM and disk
                     hardware inventory.

    Requires the ``ops.change_server_assets`` permission. Every outcome is
    returned as a JsonResponse carrying a ``msg`` text and a ``code``.
    """
    if request.method == "POST" and request.user.has_perm('ops.change_server_assets'):
        server_id = request.POST.get('server_id')
        genre = request.POST.get('type')
        if genre == 'setup':
            try:
                server_assets = HostInfo.objects.get(id=request.POST.get('server_id'))
                # Key-based login ("秘钥方式") omits credentials from the
                # Ansible resource; password login passes them inline.
                # NOTE(review): this branch reads login_type/user/pwd while
                # the 'crawHw' branch reads keyfile/username/passwd — confirm
                # which HostInfo field names are correct.
                if server_assets.login_type == "秘钥方式":
                    resource = [{"hostname": server_assets.ip, "port": int(server_assets.port)}]
                else:
                    resource = [
                        {"hostname": server_assets.ip, "port": server_assets.port, "username": server_assets.user,
                         "password": server_assets.pwd}]
            except Exception as e:
                return JsonResponse({'msg': "数据更新失败-查询不到该主机资料~", "code": 502})
            # Run the Ansible 'setup' module against the host and normalize
            # the returned facts for the CMDB.
            ANS = ANSRunner(resource)
            ANS.run_model(host_list=[server_assets.ip], module_name='setup', module_args="")
            data = ANS.handle_cmdb_data(ANS.get_model_result())
            if data:
                for ds in data:
                    status = ds.get('status')
                    # status == 0 means the host was reachable and returned facts.
                    if status == 0:
                        # Update the asset master record (serial/model/vendor).
                        try:
                            Assets.objects.filter(id=server_assets.assets_id).update(sn=ds.get('serial'),
                                                                                     model=ds.get('model'),
                                                                                     manufacturer=ds.get(
                                                                                         'manufacturer'))
                        except Exception as e:
                            return JsonResponse({'msg': "数据更新失败-查询不到该主机的资产信息", "code": 403})
                        # Update the host-level facts and log the change asynchronously.
                        try:
                            HostInfo.objects.filter(id=server_id).update(cpu_number=ds.get('cpu_number'),
                                                                         kernel=ds.get('kernel'),
                                                                         selinux=ds.get('selinux'),
                                                                         hostname=ds.get('hostname'),
                                                                         system=ds.get('system'),
                                                                         cpu=ds.get('cpu'),
                                                                         disk_total=ds.get('disk_total'),
                                                                         cpu_core=ds.get('cpu_core'),
                                                                         swap=ds.get('swap'),
                                                                         ram_total=ds.get('ram_total'),
                                                                         vcpu_number=ds.get('vcpu_number')
                                                                         )
                            recordAssets.delay(user=str(request.user),
                                               content="修改服务器资产:{ip}".format(ip=server_assets.ip), type="server",
                                               id=server_assets.id)
                        except Exception as e:
                            print(e)
                            return JsonResponse({'msg': "数据更新失败-写入数据失败", "code": 400})
                    else:
                        # Non-zero status: the host could not be reached.
                        return JsonResponse({'msg': "数据更新失败-无法链接主机~", "code": 502})
                return JsonResponse({'msg': "数据更新成功", "code": 200})
            else:
                return JsonResponse({'msg': "数据更新失败-请检查Ansible配置", "code": 400})
        elif genre == 'crawHw':
            try:
                server_assets = HostInfo.objects.get(id=server_id)
                assets = Assets.objects.get(id=server_assets.assets_id)
                # keyfile == 1 means key-based login: no inline credentials.
                if server_assets.keyfile == 1:
                    resource = [{"hostname": server_assets.ip, "port": int(server_assets.port)}]
                else:
                    resource = [
                        {"hostname": server_assets.ip, "port": server_assets.port, "username": server_assets.username,
                         "password": server_assets.passwd}]
            except Exception as e:
                return JsonResponse({'msg': "数据更新失败-查询不到该主机资料~", "code": 502})
            # Run the custom 'crawHw' module to collect RAM/disk hardware data.
            ANS = ANSRunner(resource)
            ANS.run_model(host_list=[server_assets.ip], module_name='crawHw', module_args="")
            data = ANS.handle_cmdb_crawHw_data(ANS.get_model_result())
            if data:
                for ds in data:
                    # Upsert one Ram_Assets row per reported memory slot.
                    if ds.get('mem_info'):
                        for mem in ds.get('mem_info'):
                            if Ram_Assets.objects.filter(assets=assets, device_slot=mem.get('slot')).count() > 0:
                                try:
                                    Ram_Assets.objects.filter(assets=assets, device_slot=mem.get('slot')).update(
                                        device_slot=mem.get('slot'), device_model=mem.get('serial'),
                                        device_brand=mem.get('manufacturer'), device_volume=mem.get('size'),
                                        device_status="Online"
                                    )
                                except Exception as e:
                                    return JsonResponse({'msg': "数据更新失败-写入数据失败", "code": 400})
                            else:
                                try:
                                    Ram_Assets.objects.create(device_slot=mem.get('slot'),
                                                              device_model=mem.get('serial'),
                                                              device_brand=mem.get('manufacturer'),
                                                              device_volume=mem.get('size'),
                                                              device_status="Online", assets=assets
                                                              )
                                    recordAssets.delay(user=str(request.user),
                                                       content="修改服务器资产:{ip}".format(ip=server_assets.ip),
                                                       type="server", id=server_assets.id)
                                except Exception as e:
                                    return JsonResponse({'msg': "数据更新失败-写入数据失败", "code": 400})
                    # Upsert one Disk_Assets row per reported disk slot.
                    if ds.get('disk_info'):
                        for disk in ds.get('disk_info'):
                            if Disk_Assets.objects.filter(assets=assets, device_slot=disk.get('slot')).count() > 0:
                                try:
                                    Disk_Assets.objects.filter(assets=assets, device_slot=disk.get('slot')).update(
                                        device_serial=disk.get('serial'), device_model=disk.get('model'),
                                        device_brand=disk.get('manufacturer'), device_volume=disk.get('size'),
                                        device_status="Online"
                                    )
                                except Exception as e:
                                    return JsonResponse({'msg': "数据更新失败-写入数据失败", "code": 400})
                            else:
                                try:
                                    Disk_Assets.objects.create(device_serial=disk.get('serial'),
                                                               device_model=disk.get('model'),
                                                               device_brand=disk.get('manufacturer'),
                                                               device_volume=disk.get('size'),
                                                               device_status="Online", assets=assets,
                                                               device_slot=disk.get('slot')
                                                               )
                                    recordAssets.delay(user=str(request.user),
                                                       content="修改服务器资产:{ip}".format(ip=server_assets.ip),
                                                       type="server", id=server_assets.id)
                                except Exception as e:
                                    return JsonResponse({'msg': "数据更新失败-写入数据失败", "code": 400})
                return JsonResponse({'msg': "数据更新成功", "code": 200})
            else:
                return JsonResponse({'msg': "数据更新失败,系统可能不支持,未能获取数据", "code": 400})
    else:
        return JsonResponse({'msg': "您没有该项操作的权限~", "code": 400})
| mit |
LoLab-VU/pymc | pymc/tests/test_step.py | 12 | 2958 | from .checks import *
from .models import simple_model, mv_simple, mv_simple_discrete, simple_2model
from theano.tensor import constant
from scipy.stats.mstats import moment
def check_stat(name, trace, var, stat, value, bound):
    """Assert *stat* of trace[var] (first 2000 burn-in draws dropped) is within *bound* of *value*."""
    samples = trace[var][2000:]
    observed = stat(samples, axis=0)
    close_to(observed, value, bound)
def test_step_continuous():
    """Each continuous sampler should recover the mean/std of a simple MV normal."""
    start, model, (mu, C) = mv_simple()
    with model:
        # Construct samplers in the original order (they register with the model).
        mh = pm.Metropolis()
        slicer = pm.Slice()
        hmc = pm.HamiltonianMC(scaling=C, is_cov=True, blocked=False)
        nuts = pm.NUTS(scaling=C, is_cov=True, blocked=False)
        mh_blocked = pm.Metropolis(S=C,
                                   proposal_dist=pm.MultivariateNormalProposal,
                                   blocked=True)
        slicer_blocked = pm.Slice(blocked=True)
        hmc_blocked = pm.HamiltonianMC(scaling=C, is_cov=True)
        nuts_blocked = pm.NUTS(scaling=C, is_cov=True)
        compound = pm.CompoundStep([hmc_blocked, mh_blocked])
    steppers = [slicer, hmc, nuts, mh_blocked, hmc_blocked,
                slicer_blocked, nuts_blocked, compound]
    sd = np.diag(C) ** .5
    expectations = [('x', np.mean, mu, sd / 10.),
                    ('x', np.std, sd, sd / 10.)]
    for stepper in steppers:
        trace = sample(8000, stepper, start, model=model, random_seed=1)
        for (var, stat, val, bound) in expectations:
            yield check_stat, repr(stepper), trace, var, stat, val, bound
def test_non_blocked():
    """Samplers default to non-blocked CompoundStep; blocked=True gives the plain stepper."""
    start, model = simple_2model()
    with model:
        # Metropolis and Slice step one variable at a time by default, which
        # shows up as a CompoundStep wrapper; the same holds for HMC/NUTS
        # when blocked=False is passed explicitly.
        assert isinstance(pm.Metropolis(), pm.CompoundStep)
        assert isinstance(pm.Slice(), pm.CompoundStep)
        assert isinstance(pm.HamiltonianMC(blocked=False), pm.CompoundStep)
        assert isinstance(pm.NUTS(blocked=False), pm.CompoundStep)
        # With blocked=True (the HMC/NUTS default) the plain stepper is returned.
        mh_blocked = pm.Metropolis(blocked=True)
        assert isinstance(mh_blocked, pm.Metropolis)
        slicer_blocked = pm.Slice(blocked=True)
        assert isinstance(slicer_blocked, pm.Slice)
        hmc_blocked = pm.HamiltonianMC()
        assert isinstance(hmc_blocked, pm.HamiltonianMC)
        nuts_blocked = pm.NUTS()
        assert isinstance(nuts_blocked, pm.NUTS)
        # Blocked steppers can still be combined into a compound step.
        pm.CompoundStep([hmc_blocked, mh_blocked])
def test_step_discrete():
    """Metropolis with an MV-normal proposal should recover a discrete MV target."""
    start, model, (mu, C) = mv_simple_discrete()
    with model:
        stepper = pm.Metropolis(S=C,
                                proposal_dist=pm.MultivariateNormalProposal)
        slicer = pm.Slice()
    sd = np.diag(C) ** .5
    expectations = [('x', np.mean, mu, sd / 10.),
                    ('x', np.std, sd, sd / 10.)]
    trace = sample(20000, stepper, start, model=model, random_seed=1)
    for (var, stat, val, bound) in expectations:
        yield check_stat, repr(stepper), trace, var, stat, val, bound
| apache-2.0 |
ashhher3/ibis | docs/source/conf.py | 9 | 8595 | # -*- coding: utf-8 -*-
#
# Ibis documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 10 11:06:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the project's bundled Sphinx extensions (numpydoc helpers, IPython
# directives) importable before extension loading.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosummary',
    'numpydoc',
    'ipython_sphinxext.ipython_directive',
    'ipython_sphinxext.ipython_console_highlighting',
]
# Generate autosummary stub pages for every top-level .rst source.
autosummary_generate = glob.glob("*.rst")
# autosummary_generate = True
import numpydoc
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ibis'
copyright = u'2015, Cloudera, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.2'
# The displayed version is taken directly from the installed package.
from ibis import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ibisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Ibis.tex', u'Ibis Documentation',
     u'Cloudera, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ibis', u'Ibis Documentation',
     [u'Cloudera, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Ibis', u'Ibis Documentation',
     u'Cloudera, Inc.', 'Ibis', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
stefanv/aandete | app/lib/pygments/lexers/foxpro.py | 31 | 26236 | # -*- coding: utf-8 -*-
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
FoxPro syntax allows to shorten all keywords and function names
to 4 characters. Shortened forms are not recognized by this lexer.
.. versionadded:: 1.6
"""
name = 'FoxPro'
aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
filenames = ['*.PRG', '*.prg']
mimetype = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
# Square brackets may be used for array indices
# and for string literal. Look for arrays
# before matching string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
(r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
(r'(^\s*\*|&&|&&).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}
| bsd-3-clause |
petewarden/tensorflow | tensorflow/python/keras/saving/saved_model/network_serialization.py | 4 | 1287 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing to Network SavedModel serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import model_serialization
# FunctionalModel serialization is pretty much the same as Model serialization.
class NetworkSavedModelSaver(model_serialization.ModelSavedModelSaver):
  """SavedModel saver for Functional/Network models.

  Behaves exactly like the base ``ModelSavedModelSaver``; the only
  specialization is the object identifier recorded in the SavedModel
  metadata, which tags the object as a network rather than a plain model.
  """

  @property
  def object_identifier(self):
    # Distinguish networks from generic models in the serialized metadata.
    return constants.NETWORK_IDENTIFIER
| apache-2.0 |
progagtudo/matemate | run.py | 1 | 1059 | import flask
import flask_restless
import flask_sqlalchemy

# Create the Flask application and the Flask-SQLAlchemy object.
app = flask.Flask(__name__)
app.config['DEBUG'] = True  # NOTE(review): debug mode enabled unconditionally — disable for production.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'  # NOTE(review): world-readable /tmp path; presumably dev-only — confirm.
app.config['SQLALCHEMY_ECHO'] = True  # Log every emitted SQL statement.
db = flask_sqlalchemy.SQLAlchemy(app)

# Create your Flask-SQLAlchemy models as usual but with the following
# restriction: they must have an __init__ method that accepts keyword
# arguments for all columns (the constructor in
# flask_sqlalchemy.SQLAlchemy.Model supplies such a method, so you
# don't need to declare a new one).
# Imported for their side effect of registering model classes with `db`;
# must happen after `db` exists and before create_all().
import model.cart.cart_product, model.permission.available_right

# Create the database tables.
db.create_all()

# Create the Flask-Restless API manager.
manager = flask_restless.APIManager(app, flask_sqlalchemy_db=db)

# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
# manager.create_api(Person, methods=['GET', 'POST', 'DELETE'])
# NOTE(review): no create_api() call is active, so the manager currently
# exposes no endpoints — confirm whether this is intentional.

# start the flask loop
app.run()
llhe/tensorflow | tensorflow/python/kernel_tests/sparse_ops_test.py | 4 | 36785 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Builds a SparseTensor from the entries of `x` at or above `thresh`.

  Args:
    x: Dense numpy array. The argument is NOT modified; thresholding is
      performed on an internal copy (the previous in-place zeroing of the
      caller's array was an unintended side effect).
    thresh: Entries strictly below this value are treated as zero and
      dropped from the sparse representation.
    index_dtype: Numpy dtype used for the coordinate matrix.

  Returns:
    A `(sparse_tensor.SparseTensor, nnz)` tuple, where `nnz` is the number
    of retained (nonzero) entries.
  """
  # Work on a copy so callers' arrays are left untouched.
  x = np.array(x, copy=True)
  x[x < thresh] = 0

  non_zero = np.where(x)
  x_indices = np.vstack(non_zero).astype(index_dtype).T
  x_values = x[non_zero]
  x_shape = x.shape

  return sparse_tensor.SparseTensor(
      indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_to_indicator.

  The sparse values double as column ids in the boolean indicator output.
  """

  def _SparseTensor_5x6(self, dtype):
    # 5x6 fixture with six populated cells; rows 2 and 4 are empty.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x3x4(self, dtype):
    # Includes two entries with the form [1, 1, x] : 150.
    ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
                    [1, 1, 1], [1, 1, 2], [1, 2, 2]])
    val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
    shape = np.array([2, 3, 4])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))

  def testInt32(self):
    # Each (row, value) pair becomes a True at [row, value] in a 5x50 grid.
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x6(dtypes.int32)
      output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()

      expected_output = np.zeros((5, 50), dtype=np.bool)
      expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
      for expected_true in expected_trues:
        expected_output[expected_true] = True

      self.assertAllEqual(output, expected_output)

  def testInt64(self):
    # Same expectations as testInt32, exercising int64 values.
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x6(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()

      expected_output = np.zeros((5, 50), dtype=np.bool)
      expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True

      self.assertAllEqual(output, expected_output)

  def testHigherRank(self):
    # Rank-3 input: the last dimension is replaced by the vocab axis, and
    # the duplicate value 150 at [1, 1, :] collapses to a single True.
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_2x3x4(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()

      expected_output = np.zeros((2, 3, 200), dtype=np.bool)
      expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
                        (1, 1, 149), (1, 1, 150), (1, 2, 122)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True

      self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with a single id tensor."""

  def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices = np.array([0, 13, 10, 33, 32, 14])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    # Both SparseTensorValues share the same coordinates: `indices` carries
    # the vocabulary ids, `values` the weights merged at those ids.
    indices = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return indices, values

  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    # Same fixture as above, wrapped as graph-mode SparseTensors.
    indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
    return (sparse_tensor.SparseTensor.from_value(indices),
            sparse_tensor.SparseTensor.from_value(values))

  def _AssertResultsSorted(self, output, vocab_size):
    # Expected output when sparse_merge canonically sorts the result.
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])

  def _AssertResultsNotSorted(self, output, vocab_size):
    # Expected output when already_sorted=True preserves the input order.
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
    self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])

  def testInt32AndFloat32(self):
    vocab_size = 50
    indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
    with self.test_session(use_gpu=False) as sess:
      # Exercise all four SparseTensorValue / SparseTensor combinations.
      for indices in (indices_v,
                      sparse_tensor.SparseTensor.from_value(indices_v)):
        for values in (values_v,
                       sparse_tensor.SparseTensor.from_value(values_v)):
          sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

          output = sess.run(sp_output)
          self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat32(self):
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64(self):
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt32AndFloat32NonCanonicalOrder(self):
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int32, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)

      output = sess.run(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testInt64AndFloat32NonCanonicalOrder(self):
    vocab_size = 50
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)

      output = sess.run(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testInt64AndFloat64NonCanonicalOrder(self):
    vocab_size = 50
    # Also checks that vocab_size may be passed as a Tensor.
    vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size_tensor, already_sorted=True)

      output = sess.run(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with a *list* of id tensors.

  Passing two id tensors produces a result with two vocabulary axes.
  """

  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices0 = np.array([0, 13, 10, 33, 32, 14])
    indices1 = np.array([12, 4, 0, 0, 1, 30])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    # indices0/indices1 carry the ids along the two vocab dimensions.
    indices0 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices0, indices_dtype), np.array(shape, np.int64))
    indices1 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices1, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return ([sparse_tensor.SparseTensor.from_value(indices0),
             sparse_tensor.SparseTensor.from_value(indices1)],
            sparse_tensor.SparseTensor.from_value(values))

  def _AssertResultsSorted(self, output, vocab_size):
    # Expected canonically-sorted output; `vocab_size` is a 2-element list.
    self.assertAllEqual(
        output.indices,
        [[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
         [2, 33, 0]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3] + vocab_size)

  def testInt64AndFloat32(self):
    vocab_size = [50, 31]
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64(self):
    vocab_size = [50, 31]
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64Shape(self):
    # Different second vocab dimension than the tests above.
    vocab_size = [50, 30]
    with self.test_session(use_gpu=False) as sess:
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

      output = sess.run(sp_output)
      self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_retain."""

  def _SparseTensorValue_5x6(self):
    # 5x6 fixture with six populated cells.
    coords = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    entries = np.array([0, 10, 13, 14, 32, 33])
    dims = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(coords, np.int64),
        np.array(entries, np.int32), np.array(dims, np.int64))

  def _SparseTensor_5x6(self):
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def testBasic(self):
    # Retaining entries 0, 3 and 4 keeps exactly their coords and values;
    # both SparseTensorValue and SparseTensor inputs are accepted.
    with self.test_session(use_gpu=False) as sess:
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        keep_flags = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
        sp_output = sparse_ops.sparse_retain(sp_input, keep_flags)

        result = sess.run(sp_output)

        self.assertAllEqual(result.indices, [[0, 0], [1, 4], [3, 2]])
        self.assertAllEqual(result.values, [0, 14, 32])
        self.assertAllEqual(result.dense_shape, [5, 6])

  def testRetainNone(self):
    # An all-False mask empties the tensor but preserves its dense shape.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_5x6()
      keep_flags = np.zeros((6,), dtype=np.bool)
      sp_output = sparse_ops.sparse_retain(sp_input, keep_flags)

      result = sess.run(sp_output)

      self.assertAllEqual(result.indices, np.array([]).reshape((0, 2)))
      self.assertAllEqual(result.values, [])
      self.assertAllEqual(result.dense_shape, [5, 6])

  def testMismatchedRetainShape(self):
    # A mask whose length differs from nnz is rejected at graph-build time.
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_5x6()
      keep_flags = np.array([1, 0, 0, 1, 0], dtype=np.bool)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_retain(sp_input, keep_flags)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_reset_shape."""

  # Shared 2x5x6 fixture: coordinates, values, and dense shape.
  _IND_2_5_6 = np.array(
      [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
      dtype=np.int64)
  _VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
  _SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)

  def _SparseTensor_2x5x6(self):
    return sparse_tensor.SparseTensor(
        constant_op.constant(self._IND_2_5_6, dtypes.int64),
        constant_op.constant(self._VAL_2_5_6, dtypes.int32),
        constant_op.constant(self._SHP_2_5_6, dtypes.int64))

  def _SparseTensorValue_2x5x6(self):
    return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
                                           self._SHP_2_5_6)

  def testStaticShapeInfoPreservedWhenNewShapeIsProvidedAndStatic(self):
    # A statically-known new_shape should show up in get_shape() without
    # running the graph.
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 6, 7], dtype=np.int64)
    sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    self.assertAllEqual([3, 6, 7], sp_output.get_shape())

  def testBasic(self):
    # Growing the shape keeps indices and values unchanged.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      output = sess.run(sp_output)

      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testInputUnavailableInGraphConstructionOk(self):
    # A SparseTensorValue (no graph tensor) is accepted as input.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorValue_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      output = sess.run(sp_output)

      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testFeedInputUnavailableInGraphConstructionOk(self):
    # Same as above, but the input arrives via a sparse_placeholder feed.
    with self.test_session(use_gpu=False) as sess:
      sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      output = sess.run(sp_output,
                        feed_dict={sp_input: self._SparseTensorValue_2x5x6()})

      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testTightBoundingBox(self):
    # With no new_shape, the result shrinks to the max index + 1 per axis.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      sp_output = sparse_ops.sparse_reset_shape(sp_input)

      output = sess.run(sp_output)

      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [2, 4, 5])

  def testInvalidRank(self):
    # A new_shape of different rank is rejected at graph-build time.
    with self.test_session(use_gpu=False):
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 7], dtype=np.int64)

      with self.assertRaises(ValueError):
        sparse_ops.sparse_reset_shape(sp_input, new_shape)

  def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
    # When new_shape is fed at runtime, the rank check happens in the op.
    with self.test_session(use_gpu=False) as sess:
      new_shape = array_ops.placeholder(dtype=dtypes.int64)
      sp_input = self._SparseTensor_2x5x6()
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      with self.assertRaisesOpError("x == y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})

  def testInvalidDimensionSizeStatic(self):
    # A statically-known shape smaller than the bounding box is rejected.
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 7, 5], dtype=np.int64)

    with self.assertRaisesRegexp(ValueError, "should have dimension sizes"):
      sparse_ops.sparse_reset_shape(sp_input, new_shape)

  def testInvalidDimensionSizeDynamic(self):
    # The same too-small shape fed at runtime fails inside the op.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = array_ops.placeholder(dtype=dtypes.int32)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: [3, 7, 5]})

  def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
    # Too-small shape with a fed sparse input also fails inside the op.
    sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
    with self.test_session(use_gpu=False) as sess:
      new_shape = np.array([3, 7, 5], dtype=np.int64)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)

      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_fill_empty_rows."""

  def _SparseTensorValue_5x6(self, dtype=np.int32):
    # 5x6 fixture in which rows 2 and 4 are empty.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64), np.array(val, dtype), np.array(
            shape, np.int64))

  def _SparseTensor_5x6(self):
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def _SparseTensor_String5x6(self):
    # String-valued variant of the 5x6 fixture (rows 2 and 4 empty).
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array(["a", "b", "c", "d", "e", "f"])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.string),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x6(self):
    # 2x6 fixture with no empty rows.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
    val = np.array([0, 10, 13, 14])
    shape = np.array([2, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.int32),
        constant_op.constant(shape, dtypes.int64))

  def testFillNumber(self):
    # Empty rows 2 and 4 get a single entry at column 0 holding -1, and the
    # indicator marks exactly those rows.
    with self.test_session(use_gpu=False) as sess:
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        sp_output, empty_row_indicator = (
            sparse_ops.sparse_fill_empty_rows(sp_input, -1))

        output, empty_row_indicator_out = sess.run(
            [sp_output, empty_row_indicator])

        self.assertAllEqual(
            output.indices,
            [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
        self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
        self.assertAllEqual(output.dense_shape, [5, 6])
        self.assertAllEqual(empty_row_indicator_out,
                            np.array([0, 0, 1, 0, 1]).astype(np.bool))

  def testFillFloat(self):
    # Float fill, plus gradient checks through both the values and the
    # default_value inputs.
    with self.test_session(use_gpu=False) as sess:
      values = constant_op.constant(
          [0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
      default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
      sp_input = sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
          values=values,
          dense_shape=np.array([5, 6]))
      sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
          sp_input, default_value))
      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
                                           [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(np.bool))

      # Gradients should be nonzero but numerically tiny at delta=1e-8.
      values_grad_err = gradient_checker.compute_gradient_error(
          values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
      self.assertGreater(values_grad_err, 0)
      self.assertLess(values_grad_err, 1e-8)

      default_value_grad_err = gradient_checker.compute_gradient_error(
          default_value,
          default_value.shape.as_list(),
          sp_output.values, [8],
          delta=1e-8)
      self.assertGreater(default_value_grad_err, 0)
      self.assertLess(default_value_grad_err, 1e-8)

  def testFillString(self):
    # String tensors are filled with the empty string.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_String5x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, ""))

      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(
          output.indices,
          [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllEqual(output.values,
                          [b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(np.bool))

  def testNoEmptyRows(self):
    # With no empty rows the op is an identity and the indicator is all
    # False.
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = sess.run(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
      self.assertAllEqual(output.values, [0, 10, 13, 14])
      self.assertAllEqual(output.dense_shape, [2, 6])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
class SparseReduceSumTest(test_util.TensorFlowTestCase):
  """Tests sparse_reduce_sum / sparse_reduce_sum_sparse against np.sum."""

  # [[1, ?, 1]
  #  [?, 1, ?]]
  # where ? is implicitly-zero.
  ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
  vals = np.array([1, 1, 1]).astype(np.int32)
  dense_shape = np.array([2, 3]).astype(np.int64)

  def _compare(self, sp_t, reduction_axes, ndims, keep_dims):
    """Compare dense and sparse TF reductions against a numpy reference."""
    densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()

    np_ans = densified
    if reduction_axes is None:
      np_ans = np.sum(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      # Reduce the highest axes first so the remaining axis numbers
      # stay valid while iterating.
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)

    with self.test_session():
      tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
                                                  keep_dims)
      out_dense = tf_dense_ans.eval()

      tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t, reduction_axes,
                                                          keep_dims)
      # Convert to dense for comparison purposes.
      out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()

      self.assertAllClose(np_ans, out_dense)
      self.assertAllClose(np_ans, out_sparse)

  def _compare_all(self, sp_t, reduction_axes, ndims):
    """Exercise both keep_dims settings."""
    self._compare(sp_t, reduction_axes, ndims, False)
    self._compare(sp_t, reduction_axes, ndims, True)

  @unittest.skipIf(np.__version__ == "1.13.0", "numpy 1.13 bug")
  def testSimpleAndRandomInputs(self):
    # NOTE: the skipIf decorator above already handles the numpy 1.13.0
    # bug; the former in-body self.skipTest() duplicate was removed.
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)

    with self.test_session(use_gpu=False):
      self._compare_all(sp_t, None, ndims=2)
      self._compare_all(sp_t, 0, ndims=2)
      self._compare_all(sp_t, [1], ndims=2)
      self._compare_all(sp_t, [0, 1], ndims=2)
      self._compare_all(sp_t, [1, 0], ndims=2)
      self._compare_all(sp_t, [-1], ndims=2)
      self._compare_all(sp_t, [1, -2], ndims=2)

    np.random.seed(1618)
    test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
    with self.test_session(use_gpu=False):
      for dims in test_dims:
        sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
        # reduce all using None
        self._compare_all(sp_t, None, ndims=len(dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          self._compare_all(sp_t, axes, ndims=len(dims))

  def testInvalidAxes(self):
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with self.test_session(use_gpu=False):
      # Valid axes for a rank-2 input are [-2, 1].
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        sparse_ops.sparse_reduce_sum(sp_t, 2).eval()

  @unittest.skipIf(np.__version__ == "1.13.0", "numpy 1.13 bug")
  def testGradient(self):
    # NOTE: the skipIf decorator above already handles the numpy 1.13.0
    # bug; the former in-body self.skipTest() duplicate was removed.
    np.random.seed(8161)
    test_dims = [(11, 1, 5, 7, 1), (2, 2)]
    with self.test_session(use_gpu=False):
      for dims in test_dims:
        sp_t, nnz = _sparsify(np.random.randn(*dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)

          err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                        reduced,
                                                        reduced.eval().shape)
          self.assertLess(err, 1e-3)

        # Tests for negative axes.
        reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
        err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                      reduced,
                                                      reduced.eval().shape)
        self.assertLess(err, 1e-3)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
  """Tests for coefficient-wise sparse/dense arithmetic ops."""

  def _check(self, result_tensor, result_np, input_sp_t):
    # Cwise ops must return a SparseTensor whose sparsity structure
    # (indices and dense_shape) is identical to the input's.
    self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
    self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
    self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
    self.assertAllEqual(input_sp_t.dense_shape.eval(),
                        result_tensor.dense_shape.eval())

    # Densify the sparse result and compare against the numpy reference.
    res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
                                               result_tensor.dense_shape,
                                               result_tensor.values).eval()
    self.assertAllEqual(result_np, res_densified)

  def testCwiseDivAndMul(self):
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    # Paired dense shapes broadcast against the sparse shapes above.
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64, np.int32, np.int64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          # "+ 1" keeps every value nonzero so division is well defined.
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
          sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
          dense_t = constant_op.constant(dense_vals_np)

          self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
          # Check commutative.
          self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
          self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)

          if dtype in [np.int32, np.int64]:
            res = sp_t / dense_t  # should invoke "__truediv__"
            self.assertEqual(res.values.eval().dtype, np.float64)

  def testCwiseAdd(self):
    with self.test_session(use_gpu=False):
      # Identity(2) + AllOnes(2,2).  Should be equal to 2 * Identity(2).
      indices = [[0, 0], [1, 1]]
      vals = [1, 1]
      shape = (2, 2)

      sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
      dense_t = array_ops.ones(shape, dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)

      # Variant of above, but broadcasts the dense side.
      dense_t = array_ops.ones([1], dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)

  def testGradients(self):
    # Numeric gradient checks for cwise mul and div, for both operands.
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          # Keep values away from zero to avoid unstable div gradients.
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
          dense_t = constant_op.constant(dense_vals_np)

          cmul = sp_t * dense_t
          err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
                                                        [(nnz,), dense_shape],
                                                        cmul.values, (nnz,))
          self.assertLess(err, 1e-4)

          cdiv = sp_t / dense_t
          err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                        cdiv.values, (nnz,))
          self.assertLess(err, 1e-4)
          err = gradient_checker.compute_gradient_error(
              dense_t,
              dense_shape,
              cdiv.values, (nnz,),
              x_init_value=dense_vals_np)
          self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
  """Tests for sparse_softmax."""

  def testEquivalentToDensified(self):
    # With thresh=0 nothing is masked out, so softmax over the sparse
    # tensor must match softmax over the equivalent dense tensor.
    np.random.seed(1618)
    n, m = np.random.choice(20, size=2)
    for dtype in [np.float32, np.float64]:
      sp_vals_np = np.random.rand(n, m).astype(dtype)

      batched_sp_t, unused_nnz1 = _sparsify(
          sp_vals_np.reshape((1, n, m)), thresh=0.)  # No masking.

      with self.test_session(use_gpu=False):
        densified = constant_op.constant(sp_vals_np)

        sp_result = sparse_ops.sparse_softmax(batched_sp_t).eval(
        ).values.reshape((n, m))
        dense_result = nn_ops.softmax(densified)
        self.assertAllClose(dense_result.eval(), sp_result)

  def testHigherRanks(self):
    # For the first shape:
    # First batch:
    #   [?   e.]
    #   [1.  ? ]
    # Second batch:
    #   [e   ? ]
    #   [e   e ]
    #
    # The softmax results should be:
    #   [?   1.]     [1    ?]
    #   [1.  ? ] and [.5  .5]
    # where ? means implicitly zero.
    #
    # The second shape: same input data, but with a higher-rank shape.
    shapes = [[2, 2, 2], [2, 1, 2, 2]]
    for shape in shapes:
      values = np.asarray(
          [0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
      # thresh=1e-2 masks out exactly the zero entries above.
      sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
      expected_values = [1., 1., 1., .5, .5]

      with self.test_session(use_gpu=False):
        result = sparse_ops.sparse_softmax(sp_t).eval()

        self.assertAllEqual(expected_values, result.values)
        # Sparsity structure must be preserved.
        self.assertAllEqual(sp_t.indices.eval(), result.indices)
        self.assertAllEqual(shape, result.dense_shape)

  def testGradient(self):
    # Numeric gradient check on the nonzero values only.
    x_shape = [2, 5, 10]
    with self.test_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        x_np = np.random.randn(*x_shape).astype(dtype)
        x_tf, nnz = _sparsify(x_np)
        y_tf = sparse_ops.sparse_softmax(x_tf)
        err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
                                                      y_tf.values, (nnz,))
        self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
  """Tests for sparse_minimum / sparse_maximum."""

  def _assertSparseTensorValueEqual(self, a, b):
    # Compare two SparseTensorValues field by field.
    self.assertAllEqual(a.indices, b.indices)
    self.assertAllEqual(a.values, b.values)
    self.assertAllEqual(a.dense_shape, b.dense_shape)

  def testBasic(self):
    with self.test_session(use_gpu=False):
      # 1-D, values at index 0.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
      max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
      min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one).eval()
      self._assertSparseTensorValueEqual(sp_one.eval(), max_tf)
      self._assertSparseTensorValueEqual(sp_zero.eval(), min_tf)

      # Values at different indices.  The result carries the union of the
      # two index sets; implicit zeros compete with the explicit zeros.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
      expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
      max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2).eval()
      min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2).eval()
      self._assertSparseTensorValueEqual(expected.eval(), max_tf)
      self._assertSparseTensorValueEqual(expected.eval(), min_tf)

  def testRandom(self):
    # Randomized comparison against np.maximum/np.minimum on the
    # densified operands, across shapes and dtypes.
    np.random.seed(1618)
    shapes = [(13,), (6, 8), (1, 7, 1)]
    for shape in shapes:
      for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
        a_np = np.random.randn(*shape).astype(dtype)
        b_np = np.random.randn(*shape).astype(dtype)
        sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
        sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)

        with self.test_session(use_gpu=False):
          maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
          maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              maximum_tf).eval()
          minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
          minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              minimum_tf).eval()

          a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
          b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()

          self.assertAllEqual(
              np.maximum(a_densified, b_densified), maximum_tf_densified)
          self.assertAllEqual(
              np.minimum(a_densified, b_densified), minimum_tf_densified)

  def testMismatchedShapes(self):
    with self.test_session(use_gpu=False):
      # Different ranks must be rejected.
      sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands do not have the same ranks"):
        sparse_ops.sparse_maximum(sp_zero, sp_one).eval()

      # Same rank but different shapes must also be rejected.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands' shapes do not match"):
        sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
class SparseTransposeTest(test.TestCase):
  """Tests sparse_transpose against dense array_ops.transpose."""

  @unittest.skipIf(np.__version__ == "1.13.0", "numpy 1.13 bug")
  def testTranspose(self):
    # The skipIf decorator replaces the former in-body version check,
    # matching the style used by the other tests in this file.
    with self.test_session(use_gpu=False):
      np.random.seed(1618)
      # Random shapes for ranks 1 through 5.
      shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
      for shape in shapes:
        for dtype in [np.int32, np.int64, np.float32, np.float64]:
          dn_input = np.random.randn(*shape).astype(dtype)
          rank = array_ops.rank(dn_input).eval()
          # Random permutation of the axes.
          perm = np.random.choice(rank, rank, False)
          sp_input, unused_a_nnz = _sparsify(dn_input)
          sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
          dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans).eval()
          expected_trans = array_ops.transpose(dn_input, perm=perm).eval()
          self.assertAllEqual(dn_trans, expected_trans)
if __name__ == "__main__":
  # Run every test case in this file under the TensorFlow test runner.
  googletest.main()
| apache-2.0 |
jiajie999/zerorpc-python | tests/testutils.py | 18 | 2046 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2015 François-Xavier Bourlet (bombela+zerorpc@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import nose.exc
import random
import os
# Socket files handed out by random_ipc_endpoint(); removed by teardown().
_tmpfiles = []


def random_ipc_endpoint():
    """Return a fresh ipc:// endpoint backed by a random /tmp socket path.

    The underlying file path is remembered in ``_tmpfiles`` so that
    ``teardown()`` can unlink it later.
    """
    # Digits after "0." of a random float make a cheap unique suffix.
    suffix = str(random.random())[2:]
    tmpfile = '/tmp/zerorpc_test_socket_{0}.sock'.format(suffix)
    _tmpfiles.append(tmpfile)
    return 'ipc://{0}'.format(tmpfile)
def teardown():
    # Remove every ipc socket file created by random_ipc_endpoint()
    # and reset the registry.  (Python 2 print statement: this module
    # targets Python 2.)
    global _tmpfiles
    for tmpfile in _tmpfiles:
        print 'unlink', tmpfile
        try:
            os.unlink(tmpfile)
        except Exception:
            # Best effort: the socket file may never have been created.
            pass
    _tmpfiles = []
def skip(reason):
    """Decorator factory: make a test raise nose's SkipTest with *reason*."""
    def decorator(test):
        @functools.wraps(test)
        def skipper():
            # Replaces the test body entirely; the original never runs.
            raise nose.exc.SkipTest(reason)
        return skipper
    return decorator
# Scale factor for test timeouts, taken from the ZPC_TEST_TIME_FACTOR
# environment variable.  float(None) raises TypeError when the variable
# is unset; a malformed value raises ValueError (previously uncaught,
# crashing the import of this module).  Fall back to 1.0 in both cases.
try:
    TIME_FACTOR = float(os.environ.get('ZPC_TEST_TIME_FACTOR'))
    # print() with a pre-formatted string behaves identically on
    # Python 2 and 3.
    print('ZPC_TEST_TIME_FACTOR: {0}'.format(TIME_FACTOR))
except (TypeError, ValueError):
    TIME_FACTOR = 1.0
| mit |
mobify/apns-client | apnsclient/backends/dummy.py | 18 | 4969 | # Copyright 2014 Sardar Yumatov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from struct import pack
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from . import BaseBackend, BaseConnection
from ..certificate import BaseCertificate
# python 3 support
import six
# module level logger
LOG = logging.getLogger(__name__)
class Certificate(BaseCertificate):
    """Dummy certificate that performs no real cryptography."""

    def load_context(self, cert_string=None, cert_file=None,
                     key_string=None, key_file=None, passphrase=None):
        """Return an empty (context, certificate) pair; nothing is parsed."""
        return None, None

    def dump_certificate(self, raw_certificate):
        """Return fixed contents.  All dummy certificates are equal."""
        return "CERTIFICATE"

    def dump_digest(self, raw_certificate, digest):
        """Return a dummy digest.  All dummy certificates are equal."""
        return self.dump_certificate(raw_certificate)
class Backend(BaseBackend):
    """ Dummy backend designed for testing without performing real IO.
        Serves as an example for your custom backends.
    """
    # simulate stdio behavior
    can_detect_close = False

    def __init__(self, push=None, feedback=None, **options):
        """ Create new backend.

            Exactly one of ``push`` or ``feedback`` must be provided.

            :Arguments:
                - push (list): list of status codes to return while sending messages.
                - feedback (int): number of tokens to generate in the feedback stream.

            :Raises:
                - ValueError: if neither or both of push/feedback are given.
        """
        super(Backend, self).__init__(**options)
        # Explicit check instead of the former ``assert`` so validation
        # survives ``python -O`` and raises a conventional exception type.
        if (push is not None) == (feedback is not None):
            raise ValueError("Push results or feedback stream must be provided")
        self.push_results = push
        self.push_result_pos = -1
        self.feedback_results = feedback
        self.new_connections = 0

    def get_new_connection(self, address, certificate, timeout=None):
        """ Open a new connection.

            :Arguments:
                - address (tuple): target (host, port).
                - certificate (:class:`Certificate`): certificate instance.
                - timeout (float): connection timeout in seconds
        """
        self.new_connections += 1
        # Advancing the position here makes each new connection serve the
        # next preconfigured push result.
        self.push_result_pos += 1
        return Connection(self, address, certificate)

    def get_certificate(self, cert_params):
        """ Create/load certificate from parameters. """
        return Certificate(**cert_params)

    def create_lock(self):
        """ Provides semaphore with ``threading.Lock`` interface. """
        return _threading.Lock()
class Connection(BaseConnection):
    """Dummy connection that replays preconfigured responses."""

    def __init__(self, pool, address, certificate):
        """Create a new dummy connection.

        :Arguments:
            - pool (:class:`Backend`): dummy backend.
            - address (tuple): target host and port.
            - certificate (:class:`Certificate`): provider certificate.
        """
        super(Connection, self).__init__(address, certificate)
        self.pool = pool
        self._closed = False

    def closed(self):
        """True once :func:`close` has been explicitly called."""
        return self._closed

    def close(self):
        """Mark this connection as closed."""
        self._closed = True

    def reset(self):
        """No-op: the dummy connection advances via the backend instead."""
        pass

    def write(self, data, timeout):
        """Always succeeds unless the connection is closed."""
        if self.closed():
            raise IOError("Connection closed")

    def peek(self, size):
        """Always None: the dummy never fails prematurely."""
        return None

    def read(self, size, timeout):
        """Replay the preconfigured push status or feedback records."""
        if self.closed():
            return None

        push_results = self.pool.push_results
        if push_results is not None:
            # Push mode: cycle through the configured status codes.
            status = push_results[self.pool.push_result_pos % len(push_results)]
            if status is None:
                return None
            return pack(">BBI", 8, status, 0)

        # Feedback mode: emit the configured number of token records,
        # then close the stream, mimicking APNs feedback behavior.
        records = []
        for idx in range(0, self.pool.feedback_results):
            token = six.b("test_{0}".format(idx))
            records.append(pack(">IH{0}s".format(len(token)),
                                int(time.time()), len(token), token))
        self.close()
        return six.binary_type().join(records)
| apache-2.0 |
porcobosso/spark-ec2 | lib/boto-2.34.0/boto/iam/connection.py | 12 | 57215 | # Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import boto.jsonresponse
from boto.compat import json, six
from boto.resultset import ResultSet
from boto.iam.summarymap import SummaryMap
from boto.connection import AWSQueryConnection
# Trust (assume-role) policy documents keyed by AWS partition domain.
# 'default' is the global partition; 'amazonaws.com.cn' is the China
# partition, where the EC2 service principal carries a different suffix.
DEFAULT_POLICY_DOCUMENTS = {
    'default': {
        'Statement': [
            {
                'Principal': {
                    'Service': ['ec2.amazonaws.com']
                },
                'Effect': 'Allow',
                'Action': ['sts:AssumeRole']
            }
        ]
    },
    'amazonaws.com.cn': {
        'Statement': [
            {
                'Principal': {
                    'Service': ['ec2.amazonaws.com.cn']
                },
                'Effect': 'Allow',
                'Action': ['sts:AssumeRole']
            }
        ]
    },
}

# For backward-compatibility, we'll preserve this here.
ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default'])
class IAMConnection(AWSQueryConnection):
APIVersion = '2010-05-08'
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
                 debug=0, https_connection_factory=None, path='/',
                 security_token=None, validate_certs=True, profile_name=None):
        """Create a connection to the AWS IAM service.

        All arguments are forwarded unchanged to
        :class:`AWSQueryConnection`; this class only fixes the default
        endpoint host (``iam.amazonaws.com``).
        """
        super(IAMConnection, self).__init__(aws_access_key_id,
                                            aws_secret_access_key,
                                            is_secure, port, proxy,
                                            proxy_port, proxy_user, proxy_pass,
                                            host, debug, https_connection_factory,
                                            path, security_token,
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)
    def _required_auth_capability(self):
        # IAM requests are signed with AWS Signature Version 4.
        return ['hmac-v4']
def get_response(self, action, params, path='/', parent=None,
verb='POST', list_marker='Set'):
"""
Utility method to handle calls to IAM and parsing of responses.
"""
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if response.status == 200:
if body:
e = boto.jsonresponse.Element(list_marker=list_marker,
pythonize_name=True)
h = boto.jsonresponse.XmlHandler(e, parent)
h.parse(body)
return e
else:
# Support empty responses, e.g. deleting a SAML provider
# according to the official documentation.
return {}
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
#
# Group methods
#
def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
"""
List the groups that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only groups whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {}
if path_prefix:
params['PathPrefix'] = path_prefix
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroups', params,
list_marker='Groups')
def get_group(self, group_name, marker=None, max_items=None):
"""
Return a list of users that are in the specified group.
:type group_name: string
:param group_name: The name of the group whose information should
be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('GetGroup', params, list_marker='Users')
def create_group(self, group_name, path='/'):
"""
Create a group.
:type group_name: string
:param group_name: The name of the new group
:type path: string
:param path: The path to the group (Optional). Defaults to /.
"""
params = {'GroupName': group_name,
'Path': path}
return self.get_response('CreateGroup', params)
def delete_group(self, group_name):
"""
Delete a group. The group must not contain any Users or
have any attached policies
:type group_name: string
:param group_name: The name of the group to delete.
"""
params = {'GroupName': group_name}
return self.get_response('DeleteGroup', params)
def update_group(self, group_name, new_group_name=None, new_path=None):
"""
Updates name and/or path of the specified group.
:type group_name: string
:param group_name: The name of the new group
:type new_group_name: string
:param new_group_name: If provided, the name of the group will be
changed to this name.
:type new_path: string
:param new_path: If provided, the path of the group will be
changed to this path.
"""
params = {'GroupName': group_name}
if new_group_name:
params['NewGroupName'] = new_group_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateGroup', params)
def add_user_to_group(self, group_name, user_name):
"""
Add a user to a group
:type group_name: string
:param group_name: The name of the group
:type user_name: string
:param user_name: The to be added to the group.
"""
params = {'GroupName': group_name,
'UserName': user_name}
return self.get_response('AddUserToGroup', params)
def remove_user_from_group(self, group_name, user_name):
"""
Remove a user from a group.
:type group_name: string
:param group_name: The name of the group
:type user_name: string
:param user_name: The user to remove from the group.
"""
params = {'GroupName': group_name,
'UserName': user_name}
return self.get_response('RemoveUserFromGroup', params)
def put_group_policy(self, group_name, policy_name, policy_json):
"""
Adds or updates the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
:type policy_json: string
:param policy_json: The policy document.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutGroupPolicy', params, verb='POST')
def get_all_group_policies(self, group_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'GroupName': group_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupPolicies', params,
list_marker='PolicyNames')
def get_group_policy(self, group_name, policy_name):
"""
Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name}
return self.get_response('GetGroupPolicy', params, verb='POST')
def delete_group_policy(self, group_name, policy_name):
"""
Deletes the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to delete.
"""
params = {'GroupName': group_name,
'PolicyName': policy_name}
return self.get_response('DeleteGroupPolicy', params, verb='POST')
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
"""
List the users that have the specified path prefix.
:type path_prefix: string
:param path_prefix: If provided, only users whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'PathPrefix': path_prefix}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUsers', params, list_marker='Users')
#
# User methods
#
def create_user(self, user_name, path='/'):
"""
Create a user.
:type user_name: string
:param user_name: The name of the new user
:type path: string
:param path: The path in which the user will be created.
Defaults to /.
"""
params = {'UserName': user_name,
'Path': path}
return self.get_response('CreateUser', params)
def delete_user(self, user_name):
"""
Delete a user including the user's path, GUID and ARN.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The name of the user to delete.
"""
params = {'UserName': user_name}
return self.get_response('DeleteUser', params)
def get_user(self, user_name=None):
"""
Retrieve information about the specified user.
If the user_name is not specified, the user_name is determined
implicitly based on the AWS Access Key ID used to sign the request.
:type user_name: string
:param user_name: The name of the user to retrieve.
If not specified, defaults to user making request.
"""
params = {}
if user_name:
params['UserName'] = user_name
return self.get_response('GetUser', params)
def update_user(self, user_name, new_user_name=None, new_path=None):
"""
Updates name and/or path of the specified user.
:type user_name: string
:param user_name: The name of the user
:type new_user_name: string
:param new_user_name: If provided, the username of the user will be
changed to this username.
:type new_path: string
:param new_path: If provided, the path of the user will be
changed to this path.
"""
params = {'UserName': user_name}
if new_user_name:
params['NewUserName'] = new_user_name
if new_path:
params['NewPath'] = new_path
return self.get_response('UpdateUser', params)
def get_all_user_policies(self, user_name, marker=None, max_items=None):
"""
List the names of the policies associated with the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListUserPolicies', params,
list_marker='PolicyNames')
def put_user_policy(self, user_name, policy_name, policy_json):
"""
Adds or updates the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
:type policy_json: string
:param policy_json: The policy document.
"""
params = {'UserName': user_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutUserPolicy', params, verb='POST')
def get_user_policy(self, user_name, policy_name):
"""
Retrieves the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'UserName': user_name,
'PolicyName': policy_name}
return self.get_response('GetUserPolicy', params, verb='POST')
def delete_user_policy(self, user_name, policy_name):
"""
Deletes the specified policy document for the specified user.
:type user_name: string
:param user_name: The name of the user the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to delete.
"""
params = {'UserName': user_name,
'PolicyName': policy_name}
return self.get_response('DeleteUserPolicy', params, verb='POST')
def get_groups_for_user(self, user_name, marker=None, max_items=None):
"""
List the groups that a specified user belongs to.
:type user_name: string
:param user_name: The name of the user to list groups for.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListGroupsForUser', params,
list_marker='Groups')
#
# Access Keys
#
def get_all_access_keys(self, user_name, marker=None, max_items=None):
"""
Get all access keys associated with an account.
:type user_name: string
:param user_name: The username of the user
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {'UserName': user_name}
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListAccessKeys', params,
list_marker='AccessKeyMetadata')
def create_access_key(self, user_name=None):
        """
        Create a new access key pair (AWS Access Key ID plus Secret Access
        Key) for a user.  New keys start in the Active state.  When
        ``user_name`` is omitted it is resolved by AWS from the credentials
        used to sign the request.

        :type user_name: string
        :param user_name: The name of the user.
        """
        # Note: UserName is sent even when None, matching historical
        # behavior of this client.
        return self.get_response('CreateAccessKey',
                                 {'UserName': user_name})
def update_access_key(self, access_key_id, status, user_name=None):
        """
        Toggle an access key between Active and Inactive, e.g. as part of
        a key-rotation workflow.  When ``user_name`` is omitted it is
        resolved by AWS from the signing credentials.

        :type access_key_id: string
        :param access_key_id: The ID of the access key.

        :type status: string
        :param status: Either Active or Inactive.

        :type user_name: string
        :param user_name: The name of the owning user (optional).
        """
        query = {'AccessKeyId': access_key_id, 'Status': status}
        if user_name:
            query['UserName'] = user_name
        return self.get_response('UpdateAccessKey', query)
def delete_access_key(self, access_key_id, user_name=None):
        """
        Delete an access key.  When ``user_name`` is omitted it is
        resolved by AWS from the signing credentials.

        :type access_key_id: string
        :param access_key_id: The ID of the access key to delete.

        :type user_name: string
        :param user_name: The name of the owning user (optional).
        """
        query = {'AccessKeyId': access_key_id}
        if user_name:
            query['UserName'] = user_name
        return self.get_response('DeleteAccessKey', query)
#
# Signing Certificates
#
def get_all_signing_certs(self, marker=None, max_items=None,
                          user_name=None):
        """
        List the signing certificates associated with an account.  When
        ``user_name`` is omitted it is resolved by AWS from the signing
        credentials.

        :type marker: string
        :param marker: Pagination token from a previous truncated response.

        :type max_items: int
        :param max_items: Maximum number of certificates to return per page.

        :type user_name: string
        :param user_name: The name of the user (optional).
        """
        query = {}
        for key, value in (('Marker', marker), ('MaxItems', max_items),
                           ('UserName', user_name)):
            if value:
                query[key] = value
        return self.get_response('ListSigningCertificates', query,
                                 list_marker='Certificates')
def update_signing_cert(self, cert_id, status, user_name=None):
        """
        Toggle a signing certificate between Active and Inactive.  When
        ``user_name`` is omitted it is resolved by AWS from the signing
        credentials.

        :type cert_id: string
        :param cert_id: The ID of the signing certificate.

        :type status: string
        :param status: Either Active or Inactive.

        :type user_name: string
        :param user_name: The name of the user (optional).
        """
        query = {'CertificateId': cert_id, 'Status': status}
        if user_name:
            query['UserName'] = user_name
        return self.get_response('UpdateSigningCertificate', query)
def upload_signing_cert(self, cert_body, user_name=None):
        """
        Upload an X.509 signing certificate and associate it with a user.
        When ``user_name`` is omitted it is resolved by AWS from the
        signing credentials.

        :type cert_body: string
        :param cert_body: The body of the signing certificate.

        :type user_name: string
        :param user_name: The name of the user (optional).
        """
        query = {'CertificateBody': cert_body}
        if user_name:
            query['UserName'] = user_name
        # POST avoids URL-length limits on the PEM body.
        return self.get_response('UploadSigningCertificate', query,
                                 verb='POST')
def delete_signing_cert(self, cert_id, user_name=None):
        """
        Delete a signing certificate.  When ``user_name`` is omitted it
        is resolved by AWS from the signing credentials.

        :type cert_id: string
        :param cert_id: The ID of the certificate to delete.

        :type user_name: string
        :param user_name: The name of the user (optional).
        """
        query = {'CertificateId': cert_id}
        if user_name:
            query['UserName'] = user_name
        return self.get_response('DeleteSigningCertificate', query)
#
# Server Certificates
#
def list_server_certs(self, path_prefix='/',
marker=None, max_items=None):
"""
Lists the server certificates that have the specified path prefix.
If none exist, the action returns an empty list.
:type path_prefix: string
:param path_prefix: If provided, only certificates whose paths match
the provided prefix will be returned.
:type marker: string
:param marker: Use this only when paginating results and only
in follow-up request after you've received a response
where the results are truncated. Set this to the value of
the Marker element in the response you just received.
:type max_items: int
:param max_items: Use this only when paginating results to indicate
the maximum number of groups you want in the response.
"""
params = {}
if path_prefix:
params['PathPrefix'] = path_prefix
if marker:
params['Marker'] = marker
if max_items:
params['MaxItems'] = max_items
return self.get_response('ListServerCertificates',
params,
list_marker='ServerCertificateMetadataList')
# Preserves backwards compatibility.
# TODO: Look into deprecating this eventually?
get_all_server_certs = list_server_certs
def update_server_cert(self, cert_name, new_cert_name=None,
                       new_path=None):
        """
        Rename a server certificate and/or move it to a new path.

        :type cert_name: string
        :param cert_name: The current name of the server certificate.

        :type new_cert_name: string
        :param new_cert_name: New name for the certificate, if renaming.

        :type new_path: string
        :param new_path: New path for the certificate, if moving.
        """
        query = {'ServerCertificateName': cert_name}
        if new_cert_name:
            query['NewServerCertificateName'] = new_cert_name
        if new_path:
            query['NewPath'] = new_path
        return self.get_response('UpdateServerCertificate', query)
def upload_server_cert(self, cert_name, cert_body, private_key,
                       cert_chain=None, path=None):
        """
        Upload a server certificate entity (public certificate, private
        key, and optional chain, all PEM-encoded) to the account.

        :type cert_name: string
        :param cert_name: Name for the certificate, without any path.

        :type cert_body: string
        :param cert_body: PEM-encoded public key certificate.

        :type private_key: string
        :param private_key: PEM-encoded private key.

        :type cert_chain: string
        :param cert_chain: Optional PEM-encoded certificate chain
            (concatenated intermediate certificates).

        :type path: string
        :param path: Optional path for the server certificate.
        """
        query = {'ServerCertificateName': cert_name,
                 'CertificateBody': cert_body,
                 'PrivateKey': private_key}
        if cert_chain:
            query['CertificateChain'] = cert_chain
        if path:
            query['Path'] = path
        # POST avoids URL-length limits on the PEM payloads.
        return self.get_response('UploadServerCertificate', query,
                                 verb='POST')
def get_server_certificate(self, cert_name):
        """
        Fetch information about a server certificate.

        :type cert_name: string
        :param cert_name: The name of the server certificate.
        """
        return self.get_response('GetServerCertificate',
                                 {'ServerCertificateName': cert_name})
def delete_server_cert(self, cert_name):
        """
        Delete a server certificate.

        :type cert_name: string
        :param cert_name: The name of the server certificate to delete.
        """
        return self.get_response('DeleteServerCertificate',
                                 {'ServerCertificateName': cert_name})
#
# MFA Devices
#
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
        """
        List the MFA devices associated with a user.

        :type user_name: string
        :param user_name: The name of the user.

        :type marker: string
        :param marker: Pagination token from a previous truncated response.

        :type max_items: int
        :param max_items: Maximum number of devices to return per page.
        """
        query = {'UserName': user_name}
        for key, value in (('Marker', marker), ('MaxItems', max_items)):
            if value:
                query[key] = value
        return self.get_response('ListMFADevices', query,
                                 list_marker='MFADevices')
def enable_mfa_device(self, user_name, serial_number,
                      auth_code_1, auth_code_2):
        """
        Enable an MFA device and associate it with a user.

        :type user_name: string
        :param user_name: The name of the user.

        :type serial_number: string
        :param serial_number: Serial number uniquely identifying the device.

        :type auth_code_1: string
        :param auth_code_1: An authentication code emitted by the device.

        :type auth_code_2: string
        :param auth_code_2: The next authentication code emitted by the
            device.
        """
        return self.get_response('EnableMFADevice',
                                 {'UserName': user_name,
                                  'SerialNumber': serial_number,
                                  'AuthenticationCode1': auth_code_1,
                                  'AuthenticationCode2': auth_code_2})
def deactivate_mfa_device(self, user_name, serial_number):
        """
        Deactivate an MFA device and dissociate it from its user.

        :type user_name: string
        :param user_name: The name of the user.

        :type serial_number: string
        :param serial_number: Serial number uniquely identifying the device.
        """
        return self.get_response('DeactivateMFADevice',
                                 {'UserName': user_name,
                                  'SerialNumber': serial_number})
def resync_mfa_device(self, user_name, serial_number,
                      auth_code_1, auth_code_2):
        """
        Resynchronize an MFA device with the AWS servers.

        :type user_name: string
        :param user_name: The name of the user.

        :type serial_number: string
        :param serial_number: Serial number uniquely identifying the device.

        :type auth_code_1: string
        :param auth_code_1: An authentication code emitted by the device.

        :type auth_code_2: string
        :param auth_code_2: The next authentication code emitted by the
            device.
        """
        return self.get_response('ResyncMFADevice',
                                 {'UserName': user_name,
                                  'SerialNumber': serial_number,
                                  'AuthenticationCode1': auth_code_1,
                                  'AuthenticationCode2': auth_code_2})
#
# Login Profiles
#
def get_login_profiles(self, user_name):
        """
        Fetch the login profile for a user.

        :type user_name: string
        :param user_name: The name of the user.
        """
        return self.get_response('GetLoginProfile',
                                 {'UserName': user_name})
def create_login_profile(self, user_name, password):
        """
        Create a login profile for a user, giving the user the ability to
        access AWS services and the AWS Management Console.

        :type user_name: string
        :param user_name: The name of the user.

        :type password: string
        :param password: The new password for the user.
        """
        return self.get_response('CreateLoginProfile',
                                 {'UserName': user_name,
                                  'Password': password})
def delete_login_profile(self, user_name):
        """
        Delete the login profile associated with a user.

        :type user_name: string
        :param user_name: The name of the user whose profile is deleted.
        """
        return self.get_response('DeleteLoginProfile',
                                 {'UserName': user_name})
def update_login_profile(self, user_name, password):
        """
        Reset the password on a user's login profile.

        :type user_name: string
        :param user_name: The name of the user.

        :type password: string
        :param password: The new password for the user.
        """
        return self.get_response('UpdateLoginProfile',
                                 {'UserName': user_name,
                                  'Password': password})
def create_account_alias(self, alias):
        """
        Create a new alias for the AWS account.  See http://goo.gl/ToB7G
        for more information on account id aliases.

        :type alias: string
        :param alias: The alias to attach to the account.
        """
        return self.get_response('CreateAccountAlias',
                                 {'AccountAlias': alias})
def delete_account_alias(self, alias):
        """
        Delete an alias from the AWS account.  See http://goo.gl/ToB7G
        for more information on account id aliases.

        :type alias: string
        :param alias: The alias to remove from the account.
        """
        return self.get_response('DeleteAccountAlias',
                                 {'AccountAlias': alias})
def get_account_alias(self):
        """
        Fetch the alias for the current account.

        The underlying API is ListAccountAliases, but an account can
        currently hold at most one alias.  See http://goo.gl/ToB7G for
        more information on account id aliases.
        """
        return self.get_response('ListAccountAliases', {},
                                 list_marker='AccountAliases')
def get_signin_url(self, service='ec2'):
        """
        Get the URL where IAM users can use their login profile to sign in
        to this account's console.

        :type service: string
        :param service: Default service to go to in the console.

        :raises Exception: If the account has no alias configured.
        """
        alias = self.get_account_alias()
        # ``alias`` is the parsed ListAccountAliases response (presumably a
        # boto dict-like structure with lower_cased keys -- TODO confirm
        # against get_response's parsing).
        if not alias:
            raise Exception('No alias associated with this account.  Please use iam.create_account_alias() first.')

        resp = alias.get('list_account_aliases_response', {})
        result = resp.get('list_account_aliases_result', {})
        aliases = result.get('account_aliases', [])
        if not len(aliases):
            raise Exception('No alias associated with this account.  Please use iam.create_account_alias() first.')

        # We'll just use the first one we find.
        alias = aliases[0]

        # Pick the sign-in domain based on the endpoint's partition:
        # GovCloud, China, or the default commercial partition.
        if self.host == 'iam.us-gov.amazonaws.com':
            return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
                alias,
                service
            )
        elif self.host.endswith('amazonaws.com.cn'):
            return "https://%s.signin.amazonaws.cn/console/%s" % (
                alias,
                service
            )
        else:
            return "https://%s.signin.aws.amazon.com/console/%s" % (
                alias,
                service
            )
def get_account_summary(self):
        """
        Retrieve account-level information about IAM entity usage and
        IAM quotas via the GetAccountSummary action.

        :return: The parsed response as a ``SummaryMap`` object mapping
            summary keys (e.g. Users, Groups, MFADevices) to counts.
        """
        # The previous docstring here was copy-pasted from
        # get_account_alias and described the wrong operation.
        return self.get_object('GetAccountSummary', {}, SummaryMap)
#
# IAM Roles
#
def add_role_to_instance_profile(self, instance_profile_name, role_name):
        """
        Add a role to an instance profile.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to
            update.

        :type role_name: string
        :param role_name: Name of the role to add.
        """
        query = {'InstanceProfileName': instance_profile_name,
                 'RoleName': role_name}
        return self.get_response('AddRoleToInstanceProfile', query)
def create_instance_profile(self, instance_profile_name, path=None):
        """
        Create a new instance profile.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to
            create.

        :type path: string
        :param path: Optional path for the instance profile.
        """
        query = {'InstanceProfileName': instance_profile_name}
        if path is not None:
            query['Path'] = path
        return self.get_response('CreateInstanceProfile', query)
def _build_policy(self, assume_role_policy_document=None):
        """
        Return the assume-role policy document as a JSON string.

        :type assume_role_policy_document: string or dict
        :param assume_role_policy_document: A string is returned verbatim
            (the caller is assumed to have serialized it already); a dict
            is serialized with ``json.dumps``.  When omitted, a default
            document is selected by matching ``self.host`` against the
            region-specific entries in ``DEFAULT_POLICY_DOCUMENTS``,
            falling back to the 'default' entry.
        """
        if assume_role_policy_document is not None:
            if isinstance(assume_role_policy_document, six.string_types):
                # Historically, they had to pass a string. If it's a string,
                # assume the user has already handled it.
                return assume_role_policy_document
        else:

            for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
                # BUGFIX: was ``tld is 'default'`` -- identity comparison of
                # string literals is implementation-dependent (and a
                # SyntaxWarning on modern CPython); use equality.
                if tld == 'default':
                    # Skip the default. We'll fall back to it if we don't find
                    # anything.
                    continue

                if self.host and self.host.endswith(tld):
                    assume_role_policy_document = policy
                    break

            if not assume_role_policy_document:
                assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default']

        # Dump the policy (either user-supplied ``dict`` or one of the defaults)
        return json.dumps(assume_role_policy_document)
def create_role(self, role_name, assume_role_policy_document=None, path=None):
        """
        Create a new role for the AWS account.  The supplied policy grants
        an entity (currently only EC2 instances) permission to assume the
        role, and is URL-encoded according to RFC 3986.

        :type role_name: string
        :param role_name: Name of the role to create.

        :type assume_role_policy_document: ``string`` or ``dict``
        :param assume_role_policy_document: The trust policy granting an
            entity permission to assume the role.

        :type path: string
        :param path: Optional path for the role.
        """
        # _build_policy normalizes string/dict/None input into JSON text.
        policy_json = self._build_policy(assume_role_policy_document)
        query = {'RoleName': role_name,
                 'AssumeRolePolicyDocument': policy_json}
        if path is not None:
            query['Path'] = path
        return self.get_response('CreateRole', query)
def delete_instance_profile(self, instance_profile_name):
        """
        Delete an instance profile.  The profile must not have a role
        attached.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to
            delete.
        """
        query = {'InstanceProfileName': instance_profile_name}
        return self.get_response('DeleteInstanceProfile', query)
def delete_role(self, role_name):
        """
        Delete a role.  The role must not have any policies attached.

        :type role_name: string
        :param role_name: Name of the role to delete.
        """
        return self.get_response('DeleteRole', {'RoleName': role_name})
def delete_role_policy(self, role_name, policy_name):
        """
        Delete a policy attached to a role.

        :type role_name: string
        :param role_name: Name of the role the policy belongs to.

        :type policy_name: string
        :param policy_name: Name of the policy to delete.
        """
        query = {'RoleName': role_name, 'PolicyName': policy_name}
        return self.get_response('DeleteRolePolicy', query)
def get_instance_profile(self, instance_profile_name):
        """
        Fetch information about an instance profile, including its path,
        GUID, ARN, and role.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to look
            up.
        """
        return self.get_response('GetInstanceProfile',
                                 {'InstanceProfileName': instance_profile_name})
def get_role(self, role_name):
        """
        Fetch information about a role: its path, GUID, ARN, and the
        policy granting permission to EC2 to assume it.

        :type role_name: string
        :param role_name: Name of the role to look up.
        """
        return self.get_response('GetRole', {'RoleName': role_name})
def get_role_policy(self, role_name, policy_name):
        """
        Fetch the named policy document attached to a role.

        :type role_name: string
        :param role_name: Name of the role the policy belongs to.

        :type policy_name: string
        :param policy_name: Name of the policy to fetch.
        """
        query = {'RoleName': role_name, 'PolicyName': policy_name}
        return self.get_response('GetRolePolicy', query)
def list_instance_profiles(self, path_prefix=None, marker=None,
                           max_items=None):
        """
        List the instance profiles whose paths begin with the given
        prefix; an empty list is returned when none match.

        :type path_prefix: string
        :param path_prefix: Path prefix for filtering, e.g.
            /application_abc/component_xyz/.

        :type marker: string
        :param marker: Pagination token from a previous truncated response.

        :type max_items: int
        :param max_items: Maximum number of profiles to return per page.
        """
        query = {}
        for key, value in (('PathPrefix', path_prefix), ('Marker', marker),
                           ('MaxItems', max_items)):
            if value is not None:
                query[key] = value
        return self.get_response('ListInstanceProfiles', query,
                                 list_marker='InstanceProfiles')
def list_instance_profiles_for_role(self, role_name, marker=None,
                                    max_items=None):
        """
        List the instance profiles that have the given role attached; an
        empty list is returned when none do.

        :type role_name: string
        :param role_name: The role to list instance profiles for.

        :type marker: string
        :param marker: Pagination token from a previous truncated response.

        :type max_items: int
        :param max_items: Maximum number of profiles to return per page.
        """
        query = {'RoleName': role_name}
        for key, value in (('Marker', marker), ('MaxItems', max_items)):
            if value is not None:
                query[key] = value
        return self.get_response('ListInstanceProfilesForRole', query,
                                 list_marker='InstanceProfiles')
def list_role_policies(self, role_name, marker=None, max_items=None):
        """
        List the names of the policies attached to a role; an empty list
        is returned when there are none.

        :type role_name: string
        :param role_name: The role to list policies for.

        :type marker: string
        :param marker: Pagination token from a previous truncated response.

        :type max_items: int
        :param max_items: Maximum number of policy names to return per
            page.
        """
        query = {'RoleName': role_name}
        for key, value in (('Marker', marker), ('MaxItems', max_items)):
            if value is not None:
                query[key] = value
        return self.get_response('ListRolePolicies', query,
                                 list_marker='PolicyNames')
def list_roles(self, path_prefix=None, marker=None, max_items=None):
        """
        List the roles whose paths begin with the given prefix; an empty
        list is returned when none match.

        :type path_prefix: string
        :param path_prefix: Path prefix for filtering the results.

        :type marker: string
        :param marker: Pagination token from a previous truncated response.

        :type max_items: int
        :param max_items: Maximum number of roles to return per page.
        """
        query = {}
        for key, value in (('PathPrefix', path_prefix), ('Marker', marker),
                           ('MaxItems', max_items)):
            if value is not None:
                query[key] = value
        return self.get_response('ListRoles', query, list_marker='Roles')
def put_role_policy(self, role_name, policy_name, policy_document):
        """
        Add or update a policy document attached to a role.

        :type role_name: string
        :param role_name: Name of the role to attach the policy to.

        :type policy_name: string
        :param policy_name: Name of the policy document.

        :type policy_document: string
        :param policy_document: The policy document.
        """
        query = {'RoleName': role_name,
                 'PolicyName': policy_name,
                 'PolicyDocument': policy_document}
        return self.get_response('PutRolePolicy', query)
def remove_role_from_instance_profile(self, instance_profile_name,
                                      role_name):
        """
        Remove a role from an instance profile.

        :type instance_profile_name: string
        :param instance_profile_name: Name of the instance profile to
            update.

        :type role_name: string
        :param role_name: Name of the role to remove.
        """
        query = {'InstanceProfileName': instance_profile_name,
                 'RoleName': role_name}
        return self.get_response('RemoveRoleFromInstanceProfile', query)
def update_assume_role_policy(self, role_name, policy_document):
        """
        Replace the trust policy that grants an entity permission to
        assume a role.  Currently only EC2 instances can assume roles.

        :type role_name: string
        :param role_name: Name of the role to update.

        :type policy_document: string
        :param policy_document: The new trust policy document.
        """
        query = {'RoleName': role_name,
                 'PolicyDocument': policy_document}
        return self.get_response('UpdateAssumeRolePolicy', query)
def create_saml_provider(self, saml_metadata_document, name):
        """
        Create an IAM entity describing a SAML 2.0 identity provider
        (IdP).

        The resulting provider can be used as a principal in a role's
        trust policy, establishing trust between AWS and the IdP; such a
        role can support web-based single sign-on (SSO) to the AWS
        Management Console or API access to AWS.

        This operation requires `Signature Version 4`_.  For more
        information, see `Giving Console Access Using SAML`_ and
        `Creating Temporary Security Credentials for SAML Federation`_ in
        the Using Temporary Credentials guide.

        :type saml_metadata_document: string
        :param saml_metadata_document: An XML metadata document generated
            by the IdP's identity management software.  It includes the
            issuer's name, expiration information, and keys used to
            validate SAML authentication responses (assertions) received
            from the IdP.

        :type name: string
        :param name: The name of the provider to create.
        """
        query = {'SAMLMetadataDocument': saml_metadata_document,
                 'Name': name}
        return self.get_response('CreateSAMLProvider', query)
def list_saml_providers(self):
        """
        List the SAML providers in the account.

        This operation requires `Signature Version 4`_.
        """
        return self.get_response('ListSAMLProviders', {},
                                 list_marker='SAMLProviderList')
def get_saml_provider(self, saml_provider_arn):
        """
        Return the SAML provider metadata document that was uploaded when
        the provider was created or last updated.

        This operation requires `Signature Version 4`_.

        :type saml_provider_arn: string
        :param saml_provider_arn: ARN of the SAML provider to look up.
        """
        return self.get_response('GetSAMLProvider',
                                 {'SAMLProviderArn': saml_provider_arn})
def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
        """
        Replace the metadata document of an existing SAML provider.

        This operation requires `Signature Version 4`_.

        :type saml_provider_arn: string
        :param saml_provider_arn: ARN of the SAML provider to update.

        :type saml_metadata_document: string
        :param saml_metadata_document: An XML metadata document generated
            by the IdP's identity management software, containing the
            issuer's name, expiration information, and keys used to
            validate SAML authentication responses (assertions).
        """
        query = {'SAMLMetadataDocument': saml_metadata_document,
                 'SAMLProviderArn': saml_provider_arn}
        return self.get_response('UpdateSAMLProvider', query)
def delete_saml_provider(self, saml_provider_arn):
        """
        Delete a SAML provider.

        Deleting the provider does not update roles that reference it as
        a principal in their trust policies; assuming such a role will
        fail afterwards.

        This operation requires `Signature Version 4`_.

        :type saml_provider_arn: string
        :param saml_provider_arn: ARN of the SAML provider to delete.
        """
        return self.get_response('DeleteSAMLProvider',
                                 {'SAMLProviderArn': saml_provider_arn})
#
# IAM Reports
#
def generate_credential_report(self):
        """
        Start generation of a credential report for the account.

        A new report can only be generated every 4 hours; if one has not
        been generated within the last 4 hours, ``get_credential_report``
        will fail when called.
        """
        return self.get_response('GenerateCredentialReport', {})
def get_credential_report(self):
        """
        Retrieve the account's credential report.

        Succeeds only if a report was generated in the last 4 hours; the
        report itself is a base64-encoded blob within the response.
        """
        return self.get_response('GetCredentialReport', {})
def create_virtual_mfa_device(self, path, device_name):
        """
        Create a new virtual MFA device for the AWS account.  After
        creating it, use enable-mfa-device to attach the device to an IAM
        user.

        :type path: string
        :param path: The path for the virtual MFA device.

        :type device_name: string
        :param device_name: Name of the device; combined with ``path`` it
            uniquely identifies the virtual MFA device.
        """
        query = {'Path': path,
                 'VirtualMFADeviceName': device_name}
        return self.get_response('CreateVirtualMFADevice', query)
| apache-2.0 |
pgmillon/ansible | lib/ansible/modules/cloud/amazon/aws_codepipeline.py | 3 | 11141 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_codepipeline
short_description: Create or delete AWS CodePipelines
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html)
description:
- Create or delete a CodePipeline on AWS.
version_added: "2.9"
author:
- Stefan Horning (@stefanhorning) <horning@mediapeers.com>
requirements: [ botocore, boto3 ]
options:
name:
description:
- Name of the pipeline
required: true
role_arn:
description:
- ARN of the IAM role to use when executing the pipeline
required: true
artifact_store:
description:
- Location information where artifacts are stored (on S3). Dictionary with fields type and location.
required: true
suboptions:
type:
description:
- Type of the artifacts storage (only 'S3' is currently supported).
location:
description:
- Bucket name for artifacts.
stages:
description:
- List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage.
required: true
suboptions:
name:
description:
- Name of the stage (step) in the codepipeline
actions:
description:
- List of action configurations for that stage.
version:
description:
- Version number of the pipeline. This number is automatically incremented when a pipeline is updated.
required: false
state:
description:
- Create or remove code pipeline
default: 'present'
choices: ['present', 'absent']
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
- code_pipeline:
name: my_deploy_pipeline
role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
artifact_store:
type: S3
location: my_s3_codepipline_bucket
stages:
- name: Get_source
actions:
-
name: Git_pull
actionTypeId:
category: Source
owner: ThirdParty
provider: GitHub
version: '1'
outputArtifacts:
- { name: my-app-source }
configuration:
Owner: mediapeers
Repo: my_gh_repo
PollForSourceChanges: 'true'
Branch: master
# Generate token like this:
# https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html
# GH Link: https://github.com/settings/tokens
OAuthToken: 'abc123def456'
runOrder: 1
- name: Build
actions:
-
name: CodeBuild
actionTypeId:
category: Build
owner: AWS
provider: CodeBuild
version: '1'
inputArtifacts:
- { name: my-app-source }
outputArtifacts:
- { name: my-app-build }
configuration:
# A project with that name needs to be setup on AWS CodeBuild already (use code_build module).
ProjectName: codebuild-project-name
runOrder: 1
- name: ECS_deploy
actions:
-
name: ECS_deploy
actionTypeId:
category: Deploy
owner: AWS
provider: ECS
version: '1'
inputArtifacts:
- { name: vod-api-app-build }
configuration:
# an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module)
ClusterName: ecs-cluster-name
ServiceName: ecs-cluster-service-name
FileName: imagedefinitions.json
region: us-east-1
state: present
'''
RETURN = '''
pipeline:
    description: Returns the dictionary describing the code pipeline configuration.
returned: success
type: complex
contains:
name:
description: Name of the CodePipeline
returned: always
type: string
sample: my_deploy_pipeline
role_arn:
description: ARN of the IAM role attached to the code pipeline
returned: always
type: string
sample: arn:aws:iam::123123123:role/codepipeline-service-role
artifact_store:
description: Information about where the build artifacts are stored
returned: always
type: complex
contains:
type:
description: The type of the artifacts store, such as S3
returned: always
type: string
sample: S3
location:
description: The location of the artifacts storage (s3 bucket name)
returned: always
type: string
sample: my_s3_codepipline_bucket
encryption_key:
description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key.
returned: when configured
type: string
stages:
description: List of stages configured for this pipeline
returned: always
type: list
version:
description: The version number of the pipeline. This number is auto incremented when pipeline params are changed.
returned: always
type: int
'''
import copy
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
    """Create a new CodePipeline and return the AWS API response.

    :param client: boto3 CodePipeline client.
    :param name: Name of the pipeline to create.
    :param role_arn: IAM role ARN the pipeline executes under.
    :param artifact_store: Artifact store definition (dict with type/location).
    :param stages: List of stage definitions.
    :param version: Optional pipeline version number; omitted when falsy.
    :param module: AnsibleAWSModule used to report failures via fail_json.
    """
    pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
    # Truthiness check: callers pass None when no explicit version is wanted.
    if version:
        pipeline_dict['version'] = version
    try:
        resp = client.create_pipeline(pipeline=pipeline_dict)
        return resp
    except botocore.exceptions.ClientError as e:
        # Message wording fixed ("Unable create" -> "Unable to create") to
        # match the BotoCoreError branch below.
        module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
def update_pipeline(client, pipeline_dict, module):
    """Update an existing CodePipeline from ``pipeline_dict`` and return the response.

    :param client: boto3 CodePipeline client.
    :param pipeline_dict: Full pipeline definition (must include 'name').
    :param module: AnsibleAWSModule used to report failures via fail_json.
    """
    try:
        resp = client.update_pipeline(pipeline=pipeline_dict)
        return resp
    except botocore.exceptions.ClientError as e:
        # Message wording fixed ("Unable update" -> "Unable to update") to
        # match the BotoCoreError branch below.
        module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
                         exception=traceback.format_exc())
def delete_pipeline(client, name, module):
    """Delete the CodePipeline named ``name`` and return the AWS response.

    :param client: boto3 CodePipeline client.
    :param name: Name of the pipeline to delete.
    :param module: AnsibleAWSModule used to report failures via fail_json.
    """
    try:
        resp = client.delete_pipeline(name=name)
        return resp
    except botocore.exceptions.ClientError as e:
        # Message wording fixed ("Unable delete" -> "Unable to delete") to
        # match the BotoCoreError branch below.
        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
def describe_pipeline(client, name, version, module):
    """Return the pipeline description, or an empty dict if it does not exist.

    :param client: boto3 CodePipeline client.
    :param name: Name of the pipeline to look up.
    :param version: Optional explicit pipeline version to fetch.
    :param module: AnsibleAWSModule used to report failures.
    """
    pipeline = {}
    try:
        if version is None:
            pipeline = client.get_pipeline(name=name)
        else:
            pipeline = client.get_pipeline(name=name, version=version)
        return pipeline
    except is_boto3_error_code('PipelineNotFoundException'):
        # A missing pipeline is not an error for callers; signal it with {}.
        return pipeline
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def main():
    """Ansible module entry point: create, update or delete a CodePipeline.

    Reads the module parameters, reconciles the named pipeline against the
    desired ``state`` and exits via ``module.exit_json`` with ``changed``
    plus a snake_cased copy of the AWS response.
    """
    argument_spec = dict(
        name=dict(required=True, type='str'),
        role_arn=dict(required=True, type='str'),
        artifact_store=dict(required=True, type='dict'),
        stages=dict(required=True, type='list'),
        version=dict(type='int'),
        state=dict(choices=['present', 'absent'], default='present')
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)
    client_conn = module.client('codepipeline')
    state = module.params.get('state')
    changed = False
    # Determine if the CodePipeline exists
    found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
    pipeline_result = {}
    if state == 'present':
        if 'pipeline' in found_code_pipeline:
            # Pipeline exists: overlay the module params on the current
            # definition and push the result back.
            # NOTE(review): update_pipeline is called unconditionally even when
            # nothing differs, and there is no check_mode support -- confirm
            # this is intended.
            pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
            # Update dictionary with provided module params:
            pipeline_dict['roleArn'] = module.params['role_arn']
            pipeline_dict['artifactStore'] = module.params['artifact_store']
            pipeline_dict['stages'] = module.params['stages']
            if module.params['version'] is not None:
                pipeline_dict['version'] = module.params['version']
            pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
            # NOTE(review): compare_policies is designed for IAM policy
            # documents; using it to diff pipeline definitions looks dubious --
            # confirm it reliably detects pipeline changes.
            if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
                changed = True
        else:
            # Pipeline does not exist yet: create it from scratch.
            pipeline_result = create_pipeline(
                client=client_conn,
                name=module.params['name'],
                role_arn=module.params['role_arn'],
                artifact_store=module.params['artifact_store'],
                stages=module.params['stages'],
                version=module.params['version'],
                module=module)
            changed = True
    elif state == 'absent':
        # describe_pipeline returns {} (falsy) when the pipeline is absent,
        # so deletion only runs for an existing pipeline.
        if found_code_pipeline:
            pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
            changed = True
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
if __name__ == '__main__':
main()
| gpl-3.0 |
seize-the-dave/XlsxWriter | xlsxwriter/test/comparison/test_default_row01.py | 8 | 1091 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """Compare a workbook produced by XlsxWriter against one saved by Excel."""

    def setUp(self):
        self.maxDiff = None

        base_name = 'default_row01.xlsx'
        base_dir = 'xlsxwriter/test/comparison/'

        self.got_filename = base_dir + '_test_' + base_name
        self.exp_filename = base_dir + 'xlsx_files/' + base_name

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Write a workbook with a custom default row height and compare it."""

        workbook = Workbook(self.got_filename)
        sheet = workbook.add_worksheet()

        sheet.set_default_row(24)

        sheet.write('A1', 'Foo')
        sheet.write('A10', 'Bar')

        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
vcoin-project/v | qa/rpc-tests/invalidblockrequest.py | 21 | 4204 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
    '''
    Can either run this test as 1 node with expected answers, or two and
    compare them.  Change the "outcome" variable from each TestInstance
    object to only do the comparison.
    '''
    def __init__(self):
        # NOTE(review): ComparisonTestFramework.__init__ is not chained here;
        # presumably the framework tolerates that -- confirm before changing.
        self.num_nodes = 1
    def run_test(self):
        # Drive the comparison test manager over the single node's p2p link.
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        test.run()
    def get_tests(self):
        # Generator of TestInstances consumed by TestManager.  Python 2 code:
        # note xrange below and the "L" long-literal suffix trick here.
        if self.tip is None:
            self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.block_time = int(time.time())+1
        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        height = 1
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])
        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # 100 blocks: coinbase maturity on regtest.
        test = TestInstance(sync_every_block=False)
        for i in xrange(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
            height += 1
        yield test
        '''
        Now we use merkle-root malleability to generate an invalid block with
        same blockheader.
        Manufacture a block with 3 transactions (coinbase, spend of prior
        coinbase, spend of that spend).  Duplicate the 3rd transaction to
        leave merkle root and blockheader unchanged but invalidate the block.
        '''
        block2 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        # chr(81) is OP_TRUE
        tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50*100000000)
        tx2 = create_transaction(tx1, 0, chr(81), 50*100000000)
        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)
        # Mutate block 2
        block2.vtx.append(tx2)
        # Duplicating the last tx must leave both the merkle root and the
        # header hash unchanged -- that is the malleability being exercised.
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert(block2_orig.vtx != block2.vtx)
        self.tip = block2.sha256
        # Mutated block is rejected, then the untouched original is accepted
        # (i.e. the node re-requests and accepts the valid version).
        yield TestInstance([[block2, RejectResult(16,'bad-txns-duplicate')], [block2_orig, True]])
        height += 1
        '''
        Make sure that a totally screwed up block is not valid.
        '''
        block3 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block3.vtx[0].vout[0].nValue = 100*100000000 # Too high!
        block3.vtx[0].sha256=None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()
        yield TestInstance([[block3, RejectResult(16,'bad-cb-amount')]])
| mit |
dlacombejr/deepy | deepy/utils/train_logger.py | 3 | 1113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import datetime
import logging as loggers
import deepy
logging = loggers.getLogger(__name__)
class TrainLogger(object):
    """Accumulates timestamped training log lines and persists them in a
    ``<model>.log`` file that sits next to the model file."""

    def __init__(self):
        # Lines waiting to be flushed by save().
        self.log_pool = []

    def load(self, model_path):
        """Populate the pool from the .log file next to ``model_path``, if any."""
        log_path = self._log_path(model_path)
        if os.path.exists(log_path):
            logging.info("Load training log from %s" % log_path)
            # Plain iteration in a context manager replaces the deprecated
            # file.xreadlines() and guarantees the handle is closed.
            with open(log_path) as log_file:
                for line in log_file:
                    self.log_pool.append(line.strip())

    def record(self, line):
        """Append ``line`` to the pool, prefixed with a [Y/m/d H:M:S] timestamp."""
        time_mark = datetime.datetime.now().strftime("[%Y/%m/%d %H:%M:%S] ")
        self.log_pool.append(time_mark + line)

    def save(self, model_path):
        """Write the pooled lines to the .log file next to ``model_path``."""
        log_path = self._log_path(model_path)
        logging.info("Save training log to %s" % log_path)
        with open(log_path, "w") as outf:
            outf.write("# deepy version: %s\n" % deepy.__version__)
            for line in self.log_pool:
                outf.write(line + "\n")

    def _log_path(self, model_path):
        # e.g. "dir/model.pkl" -> "dir/model.log" (strips the last extension).
        log_path = model_path.rsplit(".", 1)[0] + ".log"
        return log_path
awemulya/fieldsight-kobocat | onadata/apps/userrole/models.py | 1 | 7498 | from datetime import datetime
from django.core.urlresolvers import reverse
from fcm.utils import get_device_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from onadata.apps.fieldsight.models import Site, Project, Organization
from onadata.apps.staff.models import StaffProject
class UserRole(models.Model):
    """Assignment of a user to a permission group, optionally scoped to a
    site, project, organization or staff project.

    A role with ``ended_at`` set is considered inactive; active roles are
    those with ``ended_at`` NULL.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="user_roles")
    group = models.ForeignKey(Group)
    started_at = models.DateTimeField(default=datetime.now)
    ended_at = models.DateTimeField(blank=True, null=True)
    site = models.ForeignKey(Site, null=True, blank=True, related_name='site_roles')
    project = models.ForeignKey(Project, null=True, blank=True, related_name='project_roles')
    organization = models.ForeignKey(Organization, null=True, blank=True, related_name='organization_roles')
    staff_project = models.ForeignKey(StaffProject, null=True, blank=True, related_name='staff_project_roles')
    logs = GenericRelation('eventlog.FieldSightLog')

    def __unicode__(self):
        return 'user: {}\'s role : {}'.format(self.user.__unicode__(), self.group.__unicode__())

    def as_json(self):
        """Minimal serialization (full name + email) used by views."""
        return dict(
            user = self.user.get_full_name(), email = self.user.email
        )

    def getname(self):
        return str("role")

    # class Meta:
    #     unique_together = ('user', 'group', 'organization', 'project', 'site', 'ended_at',)

    def get_absolute_url(self):
        return reverse('users:profile', kwargs={'pk': self.user.pk})

    def clean(self):
        """Validate that the scope object required by the group is present
        and that the identical role does not already exist."""
        if self.group.name in ['Site Supervisor', 'Reviewer'] and not self.site_id:
            raise ValidationError({
                'site': ValidationError(_('Missing site.'), code='required'),
            })
        if self.group.name == 'Project Manager' and not self.project_id:
            raise ValidationError({
                'project': ValidationError(_('Missing Project.'), code='required'),
            })
        if self.group.name == 'Organization Admin' and not self.organization_id:
            raise ValidationError({
                'organization': ValidationError(_('Missing Organization.'), code='required'),
            })
        if self.user and UserRole.objects.filter(user=self.user, group=self.group, project=self.project, site=self.site).exists():
            raise ValidationError({
                'user': ValidationError(_('User Role Already Exists.')),
            })

    def save(self, *args, **kwargs):
        # Normalize scope fields: clear the ones irrelevant to the group and
        # derive the broader scopes (project/organization) from narrower ones.
        if self.group.name == 'Super Admin':
            self.organization = None
            self.project = None
            self.site = None
        elif self.group.name == 'Organization Admin':
            self.project = None
            self.site = None
        elif self.group.name == 'Project Manager':
            self.site = None
            self.organization = self.project.organization
        elif self.group.name == 'Project Doner':
            self.site = None
            self.organization = self.project.organization
        elif self.group.name in ['Site Supervisor', 'Reviewer']:
            self.project = self.site.project
            self.organization = self.site.project.organization
        elif self.group.name == 'Staff Project Manager':
            self.organization = None
            self.project = None
            self.site = None
        super(UserRole, self).save(*args, **kwargs)

    def update(self, *args, **kwargs):
        # Same scope normalization as save().
        # NOTE(review): django.db.models.Model defines no update() method, so
        # the super() call below raises AttributeError if this is ever
        # invoked -- confirm whether this method is dead code.
        if self.group.name == 'Super Admin':
            self.organization = None
            self.project = None
            self.site = None
        elif self.group.name == 'Organization Admin':
            self.project = None
            self.site = None
        elif self.group.name == 'Project Manager':
            self.site = None
            self.organization = self.project.organization
        elif self.group.name in ['Site Supervisor', 'Reviewer']:
            self.project = self.site.project
            self.organization = self.site.project.organization
        super(UserRole, self).update(*args, **kwargs)

    @staticmethod
    def is_active(user, group):
        """Count of active (not ended) roles of ``group`` held by ``user``."""
        return UserRole.objects.filter(user=user, group__name=group, ended_at=None).count()

    @staticmethod
    def get_active_roles(user):
        return UserRole.objects.filter(user=user, ended_at=None).select_related('group', 'organization')

    @staticmethod
    def get_active_site_roles(user):
        return UserRole.objects.filter(user=user, ended_at=None, group__name="Site Supervisor", site__isnull=False, site__is_active=True).\
            select_related('project', 'site', 'site__type', 'project__organization', 'project__type')

    @staticmethod
    def get_active_site_roles_count(user):
        return UserRole.objects.filter(user=user, ended_at=None, group__name="Site Supervisor", site__isnull=False, site__is_active=True).count()

    @staticmethod
    def get_active_site_roles_exists(user):
        return UserRole.objects.filter(user=user, ended_at=None, group__name="Site Supervisor", site__isnull=False, site__is_active=True).exists()

    @staticmethod
    def get_roles_supervisor(user, project_id):
        return True if UserRole.objects.filter(user=user, ended_at=None, group__name="Site Supervisor", site__isnull=False, site__is_active=True,
                                               project__id=project_id).select_related('group', 'project')\
            .exists() else False

    @staticmethod
    def project_managers(project):
        return UserRole.objects.filter(project=project, ended_at=None, group__name="Project Manager").\
            select_related('group', 'project')

    @staticmethod
    def organization_admins(organization):
        return UserRole.objects.filter(organization=organization, ended_at=None, group__name="Organization Admin").\
            select_related('group', 'organization')

    @staticmethod
    def central_engineers(project):
        return UserRole.objects.filter(project=project, ended_at=None, group__name="Reviewer").\
            select_related('group', 'project')

    @property
    def has_multiple_project_roles(self):
        """True when the user holds more than one PM/Reviewer role in this organization."""
        return UserRole.objects.filter(user=self.user, group__name__in=['Project Manager', 'Reviewer'], organization=self.organization).count() > 1

    @property
    def both_project_roles(self):
        return UserRole.objects.filter(user=self.user, group__name__in=['Project Manager', 'Reviewer'], organization=self.organization)
@receiver(post_save, sender=UserRole)
def create_messages(sender, instance, created, **kwargs):
    # Push an FCM "Assign Site" notification to the user's registered device
    # when they are newly given a Site Supervisor role on a site.
    if created and instance.site is not None and instance.group.name in ["Site Supervisor"]:
        Device = get_device_model()
        if Device.objects.filter(name=instance.user.email).exists():
            message = {'notify_type':'Assign Site', 'site':{'name': instance.site.name, 'id': instance.site.id}, 'project':{'name': instance.project.name, 'id': instance.project.id}}
            try:
                Device.objects.filter(name=instance.user.email).send_message(message)
            except:
                # Deliberate best-effort: a failed push must not break the
                # role assignment.  NOTE(review): the bare except also hides
                # programming errors -- consider narrowing.
                pass
# NOTE(review): the @receiver decorator above already registers this handler;
# this explicit connect() looks redundant (Django de-duplicates identical
# receiver/sender pairs, so it is harmless) -- confirm before removing.
post_save.connect(create_messages, sender=UserRole)
| bsd-2-clause |
GoogleCloudPlatform/appengine-mapreduce | python/test/mapreduce/records_test.py | 15 | 16355 | #!/usr/bin/env python
"""Tests for records.py."""
from __future__ import with_statement
import array
import unittest
from mapreduce.third_party import crc32c
from mapreduce import records
class StringWriter(object):
  """In-memory stand-in for records.FileWriter that accumulates a str."""

  def __init__(self):
    self.data = ''

  def write(self, bytes):
    # Append the chunk to the accumulated buffer.
    self.data = self.data + bytes

  def toarray(self):
    # Expose the buffer as an array of unsigned byte values.
    result = array.array('B')
    result.fromstring(self.data)
    return result

  def tolist(self):
    # Same byte values as toarray(), as a plain Python list.
    return list(self.toarray())
class StringReader(object):
  """In-memory stand-in for records.FileReader over a str buffer."""

  def __init__(self, data):
    self.data = data
    self.position = 0

  def read(self, length):
    # Return up to ``length`` characters and advance the cursor, clamping
    # it at the end of the buffer.
    start = self.position
    self.position = min(start + length, len(self.data))
    return self.data[start:start + length]

  def tell(self):
    return self.position

  def seek(self, position):
    self.position = position
class RecordsTest(unittest.TestCase):
"""Test records operations."""
def setUp(self):
# Work with 10 bytes blocks.
records._BLOCK_SIZE = 20
def testMaskUnmaskCrc(self):
"""Test masking and unmasking crc."""
crc = crc32c.crc('foo')
self.assertNotEquals(crc, records._mask_crc(crc))
self.assertNotEquals(crc, records._mask_crc(records._mask_crc(crc)))
self.assertEqual(crc, records._unmask_crc(records._mask_crc(crc)))
self.assertEqual(
crc,
records._unmask_crc(records._unmask_crc(
records._mask_crc(records._mask_crc(crc)))))
# This value gave me troubles.
crc = 2685849682
self.assertEquals(crc, records._unmask_crc(records._mask_crc(crc)))
def testWriteEmptyRecordWithPadding(self):
"""Test writing empty record."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('')
self.assertSequenceEqual(
[
# Record 1
5, 43, 40, 67, # crc
0, 0, # length
1, # type
],
writer.tolist())
w._pad_block()
self.assertSequenceEqual(
[
# Record 1
5, 43, 40, 67, # crc
0, 0, # length
1, # type
# Padding
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0,
],
writer.tolist())
def testWriteWholeBlocks(self):
"""Test writing whole block records."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 13)
self.assertSequenceEqual(
[
# Record 1
134, 248, 6, 190, # crc
13, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
49, 49, 49,
],
writer.tolist())
w._pad_block()
self.assertSequenceEqual(
[
# Record 1
134, 248, 6, 190, # crc
13, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
49, 49, 49,
],
writer.tolist())
def testWriteNoRoomForHeader(self):
"""Test writing a record that doesn't have room for next record in block."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 10)
self.assertSequenceEqual(
[
# Record 1
79, 121, 169, 29, # crc
10, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
],
writer.tolist())
w.write('1' * 10)
self.assertSequenceEqual(
[
# Record 1
79, 121, 169, 29, # crc
10, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
# Padding
0, 0, 0,
# Record 2
79, 121, 169, 29, # crc
10, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
],
writer.tolist())
self.assertSequenceEqual(
[
# Record 1
79, 121, 169, 29, # crc
10, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
# Padding
0, 0, 0,
# Record 2
79, 121, 169, 29, # crc
10, 0, # length
1, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
# No Padding
],
writer.tolist())
def testWriteLargeRecord(self):
"""Test writing a record larger than block."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 60)
self.assertSequenceEqual(
[
# Chunk 1
149, 224, 148, 134,# crc
13, 0, # length
2, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
49, 49, 49,
# Chunk 2
139, 72, 32, 241, # crc
13, 0, # length
3, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
49, 49, 49,
# Chunk 3
139, 72, 32, 241, # crc
13, 0, # length
3, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
49, 49, 49,
# Chunk 4
139, 72, 32, 241, # crc
13, 0, # length
3, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
49, 49, 49,
# Chunk 5
198, 68, 73, 41, # crc
8, 0, # length
4, # type
49, 49, 49, 49, 49,
49, 49, 49,
# No Padding
],
writer.tolist())
def testWriteHeaderAtTheEndOfBlock(self):
"""Test writing a header when there's exactly 7 bytes left in block."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 6)
w.write('1' * 10)
self.assertSequenceEqual(
[
# Record 1
43, 18, 162, 121, # crc
6, 0, # length
1, # type
49, 49, 49, 49, 49,
49,
# Record 2, chunk 1
100, 81, 208, 233, # crc
0, 0, # length
2, # type
# Record 2, chunk 2
130, 247, 235, 147, # crc
10, 0, # length
4, # type
49, 49, 49, 49, 49,
49, 49, 49, 49, 49,
# No Padding
],
writer.tolist())
def testPadBlockIdempotency(self):
"""Test _pad_block is idempotent."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('')
w._pad_block()
w._pad_block()
w._pad_block()
w._pad_block()
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual('', reader.read())
self.assertEqual(records._BLOCK_SIZE, len(writer.data))
def testReadEmptyRecord(self):
"""Test reading empty records."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('')
w._pad_block()
with records.RecordsWriter(writer) as w:
w.write('')
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual('', reader.read())
# Should correctly skip padding.
self.assertEqual('', reader.read())
self.assertRaises(EOFError, reader.read)
def testReadWholeBlocks(self):
"""Test reading record occupying a whole block."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 13)
w._pad_block()
with records.RecordsWriter(writer) as w:
w.write('1' * 13)
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual('1' * 13, reader.read())
self.assertEqual('1' * 13, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadNoRoomForHeader(self):
"""Test reading records that leave <7 bytes in a block."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 10)
w.write('1' * 10)
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual('1' * 10, reader.read())
self.assertEqual('1' * 10, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadLargeRecords(self):
"""Test reading large headers."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 10)
w.write('1' * 20)
w.write('1' * 30)
w.write('1' * 40)
w.write('1' * 50)
w.write('1' * 60)
w.write('1' * 70)
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual('1' * 10, reader.read())
self.assertEqual('1' * 20, reader.read())
self.assertEqual('1' * 30, reader.read())
self.assertEqual('1' * 40, reader.read())
self.assertEqual('1' * 50, reader.read())
self.assertEqual('1' * 60, reader.read())
self.assertEqual('1' * 70, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadHeaderAtTheEndOfTheBlock(self):
"""Test reading records, that leave exactly 7 bytes at the end of block."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 6)
w.write('1' * 10)
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual('1' * 6, reader.read())
self.assertEqual('1' * 10, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedCrcSmallRecord(self):
"""Test reading small records with corrupted crc."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Block 1
w.write('1' * 2)
w.write('1' * 2)
# Block 2
w.write('1' * 2)
w.write('1' * 2)
data = writer.data
data = '_' + data[1:]
reader = records.RecordsReader(StringReader(data))
# First block should be completely skipped.
self.assertEqual('1' * 2, reader.read())
self.assertEqual('1' * 2, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedCrcLargeRecord(self):
"""Test reading large records with corrupted crc."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Blocks 1-6
w.write('1' * 100)
# Block 7
w.write('1' * 2)
data = writer.data
data = '_' + data[1:]
reader = records.RecordsReader(StringReader(data))
# First record should be completely skipped.
self.assertEqual('1' * 2, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedLength(self):
"""Test reading record with corrupted length."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Block 1
w.write('1' * 2)
w.write('1' * 2)
# Block 2
w.write('1' * 2)
w.write('1' * 2)
data = writer.data
# replace length by 65535
data = data[:4] + '\xff\xff' + data[6:]
reader = records.RecordsReader(StringReader(data))
# First block should be completely skipped.
self.assertEqual('1' * 2, reader.read())
self.assertEqual('1' * 2, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedRecordOrder_FirstThenFirst(self):
"""Tests corruption when a first record is followed by a first record."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Fake first record that should be ignored
w._RecordsWriter__write_record(
records._RECORD_TYPE_FIRST, 'A' * (records._BLOCK_SIZE / 2))
# Multi-block record. This will cause 'A' to be ignored because
# of a repeated first record.
w.write('B' * 2 * records._BLOCK_SIZE)
data = writer.data
reader = records.RecordsReader(StringReader(data))
self.assertEqual('B' * 2 * records._BLOCK_SIZE, reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedRecordOrder_FirstThenFull(self):
"""Tests corruption when a first record is followed by a full record."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Fake first record that should be ignored
w._RecordsWriter__write_record(
records._RECORD_TYPE_FIRST, 'A' * (records._BLOCK_SIZE / 2))
# Single-block, "full" record.
w.write('B' * (records._BLOCK_SIZE / 4))
data = writer.data
reader = records.RecordsReader(StringReader(data))
self.assertEqual('B' * (records._BLOCK_SIZE / 4), reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedRecordOrder_MiddleThenFull(self):
"""Tests corruption when a middle record is followed by a full record."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Fake middle record that should be ignored
w._RecordsWriter__write_record(
records._RECORD_TYPE_MIDDLE, 'A' * (records._BLOCK_SIZE / 2))
# Single-block, "full" record.
w.write('B' * (records._BLOCK_SIZE / 4))
data = writer.data
reader = records.RecordsReader(StringReader(data))
self.assertEqual('B' * (records._BLOCK_SIZE / 4), reader.read())
self.assertRaises(EOFError, reader.read)
def testReadCorruptedRecordOrder_LastThenFull(self):
"""Tests corruption when a last record is followed by a full record."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
# Fake last record that should be ignored
w._RecordsWriter__write_record(
records._RECORD_TYPE_LAST, 'A' * (records._BLOCK_SIZE / 2))
# Single-block, "full" record.
w.write('B' * (records._BLOCK_SIZE / 4))
data = writer.data
reader = records.RecordsReader(StringReader(data))
self.assertEqual('B' * (records._BLOCK_SIZE / 4), reader.read())
self.assertRaises(EOFError, reader.read)
def testIter(self):
"""Test reader iterator interface."""
writer = StringWriter()
with records.RecordsWriter(writer) as w:
w.write('1' * 1)
w.write('2' * 2)
w.write('3' * 3)
w.write('4' * 4)
reader = records.RecordsReader(StringReader(writer.data))
self.assertEqual(['1', '22', '333', '4444'], list(reader))
  def testReadTruncatedBuffer(self):
    """Test reading records from truncated file."""
    writer = StringWriter()
    with records.RecordsWriter(writer) as w:
      # Block 1
      w.write('1' * 2)
      w.write('1' * 2)
      # Block 2
      w.write('1' * 2)
      w.write('1' * 2)
    data = writer.data
    # Chop one byte at a time off the end and count how many complete
    # records the reader still recovers.  The byte thresholds below are the
    # minimum prefix lengths at which 4/3/2/1 records remain parseable.
    # NOTE(review): the constants 38/29/18/9 are tied to the on-disk record
    # header size of the `records` module — confirm there before changing.
    while data:
      data = data[:-1]
      reader = records.RecordsReader(StringReader(data))
      count = len(list(reader))
      if len(data) >= 38:
        self.assertEqual(4, count)
      elif len(data) >= 29:
        self.assertEqual(3, count)
      elif len(data) >= 18:
        self.assertEqual(2, count)
      elif len(data) >= 9:
        self.assertEqual(1, count)
      else:
        self.assertEqual(0, count)
def testSmoke(self):
"""Smoke test of all cases.
Other smaller tests are more revealing in particular situations.
"""
input_size = 0
# Try many input sizes!
while input_size < records._BLOCK_SIZE * 3:
writer = StringWriter()
inputs = '1' * input_size
with records.RecordsWriter(writer) as w:
# Make sure even the smallest input covers more than one block.
for _ in range(records._BLOCK_SIZE):
w.write(inputs)
reader = records.RecordsReader(StringReader(writer.data))
for _ in range(records._BLOCK_SIZE):
self.assertEqual(inputs, reader.read())
self.assertRaises(EOFError, reader.read)
input_size += 1
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
lukasjuhrich/pycroft | tests/model/test_traffic.py | 1 | 3884 | from pycroft.model import session
from pycroft.model.traffic import TrafficVolume, pmacct_traffic_egress, \
pmacct_traffic_ingress
from tests import FactoryDataTestBase
from tests.factories import UserWithHostFactory
class PMAcctViewTest(FactoryDataTestBase):
    """Exercise the pmacct ingress/egress views: inserting into them must
    create/accumulate TrafficVolume rows for the user owning the IP."""
    # Address owned by the test user's host interface (see create_factories).
    ip = '141.30.228.39'
    # NOTE(review): unused in the visible tests — presumably an address no
    # host owns; confirm before removing.
    bad_ip = '141.30.228.1'
    egress_table = pmacct_traffic_egress.table
    ingress_table = pmacct_traffic_ingress.table
    def create_factories(self):
        # One user whose host interface is bound to self.ip.
        self.user = UserWithHostFactory(host__interface__ip__str_address=self.ip)
    def build_insert(self, type, **kwargs):
        # Build an INSERT against the pmacct view; the direction decides the
        # table and whether self.ip goes into the source or destination column.
        if type == 'Ingress':
            table = self.ingress_table
            ip_key = 'ip_dst'
        elif type == 'Egress':
            table = self.egress_table
            ip_key = 'ip_src'
        else:
            raise ValueError("type must be one of 'Ingress', 'Egress'")
        stamp = session.utcnow()
        values = {
            'bytes': 1024,
            'packets': 20,
            ip_key: self.ip,
            'stamp_inserted': stamp,
            'stamp_updated': stamp,
        }
        # Caller-supplied kwargs override the defaults above.
        values.update(**kwargs)
        return table.insert().values(**values)
    def test_egress_insert(self):
        session.session.execute(self.build_insert(type='Egress'))
        self.assertEqual(TrafficVolume.q.count(), 1)
        volume = TrafficVolume.q.one()
        self.assertEqual(volume.type, 'Egress')
        self.assertEqual(volume.amount, 1024)
        self.assertEqual(volume.packets, 20)
        self.assertEqual(volume.user, self.user)
    def test_egress_insert_nonexistent_ip(self):
        # Traffic for an IP no host owns must not create a TrafficVolume row.
        session.session.execute(self.build_insert(type='Egress', ip_src="1.1.1.1"))
        self.assertEqual(TrafficVolume.q.count(), 0)
    def test_egress_update_successive_entries(self):
        data = [
            # timestamp, packets, amount
            ('2018-03-15 00:15:00', 200, 1024),
            ('2018-03-15 10:15:00', 324, 500),
            ('2018-03-15 23:59:00', 12, 7055),
        ]
        for stamp, packets, bytes in data:
            session.session.execute(self.build_insert(type='Egress', packets=packets, bytes=bytes,
                                                      stamp_inserted=stamp, stamp_updated=stamp))
        # All three inserts fall on the same day, so they are folded into a
        # single row whose counters are the sums of the individual entries.
        self.assertEqual(TrafficVolume.q.count(), 1)
        vol = TrafficVolume.q.one()
        self.assertEqual(str(vol.timestamp), '2018-03-15 00:00:00+00:00')
        self.assertEqual(vol.packets, sum(x[1] for x in data))
        self.assertEqual(vol.amount, sum(x[2] for x in data))
    def test_ingress_insert(self):
        session.session.execute(self.build_insert(type='Ingress'))
        self.assertEqual(TrafficVolume.q.count(), 1)
        volume = TrafficVolume.q.one()
        self.assertEqual(volume.type, 'Ingress')
        self.assertEqual(volume.amount, 1024)
        self.assertEqual(volume.packets, 20)
        self.assertEqual(volume.user, self.user)
    def test_ingress_insert_nonexistent_ip(self):
        # Traffic for an IP no host owns must not create a TrafficVolume row.
        session.session.execute(self.build_insert(type='Ingress', ip_dst="1.1.1.1"))
        self.assertEqual(TrafficVolume.q.count(), 0)
    def test_ingress_update_successive_entries(self):
        data = [
            # timestamp, packets, amount
            ('2018-03-15 00:15:00', 200, 1024),
            ('2018-03-15 10:15:00', 324, 500),
            ('2018-03-15 23:59:00', 12, 7055),
        ]
        for stamp, packets, bytes in data:
            session.session.execute(self.build_insert(type='Ingress', packets=packets, bytes=bytes,
                                                      stamp_inserted=stamp, stamp_updated=stamp))
        # Same-day entries are accumulated into one TrafficVolume row.
        self.assertEqual(TrafficVolume.q.count(), 1)
        vol = TrafficVolume.q.one()
        self.assertEqual(str(vol.timestamp), '2018-03-15 00:00:00+00:00')
        self.assertEqual(vol.packets, sum(x[1] for x in data))
        self.assertEqual(vol.amount, sum(x[2] for x in data))
| apache-2.0 |
joequant/fabric | bddtests/steps/bdd_grpc_util.py | 6 | 6608 |
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import devops_pb2
import fabric_pb2
import chaincode_pb2
import bdd_test_util
from grpc.beta import implementations
def getSecretForUserRegistration(userRegistration):
    """Build a devops_pb2.Secret from the enrollId/enrollSecret stored on a registered user."""
    return devops_pb2.Secret(enrollId=userRegistration.secretMsg['enrollId'],enrollSecret=userRegistration.secretMsg['enrollSecret'])
def getTxResult(context, enrollId):
    '''Returns the TransactionResult using the enrollId supplied'''
    assert 'users' in context, "users not found in context. Did you register a user?"
    assert 'compose_containers' in context, "compose_containers not found in context"
    # Open a channel to the peer that this user registered with.
    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
    stub = devops_pb2.beta_create_Devops_stub(channel)
    # Look up the transaction recorded earlier in the scenario (context.transactionID);
    # the literal 2 is the RPC timeout in seconds.
    txRequest = devops_pb2.TransactionRequest(transactionUuid = context.transactionID)
    response = stub.GetTransactionResult(txRequest, 2)
    assert response.status == fabric_pb2.Response.SUCCESS, 'Failure getting Transaction Result from {0}, for user "{1}": {2}'.format(userRegistration.composeService,enrollId, response.msg)
    # Now grab the TransactionResult from the Msg bytes
    txResult = fabric_pb2.TransactionResult()
    txResult.ParseFromString(response.msg)
    return txResult
def getGRPCChannel(ipAddress, port=7051):
    """Open an insecure gRPC channel to the peer at ipAddress.

    The port is now a parameter so callers can target non-default peers;
    it defaults to 7051 (the port previously hard-coded here), so existing
    call sites are unaffected.
    """
    channel = implementations.insecure_channel(ipAddress, port)
    print("Returning GRPC for address: {0}".format(ipAddress))
    return channel
def getGRPCChannelAndUser(context, enrollId):
    '''Returns a tuple of GRPC channel and UserRegistration instance. The channel is open to the composeService that the user registered with.'''
    userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
    # Get the IP address of the server that the user registered on
    ipAddress = bdd_test_util.ipFromContainerNamePart(userRegistration.composeService, context.compose_containers)
    channel = getGRPCChannel(ipAddress)
    return (channel, userRegistration)
def getDeployment(context, ccAlias):
    '''Return a deployment with chaincode alias from prior deployment, or None if not found'''
    # Lazily create the per-scenario deployment registry on first use
    # (replaces the original `if ...: pass / else:` construct).
    if 'deployments' not in context:
        context.deployments = {}
    # dict.get preserves the "missing alias -> None" contract.
    return context.deployments.get(ccAlias)
def deployChaincode(context, enrollId, chaincodePath, ccAlias, ctor):
    '''Deploy a chaincode with the specified alias for the specfied enrollId'''
    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
    stub = devops_pb2.beta_create_Devops_stub(channel)
    # Make sure deployment alias does NOT already exist
    assert getDeployment(context, ccAlias) == None, "Deployment alias already exists: '{0}'.".format(ccAlias)
    # Constructor arguments come from the scenario's table, if any.
    args = getArgsFromContextForUser(context, enrollId)
    ccSpec = chaincode_pb2.ChaincodeSpec(type = chaincode_pb2.ChaincodeSpec.GOLANG,
        chaincodeID = chaincode_pb2.ChaincodeID(name="",path=chaincodePath),
        ctorMsg = chaincode_pb2.ChaincodeInput(function = ctor, args = args))
    ccSpec.secureContext = userRegistration.getUserName()
    if 'metadata' in context:
        ccSpec.metadata = context.metadata
    try:
        # 60 second timeout: deployment builds the chaincode container.
        ccDeploymentSpec = stub.Deploy(ccSpec, 60)
        # The peer assigns the chaincode name; copy it back into our spec
        # and remember the spec under the alias for later invokes.
        ccSpec.chaincodeID.name = ccDeploymentSpec.chaincodeSpec.chaincodeID.name
        context.grpcChaincodeSpec = ccSpec
        context.deployments[ccAlias] = ccSpec
    except:
        del stub
        raise
def invokeChaincode(context, enrollId, ccAlias, functionName):
    """Invoke functionName on the previously deployed chaincode ccAlias as enrollId."""
    # Get the deployment for the supplied chaincode alias
    deployedCcSpec = getDeployment(context, ccAlias)
    assert deployedCcSpec != None, "Deployment NOT found for chaincode alias '{0}'".format(ccAlias)
    # Create a new ChaincodeSpec by copying the deployed one
    newChaincodeSpec = chaincode_pb2.ChaincodeSpec()
    newChaincodeSpec.CopyFrom(deployedCcSpec)
    # Update the chaincodeSpec ctorMsg for invoke
    args = getArgsFromContextForUser(context, enrollId)
    chaincodeInput = chaincode_pb2.ChaincodeInput(function = functionName, args = args )
    newChaincodeSpec.ctorMsg.CopyFrom(chaincodeInput)
    ccInvocationSpec = chaincode_pb2.ChaincodeInvocationSpec(chaincodeSpec = newChaincodeSpec)
    (channel, userRegistration) = getGRPCChannelAndUser(context, enrollId)
    stub = devops_pb2.beta_create_Devops_stub(channel)
    # 2 second RPC timeout.
    response = stub.Invoke(ccInvocationSpec,2)
    return response
def getArgsFromContextForUser(context, enrollId):
    """Build the chaincode args list from the scenario table, expanding {tagName} references
    against the registered user's tags. Returns [] when no table is present."""
    args = []
    # No scenario table (or an empty one) means no arguments.
    if 'table' not in context or not context.table:
        return args
    userRegistration = bdd_test_util.getUserRegistration(context, enrollId)
    # Allow the user to specify expressions referencing tags in the args list
    tagPattern = re.compile('\{(.*)\}$')
    for cell in context.table[0].cells:
        match = tagPattern.match(cell)
        if match is None:
            # No tag referenced, pass the arg through unchanged.
            args.append(cell)
        else:
            # tagName reference found in args list
            tagName = match.groups()[0]
            # make sure the tagName is found in the users tags
            assert tagName in userRegistration.tags, "TagName '{0}' not found for user '{1}'".format(tagName, userRegistration.getUserName())
            args.append(userRegistration.tags[tagName])
    return args
def toStringArray(items):
    """Coerce a list of str/unicode items to a list of byte strings.

    Raises Exception for any element that is neither str nor unicode.
    """
    itemsAsStr = []
    for item in items:
        # isinstance is the idiomatic type check (also honors subclasses),
        # unlike the original `type(item) == str` comparison.
        if isinstance(item, str):
            itemsAsStr.append(item)
        elif isinstance(item, unicode):
            itemsAsStr.append(str(item))
        else:
            raise Exception("Error trying to convert to string: unexpected type '{0}'".format(type(item)))
    return itemsAsStr
| apache-2.0 |
Lujeni/ansible | lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py | 25 | 9925 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_postgresqlfirewallrule
version_added: "2.8"
short_description: Manage PostgreSQL firewall rule instance
description:
- Create, update and delete instance of PostgreSQL firewall rule.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the PostgreSQL firewall rule.
required: True
start_ip_address:
description:
- The start IP address of the PostgreSQL firewall rule. Must be IPv4 format.
end_ip_address:
description:
- The end IP address of the PostgreSQL firewall rule. Must be IPv4 format.
state:
description:
- Assert the state of the PostgreSQL firewall rule. Use C(present) to create or update a PostgreSQL firewall rule and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create (or update) PostgreSQL firewall rule
azure_rm_postgresqlfirewallrule:
resource_group: myResourceGroup
server_name: testserver
name: rule1
start_ip_address: 10.0.0.16
end_ip_address: 10.0.0.18
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/testserver
/firewallRules/rule1"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the actions exec_module may decide to take."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMPostgreSqlFirewallRules(AzureRMModuleBase):
    """Configuration class for an Azure RM PostgreSQL firewall rule resource"""
    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block at the top of the file.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            start_ip_address=dict(
                type='str'
            ),
            end_ip_address=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        # Populated from kwargs in exec_module.
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.start_ip_address = None
        self.end_ip_address = None
        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction
        super(AzureRMPostgreSqlFirewallRules, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                             supports_check_mode=True,
                                                             supports_tags=False)
    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Copy the validated module parameters onto matching instance attributes.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
        old_response = None
        response = None
        # Fails the module if the resource group does not exist.
        resource_group = self.get_resource_group(self.resource_group)
        old_response = self.get_firewallrule()
        # Compare desired state against the current rule to pick an action.
        if not old_response:
            self.log("PostgreSQL firewall rule instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("PostgreSQL firewall rule instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if PostgreSQL firewall rule instance has to be deleted or may be updated")
                # Only the two IP bounds are mutable; any difference forces an update.
                if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
                    self.to_do = Actions.Update
                if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
                    self.to_do = Actions.Update
        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the PostgreSQL firewall rule instance")
            # In check mode report the pending change without touching Azure.
            if self.check_mode:
                self.results['changed'] = True
                return self.results
            response = self.create_update_firewallrule()
            if not old_response:
                self.results['changed'] = True
            else:
                # Field-by-field dict comparison of the old vs. new rule state.
                self.results['changed'] = old_response.__ne__(response)
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("PostgreSQL firewall rule instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_firewallrule()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_firewallrule():
                time.sleep(20)
        else:
            self.log("PostgreSQL firewall rule instance unchanged")
            self.results['changed'] = False
            response = old_response
        if response:
            self.results["id"] = response["id"]
        return self.results
    def create_update_firewallrule(self):
        '''
        Creates or updates PostgreSQL firewall rule with the specified configuration.

        :return: deserialized PostgreSQL firewall rule instance state dictionary
        '''
        self.log("Creating / Updating the PostgreSQL firewall rule instance {0}".format(self.name))
        try:
            response = self.postgresql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
                                                                              server_name=self.server_name,
                                                                              firewall_rule_name=self.name,
                                                                              start_ip_address=self.start_ip_address,
                                                                              end_ip_address=self.end_ip_address)
            # create_or_update may return a long-running-operation poller;
            # block until it finishes so we can report the final state.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error attempting to create the PostgreSQL firewall rule instance.')
            self.fail("Error creating the PostgreSQL firewall rule instance: {0}".format(str(exc)))
        return response.as_dict()
    def delete_firewallrule(self):
        '''
        Deletes specified PostgreSQL firewall rule instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the PostgreSQL firewall rule instance {0}".format(self.name))
        try:
            response = self.postgresql_client.firewall_rules.delete(resource_group_name=self.resource_group,
                                                                    server_name=self.server_name,
                                                                    firewall_rule_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the PostgreSQL firewall rule instance.')
            self.fail("Error deleting the PostgreSQL firewall rule instance: {0}".format(str(e)))
        return True
    def get_firewallrule(self):
        '''
        Gets the properties of the specified PostgreSQL firewall rule.

        :return: deserialized PostgreSQL firewall rule instance state dictionary,
                 or False when the rule does not exist
        '''
        self.log("Checking if the PostgreSQL firewall rule instance {0} is present".format(self.name))
        found = False
        try:
            response = self.postgresql_client.firewall_rules.get(resource_group_name=self.resource_group,
                                                                 server_name=self.server_name,
                                                                 firewall_rule_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("PostgreSQL firewall rule instance : {0} found".format(response.name))
        except CloudError as e:
            # "Not found" is the normal absent case — fall through to return False.
            self.log('Did not find the PostgreSQL firewall rule instance.')
        if found is True:
            return response.as_dict()
        return False
def main():
    """Main execution"""
    # Instantiating the module class runs it (AzureRMModuleBase drives exec_module).
    AzureRMPostgreSqlFirewallRules()
if __name__ == '__main__':
    main()
| gpl-3.0 |
40123210/w17b_exam | static/Brython3.1.1-20150328-091302/Lib/getopt.py | 845 | 7488 | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <liw@iki.fi>.
#
# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - a option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
    """Raised when the argument list cannot be parsed.

    Attributes:
        msg: human-readable description of the problem.
        opt: the option involved, or '' when no single option is to blame.
    """
    opt = ''
    msg = ''
    def __init__(self, msg, opt=''):
        Exception.__init__(self, msg, opt)
        self.msg = msg
        self.opt = opt
    def __str__(self):
        # Only the message (not the option) is shown when the error prints.
        return self.msg
error = GetoptError  # backward-compatible alias for the old exception name
def getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    Parses command line options and parameter list.  args is the
    argument list to be parsed, without the leading reference to the
    running program.  Typically, this means "sys.argv[1:]".  shortopts
    is the string of option letters that the script wants to
    recognize, with options that require an argument followed by a
    colon (i.e., the same format that Unix getopt() uses).  If
    specified, longopts is a list of strings with the names of the
    long options which should be supported.  The leading '--'
    characters should not be included in the option name.  Options
    which require an argument should be followed by an equal sign
    ('=').

    The return value consists of two elements: the first is a list of
    (option, value) pairs; the second is the list of program arguments
    left after the option list was stripped (this is a trailing slice
    of the first argument).  Each option-and-value pair returned has
    the option as its first element, prefixed with a hyphen (e.g.,
    '-x'), and the option argument as its second element, or an empty
    string if the option has no argument.  The options occur in the
    list in the same order in which they were found, thus allowing
    multiple occurrences.  Long and short options may be mixed.
    """
    opts = []
    # isinstance replaces the fragile `type(longopts) == type("")` check
    # (and matches the test gnu_getopt below already uses).
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    while args and args[0].startswith('-') and args[0] != '-':
        # A bare '--' terminates option processing.
        if args[0] == '--':
            args = args[1:]
            break
        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        else:
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
    return opts, args
def gnu_getopt(args, shortopts, longopts = []):
    """getopt(args, options[, long_options]) -> opts, args

    This function works like getopt(), except that GNU style scanning
    mode is used by default. This means that option and non-option
    arguments may be intermixed. The getopt() function stops
    processing options as soon as a non-option argument is
    encountered.

    If the first character of the option string is `+', or if the
    environment variable POSIXLY_CORRECT is set, then option
    processing stops as soon as a non-option argument is encountered.
    """
    opts = []
    prog_args = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    # Allow options after non-option arguments?
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False
    while args:
        # A bare '--' terminates option processing; the rest are operands.
        if args[0] == '--':
            prog_args += args[1:]
            break
        # startswith() replaces the original slice comparisons
        # (args[0][:2] == '--' / args[0][:1] == '-') — same semantics,
        # consistent with getopt() above.
        if args[0].startswith('--'):
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0].startswith('-') and args[0] != '-':
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                prog_args += args
                break
            else:
                prog_args.append(args[0])
                args = args[1:]
    return opts, prog_args
def do_longs(opts, opt, longopts, args):
    """Consume one long option (already stripped of '--'), appending to opts."""
    # Split an attached "name=value"; without '=', optarg stays None.
    opt, sep, optarg = opt.partition('=')
    if not sep:
        optarg = None
    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        # Argument may come attached ("--opt=val") or as the next word.
        if optarg is None:
            if not args:
                raise GetoptError(_('option --%s requires argument') % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg is not None:
        raise GetoptError(_('option --%s must not have an argument') % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
    """Return (takes_argument, full_option_name) for a possibly-abbreviated long option."""
    possibilities = [o for o in longopts if o.startswith(opt)]
    if not possibilities:
        raise GetoptError(_('option --%s not recognized') % opt, opt)
    # Exact matches win over prefix matches.
    if opt in possibilities:
        return False, opt
    if opt + '=' in possibilities:
        return True, opt
    # No exact match, so the abbreviation must be unambiguous.
    if len(possibilities) > 1:
        # XXX since possibilities contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
    assert len(possibilities) == 1
    unique_match = possibilities[0]
    # A trailing '=' in the declaration means the option takes an argument.
    if unique_match.endswith('='):
        return True, unique_match[:-1]
    return False, unique_match
def do_shorts(opts, optstring, shortopts, args):
    """Consume a cluster of short options (already stripped of '-'), appending to opts."""
    while optstring:
        opt, optstring = optstring[0], optstring[1:]
        if not short_has_arg(opt, shortopts):
            optarg = ''
        elif optstring:
            # Argument attached to the option cluster, e.g. "-ovalue".
            optarg, optstring = optstring, ''
        else:
            # Argument is the next word, e.g. "-o value".
            if not args:
                raise GetoptError(_('option -%s requires argument') % opt,
                                  opt)
            optstring, args = args[0], args[1:]
            optarg, optstring = optstring, ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True if short option ``opt`` takes an argument.

    An option takes an argument when its letter in ``shortopts`` is followed
    by a colon; ``:`` itself is never a valid option letter.  Raises
    GetoptError when ``opt`` does not appear in ``shortopts``.
    """
    # enumerate() replaces the unidiomatic `for i in range(len(shortopts))`.
    for i, ch in enumerate(shortopts):
        if opt == ch != ':':
            return shortopts.startswith(':', i + 1)
    raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
    import sys
    # Quick manual check: parse this script's own command line.
    print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| agpl-3.0 |
orcasgit/blender | tagging/views.py | 30 | 2118 | """
Tagging related views.
"""
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.list_detail import object_list
from tagging.models import Tag, TaggedItem
from tagging.utils import get_tag, get_queryset_and_model
def tagged_object_list(request, queryset_or_model=None, tag=None,
                       related_tags=False, related_tag_counts=True, **kwargs):
    """
    A thin wrapper around
    ``django.views.generic.list_detail.object_list`` which creates a
    ``QuerySet`` containing instances of the given queryset or model
    tagged with the given tag.

    In addition to the context variables set up by ``object_list``, a
    ``tag`` context variable will contain the ``Tag`` instance for the
    tag.

    If ``related_tags`` is ``True``, a ``related_tags`` context variable
    will contain tags related to the given tag for the given model.
    Additionally, if ``related_tag_counts`` is ``True``, each related
    tag will have a ``count`` attribute indicating the number of items
    which have it in addition to the given tag.
    """
    # Both the queryset/model and the tag may arrive positionally or via
    # URLconf kwargs; either way they are required.
    if queryset_or_model is None:
        try:
            queryset_or_model = kwargs.pop('queryset_or_model')
        except KeyError:
            raise AttributeError(_('tagged_object_list must be called with a queryset or a model.'))
    if tag is None:
        try:
            tag = kwargs.pop('tag')
        except KeyError:
            raise AttributeError(_('tagged_object_list must be called with a tag.'))
    tag_instance = get_tag(tag)
    if tag_instance is None:
        raise Http404(_('No Tag found matching "%s".') % tag)
    queryset = TaggedItem.objects.get_by_model(queryset_or_model, tag_instance)
    # setdefault replaces dict.has_key(), which was removed in Python 3.
    kwargs.setdefault('extra_context', {})
    kwargs['extra_context']['tag'] = tag_instance
    if related_tags:
        kwargs['extra_context']['related_tags'] = \
            Tag.objects.related_for_model(tag_instance, queryset_or_model,
                                          counts=related_tag_counts)
    return object_list(request, queryset, **kwargs)
| bsd-3-clause |
kvandermast/hic_sunt | nodejs/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  """Patched Element.writexml that escapes CR/LF/TAB in attribute values.

  indent = current indentation
  addindent = indentation to add to higher levels
  newl = newline string
  """
  writer.write(indent+"<" + self.tagName)
  attrs = self._get_attributes()
  # sorted() works on both Python 2 and 3; the original keys()/.sort() pair
  # fails on Python 3, where dict views have no .sort() method.
  a_names = sorted(attrs.keys())
  for a_name in a_names:
    writer.write(" %s=\"" % a_name)
    _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
    writer.write("\"")
  if self.childNodes:
    writer.write(">%s" % newl)
    for node in self.childNodes:
      node.writexml(writer, indent + addindent, addindent, newl)
    writer.write("%s</%s>%s" % (indent, self.tagName, newl))
  else:
    writer.write("/>%s" % newl)
class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom."""
  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml
  def Cleanup(self):
    # Restore the saved originals exactly once; write_data doubles as the
    # "still patched" flag, so Cleanup() followed by __del__ is harmless.
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None
  def __del__(self):
    # Undo the patch even if the caller forgets to call Cleanup().
    self.Cleanup()
| gpl-3.0 |
GaZ3ll3/numpy | numpy/distutils/fcompiler/compaq.py | 102 | 4145 |
#http://www.compaq.com/fortran/docs/
from __future__ import division, absolute_import, print_function
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.compat import get_exception
from distutils.errors import DistutilsPlatformError
# Class names exported to numpy.distutils' Fortran-compiler registry.
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
    # Otherwise we'd get a false positive on posix systems with
    # case-insensitive filesystems (like darwin), because we'll pick
    # up /bin/df
    compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
    """Unix Compaq Fortran compiler (driver 'fort' on Linux, 'f90' elsewhere)."""
    compiler_type = 'compaq'
    description = 'Compaq Fortran Compiler'
    version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
    # NOTE(review): on non-Linux POSIX the driver is assumed to be 'f90' —
    # confirm for the targeted platforms.
    if sys.platform[:5]=='linux':
        fc_exe = 'fort'
    else:
        fc_exe = 'f90'
    executables = {
        'version_cmd'  : ['<F90>', "-version"],
        'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
        'compiler_fix' : [fc_exe, "-fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    module_dir_switch = '-module ' # not tested
    module_include_switch = '-I'
    def get_flags(self):
        # Flags applied to every compilation.
        return ['-assume no2underscore', '-nomixed_str_len_arg']
    def get_flags_debug(self):
        return ['-g', '-check bounds']
    def get_flags_opt(self):
        return ['-O4', '-align dcommons', '-assume bigarrays',
                '-assume nozsize', '-math_library fast']
    def get_flags_arch(self):
        return ['-arch host', '-tune host']
    def get_flags_linker_so(self):
        if sys.platform[:5]=='linux':
            return ['-shared']
        # Non-Linux link allows unresolved symbols in shared objects.
        return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
    """DIGITAL/Compaq Visual Fortran on Windows (DF driver, MSVC's lib.exe archiver)."""
    compiler_type = 'compaqv'
    description = 'DIGITAL or Compaq Visual Fortran Compiler'
    version_pattern = r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'\
                      ' Version (?P<version>[^\s]*).*'
    compile_switch = '/compile_only'
    object_switch = '/object:'
    library_switch = '/OUT:'      #No space after /OUT:!
    static_lib_extension = ".lib"
    static_lib_format = "%s%s"
    module_dir_switch = '/module:'
    module_include_switch = '/I'
    ar_exe = 'lib.exe'
    fc_exe = 'DF'
    if sys.platform=='win32':
        from distutils.msvccompiler import MSVCCompiler
        # Locate MSVC's lib.exe for use as the archiver.  Each except clause
        # below works around a known way MSVCCompiler initialization can fail
        # on machines without a (complete) MSVC installation; unrecognized
        # failures are re-raised.
        try:
            m = MSVCCompiler()
            m.initialize()
            ar_exe = m.lib
        except DistutilsPlatformError:
            pass
        except AttributeError:
            msg = get_exception()
            if '_MSVCCompiler__root' in str(msg):
                print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
            else:
                raise
        except IOError:
            e = get_exception()
            if not "vcvarsall.bat" in str(e):
                print("Unexpected IOError in", __file__)
                raise e
        except ValueError:
            e = get_exception()
            if not "path']" in str(e):
                print("Unexpected ValueError in", __file__)
                raise e
    executables = {
        'version_cmd'  : ['<F90>', "/what"],
        'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
        'compiler_fix' : [fc_exe, "/fixed"],
        'compiler_f90' : [fc_exe],
        'linker_so'    : ['<F90>'],
        'archiver'     : [ar_exe, "/OUT:"],
        'ranlib'       : None
        }
    def get_flags(self):
        return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
                '/names:lowercase', '/assume:underscore']
    def get_flags_opt(self):
        return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
    def get_flags_arch(self):
        return ['/threads']
    def get_flags_debug(self):
        return ['/debug']
if __name__ == '__main__':
    # Manual smoke test: detect the Compaq compiler and print its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='compaq')
    compiler.customize()
    print(compiler.get_version())
| bsd-3-clause |
gangadhar-kadam/verve_live_frappe | frappe/email/doctype/email_account/test_email_account.py | 4 | 3903 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import frappe, os
import unittest, email
test_records = frappe.get_test_records('Email Account')
from frappe.core.doctype.communication.communication import make
from frappe.desk.form.load import get_attachments
from frappe.utils.file_manager import delete_file_from_filesystem
class TestEmailAccount(unittest.TestCase):
	"""Integration tests for Email Account: receiving raw mails (with and
	without attachments), sending, and reply threading.

	Each test works against the "_Test Email Account 1" fixture and cleans
	up Communications from previous runs before asserting.
	"""

	def test_incoming(self):
		"""An incoming raw mail creates a Communication with its recipients."""
		frappe.db.sql("delete from tabCommunication where sender='test_sender@example.com'")
		with open(os.path.join(os.path.dirname(__file__), "test_mails", "incoming-1.raw"), "r") as f:
			test_mails = [f.read()]

		email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
		email_account.receive(test_mails=test_mails)

		comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
		self.assertIn("test_receiver@example.com", comm.recipients)

	def test_incoming_with_attach(self):
		"""An incoming mail with an attachment stores the file against the
		created Communication."""
		frappe.db.sql("delete from tabCommunication where sender='test_sender@example.com'")

		# Remove any leftover copy of the attachment so the import is clean.
		existing_file = frappe.get_doc({'doctype': 'File Data', 'file_name': 'erpnext-conf-14.png'})
		frappe.delete_doc("File Data", existing_file.name)
		delete_file_from_filesystem(existing_file)

		with open(os.path.join(os.path.dirname(__file__), "test_mails", "incoming-2.raw"), "r") as f:
			test_mails = [f.read()]

		email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
		email_account.receive(test_mails=test_mails)

		comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
		self.assertIn("test_receiver@example.com", comm.recipients)

		# check attachment
		attachments = get_attachments(comm.doctype, comm.name)
		self.assertIn("erpnext-conf-14.png", [f.file_name for f in attachments])

	def test_outgoing(self):
		"""make(send_email=True) queues a Bulk Email with the given subject."""
		frappe.flags.sent_mail = None
		make(subject = "test-mail-000", content="test mail 000", recipients="test_receiver@example.com",
			send_email=True, sender="test_sender@example.com")

		mail = email.message_from_string(frappe.get_last_doc("Bulk Email").message)
		self.assertIn("test-mail-000", mail.get("Subject"))

	def test_sendmail(self):
		"""frappe.sendmail sends immediately via frappe.flags.sent_mail."""
		frappe.flags.sent_mail = None
		frappe.sendmail(sender="test_sender@example.com", recipients="test_recipient@example.com",
			content="test mail 001", subject="test-mail-001")

		sent_mail = email.message_from_string(frappe.flags.sent_mail)
		self.assertIn("test-mail-001", sent_mail.get("Subject"))

	def test_print_format(self):
		"""Sending with a print_format attaches the rendered document."""
		frappe.flags.sent_mail = None
		make(sender="test_sender@example.com", recipients="test_recipient@example.com",
			content="test mail 001", subject="test-mail-002", doctype="Email Account",
			name="_Test Email Account 1", print_format="Standard", send_email=True)

		sent_mail = email.message_from_string(frappe.get_last_doc("Bulk Email").message)
		self.assertIn("test-mail-002", sent_mail.get("Subject"))

	def test_threading(self):
		"""A reply carrying the original Message-Id is linked back to the
		Communication that was sent."""
		frappe.db.sql("""delete from tabCommunication
			where sender in ('test_sender@example.com', 'test@example.com')""")

		# send
		sent_name = make(subject = "Test", content="test content",
			recipients="test_receiver@example.com", sender="test@example.com",
			send_email=True)

		sent_mail = email.message_from_string(frappe.get_last_doc("Bulk Email").message)

		# Splice the outgoing Message-Id into the canned reply so the
		# receiver can thread it.
		with open(os.path.join(os.path.dirname(__file__), "test_mails", "reply-1.raw"), "r") as f:
			raw = f.read()
			raw = raw.replace("<-- in-reply-to -->", sent_mail.get("Message-Id"))
			test_mails = [raw]

		# parse reply
		email_account = frappe.get_doc("Email Account", "_Test Email Account 1")
		email_account.receive(test_mails=test_mails)

		sent = frappe.get_doc("Communication", sent_name)

		comm = frappe.get_doc("Communication", {"sender": "test_sender@example.com"})
		# assertEquals is a deprecated alias (removed in Python 3.12);
		# use assertEqual instead.
		self.assertEqual(comm.reference_doctype, sent.doctype)
		self.assertEqual(comm.reference_name, sent.name)
| mit |
MemeticParadigm/TensorFlow | tensorflow/python/client/graph_util.py | 5 | 3864 | """Helpers to manipulate a tensor graph in python.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.framework import types
from tensorflow.python.platform import logging
# Op-type names that create or mutate variable state.  Used by
# _is_variable_op() to decide whether a node is "variable-related" when
# choosing a device for it.
_VARIABLE_OPS = {
    "Assign",
    "AssignAdd",
    "AssignSub",
    "Queue",
    "RandomParameters",
    "ScatterAdd",
    "ScatterSub",
    "ScatterUpdate",
    "Variable",
}
def _is_variable_op(op):
  """Returns true if 'op' refers to a Variable node."""
  # `op` is the node's op-type string (NodeDef.op), not an Operation object.
  return op in _VARIABLE_OPS
def set_cpu0(device_string):
  """Creates a new device string based on `device_string` but using /CPU:0.

  If the device is already on /CPU:0, this is a no-op.

  Args:
    device_string: A device string.

  Returns:
    A device string.
  """
  parsed_device = pydev.from_string(device_string)
  # Only the type and index are overwritten; job, replica and task parts of
  # the device string are preserved.
  parsed_device.device_type = "CPU"
  parsed_device.device_index = 0
  return parsed_device.to_string()
def must_run_on_cpu(node, pin_variables_on_cpu=False):
  """Returns True if the given node_def must run on CPU, otherwise False.

  Args:
    node: The node to be assigned to a device. Could be either an ops.Operation
      or NodeDef.
    pin_variables_on_cpu: If True, this function will return True if node_def
      represents a variable-related op.

  Returns:
    True if the given node must run on CPU, otherwise False.
  """
  if isinstance(node, ops.Operation):
    node_def = node.node_def
  else:
    assert isinstance(node, graph_pb2.NodeDef)
    node_def = node

  # If the op is a variable-related op, should we pin it on CPU?
  if pin_variables_on_cpu and _is_variable_op(node_def.op):
    return True

  # Constant operations producing a string or int32 must run on CPU.
  if node_def.op == "Const":
    # Get the value of the 'dtype' attr
    dtype = node_def.attr["dtype"].type
    if dtype == types.string or dtype == types.int32:
      return True

  if node_def.op == "DynamicStitch":
    dtype = node_def.attr["T"].type
    if dtype == types.int32:
      # DynamicStitch on GPU only works for int32 values.
      return True

  if node_def.op in ["Cast"]:
    dtype = node_def.attr["SrcT"].type
    if dtype == types.int32:
      # Cast on GPU does not work for int32 values.
      return True
  return False
################################################################################
#
# device functions for use in with g.device(...)
#
################################################################################
def pin_variables_on_cpu(op):
  """Device function for ``g.device(...)``: CPU for variable-related nodes.

  Args:
    op: The ops.Operation or NodeDef for which a device should be chosen.
      The op.device field is respected.

  Returns:
    A device string containing "/device:CPU:0" if the node is related to a
    variable; otherwise the node's existing device string.
  """
  device = op.device if op.device is not None else ""
  # An explicitly requested device type always wins.
  if pydev.from_string(device).device_type:
    return device
  if isinstance(op, ops.Operation):
    node_def = op.node_def
  else:
    assert isinstance(op, graph_pb2.NodeDef)
    node_def = op
  return set_cpu0(device) if _is_variable_op(node_def.op) else device
def pin_to_cpu(op):
  """Device function for ``g.device(...)``: place every node on the CPU.

  Nodes already assigned to a non-CPU device are left alone (with an info
  log message) rather than having their explicit assignment overridden.
  """
  device = op.device if op.device is not None else ""
  parsed = pydev.from_string(device)
  if not parsed.device_type:
    # No device type requested yet: pin to CPU 0.
    return set_cpu0(device)
  if parsed.device_type != "CPU":
    logging.info("Operation %s has been assigned to a non-CPU (%s), so "
                 "it will not be pinned to the CPU.", op.name,
                 parsed.device_type)
  return device
| apache-2.0 |
40223139/LEGOg7-39 | static/Brython3.1.3-20150514-095342/Lib/_random.py | 85 | 3370 | from browser import window, alert
def _randint(a, b):
    # Uniform integer in the closed range [a, b], drawn from the browser's
    # Math.random (NOT cryptographically secure).
    return int(window.Math.random()*(b-a+1)+a)
def _rand_with_seed(x, rand_obj):
    # Deterministic pseudo-random float in [0, 1) derived from
    # rand_obj._state, which is advanced on every call.
    # NOTE(review): the parameter `x` is overwritten immediately and never
    # read -- it looks vestigial; confirm with callers before removing it.
    x = window.Math.sin(rand_obj._state) * 10000
    # Adding 1 is not reliable because of current integer implementation
    # If rand_obj._state is not a "safe integer" in the range [-2**53, 2**53]
    # the increment between 2 different values is a power of 2
    # It is stored in an attribute of rand_obj to avoid having to compute it
    # for each iteration
    if not hasattr(rand_obj, 'incr'):
        rand_obj.incr = 1
        n = rand_obj._state
        while n+rand_obj.incr==n:
            # increase the increment until the increment value is different
            rand_obj.incr *= 2
    rand_obj._state += rand_obj.incr
    # Keep only the fractional part of the scaled sine value.
    return x - window.Math.floor(x)
def _urandom(n, rand_obj=None):
    """urandom(n) -> str

    Return n random bytes suitable for cryptographic use."""
    if rand_obj is None or rand_obj._state is None:
        # Unseeded: draw each byte directly from the browser RNG.
        return bytes([_randint(0, 255) for _ in range(n)])
    # Seeded: derive each byte deterministically from the generator state.
    return bytes([int(256 * _rand_with_seed(i, rand_obj)) for i in range(n)])
class Random:
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods: random(), seed(), getstate(), and setstate().
    Optionally, implement a getrandbits() method so that randrange()
    can cover arbitrarily large ranges.
    """
    # Core generator interface implemented here: random(), seed(),
    # getstate(), setstate(); getrandbits() is provided as well.

    VERSION = 3     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """
        # The entire generator state is the seed value itself; seeded draws
        # are derived from it by _rand_with_seed().
        self._state=x

    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray. For version 1, the hash() of *a* is used instead.

        If *a* is an int, all bits are used.
        """
        # NOTE(review): this implementation stores `a` directly; the
        # time-based fallback described above is not implemented here.
        self._state=a
        self.gauss_next = None

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self._state

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        self._state=state

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Delegates to the browser RNG; the stored seed is NOT used here.
        return window.Math.random()

    def getrandbits(self, k):
        """getrandbits(k) -> x. Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        numbytes = (k + 7) // 8                       # bits / 8 and rounded up
        x = int.from_bytes(_urandom(numbytes, self), 'big')
        return x >> (numbytes * 8 - k)                # trim excess bits
| agpl-3.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/click/termui.py | 202 | 21008 | import os
import sys
import struct
from ._compat import raw_input, text_type, string_types, \
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
from .utils import echo
from .exceptions import Abort, UsageError
from .types import convert_type
from .globals import resolve_color_default
# The prompt functions to use.  The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input

# Color names understood by style(); a name's index in this tuple plus 30
# (foreground) or plus 40 (background) yields the ANSI SGR color code.
_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta',
                'cyan', 'white', 'reset')

# SGR "reset all attributes" sequence, appended by style() when reset=True.
_ansi_reset_all = '\033[0m'
def hidden_prompt_func(prompt):
    """Read a line of input without echoing it (used for passwords)."""
    # Imported lazily; only needed when hidden input is actually requested.
    import getpass
    return getpass.getpass(prompt)
def _build_prompt(text, suffix, show_default=False, default=None):
prompt = text
if default is not None and show_default:
prompt = '%s [%s]' % (prompt, default)
return prompt + suffix
def prompt(text, default=None, hide_input=False,
           confirmation_prompt=False, type=None,
           value_proc=None, prompt_suffix=': ',
           show_default=True, err=False):
    """Prompts a user for input.  This is a convenience function that can
    be used to prompt a user for input later.

    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens.  If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    result = None

    def prompt_func(text):
        # Choose the echoing or the hidden input reader.
        f = hide_input and hidden_prompt_func or visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text, nl=False, err=err)
            return f('')
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        value_proc = convert_type(type, default)

    prompt = _build_prompt(text, prompt_suffix, show_default, default)

    # Outer loop: repeat until a value converts cleanly (and, if requested,
    # is confirmed).  Inner loops: repeat until non-empty input or a default.
    while 1:
        while 1:
            value = prompt_func(prompt)
            if value:
                break
            # If a default is set and used, then the confirmation
            # prompt is always skipped because that's the only thing
            # that really makes sense.
            elif default is not None:
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            echo('Error: %s' % e.message, err=err)
            continue
        if not confirmation_prompt:
            return result
        while 1:
            value2 = prompt_func('Repeat for confirmation: ')
            if value2:
                break
        if value == value2:
            return result
        echo('Error: the two entered values do not match', err=err)
def confirm(text, default=False, abort=False, prompt_suffix=': ',
            show_default=True, err=False):
    """Prompts for confirmation (yes/no question).

    If the user aborts the input by sending a interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the question to ask.
    :param default: the default for the prompt.
    :param abort: if this is set to `True` a negative answer aborts the
                  exception by raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # The shown default hint is capitalized to indicate the default answer.
    prompt = _build_prompt(text, prompt_suffix, show_default,
                           default and 'Y/n' or 'y/N')
    while 1:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(prompt, nl=False, err=err)
            value = visible_prompt_func('').lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        if value in ('y', 'yes'):
            rv = True
        elif value in ('n', 'no'):
            rv = False
        elif value == '':
            # Empty input selects the default answer.
            rv = default
        else:
            echo('Error: invalid input', err=err)
            continue
        break
    if abort and not rv:
        # Caller requested a hard stop on a negative answer.
        raise Abort()
    return rv
def get_terminal_size():
    """Returns the current size of the terminal as tuple in the form
    ``(width, height)`` in columns and rows.
    """
    # If shutil has get_terminal_size() (Python 3.3 and later) use that
    if sys.version_info >= (3, 3):
        import shutil
        shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
        if shutil_get_terminal_size:
            sz = shutil_get_terminal_size()
            return sz.columns, sz.lines

    # On Windows, colorama's WinTerm can report the console size.
    if get_winterm_size is not None:
        return get_winterm_size()

    def ioctl_gwinsz(fd):
        # Ask the kernel for the window size of the tty behind `fd`;
        # returns (rows, cols) or None on failure.
        try:
            import fcntl
            import termios
            cr = struct.unpack(
                'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except Exception:
            return
        return cr

    # Try stdin, stdout and stderr in turn; any of them may be a tty.
    cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
    if not cr:
        # None of the standard streams worked: query the controlling
        # terminal directly.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_gwinsz(fd)
            finally:
                os.close(fd)
        except Exception:
            pass
    if not cr or not cr[0] or not cr[1]:
        # Last resort: environment variables, then hard-coded defaults.
        cr = (os.environ.get('LINES', 25),
              os.environ.get('COLUMNS', DEFAULT_COLUMNS))

    # cr is (rows, cols); the public return order is (width, height).
    return int(cr[1]), int(cr[0])
def echo_via_pager(text, color=None):
    """Show *text* on stdout through an environment-specific pager program.

    .. versionchanged:: 3.0
       Added the `color` flag.

    :param text: the text to page.
    :param color: controls if the pager supports ANSI colors or not.  The
                  default is autodetection.
    """
    from ._termui_impl import pager
    if not isinstance(text, string_types):
        text = text_type(text)
    # Ensure the paged output ends with a newline.
    return pager(text + '\n', resolve_color_default(color))
def progressbar(iterable=None, length=None, label=None, show_eta=True,
                show_percent=None, show_pos=False,
                item_show_func=None, fill_char='#', empty_char='-',
                bar_template='%(label)s  [%(bar)s]  %(info)s',
                info_sep='  ', width=36, file=None, color=None):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar.  It will
    either iterate over the `iterable` or `length` items (that are counted
    up).  While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more.  By default, this progress bar
    will not be rendered if the file is not a terminal.

    The context manager creates the progress bar.  When the context
    manager is entered the progress bar is already displayed.  With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated.  When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.

    No printing must happen or the progress bar will be unintentionally
    destroyed.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Alternatively, if no iterable is specified, one can manually update the
    progress bar through the `update()` method instead of directly
    iterating over the progress bar.  The update method accepts the number
    of steps to increment the bar with::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `color` parameter.  Added a `update` method to the
       progressbar object.

    :param iterable: an iterable to iterate over.  If not provided the length
                     is required.
    :param length: the number of items to iterate over.  By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work.  If an iterable is
                   also provided this parameter can be used to override the
                   length.  If an iterable is not provided the progress bar
                   will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display.  This is
                     automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display.  The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display.  The
                     default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar.  Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: the file to write to.  If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.  This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    """
    # Thin factory: all rendering and iteration logic lives in ProgressBar.
    from ._termui_impl import ProgressBar
    color = resolve_color_default(color)
    return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
                       show_percent=show_percent, show_pos=show_pos,
                       item_show_func=item_show_func, fill_char=fill_char,
                       empty_char=empty_char, bar_template=bar_template,
                       info_sep=info_sep, file=file, label=label,
                       width=width, color=color)
def clear():
    """Clear the visible terminal screen and home the cursor.

    This is a no-op when stdout is not connected to a terminal.

    .. versionadded:: 2.0
    """
    if not isatty(sys.stdout):
        return
    # If we're on Windows and we don't have colorama available, then we
    # clear the screen by shelling out.  Otherwise we can use an escape
    # sequence.
    if WIN:
        os.system('cls')
    else:
        # ANSI: erase the display, then move the cursor to row 1, column 1.
        sys.stdout.write('\033[2J\033[1;1H')
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
          blink=None, reverse=None, reset=True):
    """Styles a text with ANSI styles and returns the new string.  By
    default the styling is self contained which means that at the end
    of the string a reset code is issued.  This can be prevented by
    passing ``reset=False``.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))

    Supported color names:

    * ``black`` (might be a gray)
    * ``red``
    * ``green``
    * ``yellow`` (might be an orange)
    * ``blue``
    * ``magenta``
    * ``cyan``
    * ``white`` (might be light gray)
    * ``reset`` (reset the color code only)

    .. versionadded:: 2.0

    :param text: the string to style with ansi codes.
    :param fg: if provided this will become the foreground color.
    :param bg: if provided this will become the background color.
    :param bold: if provided this will enable or disable bold mode.
    :param dim: if provided this will enable or disable dim mode.  This is
                badly supported.
    :param underline: if provided this will enable or disable underline.
    :param blink: if provided this will enable or disable blinking.
    :param reverse: if provided this will enable or disable inverse
                    rendering (foreground becomes background and the
                    other way round).
    :param reset: by default a reset-all code is added at the end of the
                  string which means that styles do not carry over.  This
                  can be disabled to compose styles.
    """
    parts = []

    def _color_code(name, offset):
        # Foreground colors live at 30-38, background at 40-48.
        try:
            return '\033[%dm' % (_ansi_colors.index(name) + offset)
        except ValueError:
            raise TypeError('Unknown color %r' % name)

    if fg:
        parts.append(_color_code(fg, 30))
    if bg:
        parts.append(_color_code(bg, 40))
    # Tri-state attributes: None leaves the attribute alone, True emits the
    # "on" code, False emits the matching "off" code.
    for flag, on_code, off_code in ((bold, 1, 22), (dim, 2, 22),
                                    (underline, 4, 24), (blink, 5, 25),
                                    (reverse, 7, 27)):
        if flag is not None:
            parts.append('\033[%dm' % (on_code if flag else off_code))
    parts.append(text)
    if reset:
        parts.append(_ansi_reset_all)
    return ''.join(parts)
def unstyle(text):
    """Strip ANSI styling information from a string.

    Usually it's not necessary to use this function as Click's echo function
    will automatically remove styling if necessary.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    return strip_ansi(text)
def secho(text, file=None, nl=True, err=False, color=None, **styles):
    """Combine :func:`style` and :func:`echo` in one call, so that::

        click.secho('Hello World!', fg='green')

    is equivalent to::

        click.echo(click.style('Hello World!', fg='green'))

    Keyword arguments are forwarded to whichever of the two functions
    they belong to.

    .. versionadded:: 2.0
    """
    styled = style(text, **styles)
    return echo(styled, file=file, nl=nl, err=err, color=color)
def edit(text=None, editor=None, env=None, require_save=True,
         extension='.txt', filename=None):
    r"""Edits the given text in the defined editor.  If an editor is given
    (should be the full path to the executable but the regular operating
    system search path is used for finding the executable) it overrides
    the detected editor.  Optionally, some environment variables can be
    used.  If the editor is closed without changes, `None` is returned.  In
    case a file is edited directly the return value is always `None` and
    `require_save` and `extension` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa.  As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use.  Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about.  This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents.  It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor
    editor = Editor(editor=editor, env=env, require_save=require_save,
                    extension=extension)
    if filename is None:
        # Text mode: round-trip the text through a temporary file.
        return editor.edit(text)
    # File mode: edit in place; always returns None.
    editor.edit_file(filename)
def launch(url, wait=False, locate=False):
    """This function launches the given URL (or filename) in the default
    viewer application for this file type.  If this is an executable, it
    might launch the executable in a new session.  The return value is
    the exit code of the launched application.  Usually, ``0`` indicates
    success.

    Examples::

        click.launch('http://click.pocoo.org/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: URL or filename of the thing to launch.
    :param wait: waits for the program to stop.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located.  This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    # The platform-specific dispatch lives in _termui_impl.
    from ._termui_impl import open_url
    return open_url(url, wait=wait, locate=locate)
# If this is provided, getchar() calls into this instead.  This is used
# for unittesting purposes (tests replace it with a canned input source).
_getchar = None
def getchar(echo=False):
    """Fetches a single character from the terminal and returns it.  This
    will always return a unicode character and under certain rare
    circumstances this might return more than one character.  The
    situations which more than one character is returned is when for
    whatever reason multiple characters end up in the terminal buffer or
    standard input was not actually a terminal.

    Note that this will always read from the terminal, even if something
    is piped into the standard input.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal.  The default is to not show it.
    """
    f = _getchar
    if f is None:
        # Fall back to the platform implementation in _termui_impl.
        from ._termui_impl import getchar as f
    return f(echo)
def pause(info='Press any key to continue ...', err=False):
    """This command stops execution and waits for the user to press any
    key to continue.  This is similar to the Windows batch "pause"
    command.  If the program is not run through a terminal, this command
    will instead do nothing.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param info: the info string to print before pausing.
    :param err: if set to message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # Only meaningful when both input and output are real terminals.
    if not isatty(sys.stdin) or not isatty(sys.stdout):
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            # ^C / EOF while waiting simply ends the pause.
            pass
    finally:
        # Terminate the info line even if the wait was interrupted.
        if info:
            echo(err=err)
| mit |
abenzbiria/clients_odoo | addons/edi/models/res_partner.py | 437 | 4243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.osv import osv
from edi import EDIMixin
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# Fields of res.partner included in an exported EDI document; a value of
# True means the field is exported with its plain value.
RES_PARTNER_EDI_STRUCT = {
    'name': True,
    'ref': True,
    'lang': True,
    'website': True,
    'email': True,
    'street': True,
    'street2': True,
    'zip': True,
    'city': True,
    'country_id': True,
    'state_id': True,
    'phone': True,
    'fax': True,
    'mobile': True,
}
class res_partner(osv.osv, EDIMixin):
    """EDI import/export support for partners, including their bank accounts."""
    _inherit = "res.partner"

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Export ``records``, defaulting to the partner EDI field structure."""
        return super(res_partner,self).edi_export(cr, uid, records,
                                                  edi_struct or dict(RES_PARTNER_EDI_STRUCT),
                                                  context=context)

    def _get_bank_type(self, cr, uid, context=None):
        """Return the bank-type ``code`` to use for imported bank accounts.

        Prefers the standard "normal" bank type installed by default;
        otherwise falls back to a free-form generic type that is created on
        first use, because the IBAN type is too strict for arbitrary
        imported data (users can correct it manually after import).
        """
        # first option: the "normal" bank type, installed by default
        res_partner_bank_type = self.pool.get('res.partner.bank.type')
        try:
            return self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'bank_normal', context=context).code
        except ValueError:
            pass

        # second option: create a new custom type for EDI or use it if already created, as IBAN type is
        # not always appropriate: we need a free-form bank type for max flexibility (users can correct
        # data manually after import)
        code, label = 'edi_generic', 'Generic Bank Type (auto-created for EDI)'
        bank_code_ids = res_partner_bank_type.search(cr, uid, [('code','=',code)], context=context)
        if not bank_code_ids:
            _logger.info('Normal bank account type is missing, creating '
                         'a generic bank account type for EDI.')
            # BUGFIX: was `self.res_partner_bank_type.create(...)`, which
            # raises AttributeError (res_partner_bank_type is a local
            # variable, not a model attribute), and stored `label` as the
            # code, so the search above could never find the record again.
            res_partner_bank_type.create(cr, SUPERUSER_ID, {'name': label,
                                                            'code': code})
        return code

    def edi_import(self, cr, uid, edi_document, context=None):
        """Import a partner EDI document, then attach its bank accounts.

        Bank entries that fail to import with the chosen bank type are
        logged and skipped (best-effort), never aborting the partner import.
        """
        # handle bank info, if any
        edi_bank_ids = edi_document.pop('bank_ids', None)
        contact_id = super(res_partner,self).edi_import(cr, uid, edi_document, context=context)
        if edi_bank_ids:
            contact = self.browse(cr, uid, contact_id, context=context)
            import_ctx = dict((context or {}),
                              default_partner_id = contact.id,
                              default_state=self._get_bank_type(cr, uid, context))
            for ext_bank_id, bank_name in edi_bank_ids:
                try:
                    self.edi_import_relation(cr, uid, 'res.partner.bank',
                                             bank_name, ext_bank_id, context=import_ctx)
                except osv.except_osv:
                    # failed to import it, try again with unrestricted default type
                    _logger.warning('Failed to import bank account using'
                                    'bank type: %s, ignoring', import_ctx['default_state'],
                                    exc_info=True)
        return contact_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
markoshorro/gem5 | src/arch/x86/isa/insts/general_purpose/string/compare_strings.py | 91 | 3952 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CMPS_M_M {
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
ld t1, seg, [1, t0, rsi]
ld t2, es, [1, t0, rdi]
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
};
#
# Versions which have the rep prefix. These could benefit from some loop
# unrolling.
#
def macroop CMPS_E_M_M {
and t0, rcx, rcx, flags=(EZF,), dataSize=asz
br label("end"), flags=(CEZF,)
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
topOfLoop:
ld t1, seg, [1, t0, rsi]
ld t2, es, [1, t0, rdi]
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
br label("topOfLoop"), flags=(CSTRZnEZF,)
end:
fault "NoFault"
};
def macroop CMPS_N_M_M {
and t0, rcx, rcx, flags=(EZF,), dataSize=asz
br label("end"), flags=(CEZF,)
# Find the constant we need to either add or subtract from rdi
ruflag t0, 10
movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
subi t4, t0, dsz, dataSize=asz
mov t3, t3, t4, flags=(nCEZF,), dataSize=asz
topOfLoop:
ld t1, seg, [1, t0, rsi]
ld t2, es, [1, t0, rdi]
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
add rdi, rdi, t3, dataSize=asz
add rsi, rsi, t3, dataSize=asz
br label("topOfLoop"), flags=(CSTRnZnEZF,)
end:
fault "NoFault"
};
'''
| bsd-3-clause |
puremourning/ycmd-1 | ycmd/tests/java/signature_help_test.py | 3 | 5767 | # Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
empty,
equal_to,
has_entries )
import requests
from ycmd.utils import ReadFile
from ycmd.tests.java import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( CombineRequest,
ParameterMatcher,
SignatureMatcher,
SignatureAvailableMatcher,
WaitUntilCompleterServerReady,
WithRetry )
def ProjectPath( *args ):
  """Return the path of a file under the simple_extra_conf_project source
  tree used by these tests."""
  project_src = ( 'extra_confs', 'simple_extra_conf_project', 'src' )
  return PathToTestFile( *( project_src + args ) )
def RunTest( app, test ):
  """Run one signature-help request/response cycle and verify the result.

  |test| is a dict with:
    'request': kwargs for BuildRequest
    'expect':  { 'response': expected HTTP status code,
                 'data': matcher applied to the JSON response body }
  """
  contents = ReadFile( test[ 'request' ][ 'filepath' ] )

  # Tell the server the file is ready so it parses it before we ask for
  # signature help.
  event_request = CombineRequest( test[ 'request' ], {
    'event_name': 'FileReadyToParse',
    'contents': contents,
  } )
  app.post_json( '/event_notification', event_request, expect_errors = True )

  # Errors are tolerated here and the status code checked explicitly below,
  # so tests can also assert on error responses.
  sig_request = CombineRequest( test[ 'request' ], { 'contents': contents } )
  response = app.post_json( '/signature_help', sig_request,
                            expect_errors = True )

  expect = test[ 'expect' ]
  assert_that( response.status_code, equal_to( expect[ 'response' ] ) )

  print( response.json )
  assert_that( response.json, expect[ 'data' ] )
@WithRetry
@SharedYcmd
def SignatureHelp_MethodTrigger_test( app ):
  # A single overload is expected, with its only parameter highlighted.
  expected_signatures = contains_exactly(
    SignatureMatcher( 'unique(double d) : void',
                      [ ParameterMatcher( 7, 15 ) ] )
  )
  RunTest( app, {
    'description': 'Trigger after (',
    'request': {
      'filetype' : 'java',
      'filepath' : ProjectPath( 'SignatureHelp.java' ),
      'line_num' : 9,
      'column_num': 17,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'errors': empty(),
        'signature_help': has_entries( {
          'activeSignature': 0,
          'activeParameter': 0,
          'signatures': expected_signatures,
        } ),
      } )
    }
  } )
@WithRetry
@SharedYcmd
def SignatureHelp_ArgTrigger_test( app ):
  # Both overloads of test() are reported; the second one is active with
  # its second parameter highlighted.
  expected_signatures = contains_exactly(
    SignatureMatcher( 'test(int i, String s) : void',
                      [ ParameterMatcher( 5, 10 ),
                        ParameterMatcher( 12, 20 ) ] ),
    SignatureMatcher( 'test(String s, String s1) : void',
                      [ ParameterMatcher( 5, 13 ),
                        ParameterMatcher( 15, 24 ) ] )
  )
  RunTest( app, {
    'description': 'Trigger after ,',
    'request': {
      'filetype' : 'java',
      'filepath' : ProjectPath( 'SignatureHelp.java' ),
      'line_num' : 5,
      'column_num': 23,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'errors': empty(),
        'signature_help': has_entries( {
          'activeSignature': 1,
          'activeParameter': 1,
          'signatures': expected_signatures,
        } ),
      } )
    }
  } )
@WithRetry
@SharedYcmd
def SignatureHelp_Constructor_test( app ):
  # Constructor invocations also produce signature help.
  expected_signatures = contains_exactly(
    SignatureMatcher( 'SignatureHelp(String signature)',
                      [ ParameterMatcher( 14, 30 ) ] )
  )
  RunTest( app, {
    'description': 'Constructor',
    'request': {
      'filetype' : 'java',
      'filepath' : ProjectPath( 'SignatureHelp.java' ),
      'line_num' : 17,
      'column_num': 41,
    },
    'expect': {
      'response': requests.codes.ok,
      'data': has_entries( {
        'errors': empty(),
        'signature_help': has_entries( {
          'activeSignature': 0,
          'activeParameter': 0,
          'signatures': expected_signatures,
        } ),
      } )
    }
  } )
@SharedYcmd
def Signature_Help_Available_test( app ):
  """The server advertises signature-help support for java."""
  # Parse a java file first so the java completer server gets started.
  request = { 'filepath' : ProjectPath( 'SignatureHelp.java' ) }
  event_request = CombineRequest( request, { 'event_name': 'FileReadyToParse',
                                             'filetype': 'java' } )
  app.post_json( '/event_notification', event_request, expect_errors = True )
  WaitUntilCompleterServerReady( app, 'java' )

  response = app.get( '/signature_help_available',
                      { 'subserver': 'java' } ).json
  assert_that( response, SignatureAvailableMatcher( 'YES' ) )
def Dummy_test():
  # A deliberately trivial, always-passing test.
  # Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
  assert True
| gpl-3.0 |
imosquera/spinnaker | google/stackdriver_monitoring/spectator_client.py | 2 | 4027 | # pylint: disable=missing-docstring
import json
import logging
import urllib2
class SpectatorClient(object):
  """Helper class for pulling data from Spectator servers."""

  # Default Spectator metrics endpoint port for each Spinnaker microservice.
  SERVICE_PORT_MAP = {
      'clouddriver': 7002,
      'echo': 8089,
      'fiat': 7003,
      'front50': 8080,
      'gate': 8084,
      'igor': 8088,
      'orca': 8083,
      'rosco': 8087,
  }

  def __init__(self, options):
    """Constructor.

    Args:
      options: Object with 'host' and 'prototype_path' attributes. When
        prototype_path is set, the JSON document it points to is used to
        filter all collected metrics (see filter_metrics).
    """
    self.__host = options.host
    self.__prototype = None
    self.__options = options
    self.__default_scan_params = {}
    if options.prototype_path:
      with open(options.prototype_path) as fd:
        self.__prototype = json.JSONDecoder().decode(fd.read())

  def collect_metrics(self, host, port, params=None):
    """Return JSON metrics from the given server."""
    sep = '?'
    query = ''
    query_params = dict(self.__default_scan_params)
    query_params.update(params or {})
    for key, value in query_params.items():
      query += sep + key + "=" + urllib2.quote(value)
      sep = "&"
    url = 'http://{host}:{port}/spectator/metrics{query}'.format(
        host=host, port=port, query=query)
    response = urllib2.urlopen(url)
    all_metrics = json.JSONDecoder(encoding='utf-8').decode(response.read())
    # Apply the optional prototype filter before returning.
    return (self.filter_metrics(all_metrics, self.__prototype)
            if self.__prototype else all_metrics)

  @staticmethod
  def _have_tags(expect_tags, got_tags):
    """Return True if got_tags satisfies at least one tag set in expect_tags.

    Each element of expect_tags is a collection of tag bindings that must
    all be present in got_tags. An empty expect_tags list matches anything.

    (Hoisted out of the filter_metrics loop: it was previously redefined
    on every iteration even though it is loop-invariant.)
    """
    for wanted_set in expect_tags:
      if all(want in got_tags for want in wanted_set):
        return True
    return expect_tags == []

  def filter_metrics(self, instance, prototype):
    """Filter metrics entries in |instance| to those that match |prototype|.

    Only the names and tags are checked. The instance must contain a
    tag binding found in the prototype, but may also contain additional tags.
    The prototype is the same format as the json of the metrics returned.
    """
    filtered = {}

    metrics = instance.get('metrics') or {}
    for key, expect in prototype.get('metrics', {}).items():
      got = metrics.get(key)
      if not got:
        continue
      expect_values = expect.get('values')
      if not expect_values:
        # No tag constraints on this metric -- keep it wholesale.
        filtered[key] = got
        continue

      expect_tags = [elem.get('tags') for elem in expect_values]

      # Keep only the measured values whose tags satisfy the prototype.
      keep_values = [got_value for got_value in got.get('values', [])
                     if self._have_tags(expect_tags, got_value.get('tags'))]
      if not keep_values:
        continue

      # Shallow-copy the metric entry so the caller's |instance| is not
      # mutated when its 'values' list is replaced.
      keep = dict(got)
      keep['values'] = keep_values
      filtered[key] = keep

    result = dict(instance)
    result['metrics'] = filtered
    return result

  def scan_by_service(self, service_list, params=None):
    """Collect metrics from each named service.

    The special list ['all'] expands to every service in SERVICE_PORT_MAP.
    Services whose endpoint cannot be reached are logged and omitted from
    the result rather than failing the whole scan.
    """
    result = {}
    if service_list == ['all']:
      service_list = self.SERVICE_PORT_MAP.keys()

    for service in service_list:
      port = self.SERVICE_PORT_MAP[service]
      try:
        result[service] = self.collect_metrics(self.__host, port, params=params)
      except IOError as ioex:
        logging.getLogger(__name__).error('%s failed: %s', service, ioex)
    return result

  def scan_by_type(self, service_list, params=None):
    """Collect metrics, regrouped by metric name rather than by service."""
    service_map = self.scan_by_service(service_list, params=params)
    return self.service_map_to_type_map(service_map)

  @staticmethod
  def ingest_metrics(service, service_response, type_map):
    """Add JSON metrics |response| from |service| name and add them into |type_map|"""
    for key, value in service_response['metrics'].items():
      if key in type_map:
        type_map[key][service] = value
      else:
        type_map[key] = {service: value}

  @staticmethod
  def service_map_to_type_map(service_map):
    """Invert a {service: metrics} map into {metric_name: {service: metric}}."""
    type_map = {}
    for service, got in service_map.items():
      SpectatorClient.ingest_metrics(service, got, type_map)
    return type_map
| apache-2.0 |
kmoocdev2/edx-platform | lms/djangoapps/commerce/api/v0/tests/test_views.py | 8 | 12479 | """ Commerce API v0 view tests. """
import itertools
import json
from datetime import datetime, timedelta
from uuid import uuid4
import ddt
import mock
import pytz
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.test import TestCase
from django.test.utils import override_settings
from edx_rest_api_client import exceptions
from nose.plugins.attrib import attr
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from enrollment.api import get_enrollment
from openedx.core.djangoapps.embargo.test_utils import restrict_course
from openedx.core.lib.django_test_client_utils import get_absolute_url
from student.models import CourseEnrollment
from student.tests.tests import EnrollmentEventTestMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ....constants import Messages
from ....tests.mocks import mock_basket_order
from ....tests.test_views import UserMixin
from ..views import SAILTHRU_CAMPAIGN_COOKIE
# Name and payload of a fake UTM-tracking cookie; _post_to_view attaches it
# to requests when include_utm_cookie=True.
UTM_COOKIE_NAME = 'edx.test.utm'
UTM_COOKIE_CONTENTS = {
    'utm_source': 'test-source'
}
@attr(shard=1)
@ddt.ddt
class BasketsViewTests(EnrollmentEventTestMixin, UserMixin, ModuleStoreTestCase):
    """
    Tests for the commerce Baskets view.

    NOTE(review): this module is Python 2 only (`unicode`, `str.decode`) --
    confirm the target interpreter before porting.
    """

    def _post_to_view(self, course_id=None, marketing_email_opt_in=False, include_utm_cookie=False):
        """
        POST to the view being tested.

        Arguments
            course_id (str) -- ID of course for which a seat should be ordered.
            marketing_email_opt_in (bool) -- if True, sets the email_opt_in
                payload flag and a Sailthru campaign cookie.
            include_utm_cookie (bool) -- if True, attaches the fake UTM cookie.

        :return: Response
        """
        payload = {
            "course_id": unicode(course_id or self.course.id)
        }
        if marketing_email_opt_in:
            payload["email_opt_in"] = True
            self.client.cookies[SAILTHRU_CAMPAIGN_COOKIE] = 'sailthru id'
        if include_utm_cookie:
            self.client.cookies[UTM_COOKIE_NAME] = json.dumps(UTM_COOKIE_CONTENTS)
        return self.client.post(self.url, payload)

    def assertResponseMessage(self, response, expected_msg):
        """ Asserts the detail field in the response's JSON body equals the expected message. """
        actual = json.loads(response.content)['detail']
        self.assertEqual(actual, expected_msg)

    def setUp(self):
        # Create a course with honor/verified/audit modes, each with a SKU,
        # and log the test user in.
        super(BasketsViewTests, self).setUp()
        self.url = reverse('commerce_api:v0:baskets:create')
        self._login()

        self.course = CourseFactory.create()

        # TODO Verify this is the best method to create CourseMode objects.
        # TODO Find/create constants for the modes.
        for mode in [CourseMode.HONOR, CourseMode.VERIFIED, CourseMode.AUDIT]:
            sku_string = uuid4().hex.decode('ascii')
            CourseModeFactory.create(
                course_id=self.course.id,
                mode_slug=mode,
                mode_display_name=mode,
                sku=sku_string,
                bulk_sku='BULK-{}'.format(sku_string)
            )

        # Ignore events fired from UserFactory creation
        self.reset_tracker()

    @mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
    def test_embargo_restriction(self):
        """
        The view should return HTTP 403 status if the course is embargoed.
        """
        with restrict_course(self.course.id) as redirect_url:
            response = self._post_to_view()

        self.assertEqual(403, response.status_code)
        body = json.loads(response.content)
        self.assertEqual(get_absolute_url(redirect_url), body['user_message_url'])

    def test_login_required(self):
        """
        The view should return HTTP 403 status if the user is not logged in.
        """
        self.client.logout()
        self.assertEqual(403, self._post_to_view().status_code)

    @ddt.data('delete', 'get', 'put')
    def test_post_required(self, method):
        """
        Verify that the view only responds to POST operations.
        """
        response = getattr(self.client, method)(self.url)
        self.assertEqual(405, response.status_code)

    def test_invalid_course(self):
        """
        If the course does not exist, the view should return HTTP 406.
        """
        # TODO Test inactive courses, and those not open for enrollment.
        self.assertEqual(406, self._post_to_view('aaa/bbb/ccc').status_code)

    def test_invalid_request_data(self):
        """
        If invalid data is supplied with the request, the view should return HTTP 406.
        """
        self.assertEqual(406, self.client.post(self.url, {}).status_code)
        self.assertEqual(406, self.client.post(self.url, {'not_course_id': ''}).status_code)

    @ddt.data(True, False)
    def test_course_for_active_and_inactive_user(self, user_is_active):
        """
        Test course enrollment for active and inactive user.
        """
        # Set user's active flag
        self.user.is_active = user_is_active
        self.user.save()  # pylint: disable=no-member

        response = self._post_to_view()

        # Validate the response content
        self.assertEqual(response.status_code, 200)
        msg = Messages.ENROLL_DIRECTLY.format(
            course_id=self.course.id,
            username=self.user.username
        )
        self.assertResponseMessage(response, msg)

    def _test_course_without_sku(self, enrollment_mode=CourseMode.DEFAULT_MODE_SLUG):
        """
        Validates the view when course has no CourseModes with SKUs.
        """
        response = self._post_to_view()

        # Validate the response content
        self.assertEqual(response.status_code, 200)
        msg = Messages.NO_SKU_ENROLLED.format(
            enrollment_mode=enrollment_mode,
            course_id=self.course.id,
            username=self.user.username
        )
        self.assertResponseMessage(response, msg)

    def test_course_without_sku_default(self):
        """
        If the course does NOT have a SKU, the user should be enrolled in the course (under the default mode) and
        redirected to the user dashboard.
        """
        # Remove SKU from all course modes
        for course_mode in CourseMode.objects.filter(course_id=self.course.id):
            course_mode.sku = None
            course_mode.save()

        self._test_course_without_sku()

    def test_course_without_sku_honor(self):
        """
        If the course does not have an SKU and has an honor mode, the user
        should be enrolled as honor. This ensures backwards
        compatibility with courses existing before the removal of
        honor certificates.
        """
        # Remove all existing course modes
        CourseMode.objects.filter(course_id=self.course.id).delete()

        # Ensure that honor mode exists
        CourseMode(
            mode_slug=CourseMode.HONOR,
            mode_display_name="Honor Cert",
            course_id=self.course.id
        ).save()

        # We should be enrolled in honor mode
        self._test_course_without_sku(enrollment_mode=CourseMode.HONOR)

    def assertProfessionalModeBypassed(self):
        """ Verifies that the view returns HTTP 406 when a course with no honor or audit mode is encountered. """
        CourseMode.objects.filter(course_id=self.course.id).delete()
        mode = CourseMode.NO_ID_PROFESSIONAL_MODE
        sku_string = uuid4().hex.decode('ascii')
        CourseModeFactory.create(course_id=self.course.id, mode_slug=mode, mode_display_name=mode,
                                 sku=sku_string, bulk_sku='BULK-{}'.format(sku_string))
        response = self._post_to_view()

        # The view should return an error status code
        self.assertEqual(response.status_code, 406)
        msg = Messages.NO_DEFAULT_ENROLLMENT_MODE.format(course_id=self.course.id)
        self.assertResponseMessage(response, msg)

    def test_course_with_professional_mode_only(self):
        """ Verifies that the view behaves appropriately when the course only has a professional mode. """
        self.assertProfessionalModeBypassed()

    @override_settings(ECOMMERCE_API_URL=None)
    def test_professional_mode_only_and_ecommerce_service_not_configured(self):
        """
        Verifies that the view behaves appropriately when the course only has a professional mode and
        the E-Commerce Service is not configured.
        """
        self.assertProfessionalModeBypassed()

    def test_empty_sku(self):
        """ If the CourseMode has an empty string for a SKU, the API should not be used. """
        # Set SKU to empty string for all modes.
        for course_mode in CourseMode.objects.filter(course_id=self.course.id):
            course_mode.sku = ''
            course_mode.save()

        self._test_course_without_sku()

    def test_existing_active_enrollment(self):
        """ The view should respond with HTTP 409 if the user has an existing active enrollment for the course. """
        # Enroll user in the course
        CourseEnrollment.enroll(self.user, self.course.id)
        self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id))

        response = self._post_to_view()
        self.assertEqual(response.status_code, 409)
        msg = Messages.ENROLLMENT_EXISTS.format(username=self.user.username, course_id=self.course.id)
        self.assertResponseMessage(response, msg)

    def test_existing_inactive_enrollment(self):
        """
        If the user has an inactive enrollment for the course, the view should behave as if the
        user has no enrollment.
        """
        # Create an inactive enrollment
        CourseEnrollment.enroll(self.user, self.course.id)
        CourseEnrollment.unenroll(self.user, self.course.id, True)
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id))
        self.assertIsNotNone(get_enrollment(self.user.username, unicode(self.course.id)))
        # NOTE(review): this test only verifies the fixture setup -- it never
        # POSTs to the view, so the behavior described in the docstring is
        # not actually exercised.

    @mock.patch('lms.djangoapps.commerce.api.v0.views.update_email_opt_in')
    @ddt.data(*itertools.product((False, True), (False, True), (False, True)))
    @ddt.unpack
    def test_marketing_email_opt_in(self, is_opt_in, has_sku, is_exception, mock_update):
        """
        Ensures the email opt-in flag is handled, if present, and that problems handling the
        flag don't cause the rest of the enrollment transaction to fail.
        """
        if not has_sku:
            for course_mode in CourseMode.objects.filter(course_id=self.course.id):
                course_mode.sku = None
                course_mode.save()

        if is_exception:
            mock_update.side_effect = Exception("boink")

        response = self._post_to_view(marketing_email_opt_in=is_opt_in)
        self.assertEqual(mock_update.called, is_opt_in)
        self.assertEqual(response.status_code, 200)

    def test_closed_course(self):
        """
        Verifies that the view returns HTTP 406 when a course is closed.
        """
        self.course.enrollment_end = datetime.now(pytz.UTC) - timedelta(days=1)
        modulestore().update_item(self.course, self.user.id)  # pylint:disable=no-member
        self.assertEqual(self._post_to_view().status_code, 406)
@attr(shard=1)
class BasketOrderViewTests(UserMixin, TestCase):
    """ Tests for the basket order view. """
    view_name = 'commerce_api:v0:baskets:retrieve_order'
    MOCK_ORDER = {'number': 1}
    path = reverse_lazy(view_name, kwargs={'basket_id': 1})

    def setUp(self):
        super(BasketOrderViewTests, self).setUp()
        self._login()

    def test_order_found(self):
        """ If the order is located, the view should pass the data from the API. """
        with mock_basket_order(basket_id=1, response=self.MOCK_ORDER):
            response = self.client.get(self.path)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), self.MOCK_ORDER)

    def test_order_not_found(self):
        """ If the order is not found, the view should return a 404. """
        with mock_basket_order(basket_id=1, status=404):
            response = self.client.get(self.path)

        self.assertEqual(response.status_code, 404)

    def test_login_required(self):
        """ The view should return 403 if the user is not logged in. """
        self.client.logout()
        self.assertEqual(self.client.get(self.path).status_code, 403)
| agpl-3.0 |
meredith-digops/ansible | test/compile/compile.py | 126 | 2655 | #!/usr/bin/env python
"""Python syntax checker with lint friendly output."""
import os
import parser
import re
import sys
def main():
    """Entry point: parse options, expand/filter paths, syntax-check them."""
    paths, verbose, skip_patterns = parse_options()
    check(filter_paths(paths, skip_patterns), verbose)
def parse_options():
    """Parse sys.argv[1:] into (paths, verbose, skip_patterns).

    Supported options:
      -v           enable verbose output
      -x PATTERN   skip paths matching the regex PATTERN (repeatable)

    Raises:
        Exception: on an unknown option or an option missing its argument.
    """
    paths = []
    skip_patterns = []
    verbose = False
    pending_option = None
    valid_options = ('-x', '-v')

    for arg in sys.argv[1:]:
        if pending_option == '-x':
            # Previous argument was -x: this one is its pattern.
            skip_patterns.append(re.compile(arg))
            pending_option = None
        elif arg.startswith('-'):
            if arg not in valid_options:
                raise Exception('Unknown Option: %s' % arg)
            if arg == '-v':
                verbose = True
            else:
                pending_option = arg
        else:
            paths.append(arg)

    if pending_option:
        raise Exception('Incomplete Option: %s' % pending_option)

    return paths, verbose, skip_patterns
def filter_paths(paths, skip_patterns):
    """Expand directories to the .py files they contain, drop paths matching
    any pattern in skip_patterns, and return the result sorted.

    An empty paths list defaults to the current directory.
    """
    if not paths:
        paths = ['.']

    expanded = []
    for candidate in paths:
        if os.path.isdir(candidate):
            for root, directories, files in os.walk(candidate):
                # Prune hidden directories in place so os.walk skips them.
                directories[:] = [d for d in directories
                                  if not d.startswith('.')]
                expanded.extend(os.path.join(root, f)
                                for f in files if f.endswith('.py'))
        else:
            expanded.append(candidate)

    return [path for path in sorted(expanded)
            if not any(pattern.search(path) for pattern in skip_patterns)]
def check(paths, verbose):
    """Syntax-check every file in ``paths``.

    Errors are reported on stdout in lint-friendly
    ``path:line:col: SyntaxError: message`` form; with ``verbose`` each path
    is echoed to stderr first. Exits the process with status 1 if any file
    failed to parse, 0 otherwise.
    """
    status = 0
    for path in paths:
        if verbose:
            sys.stderr.write('%s\n' % path)
            sys.stderr.flush()
        source_fd = open(path, 'r')
        try:
            source = source_fd.read()
        finally:
            source_fd.close()
        try:
            # NOTE(review): the `parser` module was deprecated in Python 3.9
            # and removed in 3.10; `compile(source, path, 'exec')` would be
            # the modern equivalent -- confirm the target interpreter first.
            parser.suite(source)
        except SyntaxError:
            # Only `ex` is used; ex_type and ex_traceback are unpacked but unused.
            ex_type, ex, ex_traceback = sys.exc_info()
            status = 1
            # Report only the first line of the offending source text.
            message = ex.text.splitlines()[0].strip()
            sys.stdout.write("%s:%d:%d: SyntaxError: %s\n" % (path, ex.lineno, ex.offset, message))
            sys.stdout.flush()
    sys.exit(status)
# Script entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/python/estimator/canned/parsing_utils.py | 52 | 13199 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parsing related helper function to be used in `input_fn`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import parsing_ops
def classifier_parse_example_spec(feature_columns,
                                  label_key,
                                  label_dtype=dtypes.int64,
                                  label_default=None,
                                  weight_column=None):
  """Generates parsing spec for tf.parse_example to be used with classifiers.

  Features, label and (optionally) example weight are all parsed from the
  same tf.Example instance, so their parsing specs must be combined into a
  single dict. This utility builds the feature spec from `feature_columns`
  via `tf.feature_column.make_parse_example_spec` and adds entries for the
  label and the weight, validating that their keys do not collide with any
  feature key.

  Args:
    feature_columns: An iterable containing all feature columns. All items
      should be instances of classes derived from `_FeatureColumn`.
    label_key: A string identifying the label. It means tf.Example stores
      labels with this key.
    label_dtype: A `tf.dtype` identifying the type of labels. By default it
      is `tf.int64`. If a `label_vocabulary` is used, set this to
      `tf.string`. `tf.float32` labels are only supported for binary
      classification.
    label_default: Value used as the label if `label_key` does not exist in
      a given tf.Example. Its type should be compatible with `label_dtype`.
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining the feature column
      representing example weights. A string is interpreted as the key of a
      numeric weight feature.

  Returns:
    A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
    value.

  Raises:
    ValueError: If the label or weight key is also used by a feature column,
      if `weight_column` is neither a string nor a `_NumericColumn`, if any
      item of `feature_columns` is not a `_FeatureColumn`, or if `label_key`
      is None.
  """
  parsing_spec = fc.make_parse_example_spec(feature_columns)

  # The label must not collide with any feature key.
  if label_key in parsing_spec:
    raise ValueError('label should not be used as feature. '
                     'label_key: {}, features: {}'.format(
                         label_key, parsing_spec.keys()))
  parsing_spec[label_key] = parsing_ops.FixedLenFeature(
      (1,), label_dtype, label_default)

  if weight_column is not None:
    # Normalize a string key into a numeric column, then validate.
    if isinstance(weight_column, six.string_types):
      weight_column = fc.numeric_column(weight_column)
    if not isinstance(weight_column, fc._NumericColumn):  # pylint: disable=protected-access
      raise ValueError('weight_column should be an instance of '
                       'tf.feature_column.numeric_column. '
                       'Given type: {} value: {}'.format(
                           type(weight_column), weight_column))
    if weight_column.key in parsing_spec:
      raise ValueError('weight_column should not be used as feature. '
                       'weight_column: {}, features: {}'.format(
                           weight_column.key, parsing_spec.keys()))
    parsing_spec.update(weight_column._parse_example_spec)  # pylint: disable=protected-access

  return parsing_spec
def regressor_parse_example_spec(feature_columns,
                                 label_key,
                                 label_dtype=dtypes.float32,
                                 label_default=None,
                                 label_dimension=1,
                                 weight_column=None):
  """Generates parsing spec for tf.parse_example to be used with regressors.

  If users keep data in tf.Example format, they need to call tf.parse_example
  with a proper feature spec. There are two main things that this utility helps:

  * Users need to combine parsing spec of features with labels and weights
    (if any) since they are all parsed from same tf.Example instance. This
    utility combines these specs.
  * It is difficult to map expected label by a regressor such as `DNNRegressor`
    to corresponding tf.parse_example spec. This utility encodes it by getting
    related information from users (key, dtype).

  Example output of parsing spec:

  ```python
  # Define features and transformations
  feature_b = tf.feature_column.numeric_column(...)
  feature_c_bucketized = tf.feature_column.bucketized_column(
      tf.feature_column.numeric_column("feature_c"), ...)
  feature_a_x_feature_c = tf.feature_column.crossed_column(
      columns=["feature_a", feature_c_bucketized], ...)

  feature_columns = [feature_b, feature_c_bucketized, feature_a_x_feature_c]
  parsing_spec = tf.estimator.regressor_parse_example_spec(
      feature_columns, label_key='my-label')

  # For the above example, regressor_parse_example_spec would return the dict:
  assert parsing_spec == {
      "feature_a": parsing_ops.VarLenFeature(tf.string),
      "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
      "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
      "my-label": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
  }
  ```

  Example usage with a regressor:

  ```python
  feature_columns = # define features via tf.feature_column
  estimator = DNNRegressor(
      hidden_units=[256, 64, 16],
      feature_columns=feature_columns,
      weight_column='example-weight',
      label_dimension=3)
  # This label configuration tells the regressor the following:
  # * weights are retrieved with key 'example-weight'
  # * label is a 3 dimension tensor with float32 dtype.

  # Input builders
  def input_fn_train():  # Returns a tuple of features and labels.
    features = tf.contrib.learn.read_keyed_batch_features(
        file_pattern=train_files,
        batch_size=batch_size,
        # creates parsing configuration for tf.parse_example
        features=tf.estimator.regressor_parse_example_spec(
            feature_columns,
            label_key='my-label',
            label_dimension=3,
            weight_column='example-weight'),
        reader=tf.RecordIOReader)
    labels = features.pop('my-label')
    return features, labels

  estimator.train(input_fn=input_fn_train)
  ```

  Args:
    feature_columns: An iterable containing all feature columns. All items
      should be instances of classes derived from `_FeatureColumn`.
    label_key: A string identifying the label. It means tf.Example stores labels
      with this key.
    label_dtype: A `tf.dtype` identifies the type of labels. By default it is
      `tf.float32`.
    label_default: used as label if label_key does not exist in given
      tf.Example. By default default_value is none, which means
      `tf.parse_example` will error out if there is any missing label.
    label_dimension: Number of regression targets per example. This is the
      size of the last dimension of the labels and logits `Tensor` objects
      (typically, these have shape `[batch_size, label_dimension]`).
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example. If it is a string, it is
      used as a key to fetch weight tensor from the `features`. If it is a
      `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
      then weight_column.normalizer_fn is applied on it to get weight tensor.

  Returns:
    A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
    value.

  Raises:
    ValueError: If label is used in `feature_columns`.
    ValueError: If weight_column is used in `feature_columns`.
    ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
      instance.
    ValueError: If `weight_column` is not a `_NumericColumn` instance.
    ValueError: if label_key is None.
  """
  # Start with the spec derived from the feature columns alone.
  parsing_spec = fc.make_parse_example_spec(feature_columns)
  if label_key in parsing_spec:
    raise ValueError('label should not be used as feature. '
                     'label_key: {}, features: {}'.format(
                         label_key, parsing_spec.keys()))
  # Labels are parsed as a dense vector of `label_dimension` values.
  parsing_spec[label_key] = parsing_ops.FixedLenFeature(
      (label_dimension,), label_dtype, label_default)
  if weight_column is None:
    return parsing_spec

  # A plain string is shorthand for a numeric weight column of that key.
  if isinstance(weight_column, six.string_types):
    weight_column = fc.numeric_column(weight_column)
  if not isinstance(weight_column, fc._NumericColumn):  # pylint: disable=protected-access
    raise ValueError('weight_column should be an instance of '
                     'tf.feature_column.numeric_column. '
                     'Given type: {} value: {}'.format(
                         type(weight_column), weight_column))
  if weight_column.key in parsing_spec:
    raise ValueError('weight_column should not be used as feature. '
                     'weight_column: {}, features: {}'.format(
                         weight_column.key, parsing_spec.keys()))
  parsing_spec.update(weight_column._parse_example_spec)  # pylint: disable=protected-access
  return parsing_spec
| mit |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 2/instances/10_0_wikiflow_1sh_1s_annot/longestsession_1/LongestSession_1.py | 11 | 2031 | #!/usr/bin/env python
# Python 2 workflow step: for each time window, find the longest editing
# session and record its owner, then persist the summaries back to MongoDB.
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_Longest_1
import pprint
import datetime

# connector and config
client = DataStoreClient("mongodb", ConfigDB_Longest_1)
config = ConfigDB_Longest_1

# according to config
dataList = client.getData() # return an array of docs (like a csv reader)

output = []

print "\n"

# """Formats a string containing the user, count, and session."""
if(dataList):
    print "Total of windows:", len(dataList)
    # One entry per time window; config.COLUMN names the window key field.
    for i in dataList:
        column = i[config.COLUMN]
        print "\nNew column: ", column
        new_doc = {}
        # NOTE(review): i['data'] appears to be a cursor whose .next() yields
        # session docs and returns None when exhausted (Python 2 iterator
        # API) — confirm against DataStoreClient.
        while True:
            doc = i['data'].next()
            if doc is None:
                break;
            # Keep the session with the greatest duration seen so far.
            if "longest session" not in new_doc or new_doc["longest session"] < doc["duration"]:
                new_doc["longest session"] = doc["duration"]
                new_doc["top user"] = doc["contributor_username"]
                new_doc["edition counts"] = doc["edition_counts"]
                new_doc["window"] = column
                # Human-readable window start (local time of the timestamp).
                new_doc["h_window"] = datetime.datetime.fromtimestamp(column).strftime('%Y-%m-%d %H:%M:%S')
                new_doc["start time"] = doc["start time"]
                new_doc["end time"] = doc["end time"]
        # Only emit windows that contained at least one session.
        if "longest session" in new_doc:
            print "New doc: ", new_doc, '\n'
            output.append(new_doc)

# pprint.pprint(output)

clientOutput = DataStoreClient("mongodb", ConfigDB_Longest_1)
clientOutput.saveData(output)

# 'user1 : [0.0, 3600.002) : 3 : [0.0, 2592000.0)',
# 'user2 : [0.0, 3603.602) : 4 : [0.0, 2592000.0)',
# 'user2 : [7200.0, 10800.0) : 1 : [0.0, 2592000.0)',
# 'user3 : [3024.0, 6624.0) : 1 : [0.0, 2592000.0)',

# { "end time" : 6624, "edition_counts" : 1, "start time" : 3024 }
# { "end time" : 10804, "edition_counts" : 1, "start time" : 7204 } ** salvei errado no db
# { "end time" : 3600.002, "edition_counts" : 3, "start time" : 0 }
# { "end time" : 3603.602, "edition_counts" : 4, "start time" : 0 }
| gpl-3.0 |
saada/ansible | plugins/inventory/vbox.py | 111 | 3090 | #!/usr/bin/env python
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import Popen,PIPE
try:
import json
except ImportError:
import simplejson as json
VBOX="VBoxManage"
def get_hosts(host=None):
    """Build an Ansible inventory from VirtualBox VMs.

    Runs `VBoxManage showvminfo <host>` for a single host, or
    `VBoxManage list -l vms` for the full inventory, and parses the
    colon-separated output.

    :param host: optional VM name; when given, only that VM's hostvars
                 are returned instead of the grouped inventory.
    :return: a dict — either {group: set(names), '_metadata': {...}} for
             the whole inventory, or the hostvars dict for one host.
    """
    returned = {}
    try:
        if host:
            p = Popen([VBOX, 'showvminfo', host], stdout=PIPE)
        else:
            # '_metadata' carries hostvars so Ansible avoids a --host
            # call per VM.
            returned = { 'all': set(), '_metadata': {} }
            p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
    except:
        # NOTE(review): bare except hides the real failure (e.g. VBoxManage
        # missing from PATH) and exits silently — consider narrowing.
        sys.exit(1)

    hostvars = {}
    prevkey = pref_k = ''

    for line in p.stdout.readlines():
        try:
            k,v = line.split(':',1)
        except:
            # Lines without a colon are not key/value pairs; skip them.
            continue

        if k == '':
            continue

        v = v.strip()
        # A 'Name' line starts a new VM section.
        if k.startswith('Name'):
            if v not in hostvars:
                curname = v
                hostvars[curname] = {}
                try: # try to get network info
                    x = Popen([VBOX, 'guestproperty', 'get', curname,"/VirtualBox/GuestInfo/Net/0/V4/IP"],stdout=PIPE)
                    ipinfo = x.stdout.read()
                    if 'Value' in ipinfo:
                        a,ip = ipinfo.split(':',1)
                        hostvars[curname]['ansible_ssh_host'] = ip.strip()
                except:
                    # Best effort: a VM without guest additions simply has
                    # no ansible_ssh_host.
                    pass
            continue

        if not host:
            # 'Groups' lines map the VM into inventory groups ('/'-separated).
            if k == 'Groups':
                for group in v.split('/'):
                    if group:
                        if group not in returned:
                            returned[group] = set()
                        returned[group].add(curname)
                returned['all'].add(curname)
                continue

        # Everything else becomes a vbox_* hostvar; indented keys are
        # nested under the previous top-level key.
        pref_k = 'vbox_' + k.strip().replace(' ','_')
        if k.startswith(' '):
            if prevkey not in hostvars[curname]:
                hostvars[curname][prevkey] = {}
            hostvars[curname][prevkey][pref_k]= v
        else:
            if v != '':
                hostvars[curname][pref_k] = v
        prevkey = pref_k

    if not host:
        returned['_metadata']['hostvars'] = hostvars
    else:
        returned = hostvars[host]
    return returned
if __name__ == '__main__':
    # Ansible dynamic-inventory protocol: with --host NAME print that
    # host's variables, otherwise print the whole grouped inventory.
    inventory = {}
    hostname = None
    if len(sys.argv) > 1:
        if sys.argv[1] == "--host":
            hostname = sys.argv[2]
    if hostname:
        inventory = get_hosts(hostname)
    else:
        inventory = get_hosts()
    import pprint
    # Bug fix: `print pprint.pprint(inventory)` printed the structure and
    # then a stray "None" (pprint.pprint returns None). Call it directly.
    pprint.pprint(inventory)
| gpl-3.0 |
stuartarchibald/numba | numba/core/datamodel/testing.py | 7 | 5346 | from llvmlite import ir
from llvmlite import binding as ll
from numba.core import datamodel
import unittest
class DataModelTester(unittest.TestCase):
    """
    Test the implementation of a DataModel for a frontend type.

    Subclasses set `fe_type` to the Numba frontend type under test; each
    test builds a tiny LLVM function, round-trips an undef value through
    the model's conversion pair, and re-parses the module to check that
    the generated IR is valid.
    """
    # Frontend type under test; must be overridden by concrete subclasses.
    fe_type = NotImplemented

    def setUp(self):
        # Fresh LLVM module per test, and the registered data model for
        # the frontend type.
        self.module = ir.Module()
        self.datamodel = datamodel.default_manager[self.fe_type]

    def test_as_arg(self):
        """
        - Is as_arg() and from_arg() implemented?
        - Are they the inverse of each other?
        """
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_arg")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        # An undef constant of the value type stands in for a real value.
        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        args = self.datamodel.as_argument(builder, undef_value)
        self.assertIsNot(args, NotImplemented, "as_argument returned "
                         "NotImplementedError")

        if isinstance(args, (tuple, list)):
            # Argument may be a (possibly nested) tuple of LLVM values;
            # compare its type structure against get_argument_type().
            def recur_tuplize(args, func=None):
                for arg in args:
                    if isinstance(arg, (tuple, list)):
                        yield tuple(recur_tuplize(arg, func=func))
                    else:
                        if func is None:
                            yield arg
                        else:
                            yield func(arg)

            argtypes = tuple(recur_tuplize(args, func=lambda x: x.type))
            exptypes = tuple(recur_tuplize(
                self.datamodel.get_argument_type()))
            self.assertEqual(exptypes, argtypes)
        else:
            self.assertEqual(args.type,
                             self.datamodel.get_argument_type())

        # Round-trip back to the value representation.
        rev_value = self.datamodel.from_argument(builder, args)
        self.assertEqual(rev_value.type, self.datamodel.get_value_type())

        builder.ret_void()  # end function

        # Ensure valid LLVM generation
        materialized = ll.parse_assembly(str(self.module))
        str(materialized)

    def test_as_return(self):
        """
        - Is as_return() and from_return() implemented?
        - Are they the inverse of each other?
        """
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_return")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        ret = self.datamodel.as_return(builder, undef_value)
        self.assertIsNot(ret, NotImplemented, "as_return returned "
                         "NotImplementedError")
        self.assertEqual(ret.type, self.datamodel.get_return_type())

        # Round-trip back to the value representation.
        rev_value = self.datamodel.from_return(builder, ret)
        self.assertEqual(rev_value.type, self.datamodel.get_value_type())

        builder.ret_void()  # end function

        # Ensure valid LLVM generation
        materialized = ll.parse_assembly(str(self.module))
        str(materialized)
class SupportAsDataMixin(object):
    """Test as_data() and from_data() for models that support the
    data (memory) representation.

    Mixed into DataModelTester; relies on its setUp() providing
    `self.module` and `self.datamodel`.
    """
    # XXX test load_from_data_pointer() as well

    def test_as_data(self):
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_data")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        data = self.datamodel.as_data(builder, undef_value)
        self.assertIsNot(data, NotImplemented,
                         "as_data returned NotImplemented")
        self.assertEqual(data.type, self.datamodel.get_data_type())

        # Round-trip back to the value representation.
        rev_value = self.datamodel.from_data(builder, data)
        self.assertEqual(rev_value.type,
                         self.datamodel.get_value_type())

        builder.ret_void()  # end function

        # Ensure valid LLVM generation
        materialized = ll.parse_assembly(str(self.module))
        str(materialized)
class NotSupportAsDataMixin(object):
    """Ensure as_data() and from_data() raise NotImplementedError.

    Mixed into DataModelTester for models that have no data (memory)
    representation; relies on its setUp() providing `self.module` and
    `self.datamodel`.
    """

    def test_as_data_not_supported(self):
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_data")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        # The calls are made purely for their expected exceptions; the
        # previously-bound `data`/`rev_data` locals were never used.
        with self.assertRaises(NotImplementedError):
            self.datamodel.as_data(builder, undef_value)
        with self.assertRaises(NotImplementedError):
            self.datamodel.from_data(builder, undef_value)
class DataModelTester_SupportAsDataMixin(DataModelTester,
                                         SupportAsDataMixin):
    # Concrete TestCase: base round-trip checks plus as_data()/from_data().
    pass
class DataModelTester_NotSupportAsDataMixin(DataModelTester,
                                            NotSupportAsDataMixin):
    # Concrete TestCase: base checks plus assertion that the data
    # representation is unsupported.
    pass
def test_factory(support_as_data=True):
    """A helper for returning a unittest TestCase for testing.

    Returns the TestCase variant that additionally checks as_data() /
    from_data() when `support_as_data` is true, or the variant that
    asserts those conversions are unimplemented otherwise.
    """
    return (DataModelTester_SupportAsDataMixin
            if support_as_data
            else DataModelTester_NotSupportAsDataMixin)
| bsd-2-clause |
thaim/ansible | lib/ansible/modules/cloud/amazon/ecs_taskdefinition_info.py | 10 | 12376 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_taskdefinition_info
short_description: describe a task definition in ecs
notes:
- for details of the parameters and returns see
U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
- This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage did not change.
description:
- Describes a task definition in ecs.
version_added: "2.5"
author:
- Gustavo Maia (@gurumaia)
- Mark Chance (@Java1Guy)
- Darek Kaczynski (@kaczynskid)
requirements: [ json, botocore, boto3 ]
options:
task_definition:
description:
- The name of the task definition to get details for
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- ecs_taskdefinition_info:
task_definition: test-td
'''
RETURN = '''
container_definitions:
description: Returns a list of complex objects representing the containers
returned: success
type: complex
contains:
name:
description: The name of a container.
returned: always
type: str
image:
description: The image used to start a container.
returned: always
type: str
cpu:
description: The number of cpu units reserved for the container.
returned: always
type: int
memoryReservation:
description: The soft limit (in MiB) of memory to reserve for the container.
returned: when present
type: int
links:
description: Links to other containers.
returned: when present
type: str
portMappings:
description: The list of port mappings for the container.
returned: always
type: complex
contains:
containerPort:
description: The port number on the container.
returned: when present
type: int
hostPort:
description: The port number on the container instance to reserve for your container.
returned: when present
type: int
protocol:
description: The protocol used for the port mapping.
returned: when present
type: str
essential:
description: Whether this is an essential container or not.
returned: always
type: bool
entryPoint:
description: The entry point that is passed to the container.
returned: when present
type: str
command:
description: The command that is passed to the container.
returned: when present
type: str
environment:
description: The environment variables to pass to a container.
returned: always
type: complex
contains:
name:
description: The name of the environment variable.
returned: when present
type: str
value:
description: The value of the environment variable.
returned: when present
type: str
mountPoints:
description: The mount points for data volumes in your container.
returned: always
type: complex
contains:
sourceVolume:
description: The name of the volume to mount.
returned: when present
type: str
containerPath:
description: The path on the container to mount the host volume at.
returned: when present
type: str
readOnly:
description: If this value is true , the container has read-only access to the volume.
If this value is false , then the container can write to the volume.
returned: when present
type: bool
volumesFrom:
description: Data volumes to mount from another container.
returned: always
type: complex
contains:
sourceContainer:
description: The name of another container within the same task definition to mount volumes from.
returned: when present
type: str
readOnly:
description: If this value is true , the container has read-only access to the volume.
If this value is false , then the container can write to the volume.
returned: when present
type: bool
hostname:
description: The hostname to use for your container.
returned: when present
type: str
user:
description: The user name to use inside the container.
returned: when present
type: str
workingDirectory:
description: The working directory in which to run commands inside the container.
returned: when present
type: str
disableNetworking:
description: When this parameter is true, networking is disabled within the container.
returned: when present
type: bool
privileged:
description: When this parameter is true, the container is given elevated
privileges on the host container instance (similar to the root user).
returned: when present
type: bool
readonlyRootFilesystem:
description: When this parameter is true, the container is given read-only access to its root file system.
returned: when present
type: bool
dnsServers:
description: A list of DNS servers that are presented to the container.
returned: when present
type: str
dnsSearchDomains:
description: A list of DNS search domains that are presented to the container.
returned: when present
type: str
extraHosts:
description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
returned: when present
type: complex
contains:
hostname:
description: The hostname to use in the /etc/hosts entry.
returned: when present
type: str
ipAddress:
description: The IP address to use in the /etc/hosts entry.
returned: when present
type: str
dockerSecurityOptions:
description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
returned: when present
type: str
dockerLabels:
description: A key/value map of labels to add to the container.
returned: when present
type: str
ulimits:
description: A list of ulimits to set in the container.
returned: when present
type: complex
contains:
name:
description: The type of the ulimit .
returned: when present
type: str
softLimit:
description: The soft limit for the ulimit type.
returned: when present
type: int
hardLimit:
description: The hard limit for the ulimit type.
returned: when present
type: int
logConfiguration:
description: The log configuration specification for the container.
returned: when present
type: str
options:
description: The configuration options to send to the log driver.
returned: when present
type: str
family:
description: The family of your task definition, used as the definition name
returned: always
type: str
task_definition_arn:
description: ARN of the task definition
returned: always
type: str
task_role_arn:
description: The ARN of the IAM role that containers in this task can assume
returned: when role is set
type: str
network_mode:
description: Network mode for the containers
returned: always
type: str
revision:
description: Revision number that was queried
returned: always
type: int
volumes:
description: The list of volumes in a task
returned: always
type: complex
contains:
name:
description: The name of the volume.
returned: when present
type: str
host:
description: The contents of the host parameter determine whether your data volume
persists on the host container instance and where it is stored.
returned: when present
type: bool
source_path:
description: The path on the host container instance that is presented to the container.
returned: when present
type: str
status:
description: The status of the task definition
returned: always
type: str
requires_attributes:
description: The container instance attributes required by your task
returned: when present
type: complex
contains:
name:
description: The name of the attribute.
returned: when present
type: str
value:
description: The value of the attribute.
returned: when present
type: str
targetType:
description: The type of the target with which to attach the attribute.
returned: when present
type: str
targetId:
description: The ID of the target.
returned: when present
type: str
placement_constraints:
description: A list of placement constraint objects to use for tasks
returned: always
type: complex
contains:
type:
description: The type of constraint.
returned: when present
type: str
expression:
description: A cluster query language expression to apply to the constraint.
returned: when present
type: str
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported AnsibleAWSModule
def main():
    """Entry point: describe one ECS task definition and exit as JSON.

    Reads the required `task_definition` module parameter, queries AWS
    via boto3's describe_task_definition, and returns the (snake_cased)
    task definition; a ClientError yields an empty result instead of a
    module failure.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        task_definition=dict(required=True, type='str')
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # Still invocable under the pre-2.9 name; warn about the rename.
    if module._name == 'ecs_taskdefinition_facts':
        module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", version='2.13')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    ecs = boto3_conn(module, conn_type='client', resource='ecs',
                     region=region, endpoint=ec2_url, **aws_connect_kwargs)

    try:
        ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
    except botocore.exceptions.ClientError:
        # e.g. task definition not found — report an empty result.
        ecs_td = {}

    module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))


if __name__ == '__main__':
    main()
| mit |
sid88in/incubator-airflow | tests/contrib/hooks/test_wasb_hook.py | 2 | 8348 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from collections import namedtuple
from airflow import configuration, AirflowException
from airflow import models
from airflow.contrib.hooks.wasb_hook import WasbHook
from airflow.utils import db
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestWasbHook(unittest.TestCase):
    """Unit tests for WasbHook, with azure's BlockBlobService mocked out."""

    def setUp(self):
        # Register two Airflow connections: one key-based, one SAS-token
        # based, so both auth paths can be exercised.
        configuration.load_test_config()
        db.merge_conn(
            models.Connection(
                conn_id='wasb_test_key', conn_type='wasb',
                login='login', password='key'
            )
        )
        db.merge_conn(
            models.Connection(
                conn_id='wasb_test_sas_token', conn_type='wasb',
                login='login', extra=json.dumps({'sas_token': 'token'})
            )
        )

    def test_key(self):
        # A key-based connection yields a BlockBlobService client.
        from azure.storage.blob import BlockBlobService
        hook = WasbHook(wasb_conn_id='wasb_test_key')
        self.assertEqual(hook.conn_id, 'wasb_test_key')
        self.assertIsInstance(hook.connection, BlockBlobService)

    def test_sas_token(self):
        # A SAS-token connection also yields a BlockBlobService client.
        from azure.storage.blob import BlockBlobService
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        self.assertEqual(hook.conn_id, 'wasb_test_sas_token')
        self.assertIsInstance(hook.connection, BlockBlobService)

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_check_for_blob(self, mock_service):
        # check_for_blob delegates to exists() and forwards extra kwargs.
        mock_instance = mock_service.return_value
        mock_instance.exists.return_value = True
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        self.assertTrue(hook.check_for_blob('container', 'blob', timeout=3))
        mock_instance.exists.assert_called_once_with(
            'container', 'blob', timeout=3
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_check_for_blob_empty(self, mock_service):
        # A missing blob is reported as False, not an error.
        mock_service.return_value.exists.return_value = False
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        self.assertFalse(hook.check_for_blob('container', 'blob'))

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_check_for_prefix(self, mock_service):
        # check_for_prefix lists at most one blob under the prefix.
        mock_instance = mock_service.return_value
        mock_instance.list_blobs.return_value = iter(['blob_1'])
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        self.assertTrue(hook.check_for_prefix('container', 'prefix',
                                              timeout=3))
        mock_instance.list_blobs.assert_called_once_with(
            'container', 'prefix', num_results=1, timeout=3
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_check_for_prefix_empty(self, mock_service):
        # No blobs under the prefix means False.
        mock_instance = mock_service.return_value
        mock_instance.list_blobs.return_value = iter([])
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        self.assertFalse(hook.check_for_prefix('container', 'prefix'))

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_load_file(self, mock_service):
        # load_file maps onto create_blob_from_path.
        mock_instance = mock_service.return_value
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        hook.load_file('path', 'container', 'blob', max_connections=1)
        mock_instance.create_blob_from_path.assert_called_once_with(
            'container', 'blob', 'path', max_connections=1
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_load_string(self, mock_service):
        # load_string maps onto create_blob_from_text.
        mock_instance = mock_service.return_value
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        hook.load_string('big string', 'container', 'blob', max_connections=1)
        mock_instance.create_blob_from_text.assert_called_once_with(
            'container', 'blob', 'big string', max_connections=1
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_get_file(self, mock_service):
        # get_file maps onto get_blob_to_path.
        mock_instance = mock_service.return_value
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        hook.get_file('path', 'container', 'blob', max_connections=1)
        mock_instance.get_blob_to_path.assert_called_once_with(
            'container', 'blob', 'path', max_connections=1
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_read_file(self, mock_service):
        # read_file maps onto get_blob_to_text.
        mock_instance = mock_service.return_value
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        hook.read_file('container', 'blob', max_connections=1)
        mock_instance.get_blob_to_text.assert_called_once_with(
            'container', 'blob', max_connections=1
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_delete_single_blob(self, mock_service):
        # Non-prefix delete removes exactly one blob (snapshots included).
        mock_instance = mock_service.return_value
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        hook.delete_file('container', 'blob', is_prefix=False)
        mock_instance.delete_blob.assert_called_once_with(
            'container', 'blob', delete_snapshots='include'
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_delete_multiple_blobs(self, mock_service):
        # Prefix delete removes every blob listed under the prefix.
        mock_instance = mock_service.return_value
        Blob = namedtuple('Blob', ['name'])
        mock_instance.list_blobs.return_value = iter(
            [Blob('blob_prefix/blob1'), Blob('blob_prefix/blob2')]
        )
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        hook.delete_file('container', 'blob_prefix', is_prefix=True)
        mock_instance.delete_blob.assert_any_call(
            'container', 'blob_prefix/blob1', delete_snapshots='include'
        )
        mock_instance.delete_blob.assert_any_call(
            'container', 'blob_prefix/blob2', delete_snapshots='include'
        )

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_delete_nonexisting_blob_fails(self, mock_service):
        # Deleting a missing blob raises AirflowException when
        # ignore_if_missing is False.
        mock_instance = mock_service.return_value
        mock_instance.exists.return_value = False
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        with self.assertRaises(Exception) as context:
            hook.delete_file(
                'container', 'nonexisting_blob',
                is_prefix=False, ignore_if_missing=False
            )
        self.assertIsInstance(context.exception, AirflowException)

    @mock.patch('airflow.contrib.hooks.wasb_hook.BlockBlobService',
                autospec=True)
    def test_delete_multiple_nonexisting_blobs_fails(self, mock_service):
        # Same for an empty prefix listing.
        mock_instance = mock_service.return_value
        mock_instance.list_blobs.return_value = iter([])
        hook = WasbHook(wasb_conn_id='wasb_test_sas_token')
        with self.assertRaises(Exception) as context:
            hook.delete_file(
                'container', 'nonexisting_blob_prefix',
                is_prefix=True, ignore_if_missing=False
            )
        self.assertIsInstance(context.exception, AirflowException)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| apache-2.0 |
lukfor/mkdocs | mkdocs/nav.py | 1 | 11362 | # coding: utf-8
"""
Deals with generating the site-wide navigation.
This consists of building a set of interlinked page and header objects.
"""
from __future__ import unicode_literals
import datetime
import logging
import os
from mkdocs import utils, exceptions
log = logging.getLogger(__name__)
def filename_to_title(filename):
    """
    Automatically generate a default title, given a filename.

    The homepage gets the fixed title 'Home'; any other filename is
    converted by the shared utility helper.
    """
    return 'Home' if utils.is_homepage(filename) else utils.filename_to_title(filename)
class SiteNavigation(object):
    """
    Site-wide navigation: the ordered tree of nav items plus the flat
    list of pages, with URL/file contexts used while rendering each page.
    """

    def __init__(self, pages_config, use_directory_urls=True):
        self.url_context = URLContext()
        self.file_context = FileContext()
        self.nav_items, self.pages = _generate_site_navigation(
            pages_config, self.url_context, use_directory_urls)
        # First configured page is treated as the homepage (None if empty).
        self.homepage = self.pages[0] if self.pages else None
        self.use_directory_urls = use_directory_urls

    def __str__(self):
        return ''.join([str(item) for item in self])

    def __iter__(self):
        return iter(self.nav_items)

    def __len__(self):
        return len(self.nav_items)

    def walk_pages(self):
        """
        Returns each page in the site in turn.

        Additionally this sets the active status of the pages and headers,
        in the site navigation, so that the rendered navbar can correctly
        highlight the currently active page and/or header item.
        """
        page = self.homepage
        page.set_active()
        self.url_context.set_current_url(page.abs_url)
        self.file_context.set_current_path(page.input_path)
        yield page
        while page.next_page:
            # Deactivate the page we just yielded before moving on.
            page.set_active(False)
            page = page.next_page
            page.set_active()
            self.url_context.set_current_url(page.abs_url)
            self.file_context.set_current_path(page.input_path)
            yield page
        page.set_active(False)

    @property
    def source_files(self):
        """Set of all source paths, computed lazily and cached."""
        if not hasattr(self, '_source_files'):
            # Set comprehension instead of set([...]) (flake8 C403).
            self._source_files = {page.input_path for page in self.pages}
        return self._source_files
class URLContext(object):
    """
    The URLContext is used to ensure that we can generate the appropriate
    relative URLs to other pages from any given page in the site.
    We use relative URLs so that static sites can be deployed to any location
    without having to specify what the path component on the host will be
    if the documentation is not hosted at the root path.
    """
    def __init__(self):
        # Directory of the page currently being rendered ('/' = site root).
        self.base_path = '/'
        # When True, emit site-absolute URLs instead of relativizing.
        self.force_abs_urls = False

    def set_current_url(self, current_url):
        self.base_path = os.path.dirname(current_url)

    def make_relative(self, url):
        """
        Given a URL path return it as a relative URL,
        given the context of the current page.
        """
        if self.force_abs_urls:
            # Prefix with the current base path rather than relativizing.
            abs_url = '%s/%s' % (self.base_path.rstrip('/'), utils.path_to_url(url.lstrip('/')))
            return abs_url
        # Preserve a meaningful trailing slash through the relpath round-trip.
        suffix = '/' if (url.endswith('/') and len(url) > 1) else ''
        # Workaround for bug on `os.path.relpath()` in Python 2.6
        if self.base_path == '/':
            if url == '/':
                # Workaround for static assets
                return '.'
            return url.lstrip('/')
        # Under Python 2.6, relative_path adds an extra '/' at the end.
        relative_path = os.path.relpath(url, start=self.base_path)
        relative_path = relative_path.rstrip('/') + suffix
        return utils.path_to_url(relative_path)
class FileContext(object):
    """Track the markdown file currently being processed.

    Relative hyperlinks inside a page are resolved against the directory
    of the current file, which lets the build verify that link targets
    actually exist in the `pages` config.
    """
    def __init__(self):
        self.current_file = None
        self.base_path = ''

    def set_current_path(self, current_path):
        # Remember the active file and cache its containing directory.
        self.current_file = current_path
        self.base_path, _ = os.path.split(current_path)

    def make_absolute(self, path):
        """Resolve *path* (relative to the current page's directory) to a
        normalized path rooted at the docs directory."""
        joined = os.path.join(self.base_path, path)
        return os.path.normpath(joined)
class Page(object):
    """One documentation page: navigation metadata plus placeholders that
    are filled in during the build (content, meta, toc, URLs)."""
    def __init__(self, title, url, path, url_context):
        self.title = title
        self.abs_url = url
        self.active = False
        self.url_context = url_context
        # Support SOURCE_DATE_EPOCH environment variable for "reproducible" builds.
        # See https://reproducible-builds.org/specs/source-date-epoch/
        if 'SOURCE_DATE_EPOCH' in os.environ:
            self.update_date = datetime.datetime.utcfromtimestamp(
                int(os.environ['SOURCE_DATE_EPOCH'])
            ).strftime("%Y-%m-%d")
        else:
            self.update_date = datetime.datetime.now().strftime("%Y-%m-%d")
        # Relative paths to the input markdown file and output html file.
        self.input_path = path
        self.output_path = utils.get_html_path(path)
        # Links to related pages
        self.previous_page = None
        self.next_page = None
        self.ancestors = []
        # Placeholders to be filled in later in the build
        # process when we have access to the config.
        self.canonical_url = None
        self.edit_url = None
        self.content = None
        self.meta = None
        self.toc = None

    @property
    def url(self):
        # URL of this page relative to the page currently being rendered.
        return self.url_context.make_relative(self.abs_url)

    @property
    def is_homepage(self):
        return utils.is_homepage(self.input_path)

    @property
    def is_top_level(self):
        # A page with no ancestor headers sits at the root of the nav tree.
        return len(self.ancestors) == 0

    def __str__(self):
        return self.indent_print()

    def indent_print(self, depth=0):
        # One line per page: "<indent><title> - <abs_url>[ [*]]".
        indent = ' ' * depth
        active_marker = ' [*]' if self.active else ''
        title = self.title if (self.title is not None) else '[blank]'
        return '%s%s - %s%s\n' % (indent, title, self.abs_url, active_marker)

    def set_active(self, active=True):
        # Propagate state up so ancestor headers highlight with the page.
        self.active = active
        for ancestor in self.ancestors:
            ancestor.set_active(active)

    def set_canonical_url(self, base):
        if not base.endswith('/'):
            base += '/'
        self.canonical_url = utils.urljoin(base, self.abs_url.lstrip('/'))

    def set_edit_url(self, repo_url, edit_uri):
        if not repo_url.endswith('/'):
            # Skip when using query or fragment in edit_uri
            if not edit_uri.startswith('?') and not edit_uri.startswith('#'):
                repo_url += '/'
        if not edit_uri:
            # No edit path configured: link straight to the repository.
            self.edit_url = repo_url
        else:
            # Normalize URL from Windows path '\\' -> '/'
            input_path_url = self.input_path.replace('\\', '/')
            if not edit_uri.endswith('/'):
                edit_uri += '/'
            self.edit_url = utils.urljoin(
                repo_url,
                edit_uri + input_path_url)
class Header(object):
    """A named section in the navigation tree.

    A Header has no page content of its own; it groups child pages and
    sub-headers and tracks whether any descendant page is active.
    """
    def __init__(self, title, children):
        self.title = title
        self.children = children
        self.active = False
        self.ancestors = []

    def __str__(self):
        return self.indent_print()

    @property
    def is_top_level(self):
        # A header with no ancestors sits at the root of the nav tree.
        return not self.ancestors

    def indent_print(self, depth=0):
        marker = ' [*]' if self.active else ''
        lines = ['%s%s%s\n' % (' ' * depth, self.title, marker)]
        lines.extend(child.indent_print(depth + 1) for child in self.children)
        return ''.join(lines)

    def set_active(self, active=True):
        # Propagate state up so parents of the current section highlight too.
        self.active = active
        for ancestor in self.ancestors:
            ancestor.set_active(active)
def _path_to_page(path, title, url_context, use_directory_urls):
    """Build a Page for *path*, inferring a title when none was given."""
    if title is None:
        # Fall back to a title derived from the file's base name.
        title = filename_to_title(path.split(os.path.sep)[-1])
    page_url = utils.get_url_path(path, use_directory_urls)
    return Page(title=title, url=page_url, path=path, url_context=url_context)
def _follow(config_line, url_context, use_dir_urls, header=None, title=None):
    """Yield the Page/Header objects described by one `pages` config entry.

    *config_line* is either a path string or a one-item dict mapping a
    title/category name to a path or a list of sub-entries.  Pages and
    headers are attached to *header* (their parent) as they are created.

    Raises exceptions.ConfigurationError for malformed entries.
    """
    if isinstance(config_line, utils.string_types):
        path = os.path.normpath(config_line)
        page = _path_to_page(path, title, url_context, use_dir_urls)
        if header:
            page.ancestors = header.ancestors + [header, ]
            header.children.append(page)
        yield page
        # BUGFIX: `raise StopIteration` inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); a plain `return`
        # ends the generator identically on all versions.
        return
    elif not isinstance(config_line, dict):
        msg = ("Line in 'page' config is of type {0}, dict or string "
               "expected. Config: {1}").format(type(config_line), config_line)
        raise exceptions.ConfigurationError(msg)
    if len(config_line) > 1:
        raise exceptions.ConfigurationError(
            "Page configs should be in the format 'name: markdown.md'. The "
            "config contains an invalid entry: {0}".format(config_line))
    elif len(config_line) == 0:
        log.warning("Ignoring empty line in the pages config.")
        return
    next_cat_or_title, subpages_or_path = next(iter(config_line.items()))
    if isinstance(subpages_or_path, utils.string_types):
        # A "title: path" entry: recurse with the explicit title.
        path = subpages_or_path
        for sub in _follow(path, url_context, use_dir_urls, header=header, title=next_cat_or_title):
            yield sub
        return
    elif not isinstance(subpages_or_path, list):
        # BUGFIX: report the offending value's type, not the (always dict)
        # type of the whole config line.
        msg = ("Line in 'page' config is of type {0}, list or string "
               "expected for sub pages. Config: {1}"
               ).format(type(subpages_or_path), config_line)
        raise exceptions.ConfigurationError(msg)
    # A "category: [entries]" block: create the header, then its children.
    next_header = Header(title=next_cat_or_title, children=[])
    if header:
        next_header.ancestors = [header]
        header.children.append(next_header)
    yield next_header
    subpages = subpages_or_path
    for subpage in subpages:
        for sub in _follow(subpage, url_context, use_dir_urls, next_header):
            yield sub
def _generate_site_navigation(pages_config, url_context, use_dir_urls=True):
    """
    Returns a list of Page and Header instances that represent the
    top level site navigation.
    """
    nav_items = []
    pages = []
    previous = None
    for config_line in pages_config:
        for page_or_header in _follow(
                config_line, url_context, use_dir_urls):
            if isinstance(page_or_header, Header):
                # Only root-level headers go into the navbar; nested ones
                # are reachable through their parent's children.
                if page_or_header.is_top_level:
                    nav_items.append(page_or_header)
            elif isinstance(page_or_header, Page):
                if page_or_header.is_top_level:
                    nav_items.append(page_or_header)
                # Thread the prev/next chain across ALL pages (any depth)
                # so walk_pages() can traverse the site in config order.
                pages.append(page_or_header)
                if previous:
                    page_or_header.previous_page = previous
                    previous.next_page = page_or_header
                previous = page_or_header
    if len(pages) == 0:
        raise exceptions.ConfigurationError(
            "No pages found in the pages config. "
            "Remove it entirely to enable automatic page discovery.")
    return (nav_items, pages)
| bsd-2-clause |
vmx/git-repo | command.py | 9 | 6933 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
class Command(object):
  """Base class for any command line action in repo.
  """
  # Whether the command appears in the "common" help listing.
  common = False
  # Manifest instance; injected by the driver before Execute() runs.
  manifest = None
  # Lazily-built option parser (see the OptionParser property).
  _optparse = None

  def WantPager(self, opt):
    # Subclasses return True when their output should go through a pager.
    return False

  def ReadEnvironmentOptions(self, opts):
    """ Set options from environment variables. """
    env_options = self._RegisteredEnvironmentOptions()
    for env_key, opt_key in env_options.items():
      # Get the user-set option value if any
      opt_value = getattr(opts, opt_key)
      # If the value is set, it means the user has passed it as a command
      # line option, and we should use that. Otherwise we can try to set it
      # with the value from the corresponding environment variable.
      if opt_value is not None:
        continue
      env_value = os.environ.get(env_key)
      if env_value is not None:
        setattr(opts, opt_key, env_value)
    return opts

  @property
  def OptionParser(self):
    # Built on first access so subclasses can contribute via _Options().
    if self._optparse is None:
      try:
        me = 'repo %s' % self.NAME
        usage = self.helpUsage.strip().replace('%prog', me)
      except AttributeError:
        # Subclass defined no helpUsage; fall back to a bare usage line.
        usage = 'repo %s' % self.NAME
      self._optparse = optparse.OptionParser(usage = usage)
      self._Options(self._optparse)
    return self._optparse

  def _Options(self, p):
    """Initialize the option parser.
    """

  def _RegisteredEnvironmentOptions(self):
    """Get options that can be set from environment variables.
    Return a dictionary mapping environment variable name
    to option key name that it can override.
    Example: {'REPO_MY_OPTION': 'my_option'}
    Will allow the option with key value 'my_option' to be set
    from the value in the environment variable named 'REPO_MY_OPTION'.
    Note: This does not work properly for options that are explicitly
    set to None by the user, or options that are defined with a
    default value other than None.
    """
    return {}

  def Usage(self):
    """Display usage and terminate.
    """
    self.OptionParser.print_usage()
    sys.exit(1)

  def Execute(self, opt, args):
    """Perform the action, after option parsing is complete.
    """
    raise NotImplementedError

  def _ResetPathToProjectMap(self, projects):
    # Rebuild the worktree-path -> project map used by _GetProjectByPath.
    self._by_path = dict((p.worktree, p) for p in projects)

  def _UpdatePathToProjectMap(self, project):
    self._by_path[project.worktree] = project

  def _GetProjectByPath(self, path):
    # Walk upward from `path` toward the manifest top dir until some
    # project's worktree matches; returns None when nothing matches.
    project = None
    if os.path.exists(path):
      oldpath = None
      while path \
        and path != oldpath \
        and path != self.manifest.topdir:
        try:
          project = self._by_path[path]
          break
        except KeyError:
          oldpath = path
          path = os.path.dirname(path)
    else:
      # Nonexistent path: only an exact worktree match can succeed.
      try:
        project = self._by_path[path]
      except KeyError:
        pass
    return project

  def GetProjects(self, args, missing_ok=False, submodules_ok=False):
    """A list of projects that match the arguments.
    """
    all_projects = self.manifest.projects
    result = []
    mp = self.manifest.manifestProject
    groups = mp.config.GetString('manifest.groups')
    if not groups:
      # No configured groups: default plus the current platform's group.
      groups = 'default,platform-' + platform.system().lower()
    groups = [x for x in re.split(r'[,\s]+', groups) if x]
    if not args:
      # No arguments: every existing project in the active groups,
      # optionally including derived subprojects (submodules).
      all_projects_list = list(all_projects.values())
      derived_projects = {}
      for project in all_projects_list:
        if submodules_ok or project.sync_s:
          derived_projects.update((p.name, p)
                                  for p in project.GetDerivedSubprojects())
      all_projects_list.extend(derived_projects.values())
      for project in all_projects_list:
        if ((missing_ok or project.Exists) and
            project.MatchesGroups(groups)):
          result.append(project)
    else:
      # Arguments given: resolve each as a project name or a filesystem path.
      self._ResetPathToProjectMap(all_projects.values())
      for arg in args:
        project = all_projects.get(arg)
        if not project:
          path = os.path.abspath(arg).replace('\\', '/')
          project = self._GetProjectByPath(path)
          # If it's not a derived project, update path->project mapping and
          # search again, as arg might actually point to a derived subproject.
          if (project and not project.Derived and
              (submodules_ok or project.sync_s)):
            search_again = False
            for subproject in project.GetDerivedSubprojects():
              self._UpdatePathToProjectMap(subproject)
              search_again = True
            if search_again:
              project = self._GetProjectByPath(path) or project
        if not project:
          raise NoSuchProjectError(arg)
        if not missing_ok and not project.Exists:
          raise NoSuchProjectError(arg)
        if not project.MatchesGroups(groups):
          raise InvalidProjectGroupsError(arg)
        result.append(project)
    def _getpath(x):
      return x.relpath
    result.sort(key=_getpath)
    return result

  def FindProjects(self, args):
    # Case-insensitive regex search over project names and relative paths.
    result = []
    patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
    for project in self.GetProjects(''):
      for pattern in patterns:
        if pattern.search(project.name) or pattern.search(project.relpath):
          result.append(project)
          break
    result.sort(key=lambda project: project.relpath)
    return result
# pylint: disable=W0223
# Pylint warns that the `InteractiveCommand` and `PagedCommand` classes do not
# override method `Execute` which is abstract in `Command`. Since that method
# is always implemented in classes derived from `InteractiveCommand` and
# `PagedCommand`, this warning can be suppressed.
class InteractiveCommand(Command):
  """A command that talks to the user directly on the tty.

  Output from such a command must never be routed through a pager,
  even when the user has asked for one.
  """
  def WantPager(self, opt):
    # A pager would capture the tty and break the interaction.
    return False
class PagedCommand(Command):
  """A command whose output typically exceeds one screen.

  Such commands default to displaying their output through a pager.
  """
  def WantPager(self, opt):
    return True
# pylint: enable=W0223
class MirrorSafeCommand(object):
  """Marker base class for commands allowed to run inside a mirror.

  Commands deriving from this class do not require a working directory.
  """
| apache-2.0 |
Sing-Li/go-buildpack | builds/runtimes/python-2.7.6/lib/python2.7/test/audiotests.py | 8 | 7577 | from test.test_support import findfile, TESTFN, unlink
import unittest
import array
import io
import pickle
import sys
import base64
def fromhex(s):
    """Decode a hex string (spaces allowed as separators) into raw bytes."""
    cleaned = s.replace(' ', '')
    return base64.b16decode(cleaned)
def byteswap2(data):
    # Swap the byte order of 16-bit samples: view the buffer as an array of
    # signed shorts, byteswap in place, then serialize back.
    # NOTE(review): array.tostring() is the Python 2 spelling (removed in
    # 3.9); on Python 3 this would need tobytes() — confirm target version.
    a = array.array('h', data)
    a.byteswap()
    return a.tostring()
def byteswap3(data):
    """Swap the byte order of 24-bit samples (reverse each 3-byte group)."""
    swapped = bytearray(data)
    # Exchange the first and third byte of every frame; the middle stays put.
    swapped[::3] = data[2::3]
    swapped[2::3] = data[::3]
    return bytes(swapped)
def byteswap4(data):
    # Swap the byte order of 32-bit samples via a signed-int array view.
    # NOTE(review): array.tostring() is the Python 2 spelling (removed in
    # 3.9); on Python 3 this would need tobytes() — confirm target version.
    a = array.array('i', data)
    a.byteswap()
    return a.tostring()
class AudioTests:
    # Mixin with shared fixture handling for the audio-module test suites.
    # Subclasses set `self.module` (e.g. the wave/aifc/sunau module) plus the
    # expected format attributes (nchannels, sampwidth, ...) before use.

    # Whether module.open() is expected to close a caller-supplied file object.
    close_fd = False

    def setUp(self):
        # Reader (self.f) and writer (self.fout) handles, closed in tearDown.
        self.f = self.fout = None

    def tearDown(self):
        # Close any readers/writers a test left open and drop the temp file.
        if self.f is not None:
            self.f.close()
        if self.fout is not None:
            self.fout.close()
        unlink(TESTFN)

    def check_params(self, f, nchannels, sampwidth, framerate, nframes,
                     comptype, compname):
        # Verify the individual accessors, the aggregated getparams() tuple,
        # and that the params tuple survives a pickle round-trip.
        self.assertEqual(f.getnchannels(), nchannels)
        self.assertEqual(f.getsampwidth(), sampwidth)
        self.assertEqual(f.getframerate(), framerate)
        self.assertEqual(f.getnframes(), nframes)
        self.assertEqual(f.getcomptype(), comptype)
        self.assertEqual(f.getcompname(), compname)
        params = f.getparams()
        self.assertEqual(params,
                (nchannels, sampwidth, framerate, nframes, comptype, compname))
        dump = pickle.dumps(params)
        self.assertEqual(pickle.loads(dump), params)
class AudioWriteTests(AudioTests):
    # Write-path tests shared by the audio modules.  Subclasses provide
    # `module` plus reference parameters and payload (`frames`).

    def create_file(self, testfile):
        # Open a writer on testfile configured with the reference parameters
        # (the frame count is set by the individual tests).
        f = self.fout = self.module.open(testfile, 'wb')
        f.setnchannels(self.nchannels)
        f.setsampwidth(self.sampwidth)
        f.setframerate(self.framerate)
        f.setcomptype(self.comptype, self.compname)
        return f

    def check_file(self, testfile, nframes, frames):
        # Re-open testfile for reading and verify both the header parameters
        # and the frame payload.
        f = self.module.open(testfile, 'rb')
        try:
            self.assertEqual(f.getnchannels(), self.nchannels)
            self.assertEqual(f.getsampwidth(), self.sampwidth)
            self.assertEqual(f.getframerate(), self.framerate)
            self.assertEqual(f.getnframes(), nframes)
            self.assertEqual(f.readframes(nframes), frames)
        finally:
            f.close()

    def test_write_params(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.nframes, self.comptype, self.compname)
        f.close()

    def test_write(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)

    def test_incompleted_write(self):
        # Declare MORE frames than are written: the header must be patched
        # on close, without clobbering leading non-audio bytes.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            f.writeframes(self.frames)
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_multiple_writes(self):
        # Split the payload across two writeframes() calls.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes)
            framesize = self.nchannels * self.sampwidth
            f.writeframes(self.frames[:-framesize])
            f.writeframes(self.frames[-framesize:])
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)

    def test_overflowed_write(self):
        # Declare FEWER frames than are written: header must be fixed up.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            f.writeframes(self.frames)
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)
class AudioTestsWithSourceFile(AudioTests):
    # Read-path tests driven by a reference audio file shipped with the test
    # suite.  Subclasses set `sndfilename` plus the expected parameters.

    @classmethod
    def setUpClass(cls):
        cls.sndfilepath = findfile(cls.sndfilename, subdir='audiodata')

    def test_read_params(self):
        f = self.f = self.module.open(self.sndfilepath)
        #self.assertEqual(f.getfp().name, self.sndfilepath)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.sndfilenframes, self.comptype, self.compname)

    def test_close(self):
        # close() must close a caller-supplied file object only when the
        # module owns it (close_fd).
        with open(self.sndfilepath, 'rb') as testfile:
            f = self.f = self.module.open(testfile)
            self.assertFalse(testfile.closed)
            f.close()
            self.assertEqual(testfile.closed, self.close_fd)
        with open(TESTFN, 'wb') as testfile:
            fout = self.fout = self.module.open(testfile, 'wb')
            self.assertFalse(testfile.closed)
            with self.assertRaises(self.module.Error):
                # No parameters were set, so closing the writer must fail.
                fout.close()
            self.assertEqual(testfile.closed, self.close_fd)
            fout.close() # do nothing

    def test_read(self):
        # Exercise sequential reads, rewind(), tell() and setpos().
        framesize = self.nchannels * self.sampwidth
        chunk1 = self.frames[:2 * framesize]
        chunk2 = self.frames[2 * framesize: 4 * framesize]
        f = self.f = self.module.open(self.sndfilepath)
        self.assertEqual(f.readframes(0), b'')
        self.assertEqual(f.tell(), 0)
        self.assertEqual(f.readframes(2), chunk1)
        f.rewind()
        pos0 = f.tell()
        self.assertEqual(pos0, 0)
        self.assertEqual(f.readframes(2), chunk1)
        pos2 = f.tell()
        self.assertEqual(pos2, 2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos0)
        self.assertEqual(f.readframes(2), chunk1)
        # Out-of-range positions must be rejected.
        with self.assertRaises(self.module.Error):
            f.setpos(-1)
        with self.assertRaises(self.module.Error):
            f.setpos(f.getnframes() + 1)

    def test_copy(self):
        # Copy the reference file in growing chunk sizes (1, 2, 3, ...)
        # and verify the result is identical.
        f = self.f = self.module.open(self.sndfilepath)
        fout = self.fout = self.module.open(TESTFN, 'wb')
        fout.setparams(f.getparams())
        i = 0
        n = f.getnframes()
        while n > 0:
            i += 1
            fout.writeframes(f.readframes(i))
            n -= i
        fout.close()
        fout = self.fout = self.module.open(TESTFN, 'rb')
        f.rewind()
        self.assertEqual(f.getparams(), fout.getparams())
        self.assertEqual(f.readframes(f.getnframes()),
                         fout.readframes(fout.getnframes()))

    def test_read_not_from_start(self):
        # Reading must work when the audio stream starts at a nonzero
        # offset inside the file object.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            with open(self.sndfilepath, 'rb') as f:
                testfile.write(f.read())
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            f = self.module.open(testfile, 'rb')
            try:
                self.assertEqual(f.getnchannels(), self.nchannels)
                self.assertEqual(f.getsampwidth(), self.sampwidth)
                self.assertEqual(f.getframerate(), self.framerate)
                self.assertEqual(f.getnframes(), self.sndfilenframes)
                self.assertEqual(f.readframes(self.nframes), self.frames)
            finally:
                f.close()
| mit |
justincassidy/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
    """Total size of *docs* (an iterable of strings) in UTF-8 megabytes."""
    total_bytes = 0
    for doc in docs:
        total_bytes += len(doc.encode('utf-8'))
    return total_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
    """Shorten *s* with an ellipsis so it fits an 80-column terminal."""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit *clf* on the train split and evaluate it on the test split.

    Relies on module-level globals prepared by the script body above:
    X_train, y_train, X_test, y_test, opts, feature_names, categories.
    Returns (classifier name, accuracy, train time, test time).
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)
    t0 = time()
    pred = clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)
    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)
    if hasattr(clf, 'coef_'):
        # Linear models only: report coefficient density and, on request,
        # the highest-weighted terms per class.
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, category in enumerate(categories):
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s"
                      % (category, " ".join(feature_names[top10]))))
        print()
    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=categories))
    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))
    print()
    # "RidgeClassifier(alpha=...)" -> "RidgeClassifier"
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
sebastic/QGIS | python/plugins/processing/gui/NumberInputDialog.py | 3 | 6379 | # -*- coding: utf-8 -*-
"""
***************************************************************************
NumberInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import uic
from PyQt4.QtGui import QDialog, QTreeWidgetItem, QMessageBox
from qgis.core import QgsRasterLayer
from qgis.utils import iface
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgNumberInput.ui'))
class NumberInputDialog(BASE, WIDGET):
def __init__(self, isInteger):
super(NumberInputDialog, self).__init__(None)
self.setupUi(self)
if hasattr(self.leFormula, 'setPlaceholderText'):
self.leFormula.setPlaceholderText(
self.tr('[Enter your formula here]'))
self.treeValues.doubleClicked.connect(self.addValue)
self.value = None
self.isInteger = isInteger
if not self.isInteger:
self.lblWarning.hide()
self.fillTree()
def fillTree(self):
layersItem = QTreeWidgetItem()
layersItem.setText(0, self.tr('Values from data layers extents'))
self.treeValues.addTopLevelItem(layersItem)
layers = dataobjects.getAllLayers()
for layer in layers:
layerItem = QTreeWidgetItem()
layerItem.setText(0, unicode(layer.name()))
layerItem.addChild(TreeValueItem(self.tr('Min X'),
layer.extent().xMinimum()))
layerItem.addChild(TreeValueItem(self.tr('Max X'),
layer.extent().xMaximum()))
layerItem.addChild(TreeValueItem(self.tr('Min Y'),
layer.extent().yMinimum()))
layerItem.addChild(TreeValueItem(self.tr('Max Y'),
layer.extent().yMaximum()))
if isinstance(layer, QgsRasterLayer):
cellsize = (layer.extent().xMaximum()
- layer.extent().xMinimum()) / layer.width()
layerItem.addChild(TreeValueItem(self.tr('Cellsize'),
cellsize))
layersItem.addChild(layerItem)
layersItem = QTreeWidgetItem()
layersItem.setText(0, self.tr('Values from raster layers statistics'))
self.treeValues.addTopLevelItem(layersItem)
layers = dataobjects.getRasterLayers()
for layer in layers:
for i in range(layer.bandCount()):
stats = layer.dataProvider().bandStatistics(i + 1)
layerItem = QTreeWidgetItem()
layerItem.setText(0, unicode(layer.name()))
layerItem.addChild(TreeValueItem(self.tr('Mean'), stats.mean))
layerItem.addChild(TreeValueItem(self.tr('Std. deviation'),
stats.stdDev))
layerItem.addChild(TreeValueItem(self.tr('Max value'),
stats.maximumValue))
layerItem.addChild(TreeValueItem(self.tr('Min value'),
stats.minimumValue))
layersItem.addChild(layerItem)
canvasItem = QTreeWidgetItem()
canvasItem.setText(0, self.tr('Values from QGIS map canvas'))
self.treeValues.addTopLevelItem(canvasItem)
extent = iface.mapCanvas().extent()
extentItem = QTreeWidgetItem()
extentItem.setText(0, self.tr('Current extent'))
extentItem.addChild(TreeValueItem(self.tr('Min X'), extent.xMinimum()))
extentItem.addChild(TreeValueItem(self.tr('Max X'), extent.xMaximum()))
extentItem.addChild(TreeValueItem(self.tr('Min Y'), extent.yMinimum()))
extentItem.addChild(TreeValueItem(self.tr('Max Y'), extent.yMaximum()))
canvasItem.addChild(extentItem)
extent = iface.mapCanvas().fullExtent()
extentItem = QTreeWidgetItem()
extentItem.setText(0,
self.tr('Full extent of all layers in map canvas'))
extentItem.addChild(TreeValueItem(self.tr('Min X'), extent.xMinimum()))
extentItem.addChild(TreeValueItem(self.tr('Max X'), extent.xMaximum()))
extentItem.addChild(TreeValueItem(self.tr('Min Y'), extent.yMinimum()))
extentItem.addChild(TreeValueItem(self.tr('Max Y'), extent.yMaximum()))
canvasItem.addChild(extentItem)
def addValue(self):
item = self.treeValues.currentItem()
if isinstance(item, TreeValueItem):
formula = self.leFormula.text() + ' ' + unicode(item.value)
self.leFormula.setText(formula.strip())
def accept(self):
try:
self.value = float(eval(unicode(self.leFormula.text())))
if self.isInteger:
self.value = int(round(self.value))
QDialog.accept(self)
except:
QMessageBox.critical(self, self.tr('Wrong expression'),
self.tr('The expression entered is not correct'))
    def reject(self):
        """Discard any computed value and close the dialog as rejected."""
        self.value = None
        QDialog.reject(self)
class TreeValueItem(QTreeWidgetItem):
    """Tree widget item that remembers the raw value it displays.

    The numeric value is kept in ``self.value`` so callers can splice it
    into a formula, while the visible text shows ``name: value``.
    """

    def __init__(self, name, value):
        QTreeWidgetItem.__init__(self)
        label = name + ': ' + unicode(value)
        self.value = value
        self.setText(0, label)
| gpl-2.0 |
stargaser/astropy | astropy/table/row.py | 2 | 6611 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections
from collections import OrderedDict
from operator import index as operator_index
import numpy as np
class Row:
    """A class to represent one row of a Table object.

    A Row object is returned when a Table object is indexed with an integer
    or when iterating over a table::

      >>> from astropy.table import Table
      >>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
      ...               dtype=('int32', 'int32'))
      >>> row = table[1]
      >>> row
      <Row index=1>
        a     b
      int32 int32
      ----- -----
          2     4
      >>> row['a']
      2
      >>> row[1]
      4
    """
    def __init__(self, table, index):
        """Create a view onto row ``index`` of ``table`` (no data is copied)."""
        # Ensure that the row index is a valid index (int)
        index = operator_index(index)
        n = len(table)
        if index < -n or index >= n:
            raise IndexError('index {} out of range for table with length {}'
                             .format(index, len(table)))
        # Finally, ensure the index is positive [#8422] and set Row attributes
        self._index = index % n
        self._table = table
    def __getitem__(self, item):
        """Return the value(s) for a column name/index or a list/tuple of names."""
        try:
            # Try the most common use case of accessing a single column in the Row.
            # Bypass the TableColumns __getitem__ since that does more testing
            # and allows a list of tuple or str, which is not the right thing here.
            out = OrderedDict.__getitem__(self._table.columns, item)[self._index]
        except (KeyError, TypeError):
            if self._table._is_list_or_tuple_of_str(item):
                # Multi-column selection: build a sub-table of the requested
                # columns and take this row from it.
                cols = [self._table[name] for name in item]
                out = self._table.__class__(cols, copy=False)[self._index]
            else:
                # This is only to raise an exception
                out = self._table.columns[item][self._index]
        return out
    def __setitem__(self, item, val):
        """Set the value(s) of this row for column(s) ``item``."""
        if self._table._is_list_or_tuple_of_str(item):
            # Delegate multi-column assignment to the parent table so column
            # consistency checks happen in one place.
            self._table._set_row(self._index, colnames=item, vals=val)
        else:
            self._table.columns[item][self._index] = val
    def _ipython_key_completions_(self):
        # Enables tab-completion of column names for row['<TAB>'] in IPython.
        return self.colnames
    def __eq__(self, other):
        if self._table.masked:
            # Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
            # "Comparing rows in a structured masked array raises exception"
            # No response, so this is still unresolved.
            raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
        return self.as_void() == other
    def __ne__(self, other):
        # Mirrors __eq__, including the masked-table guard.
        if self._table.masked:
            raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
        return self.as_void() != other
    def __array__(self, dtype=None):
        """Support converting Row to np.array via np.array(table).

        Coercion to a different dtype via np.array(table, dtype) is not
        supported and will raise a ValueError.

        If the parent table is masked then the mask information is dropped.
        """
        if dtype is not None:
            raise ValueError('Datatype coercion is not allowed')
        return np.asarray(self.as_void())
    def __len__(self):
        # A row has one entry per column.
        return len(self._table.columns)
    def __iter__(self):
        """Iterate over this row's values, one per column, in column order."""
        index = self._index
        for col in self._table.columns.values():
            yield col[index]
    def keys(self):
        """Return the column names (dict-like interface)."""
        return self._table.columns.keys()
    def values(self):
        """Return an iterator over the row's values (dict-like interface)."""
        return self.__iter__()
    @property
    def table(self):
        # Parent table this row is a view into.
        return self._table
    @property
    def index(self):
        # Row index within the parent table; always non-negative (see __init__).
        return self._index
    def as_void(self):
        """
        Returns a *read-only* copy of the row values in the form of np.void or
        np.ma.mvoid objects. This corresponds to the object types returned for
        row indexing of a pure numpy structured array or masked array. This
        method is slow and its use is discouraged when possible.

        Returns
        -------
        void_row : np.void (unmasked) or np.ma.mvoid (masked)
            Copy of row values
        """
        index = self._index
        cols = self._table.columns.values()
        vals = tuple(np.asarray(col)[index] for col in cols)
        if self._table.masked:
            # The logic here is a little complicated to work around
            # bug in numpy < 1.8 (numpy/numpy#483). Need to build up
            # a np.ma.mvoid object by hand.
            from .table import descr
            # Make np.void version of masks. Use the table dtype but
            # substitute bool for data type
            masks = tuple(col.mask[index] if hasattr(col, 'mask') else False
                          for col in cols)
            descrs = (descr(col) for col in cols)
            mask_dtypes = [(name, bool, shape) for name, type_, shape in descrs]
            row_mask = np.array([masks], dtype=mask_dtypes)[0]
            # Make np.void version of values, and then the final mvoid row
            row_vals = np.array([vals], dtype=self.dtype)[0]
            void_row = np.ma.mvoid(data=row_vals, mask=row_mask)
        else:
            void_row = np.array([vals], dtype=self.dtype)[0]
        return void_row
    @property
    def meta(self):
        # Metadata is shared with (not copied from) the parent table.
        return self._table.meta
    @property
    def columns(self):
        return self._table.columns
    @property
    def colnames(self):
        return self._table.colnames
    @property
    def dtype(self):
        return self._table.dtype
    def _base_repr_(self, html=False):
        """
        Display row as a single-line table but with appropriate header line.
        """
        index = self.index if (self.index >= 0) else self.index + len(self._table)
        # Render via a one-row slice of the parent table so the column
        # headers and formatting match normal table output.
        table = self._table[index:index + 1]
        descr_vals = [self.__class__.__name__,
                      f'index={self.index}']
        if table.masked:
            descr_vals.append('masked=True')
        return table._base_repr_(html, descr_vals, max_width=-1,
                                 tableid='table{}'.format(id(self._table)))
    def _repr_html_(self):
        # Rich display hook for Jupyter notebooks.
        return self._base_repr_(html=True)
    def __repr__(self):
        return self._base_repr_(html=False)
    def __str__(self):
        """Return the plain-text one-row table representation."""
        index = self.index if (self.index >= 0) else self.index + len(self._table)
        return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
    def __bytes__(self):
        """Return the UTF-8 encoded form of str(self)."""
        return str(self).encode('utf-8')
collections.abc.Sequence.register(Row)
| bsd-3-clause |
Distrotech/scons | build/scons/engine/SCons/Tool/g77.py | 2 | 2497 | """engine.SCons.Tool.g77
Tool-specific initialization for g77.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/g77.py 2014/01/04 01:12:18 root"
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f77_to_env
compilers = ['g77', 'f77']
def generate(env):
    """Add Builders and construction variables for g77 to an Environment."""
    add_all_to_env(env)
    add_f77_to_env(env)
    fcomp = env.Detect(compilers) or 'g77'
    # Shared objects need position-independent code everywhere except on
    # Windows-style platforms, so append -fPIC conditionally instead of
    # duplicating the assignments in two branches.
    if env['PLATFORM'] in ['cygwin', 'win32']:
        pic = ''
    else:
        pic = ' -fPIC'
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS' + pic)
    env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS' + pic)
    env['FORTRAN'] = fcomp
    env['SHFORTRAN'] = '$FORTRAN'
    env['F77'] = fcomp
    env['SHF77'] = '$F77'
    # Include-path flag spelling for both Fortran dialect variable families.
    env['INCFORTRANPREFIX'] = "-I"
    env['INCFORTRANSUFFIX'] = ""
    env['INCF77PREFIX'] = "-I"
    env['INCF77SUFFIX'] = ""
def exists(env):
    # Tool is considered available iff one of the candidate compilers can be
    # detected; env.Detect returns a true value on success, None otherwise.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
miloszz/DIRAC | Core/Utilities/test/TracedTests.py | 10 | 2744 | ########################################################################
# $HeadURL $
# File: TracedTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/08/08 15:21:32
########################################################################
""" :mod: TracedTests
=======================
.. module: TracedTests
:synopsis: Traced test cases
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
Traced test cases
"""
__RCSID__ = "$Id $"
##
# @file TracedTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/08/08 15:21:44
# @brief Definition of TracedTests class.
## imports
import unittest
## SUT
from DIRAC.Core.Utilities.Traced import Traced, TracedDict, TracedList
########################################################################
class TracedTests(unittest.TestCase):
  """
  .. class:: TracedTests

  Unit tests for the Traced metaclass and the TracedDict/TracedList watched
  containers: each test checks that only *changing* mutations are recorded
  by :meth:`updated`.
  """
  def setUp( self ):
    """Create one traced instance of each watched type.

    :param self: self reference
    """
    self.tracedDict = TracedDict( { 1 : 1 } )
    self.tracedList = TracedList( [ 1 ] )
    class TracedClass( object ):
      __metaclass__ = Traced
      classArg = None
      def __init__( self ):
        # NOTE(review): this binds a *local* name, not self.instanceArg, so a
        # fresh instance has no 'instanceArg' attribute until a test sets it.
        # Deliberately left as-is: testTracedClass relies on updated() being
        # empty right after construction.
        instanceArg = None
    self.tracedClass = TracedClass()
  def testTracedDict( self ):
    """ TracedDict tests """
    # Fixed typo in the method name (was testTarcedDict); the 'test' prefix
    # is what the loader discovers, so the rename is backward-compatible.
    self.assertEqual( self.tracedDict.updated(), [] )
    ## update, not changing value
    self.tracedDict[1] = 1
    self.assertEqual( self.tracedDict.updated(), [] )
    ## update, changing value
    self.tracedDict[1] = 2
    self.assertEqual( self.tracedDict.updated(), [1] )
    ## set new
    self.tracedDict[2] = 2
    self.assertEqual( self.tracedDict.updated(), [ 1, 2 ] )
    ## update from diff dict
    self.tracedDict.update( { 3: 3 } )
    self.assertEqual( self.tracedDict.updated(), [ 1, 2, 3 ] )
  def testTracedList( self ):
    """ traced list """
    self.assertEqual( self.tracedList.updated(), [] )
    ## no value change
    self.tracedList[0] = 1
    self.assertEqual( self.tracedList.updated(), [] )
    ## value change
    self.tracedList[0] = 2
    self.assertEqual( self.tracedList.updated(), [0] )
    ## append
    self.tracedList.append( 1 )
    self.assertEqual( self.tracedList.updated(), [0, 1] )
  def testTracedClass( self ):
    """ traced class """
    self.assertEqual( self.tracedClass.updated(), [] )
    self.tracedClass.instanceArg = 1
    self.assertEqual( self.tracedClass.updated(), [ "instanceArg" ] )
    self.tracedClass.classArg = 1
    self.assertEqual( self.tracedClass.updated(), [ "instanceArg" , "classArg" ] )
## test execution
if __name__ == "__main__":
  # Build the suite and run it with verbose output when invoked directly.
  testSuite = unittest.TestLoader().loadTestsFromTestCase( TracedTests )
  unittest.TextTestRunner( verbosity = 3 ).run( testSuite )
| gpl-3.0 |
kurli/blink-crosswalk | Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py | 22 | 48279 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
class Base(unittest.TestCase):
    # Note that all of these tests are written assuming the configuration
    # being tested is Windows XP, Release build.
    #
    # Shared fixture for the TestExpectations test classes below: provides a
    # mock test-win-xp port plus helpers to parse expectation text and assert
    # on the resulting expectations.
    def __init__(self, testFunc):
        host = MockHost()
        self._port = host.port_factory.get('test-win-xp', None)
        self._exp = None
        unittest.TestCase.__init__(self, testFunc)
    def get_basic_tests(self):
        """Return the canonical list of test paths used by these tests."""
        return ['failures/expected/text.html',
                'failures/expected/image_checksum.html',
                'failures/expected/crash.html',
                'failures/expected/needsrebaseline.html',
                'failures/expected/needsmanualrebaseline.html',
                'failures/expected/missing_text.html',
                'failures/expected/image.html',
                'failures/expected/timeout.html',
                'passes/text.html']
    def get_basic_expectations(self):
        """Return a small TestExpectations file body covering common cases."""
        return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ WontFix ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
Bug(test) failures/expected/image_checksum.html [ WontFix ]
Bug(test) failures/expected/image.html [ WontFix Mac ]
"""
    def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
        """Parse expectation text (plus optional overrides) into self._exp."""
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = expectations
        if overrides:
            expectations_dict['overrides'] = overrides
        # Monkey-patch the port so TestExpectations reads our in-memory text.
        self._port.expectations_dict = lambda: expectations_dict
        expectations_to_lint = expectations_dict if is_lint_mode else None
        self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_dict=expectations_to_lint, is_lint_mode=is_lint_mode)
    def assert_exp_list(self, test, results):
        """Assert the parsed expectations for ``test`` equal ``results`` (as a set)."""
        self.assertEqual(self._exp.get_expectations(test), set(results))
    def assert_exp(self, test, result):
        """Assert ``test`` has exactly the single expectation ``result``."""
        self.assert_exp_list(test, [result])
    def assert_bad_expectations(self, expectations, overrides=None):
        """Assert that linting the given expectation text raises ParseError."""
        self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
    # Smoke tests: a simple expectations file parses and each entry maps to
    # the expected result set.
    def test_basic(self):
        self.parse_exp(self.get_basic_expectations())
        self.assert_exp('failures/expected/text.html', FAIL)
        self.assert_exp_list('failures/expected/image_checksum.html', [WONTFIX, SKIP])
        self.assert_exp('passes/text.html', PASS)
        # Mac-only entry must not apply on the Windows XP test port.
        self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
    # Assorted TestExpectations behaviors: static helpers, lint errors,
    # overrides files, and flag interactions.
    def test_multiple_results(self):
        self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
        self.assertEqual(self._exp.get_expectations('failures/expected/text.html'), set([FAIL, CRASH]))
    def test_result_was_expected(self):
        # test basics
        self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
        self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
        # test handling of SKIPped tests and results
        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), True)
        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK]), test_needs_rebaselining=False), True)
        # test handling of MISSING results and the REBASELINE specifier
        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
        # Any failure-type result is acceptable for a NeedsRebaseline test,
        # but crashes/timeouts/leaks are not.
        self.assertTrue(TestExpectations.result_was_expected(PASS, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertTrue(TestExpectations.result_was_expected(MISSING, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertTrue(TestExpectations.result_was_expected(TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertTrue(TestExpectations.result_was_expected(IMAGE, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertTrue(TestExpectations.result_was_expected(IMAGE_PLUS_TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertTrue(TestExpectations.result_was_expected(AUDIO, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertFalse(TestExpectations.result_was_expected(TIMEOUT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertFalse(TestExpectations.result_was_expected(CRASH, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
        self.assertFalse(TestExpectations.result_was_expected(LEAK, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
    def test_remove_pixel_failures(self):
        self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
        self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
        self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
        # NOTE(review): duplicate of the first assertion above; harmless.
        self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
        self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
    def test_suffixes_for_expectations(self):
        self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
        self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
        self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
        self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
    def test_category_expectations(self):
        # This test checks unknown tests are not present in the
        # expectations and that known test part of a test category is
        # present in the expectations.
        exp_str = 'Bug(x) failures/expected [ WontFix ]'
        self.parse_exp(exp_str)
        test_name = 'failures/expected/unknown-test.html'
        unknown_test = test_name
        self.assertRaises(KeyError, self._exp.get_expectations,
                          unknown_test)
        self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
    def test_get_expectations_string(self):
        self.parse_exp(self.get_basic_expectations())
        self.assertEqual(self._exp.get_expectations_string('failures/expected/text.html'), 'FAIL')
    def test_expectation_to_string(self):
        # Normal cases are handled by other tests.
        self.parse_exp(self.get_basic_expectations())
        self.assertRaises(ValueError, self._exp.expectation_to_string,
                          -1)
    def test_get_test_set(self):
        # Handle some corner cases for this routine not covered by other tests.
        self.parse_exp(self.get_basic_expectations())
        s = self._exp.get_test_set(WONTFIX)
        self.assertEqual(s, set(['failures/expected/crash.html', 'failures/expected/image_checksum.html']))
    def test_needs_rebaseline_reftest(self):
        # A test with a -expected.html reference file is a reftest and must
        # not be marked NeedsRebaseline/NeedsManualRebaseline; linting should
        # raise with one warning per offending line.
        try:
            filesystem = self._port.host.filesystem
            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline.html'), 'content')
            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline-expected.html'), 'content')
            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline.html'), 'content')
            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline-expected.html'), 'content')
            self.parse_exp("""Bug(user) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(user) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]""", is_lint_mode=True)
            self.assertFalse(True, "ParseError wasn't raised")
        except ParseError, e:
            warnings = """expectations:1 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsrebaseline.html
expectations:2 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsmanualrebaseline.html"""
            self.assertEqual(str(e), warnings)
    def test_parse_warning(self):
        try:
            filesystem = self._port.host.filesystem
            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
            filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'test-to-rebaseline.html'), 'content')
            # NOTE(review): the next line is a stray no-op string expression
            # (looks like a leftover); it has no effect on the test.
            'disabled-test.html-disabled',
            self.parse_exp("Bug(user) [ FOO ] failures/expected/text.html [ Failure ]\n"
                           "Bug(user) non-existent-test.html [ Failure ]\n"
                           "Bug(user) disabled-test.html-disabled [ ImageOnlyFailure ]\n"
                           "Bug(user) [ Release ] test-to-rebaseline.html [ NeedsRebaseline ]", is_lint_mode=True)
            self.assertFalse(True, "ParseError wasn't raised")
        except ParseError, e:
            warnings = ("expectations:1 Unrecognized specifier 'foo' failures/expected/text.html\n"
                        "expectations:2 Path does not exist. non-existent-test.html\n"
                        "expectations:4 A test cannot be rebaselined for Debug/Release. test-to-rebaseline.html")
            self.assertEqual(str(e), warnings)
    def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
        # Outside lint mode, syntax problems are logged rather than raised.
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
        finally:
            _, _, logs = oc.restore_output()
            self.assertNotEquals(logs, '')
    def test_error_on_different_platform(self):
        # parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
        self.assertRaises(ParseError, self.parse_exp,
            'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
            is_lint_mode=True)
    def test_error_on_different_build_type(self):
        # parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
        self.assertRaises(ParseError, self.parse_exp,
            'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
            is_lint_mode=True)
    def test_overrides(self):
        # Overrides are merged with (not replacing) the base expectations.
        self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
                       "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
        self.assert_exp_list('failures/expected/text.html', [FAIL, IMAGE])
    def test_overrides__directory(self):
        self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
                       "Bug(override) failures/expected [ Crash ]")
        self.assert_exp_list('failures/expected/text.html', [FAIL, CRASH])
        self.assert_exp_list('failures/expected/image.html', [CRASH])
    def test_overrides__duplicate(self):
        # Duplicate entries within the overrides file are a lint error.
        self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
                                     "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
                                     "Bug(override) failures/expected/text.html [ Crash ]\n")
    def test_pixel_tests_flag(self):
        def match(test, result, pixel_tests_enabled):
            return self._exp.matches_an_expected_result(
                test, result, pixel_tests_enabled, sanitizer_is_enabled=False)
        self.parse_exp(self.get_basic_expectations())
        self.assertTrue(match('failures/expected/text.html', FAIL, True))
        self.assertTrue(match('failures/expected/text.html', FAIL, False))
        self.assertFalse(match('failures/expected/text.html', CRASH, True))
        self.assertFalse(match('failures/expected/text.html', CRASH, False))
        self.assertTrue(match('failures/expected/image_checksum.html', PASS, True))
        self.assertTrue(match('failures/expected/image_checksum.html', PASS, False))
        self.assertTrue(match('failures/expected/crash.html', PASS, False))
        self.assertTrue(match('failures/expected/needsrebaseline.html', TEXT, True))
        self.assertFalse(match('failures/expected/needsrebaseline.html', CRASH, True))
        self.assertTrue(match('failures/expected/needsmanualrebaseline.html', TEXT, True))
        self.assertFalse(match('failures/expected/needsmanualrebaseline.html', CRASH, True))
        self.assertTrue(match('passes/text.html', PASS, False))
    def test_sanitizer_flag(self):
        # Under a sanitizer, only timeouts and crashes are meaningful; other
        # failure expectations collapse to PASS.
        def match(test, result):
            return self._exp.matches_an_expected_result(
                test, result, pixel_tests_are_enabled=False, sanitizer_is_enabled=True)
        self.parse_exp("""
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
""")
        self.assertTrue(match('failures/expected/crash.html', CRASH))
        self.assertTrue(match('failures/expected/image.html', PASS))
        self.assertTrue(match('failures/expected/text.html', PASS))
        self.assertTrue(match('failures/expected/timeout.html', TIMEOUT))
    def test_more_specific_override_resets_skip(self):
        # A per-test entry overrides a Skip inherited from its directory.
        self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
                       "Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
        self.assert_exp('failures/expected/text.html', IMAGE)
        self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
                                                     'failures/expected/text.html') in
                         self._exp.get_tests_with_result_type(SKIP))
    def test_bot_test_expectations(self):
        """Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
        test_name1 = 'failures/expected/text.html'
        test_name2 = 'passes/text.html'
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = "Bug(x) %s [ ImageOnlyFailure ]\nBug(x) %s [ Slow ]\n" % (test_name1, test_name2)
        self._port.expectations_dict = lambda: expectations_dict
        expectations = TestExpectations(self._port, self.get_basic_tests())
        self.assertEqual(expectations.get_expectations(test_name1), set([IMAGE]))
        self.assertEqual(expectations.get_expectations(test_name2), set([SLOW]))
        def bot_expectations():
            return {test_name1: ['PASS', 'TIMEOUT'], test_name2: ['CRASH']}
        self._port.bot_expectations = bot_expectations
        self._port._options.ignore_flaky_tests = 'unexpected'
        expectations = TestExpectations(self._port, self.get_basic_tests())
        self.assertEqual(expectations.get_expectations(test_name1), set([PASS, IMAGE, TIMEOUT]))
        self.assertEqual(expectations.get_expectations(test_name2), set([CRASH, SLOW]))
class SkippedTests(Base):
    # Tests for the interaction between the Skipped-file mechanism and
    # TestExpectations entries/overrides.
    def check(self, expectations, overrides, skips, lint=False, expected_results=[WONTFIX, SKIP, FAIL]):
        # NOTE(review): the mutable default for expected_results is never
        # mutated here, so the shared-default pitfall does not bite.
        port = MockHost().port_factory.get('test-win-xp')
        port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = expectations
        if overrides:
            expectations_dict['overrides'] = overrides
        port.expectations_dict = lambda: expectations_dict
        port.skipped_layout_tests = lambda tests: set(skips)
        expectations_to_lint = expectations_dict if lint else None
        exp = TestExpectations(port, ['failures/expected/text.html'], expectations_dict=expectations_to_lint, is_lint_mode=lint)
        self.assertEqual(exp.get_expectations('failures/expected/text.html'), set(expected_results))
    def test_skipped_tests_work(self):
        self.check(expectations='', overrides=None, skips=['failures/expected/text.html'], expected_results=[WONTFIX, SKIP])
    def test_duplicate_skipped_test_fails_lint(self):
        self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
            overrides=None, skips=['failures/expected/text.html'], lint=True)
    def test_skipped_file_overrides_expectations(self):
        self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
                   skips=['failures/expected/text.html'])
    def test_skipped_dir_overrides_expectations(self):
        self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
                   skips=['failures/expected'])
    def test_skipped_file_overrides_overrides(self):
        self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
                   skips=['failures/expected/text.html'])
    def test_skipped_dir_overrides_overrides(self):
        self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
                   skips=['failures/expected'])
    def test_skipped_entry_dont_exist(self):
        # A Skipped entry for a nonexistent test logs a warning instead of
        # failing outright.
        port = MockHost().port_factory.get('test-win-xp')
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = ''
        port.expectations_dict = lambda: expectations_dict
        port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
        capture = OutputCapture()
        capture.capture_output()
        exp = TestExpectations(port)
        _, _, logs = capture.restore_output()
        self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
    def test_expectations_string(self):
        # Tests added via add_extra_skipped_tests report as NOTRUN.
        self.parse_exp(self.get_basic_expectations())
        notrun = 'failures/expected/text.html'
        self._exp.add_extra_skipped_tests([notrun])
        self.assertEqual('NOTRUN', self._exp.get_expectations_string(notrun))
class ExpectationSyntaxTests(Base):
    # Tests for the line-level tokenizer of the TestExpectations format.
    def test_unrecognized_expectation(self):
        self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
    def test_macro(self):
        # 'Win' is a macro that expands to the concrete Windows versions.
        exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
        self.parse_exp(exp_str)
        self.assert_exp('failures/expected/text.html', FAIL)
    def assert_tokenize_exp(self, line, bugs=None, specifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
        """Tokenize a single expectations line and assert on the parsed fields.

        When warnings are expected, specifiers/expectations are not checked
        since the parse is known to be incomplete.
        """
        bugs = bugs or []
        specifiers = specifiers or []
        expectations = expectations or []
        warnings = warnings or []
        filename = 'TestExpectations'
        line_number = '1'
        expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
        self.assertEqual(expectation_line.warnings, warnings)
        self.assertEqual(expectation_line.name, name)
        self.assertEqual(expectation_line.filename, filename)
        self.assertEqual(expectation_line.line_numbers, line_number)
        if not warnings:
            self.assertEqual(expectation_line.specifiers, specifiers)
            self.assertEqual(expectation_line.expectations, expectations)
    def test_comments(self):
        self.assert_tokenize_exp("# comment", name=None, comment="# comment")
        self.assert_tokenize_exp("foo.html [ Pass ] # comment", comment="# comment", expectations=['PASS'], specifiers=[])
    def test_config_specifiers(self):
        self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', specifiers=['MAC'], expectations=['FAIL'])
    def test_unknown_config(self):
        # Unknown specifiers tokenize cleanly; validation happens later.
        self.assert_tokenize_exp('[ Foo ] foo.html [ Pass ]', specifiers=['Foo'], expectations=['PASS'])
    def test_unknown_expectation(self):
        self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
    def test_skip(self):
        self.assert_tokenize_exp('foo.html [ Skip ]', specifiers=[], expectations=['SKIP'])
    def test_slow(self):
        self.assert_tokenize_exp('foo.html [ Slow ]', specifiers=[], expectations=['SLOW'])
    def test_wontfix(self):
        # WontFix implies SKIP and may not be combined with other expectations.
        self.assert_tokenize_exp('foo.html [ WontFix ]', specifiers=[], expectations=['WONTFIX', 'SKIP'])
        self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', specifiers=[], expectations=['WONTFIX', 'SKIP'],
            warnings=['A test marked Skip or WontFix must not have other expectations.'])
    def test_blank_line(self):
        self.assert_tokenize_exp('', name=None)
    def test_warnings(self):
        self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.', 'Missing expectations.'], name=None)
        self.assert_tokenize_exp('[ [', warnings=['unexpected "["', 'Missing expectations.'], name=None)
        self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"', 'Missing expectations.'], name=None)
        self.assert_tokenize_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.', 'Missing expectations.'])
        self.assert_tokenize_exp('foo.html', warnings=['Missing expectations.'])
class SemanticTests(Base):
    """Checks that semantically invalid expectation lines are rejected.

    Exercises lint-mode validation: bug-identifier format, missing bug
    identifiers, illegal expectation combinations, and duplicate lines.
    """

    def test_bug_format(self):
        # Old-style "BUG1234" identifiers are rejected in lint mode.
        self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)

    def test_bad_bugid(self):
        try:
            self.parse_exp('crbug/1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
            self.fail('should have raised an error about a bad bug identifier')
        # Was 'except ParseError, exp:' -- Python-2-only syntax; the 'as'
        # form works on Python 2.6+ and 3.
        except ParseError as exp:
            self.assertEqual(len(exp.warnings), 3)

    def test_missing_bugid(self):
        # Without lint mode a missing BUG specifier is tolerated...
        self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=False)
        self.assertFalse(self._exp.has_warnings())
        # ...but lint mode reports it.
        # NOTE(review): unlike test_bad_bugid there is no self.fail() here,
        # so this test passes silently if ParseError is never raised --
        # presumably intentional, but worth confirming.
        try:
            self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=True)
        except ParseError as exp:
            self.assertEqual(exp.warnings, ['expectations:1 Test lacks BUG specifier. failures/expected/text.html'])

    def test_skip_and_wontfix(self):
        # Skip is not allowed to have other expectations as well, because those
        # expectations won't be exercised and may become stale.
        self.parse_exp('failures/expected/text.html [ Failure Skip ]')
        self.assertTrue(self._exp.has_warnings())
        self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
        self.assertTrue(self._exp.has_warnings())
        self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
        self.assertTrue(self._exp.has_warnings())

    def test_rebaseline(self):
        # Can't lint a file w/ 'REBASELINE' in it.
        self.assertRaises(ParseError, self.parse_exp,
                          'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
                          is_lint_mode=True)

    def test_duplicates(self):
        # Duplicate lines in the base file are a lint error...
        self.assertRaises(ParseError, self.parse_exp, """
Bug(exp) failures/expected/text.html [ Failure ]
Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
        # ...and so are duplicates within the overrides.
        self.assertRaises(ParseError, self.parse_exp,
                          self.get_basic_expectations(), overrides="""
Bug(override) failures/expected/text.html [ Failure ]
Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)

    def test_duplicate_with_line_before_preceding_line(self):
        # The duplicate is two lines above the line that precedes it.
        self.assert_bad_expectations("""Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
Bug(exp) [ Release ] failures/expected/text.html [ Failure ]
Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
""")

    def test_missing_file(self):
        # A line naming a nonexistent test only warns; it does not raise.
        self.parse_exp('Bug(test) missing_file.html [ Failure ]')
        # Was assertTrue(..., 1): the stray 1 was only used as the failure
        # message, so dropping it does not change behavior.
        self.assertTrue(self._exp.has_warnings())
class PrecedenceTests(Base):
    """Tests for how overlapping expectation lines take precedence."""

    def test_file_over_directory(self):
        # A line naming a specific file beats a line covering its whole
        # directory, regardless of which order the two lines appear in.
        file_line_first = """
Bug(x) failures/expected/text.html [ Failure ]
Bug(y) failures/expected [ WontFix ]
"""
        directory_line_first = """
Bug(x) failures/expected [ WontFix ]
Bug(y) failures/expected/text.html [ Failure ]
"""
        for exp_str in (file_line_first, directory_line_first):
            self.parse_exp(exp_str)
            self.assert_exp('failures/expected/text.html', FAIL)
            self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])

    def test_ambiguous(self):
        # Neither line's specifiers are a superset of the other's.
        self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
                                     "Bug(test) [ Win ] passes/text.html [ Failure ]\n")

    def test_more_specifiers(self):
        # The more-specific line must not contradict the less-specific one.
        self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
                                     "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")

    def test_order_in_file(self):
        # More-specific lines may not precede less-specific ones.
        self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
                                     "Bug(test) [ Release ] : passes/text.html [ Pass ]\n")

    def test_macro_overrides(self):
        # A version specifier (XP) conflicting with its platform macro (Win).
        self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
                                     "Bug(test) [ XP ] passes/text.html [ Failure ]\n")
class RemoveConfigurationsTest(Base):
    """Tests for TestExpectations.remove_configurations().

    Every test used to repeat the same ~7-line MockHost/port setup inline;
    that setup now lives in the _remove() helper. Behavior is unchanged,
    including the pattern where remove_configurations() is called more than
    once and only the final result is asserted.
    """

    def _remove(self, raw_expectations, removal_batches, tests=None):
        """Parses raw_expectations on a test-win-xp port, applies each batch
        of (test-name, port-name) removals in order (one
        remove_configurations() call per batch), and returns the final
        serialized expectations text.

        @param raw_expectations: the TestExpectations file contents.
        @param removal_batches: list of batches; each batch is a list of
            (test_name, port_name) pairs.
        @param tests: optional test list passed through to TestExpectations.
        """
        host = MockHost()
        test_port = host.port_factory.get('test-win-xp', None)
        test_port.test_exists = lambda test: True
        test_port.test_isfile = lambda test: True
        test_port.expectations_dict = lambda: {'expectations': raw_expectations}
        expectations = TestExpectations(test_port, tests)
        actual_expectations = None
        for batch in removal_batches:
            # NOTE(review): configurations are looked up by port name here;
            # this assumes a freshly created port's test_configuration()
            # compares equal to the inline code's test_port.test_configuration()
            # (i.e. TestConfiguration is value-compared) -- confirm.
            pairs = [(test_name, host.port_factory.get(port_name, None).test_configuration())
                     for test_name, port_name in batch]
            actual_expectations = expectations.remove_configurations(pairs)
        return actual_expectations

    def test_remove(self):
        actual_expectations = self._remove(
            """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')]],
            tests=self.get_basic_tests())
        self.assertEqual("""Bug(x) [ Linux Win7 Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_needs_rebaseline(self):
        actual_expectations = self._remove(
            """Bug(x) [ Win ] failures/expected/foo.html [ NeedsRebaseline ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')]],
            tests=self.get_basic_tests())
        self.assertEqual("""Bug(x) [ XP Debug ] failures/expected/foo.html [ NeedsRebaseline ]
Bug(x) [ Win7 ] failures/expected/foo.html [ NeedsRebaseline ]
""", actual_expectations)

    def test_remove_multiple_configurations(self):
        # Both configurations are removed in a single call.
        actual_expectations = self._remove(
            """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
""",
            [[('failures/expected/foo.html', 'test-win-xp'),
              ('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_line_with_comments(self):
        actual_expectations = self._remove(
            """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_line_with_comments_at_start(self):
        actual_expectations = self._remove(
            """
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual("""
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_line_with_comments_at_end_with_no_trailing_newline(self):
        actual_expectations = self._remove(
            """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]""", actual_expectations)

    def test_remove_line_leaves_comments_for_next_line(self):
        actual_expectations = self._remove(
            """
# This comment line should not get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual("""
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_line_no_whitespace_lines(self):
        actual_expectations = self._remove(
            """
# This comment line should get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual(""" # This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_first_line(self):
        actual_expectations = self._remove(
            """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual(""" # This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_flaky_line(self):
        actual_expectations = self._remove(
            """Bug(x) [ Win ] failures/expected/foo.html [ Failure Timeout ]
Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
""",
            [[('failures/expected/foo.html', 'test-win-xp')],
             [('failures/expected/foo.html', 'test-win-win7')]])
        self.assertEqual("""Bug(x) [ Win Debug ] failures/expected/foo.html [ Failure Timeout ]
Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
class RebaseliningTest(Base):
    """Tests for TestExpectations.get_rebaselining_failures()."""

    def test_get_rebaselining_failures(self):
        # Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
        self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
        self.assertEqual(1, len(self._exp.get_rebaselining_failures()))
        # The basic expectations contain no Rebaseline lines at all.
        self.parse_exp(self.get_basic_expectations())
        self.assertEqual(0, len(self._exp.get_rebaselining_failures()))
class TestExpectationsParserTests(unittest.TestCase):
    """Tests for TestExpectationParser."""

    def __init__(self, testFunc):
        # Build a converter and parser against a test-win-xp port.
        host = MockHost()
        test_port = host.port_factory.get('test-win-xp', None)
        self._converter = TestConfigurationConverter(
            test_port.all_test_configurations(),
            test_port.configuration_specifier_macros())
        unittest.TestCase.__init__(self, testFunc)
        self._parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], is_lint_mode=False)

    def test_expectation_line_for_test(self):
        # This is kind of a silly test, but it at least ensures that we don't throw an error.
        test_name = 'foo/test.html'
        expectations = set(["PASS", "IMAGE"])
        line = TestExpectationLine()
        line.original_string = test_name
        line.name = test_name
        line.filename = '<Bot TestExpectations>'
        line.line_numbers = '0'
        line.expectations = expectations
        self._parser._parse_line(line)
        self.assertEqual(line, self._parser.expectation_line_for_test(test_name, expectations))
class TestExpectationSerializationTests(unittest.TestCase):
    """Round-trip tests for serializing expectation lines.

    Parses/tokenizes expectation lines and checks that to_string() /
    list_to_string() reproduce the expected (usually identical) text.
    """

    def __init__(self, testFunc):
        # Build a converter for the test port's configurations so parsed
        # specifier sets can be serialized back into macro names.
        host = MockHost()
        test_port = host.port_factory.get('test-win-xp', None)
        self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
        unittest.TestCase.__init__(self, testFunc)

    def _tokenize(self, line):
        # Tokenize a single line; 'path' and 0 are dummy filename/line-number.
        return TestExpectationParser._tokenize_line('path', line, 0)

    def assert_round_trip(self, in_string, expected_string=None):
        # Tokenizes in_string and asserts it serializes back to
        # expected_string (defaults to the input itself).
        expectation = self._tokenize(in_string)
        if expected_string is None:
            expected_string = in_string
        self.assertEqual(expected_string, expectation.to_string(self._converter))

    def assert_list_round_trip(self, in_string, expected_string=None):
        # Like assert_round_trip, but goes through the full parser and
        # TestExpectations.list_to_string.
        host = MockHost()
        parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], is_lint_mode=False)
        expectations = parser.parse('path', in_string)
        if expected_string is None:
            expected_string = in_string
        self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))

    def test_unparsed_to_string(self):
        # Serialization of a line built up field by field, without parsing.
        expectation = TestExpectationLine()
        self.assertEqual(expectation.to_string(self._converter), '')
        expectation.comment = ' Qux.'
        self.assertEqual(expectation.to_string(self._converter), '# Qux.')
        expectation.name = 'bar'
        self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
        expectation.specifiers = ['foo']
        # FIXME: case should be preserved here but we can't until we drop the old syntax.
        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
        expectation.expectations = ['bAz']
        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
        expectation.expectations = ['bAz1', 'baZ2']
        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
        expectation.specifiers = ['foo1', 'foO2']
        self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
        # A line that has warnings serializes to its original string (or '').
        expectation.warnings.append('Oh the horror.')
        self.assertEqual(expectation.to_string(self._converter), '')
        expectation.original_string = 'Yes it is!'
        self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')

    def test_unparsed_list_to_string(self):
        expectation = TestExpectationLine()
        expectation.comment = 'Qux.'
        expectation.name = 'bar'
        expectation.specifiers = ['foo']
        expectation.expectations = ['bAz1', 'baZ2']
        # FIXME: case should be preserved here but we can't until we drop the old syntax.
        self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')

    def test_parsed_to_string(self):
        expectation_line = TestExpectationLine()
        expectation_line.bugs = ['Bug(x)']
        expectation_line.name = 'test/name/for/realz.html'
        expectation_line.parsed_expectations = set([IMAGE])
        # With no matching configurations there is nothing to serialize.
        self.assertEqual(expectation_line.to_string(self._converter), None)
        expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
        self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
        # Covering both release and debug collapses to the bare version.
        expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
        self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')

    def test_serialize_parsed_expectations(self):
        expectation_line = TestExpectationLine()
        expectation_line.parsed_expectations = set([])
        # Invert the EXPECTATIONS map so parsed values map back to strings.
        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
        expectation_line.parsed_expectations = set([FAIL])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
        expectation_line.parsed_expectations = set([PASS, IMAGE])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'image pass')
        expectation_line.parsed_expectations = set([FAIL, PASS])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')

    def test_serialize_parsed_specifier_string(self):
        expectation_line = TestExpectationLine()
        expectation_line.bugs = ['garden-o-matic']
        expectation_line.parsed_specifiers = ['the', 'for']
        # Specifiers are serialized sorted; extra specifiers are appended.
        self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), 'for the')
        self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'for the win')
        expectation_line.bugs = []
        expectation_line.parsed_specifiers = []
        self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), '')
        self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'win')

    def test_format_line(self):
        # Static formatting helper: with and without a trailing comment.
        self.assertEqual(TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
        self.assertEqual(TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')

    def test_string_roundtrip(self):
        # Malformed and partial lines must still round-trip unchanged.
        self.assert_round_trip('')
        self.assert_round_trip('[')
        self.assert_round_trip('FOO [')
        self.assert_round_trip('FOO ] bar')
        self.assert_round_trip(' FOO [')
        self.assert_round_trip(' [ FOO ] ')
        self.assert_round_trip('[ FOO ] bar [ BAZ ]')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
        self.assert_round_trip('[ FOO ] ] ] bar BAZ')
        self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
        self.assert_round_trip('FOO ] ] bar ==== BAZ')
        self.assert_round_trip('=')
        self.assert_round_trip('#')
        self.assert_round_trip('# ')
        self.assert_round_trip('# Foo')
        self.assert_round_trip('# Foo')
        self.assert_round_trip('# Foo :')
        self.assert_round_trip('# Foo : =')

    def test_list_roundtrip(self):
        self.assert_list_round_trip('')
        self.assert_list_round_trip('\n')
        self.assert_list_round_trip('\n\n')
        self.assert_list_round_trip('bar')
        self.assert_list_round_trip('bar\n# Qux.')
        self.assert_list_round_trip('bar\n# Qux.\n')

    def test_reconstitute_only_these(self):
        # Lines in reconstitute_only_these are re-serialized from parsed
        # state; all other lines keep their original_string verbatim.
        lines = []
        reconstitute_only_these = []

        def add_line(matching_configurations, reconstitute):
            # Builds one parsed line; optionally marks it for reconstitution.
            expectation_line = TestExpectationLine()
            expectation_line.original_string = "Nay"
            expectation_line.bugs = ['Bug(x)']
            expectation_line.name = 'Yay'
            expectation_line.parsed_expectations = set([IMAGE])
            expectation_line.matching_configurations = matching_configurations
            lines.append(expectation_line)
            if reconstitute:
                reconstitute_only_these.append(expectation_line)

        add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
        add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
        serialized = TestExpectations.list_to_string(lines, self._converter)
        self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
        serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
        self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")

    def disabled_test_string_whitespace_stripping(self):
        # FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
        self.assert_round_trip('\n', '')
        self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
        self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
| bsd-3-clause |
falcontersama/chatbot | node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py | 1446 | 65937 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
    # Capture validation warnings in an in-memory buffer so that each
    # test can compare them against its expected warning list.
    self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
    """Compares recorded lines to expected warnings.

    Rewinds the captured stderr buffer, drops empty lines, and asserts
    the remaining lines equal `expected` (order-insensitively).
    """
    self.stderr.seek(0)
    recorded = [line for line in self.stderr.read().split('\n') if line]
    self.assertEqual(sorted(expected), sorted(recorded))
def testValidateMSVSSettings_tool_names(self):
    """Tests that only MSVS tool names are allowed."""
    # 'foo' is made up and 'ClCompile' is an MSBuild (not MSVS) tool name;
    # both should be flagged, while the genuine MSVS tools pass silently.
    settings = {
        'VCCLCompilerTool': {},
        'VCLinkerTool': {},
        'VCMIDLTool': {},
        'foo': {},
        'VCResourceCompilerTool': {},
        'VCLibrarianTool': {},
        'VCManifestTool': {},
        'ClCompile': {},
    }
    MSVSSettings.ValidateMSVSSettings(settings, self.stderr)
    self._ExpectedWarnings([
        'Warning: unrecognized tool foo',
        'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
    """Tests that for invalid MSVS settings."""
    # Exercises a near-exhaustive set of settings per MSVS tool. Most
    # entries are valid and must validate silently; the deliberately bad
    # ones (unknown names like 'Enableprefast'/'ZZXYZ'/'notgood', and
    # out-of-range or mistyped values) must each produce exactly one of
    # the warnings asserted at the bottom.
    MSVSSettings.ValidateMSVSSettings(
        {'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '5',          # out of range [0, 4)
            'BrowseInformation': 'fdkslj',      # not an integer
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '-1',          # out of range [0, 4)
            'CompileAs': '1',
            'DebugInformationFormat': '2',      # has no MSBuild conversion
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',           # unknown setting (bad case)
            'ErrorReporting': '1',
            'ExceptionHandling': '1',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '1',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '1',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'string1;string2',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '1',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},                  # unknown setting
         'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalDependencies_excluded': 'file3',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '2',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'CLRImageType': '2',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '2',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': '2',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'ErrorReporting': '2',
            'FixedBaseAddress': '2',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '2',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '2',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '2',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '2',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '2',               # has no MSBuild conversion
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a string1'},
         'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': '1',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '1',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'notgood': 'bogus',                 # unknown setting
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'VCResourceCompilerTool': {
            'AdditionalOptions': 'a string1',
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'notgood2': 'bogus',                # unknown setting
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
         'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestResourceFile': 'a_file_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'truel',        # misspelled bool value
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}},
        self.stderr)
    # One warning per invalid entry above; adjacent string literals are
    # concatenated, so each pair of lines below is a single warning.
    self._ExpectedWarnings([
        'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
        'index value (5) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/BrowseInformation, '
        "invalid literal for int() with base 10: 'fdkslj'",
        'Warning: for VCCLCompilerTool/CallingConvention, '
        'index value (-1) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/DebugInformationFormat, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
        'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
        'Warning: for VCLinkerTool/TargetMachine, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCMIDLTool/notgood',
        'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
        'Warning: for VCManifestTool/UpdateFileHashes, '
        "expected bool; got 'truel'"
        ''])
  def testValidateMSBuildSettings_settings(self):
    """Tests that invalid MSBuild settings cause warnings.

    Nearly all values below are valid; the handful of deliberately invalid
    entries (marked inline) must each produce one of the expected warnings.
    """
    MSVSSettings.ValidateMSBuildSettings(
        {'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'false',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'BuildingInIDE': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'CompileAsManaged': 'true',
            'CreateHotpatchableImage': 'true',
            'DebugInformationFormat': 'ProgramDatabase',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',  # invalid: case typo for EnablePREfast
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'SyncCThrow',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Precise',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'FunctionLevelLinking': 'false',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'false',
            'MinimalRebuild': 'true',
            'MultiProcessorCompilation': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Disabled',
            'PrecompiledHeader': 'NotUsing',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'PreprocessOutputPath': 'a string1',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'false',
            'ProcessorNumber': '33',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TreatSpecificWarningsAsErrors': 'string1;string2',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UseUnicodeForAssemblerListing': 'true',
            'WarningLevel': 'TurnOffAllWarnings',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},  # invalid: unknown setting name
         'Link': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'BuildingInIDE': 'true',
            'CLRImageType': 'ForceIJWImage',
            'CLRSupportLastError': 'Enabled',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'CreateHotPatchableImage': 'X86Image',
            'DataExecutionPrevention': 'false',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': 'NotSet',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'FixedBaseAddress': 'false',
            'ForceFileOutput': 'Enabled',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'a_file_list',
            'ImageHasSafeExceptionHandlers': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'false',
            'LinkDLL': 'true',
            'LinkErrorReporting': 'SendErrorReport',
            'LinkStatus': 'true',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'MSDOSStubFileName': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'false',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'PreventDllBinding': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SectionAlignment': '33',
            'SetChecksum': 'true',
            'ShowProgress': 'LinkVerboseREF',
            'SpecifySectionAttributes': 'a string1',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Console',
            'SupportNobindOfDelayLoadedDLL': 'true',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TrackerLogDirectory': 'a_folder',
            'TreatLinkerWarningAsErrors': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'AsInvoker',
            'UACUIAccess': 'true',
            'Version': 'a string1'},
         'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'Culture': '0x236',
            'IgnoreStandardIncludePath': 'true',
            'NullTerminateStrings': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ApplicationConfigurationMode': 'true',
            'ClientStubFile': 'a_file_name',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': 'Signed',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'EnableCustom',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateClientFiles': 'Stub',
            'GenerateServerFiles': 'None',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'LocaleID': '33',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'ServerStubFile': 'a_file_name',
            'StructMemberAlignment': 'NotSet',
            'SuppressCompilerWarnings': 'true',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Itanium',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibFormat': 'NewFormat',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'Lib': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'DisplayLibrary': 'a string1',
            'ErrorReporting': 'PromptImmediately',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkTimeCodeGeneration': 'true',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'Name': 'a_file_name',
            'OutputFile': 'a_file_name',
            'RemoveObjects': 'file1;file2',
            'SubSystem': 'Console',
            'SuppressStartupBanner': 'true',
            'TargetMachine': 'MachineX86i',  # invalid: not an enumerated value
            'TrackerLogDirectory': 'a_folder',
            'TreatLibWarningAsErrors': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Verbose': 'true'},
         'Manifest': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'EnableDPIAwareness': 'fal',  # invalid: not a boolean
            'GenerateCatalogFiles': 'truel',  # invalid: not a boolean
            'GenerateCategoryTags': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestFromManagedAssembly': 'a_file_name',
            'notgood3': 'bogus',  # invalid: unknown setting name
            'OutputManifestFile': 'a_file_name',
            'OutputResourceManifests': 'a string1',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressDependencyElement': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
         'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'true'},
         'ManifestResourceCompile': {
            'ResourceOutputFileName': 'a_file_name'},
         '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}},
        self.stderr)
    # One warning per invalid entry above, in validation order.
    self._ExpectedWarnings([
        'Warning: unrecognized setting ClCompile/Enableprefast',
        'Warning: unrecognized setting ClCompile/ZZXYZ',
        'Warning: unrecognized setting Manifest/notgood3',
        'Warning: for Manifest/GenerateCatalogFiles, '
        "expected bool; got 'truel'",
        'Warning: for Lib/TargetMachine, unrecognized enumerated value '
        'MachineX86i',
        "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
  def testConvertToMSBuildSettings_full_synthetic(self):
    """Tests conversion of a full synthetic set of MSVS tool settings.

    Every value is convertible, so the conversion must produce exactly the
    expected MSBuild settings and no warnings.
    """
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '1',
            'BrowseInformation': '2',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '0',
            'CompileAs': '1',
            'DebugInformationFormat': '4',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': '0',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': '1',
            'ExceptionHandling': '2',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '0',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '2',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '3',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '0',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '0',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': '1',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '0',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': '1',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '0',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'ErrorReporting': '0',
            'FixedBaseAddress': '1',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2;file3',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '1',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '0',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '1',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '0',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '3',
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '1',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'false',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a_string'},
        'VCResourceCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': '0',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '2',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '3',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'EmbedManifest': 'true',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'ManifestResourceFile': 'my_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}}
    # The same settings as they should appear after conversion to MSBuild.
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            # NOTE(review): the trailing '/J' presumably comes from converting
            # DefaultCharIsUnsigned above -- confirm against MSVSSettings.
            'AdditionalOptions': 'a_string /J',
            'AdditionalUsingDirectories': 'folder1;folder2;folder3',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'true',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'DebugInformationFormat': 'EditAndContinue',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'd1;d2;d3',
            'EnableEnhancedInstructionSet': 'NotSet',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'Async',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Strict',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2;file3',
            'ForcedUsingFiles': 'file1;file2;file3',
            'FunctionLevelLinking': 'true',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'AnySuitable',
            'IntrinsicFunctions': 'true',
            'MinimalRebuild': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Full',
            'PrecompiledHeader': 'Create',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'true',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'UseFullPaths': 'true',
            'WarningLevel': 'Level2',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name'},
        'Link': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalManifestDependencies': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AddModuleNamesToAssembly': 'file1;file2;file3',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2;file3',
            'BaseAddress': 'a_string',
            'CLRImageType': 'ForceIJWImage',
            'CLRThreadAttribute': 'STAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '',
            'DelayLoadDLLs': 'file1;file2;file3',
            'DelaySign': 'true',
            'Driver': 'Driver',
            'EmbedManagedResourceFile': 'file1;file2;file3',
            'EnableCOMDATFolding': '',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a_string',
            'FixedBaseAddress': 'false',
            'ForceSymbolReferences': 'file1;file2;file3',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a_string',
            'HeapReserveSize': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'true',
            'LinkErrorReporting': 'NoErrorReport',
            'LinkTimeCodeGeneration': 'PGInstrument',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a_string',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': '',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SetChecksum': 'true',
            'ShowProgress': 'NotSet',
            'StackCommitSize': 'a_string',
            'StackReserveSize': 'a_string',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Windows',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineARM',
            'TerminalServerAware': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'HighestAvailable',
            'UACUIAccess': 'true',
            'Version': 'a_string'},
        'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'Culture': '0x03eb',
            'IgnoreStandardIncludePath': 'true',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ResourceOutputFileName': 'a_string',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
        'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'CPreprocessOptions': 'a_string',
            'DefaultCharType': 'Unsigned',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'All',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a_string',
            'PreprocessorDefinitions': 'd1;d2;d3',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '4',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Win32',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'd1;d2;d3',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '4'},
        'Lib': {
            'AdditionalDependencies': 'file1;file2;file3',
            'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
            'AdditionalOptions': 'a_string',
            'ExportNamedFunctions': 'd1;d2;d3',
            'ForceSymbolReferences': 'a_string',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
        'Manifest': {
            'AdditionalManifestFiles': 'file1;file2;file3',
            'AdditionalOptions': 'a_string',
            'AssemblyIdentity': 'a_string',
            'ComponentFileName': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a_string',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
        'ManifestResourceCompile': {
            'ResourceOutputFileName': 'my_name'},
        'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'false'},
        # Settings not tied to a specific tool live under the empty key.
        '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}}
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
%(AdditionalIncludeDirectories) to make sure than inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| mit |
OpenWhere/scrapy | tests/test_link.py | 68 | 2014 | import unittest
import warnings
from scrapy.link import Link
class LinkTest(unittest.TestCase):
def _assert_same_links(self, link1, link2):
self.assertEqual(link1, link2)
self.assertEqual(hash(link1), hash(link2))
def _assert_different_links(self, link1, link2):
self.assertNotEqual(link1, link2)
self.assertNotEqual(hash(link1), hash(link2))
def test_eq_and_hash(self):
l1 = Link(b"http://www.example.com")
l2 = Link(b"http://www.example.com/other")
l3 = Link(b"http://www.example.com")
self._assert_same_links(l1, l1)
self._assert_different_links(l1, l2)
self._assert_same_links(l1, l3)
l4 = Link(b"http://www.example.com", text="test")
l5 = Link(b"http://www.example.com", text="test2")
l6 = Link(b"http://www.example.com", text="test")
self._assert_same_links(l4, l4)
self._assert_different_links(l4, l5)
self._assert_same_links(l4, l6)
l7 = Link(b"http://www.example.com", text="test", fragment='something', nofollow=False)
l8 = Link(b"http://www.example.com", text="test", fragment='something', nofollow=False)
l9 = Link(b"http://www.example.com", text="test", fragment='something', nofollow=True)
l10 = Link(b"http://www.example.com", text="test", fragment='other', nofollow=False)
self._assert_same_links(l7, l8)
self._assert_different_links(l7, l9)
self._assert_different_links(l7, l10)
def test_repr(self):
l1 = Link(b"http://www.example.com", text="test", fragment='something', nofollow=True)
l2 = eval(repr(l1))
self._assert_same_links(l1, l2)
def test_unicode_url(self):
with warnings.catch_warnings(record=True) as w:
link = Link(u"http://www.example.com/\xa3")
self.assertIsInstance(link.url, bytes)
self.assertEqual(link.url, b'http://www.example.com/\xc2\xa3')
assert len(w) == 1, "warning not issued"
| bsd-3-clause |
Clear-ICT/odoo-addons | sale_new_so/models/sale.py | 1 | 3044 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2015 Clear ICT Solutions <info@clearict.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import api, models
class SaleOrder(models.Model):
_inherit = 'sale.order.line'
# Copied verbatim from a similar function in sale_sample module.
#
def _get_fields_from_line(self):
"""Internal function to get the fields of the sale order line. Modules
enhancing this one should add their own fields to the return value."""
res = {
'product_id': self.product_id.id,
'name': self.name,
'price_unit': self.price_unit,
'product_uom': self.product_uom.id,
'product_uom_qty': self.product_uom_qty,
'product_uos_qty': self.product_uos_qty,
}
# Simple check for installation of sale_line_code module
if hasattr(self, 'order_line_ref'):
res.update({'order_line_ref': self.order_line_ref})
return res
@api.multi
def create_so_from_line(self):
self.ensure_one()
OrigSO = self.order_id
new_name = OrigSO.get_so_create_name(OrigSO.partner_id.id, '/')
new_so = {
'origin': OrigSO.name,
'name': new_name,
'partner_id': OrigSO.partner_id.id,
'date_order': OrigSO.date_order,
'client_order_ref': OrigSO.client_order_ref,
'pricelist_id':
OrigSO.pricelist_id and OrigSO.pricelist_id.id or False,
'currency_id':
OrigSO.currency_id and OrigSO.currency_id.id or False,
'user_id': OrigSO.user_id.id,
'section_id': OrigSO.section_id and OrigSO.section_id.id or False,
'payment_term':
OrigSO.payment_term and OrigSO.payment_term.id or False,
'fiscal_position':
OrigSO.fiscal_position and OrigSO.fiscal_position.id or False,
'company_id': OrigSO.company_id and OrigSO.company_id.id or False,
'order_line': [(0, 0, self._get_fields_from_line())]
}
NewSO = OrigSO.create(new_so)
return {
'type': 'ir.actions.act_window',
'res_model': 'sale.order',
'res_id': NewSO.id,
'view_mode': 'form,tree,calendar,graph',
'view_type': 'form',
}
| agpl-3.0 |
GREO/GNU-Radio | gnuradio-examples/python/audio/audio_copy.py | 17 | 2378 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-I", "--audio-input", type="string", default="",
help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
src = audio.source (sample_rate, options.audio_input)
dst = audio.sink (sample_rate, options.audio_output)
# Determine the maximum number of outputs on the source and
# maximum number of inputs on the sink, then connect together
# the most channels we can without overlap
nchan = min (src.output_signature().max_streams(),
dst.input_signature().max_streams())
for i in range (nchan):
self.connect ((src, i), (dst, i))
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| gpl-3.0 |
c-wilson/klustaviewa | klustaviewa/views/tests/test_projectionview.py | 2 | 1598 | """Unit tests for projection view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import ProjectionView
from klustaviewa.views.tests.utils import show_view, get_data, assert_fun
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_projectionview():
kwargs = {}
kwargs['operators'] = [
lambda self: assert_fun(self.view.get_projection(0) == (0, 0)),
lambda self: assert_fun(self.view.get_projection(1) == (0, 1)),
lambda self: self.view.select_channel(0, 5),
lambda self: self.view.select_feature(0, 1),
lambda self: self.view.select_channel(1, 32),
lambda self: self.view.select_feature(1, 2),
lambda self: assert_fun(self.view.get_projection(0) == (5, 1)),
lambda self: assert_fun(self.view.get_projection(1) == (32, 2)),
lambda self: (self.close()
if USERPREF['test_auto_close'] != False else None),
]
kwargs['fetdim'] = 3
kwargs['nchannels'] = 32
kwargs['nextrafet'] = 3
# Show the view.
show_view(ProjectionView, **kwargs)
| bsd-3-clause |
petewarden/tensorflow | tensorflow/python/tools/api/generator/output_init_files_test.py | 11 | 7064 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for api_init_files.bzl and api_init_files_v1.bzl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# The unused imports are needed so that the python and lite modules are
# available in sys.modules
# pylint: disable=unused-import
from tensorflow import python as _tf_for_api_traversal
from tensorflow.lite.python import lite as _tflite_for_api_traversal
from tensorflow.python import modules_with_exports
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.coordinator import cluster_coordinator
from tensorflow.python.framework import combinations
from tensorflow.python.framework import test_combinations
# pylint: enable=unused-import
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
def _get_module_from_symbol(symbol):
if '.' not in symbol:
return ''
return '.'.join(symbol.split('.')[:-1])
def _get_modules(package, attr_name, constants_attr_name):
"""Get list of TF API modules.
Args:
package: We only look at modules that contain package in the name.
attr_name: Attribute set on TF symbols that contains API names.
constants_attr_name: Attribute set on TF modules that contains
API constant names.
Returns:
Set of TensorFlow API modules.
"""
modules = set()
# TODO(annarev): split up the logic in create_python_api.py so that
# it can be reused in this test.
for module in list(sys.modules.values()):
if (not module or not hasattr(module, '__name__') or
package not in module.__name__):
continue
for module_contents_name in dir(module):
attr = getattr(module, module_contents_name)
_, attr = tf_decorator.unwrap(attr)
# Add modules to _tf_api_constants attribute.
if module_contents_name == constants_attr_name:
for exports, _ in attr:
modules.update(
[_get_module_from_symbol(export) for export in exports])
continue
# Add modules for _tf_api_names attribute.
if (hasattr(attr, '__dict__') and attr_name in attr.__dict__):
modules.update([
_get_module_from_symbol(export)
for export in getattr(attr, attr_name)])
return modules
def _get_files_set(path, start_tag, end_tag):
"""Get set of file paths from the given file.
Args:
path: Path to file. File at `path` is expected to contain a list of paths
where entire list starts with `start_tag` and ends with `end_tag`. List
must be comma-separated and each path entry must be surrounded by double
quotes.
start_tag: String that indicates start of path list.
end_tag: String that indicates end of path list.
Returns:
List of string paths.
"""
with open(path, 'r') as f:
contents = f.read()
start = contents.find(start_tag) + len(start_tag) + 1
end = contents.find(end_tag)
contents = contents[start:end]
file_paths = [
file_path.strip().strip('"') for file_path in contents.split(',')]
return set(file_path for file_path in file_paths if file_path)
def _module_to_paths(module):
"""Get all API __init__.py file paths for the given module.
Args:
module: Module to get file paths for.
Returns:
List of paths for the given module. For e.g. module foo.bar
requires 'foo/__init__.py' and 'foo/bar/__init__.py'.
"""
submodules = []
module_segments = module.split('.')
for i in range(len(module_segments)):
submodules.append('.'.join(module_segments[:i+1]))
paths = []
for submodule in submodules:
if not submodule:
paths.append('__init__.py')
continue
paths.append('%s/__init__.py' % (submodule.replace('.', '/')))
return paths
class OutputInitFilesTest(test.TestCase):
"""Test that verifies files that list paths for TensorFlow API."""
def _validate_paths_for_modules(
self, actual_paths, expected_paths, file_to_update_on_error):
"""Validates that actual_paths match expected_paths.
Args:
actual_paths: */__init__.py file paths listed in file_to_update_on_error.
expected_paths: */__init__.py file paths that we need to create for
TensorFlow API.
file_to_update_on_error: File that contains list of */__init__.py files.
We include it in error message printed if the file list needs to be
updated.
"""
self.assertTrue(actual_paths)
self.assertTrue(expected_paths)
missing_paths = expected_paths - actual_paths
extra_paths = actual_paths - expected_paths
# Surround paths with quotes so that they can be copy-pasted
# from error messages as strings.
missing_paths = ['\'%s\'' % path for path in missing_paths]
extra_paths = ['\'%s\'' % path for path in extra_paths]
self.assertFalse(
missing_paths,
'Please add %s to %s.' % (
',\n'.join(sorted(missing_paths)), file_to_update_on_error))
self.assertFalse(
extra_paths,
'Redundant paths, please remove %s in %s.' % (
',\n'.join(sorted(extra_paths)), file_to_update_on_error))
def test_V2_init_files(self):
modules = _get_modules(
'tensorflow', '_tf_api_names', '_tf_api_constants')
file_path = resource_loader.get_path_to_datafile(
'api_init_files.bzl')
paths = _get_files_set(
file_path, '# BEGIN GENERATED FILES', '# END GENERATED FILES')
module_paths = set(
f for module in modules for f in _module_to_paths(module))
self._validate_paths_for_modules(
paths, module_paths, file_to_update_on_error=file_path)
def test_V1_init_files(self):
modules = _get_modules(
'tensorflow', '_tf_api_names_v1', '_tf_api_constants_v1')
file_path = resource_loader.get_path_to_datafile(
'api_init_files_v1.bzl')
paths = _get_files_set(
file_path, '# BEGIN GENERATED FILES', '# END GENERATED FILES')
module_paths = set(
f for module in modules for f in _module_to_paths(module))
self._validate_paths_for_modules(
paths, module_paths, file_to_update_on_error=file_path)
if __name__ == '__main__':
test.main()
| apache-2.0 |
timbr-io/juno-magic | juno_magic/client.py | 2 | 12780 | """Implements a fully blocking kernel client.
Useful for test suites and blocking terminal interfaces.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from functools import partial
from getpass import getpass
try:
from queue import Empty # Python 3
except ImportError:
from Queue import Empty # Python 2
import sys
import time
import zmq
from traitlets import Type
from jupyter_client.channels import HBChannel
from jupyter_client.client import KernelClient
# from .channels import ZMQSocketChannel
from jupyter_client.blocking.channels import ZMQSocketChannel
try:
monotonic = time.monotonic
except AttributeError:
# py2
monotonic = time.time # close enough
try:
TimeoutError
except NameError:
# py2
TimeoutError = RuntimeError
def reqrep(meth):
def wrapped(self, *args, **kwargs):
reply = kwargs.pop('reply', False)
timeout = kwargs.pop('timeout', None)
msg_id = meth(self, *args, **kwargs)
if not reply:
return msg_id
return self._recv_reply(msg_id, timeout=timeout)
try:
basedoc, _ = meth.__doc__.split('Returns\n', 1)
parts = [basedoc.strip()]
if 'Parameters' not in basedoc:
parts.append("""
Parameters
----------
""")
parts.append("""
reply: bool (default: False)
Whether to wait for and return reply
timeout: float or None (default: None)
Timeout to use when waiting for a reply
Returns
-------
msg_id: str
The msg_id of the request sent, if reply=False (default)
reply: dict
The reply message for this request, if reply=True
""")
wrapped.__doc__ = '\n'.join(parts)
except ValueError as ve:
pass
return wrapped
class BlockingKernelClient(KernelClient):
"""A BlockingKernelClient """
def wait_for_ready(self, timeout=None):
"""Waits for a response when a client is blocked
- Sets future time for timeout
- Blocks on shell channel until a message is received
- Exit if the kernel has died
- If client times out before receiving a message from the kernel, send RuntimeError
- Flush the IOPub channel
"""
if timeout is None:
abs_timeout = float('inf')
else:
abs_timeout = time.time() + timeout
#from ..manager import KernelManager
from jupyter_client.manager import KernelManager
if not isinstance(self.parent, KernelManager):
# This Client was not created by a KernelManager,
# so wait for kernel to become responsive to heartbeats
# before checking for kernel_info reply
while not self.is_alive():
if time.time() > abs_timeout:
raise RuntimeError("Kernel didn't respond to heartbeats in %d seconds and timed out" % timeout)
time.sleep(0.2)
# Wait for kernel info reply on shell channel
while True:
try:
msg = self.shell_channel.get_msg(block=True, timeout=1)
except Empty:
pass
else:
if msg['msg_type'] == 'kernel_info_reply':
self._handle_kernel_info_reply(msg)
break
if not self.is_alive():
raise RuntimeError('Kernel died before replying to kernel_info')
# Check if current time is ready check time plus timeout
if time.time() > abs_timeout:
raise RuntimeError("Kernel didn't respond in %d seconds" % timeout)
# Flush IOPub channel
while True:
try:
msg = self.iopub_channel.get_msg(block=True, timeout=0.2)
except Empty:
break
# The classes to use for the various channels
shell_channel_class = Type(ZMQSocketChannel)
iopub_channel_class = Type(ZMQSocketChannel)
stdin_channel_class = Type(ZMQSocketChannel)
hb_channel_class = Type(HBChannel)
def _recv_reply(self, msg_id, timeout=None):
"""Receive and return the reply for a given request"""
if timeout is not None:
deadline = monotonic() + timeout
while True:
if timeout is not None:
timeout = max(0, deadline - monotonic())
try:
reply = self.get_shell_msg(timeout=timeout)
except Empty:
raise TimeoutError("Timeout waiting for reply")
if reply['parent_header'].get('msg_id') != msg_id:
# not my reply, someone may have forgotten to retrieve theirs
continue
return reply
execute = reqrep(KernelClient.execute)
history = reqrep(KernelClient.history)
complete = reqrep(KernelClient.complete)
inspect = reqrep(KernelClient.inspect)
kernel_info = reqrep(KernelClient.kernel_info)
comm_info = reqrep(KernelClient.comm_info)
shutdown = reqrep(KernelClient.shutdown)
def _stdin_hook_default(self, msg):
"""Handle an input request"""
content = msg['content']
if content.get('password', False):
prompt = getpass
elif sys.version_info < (3,):
prompt = raw_input
else:
prompt = input
try:
raw_data = prompt(content["prompt"])
except EOFError:
# turn EOFError into EOF character
raw_data = '\x04'
except KeyboardInterrupt:
sys.stdout.write('\n')
return
# only send stdin reply if there *was not* another request
# or execution finished while we were reading.
if not (self.stdin_channel.msg_ready() or self.shell_channel.msg_ready()):
self.input(raw_data)
def _output_hook_default(self, msg):
"""Default hook for redisplaying plain-text output"""
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'stream':
stream = getattr(sys, content['name'])
stream.write(content['text'])
elif msg_type in ('display_data', 'execute_result'):
sys.stdout.write(content['data'].get('text/plain', ''))
elif msg_type == 'error':
print('\n'.join(content['traceback']), file=sys.stderr)
def _output_hook_kernel(self, session, socket, parent_header, msg):
"""Output hook when running inside an IPython kernel
adds rich output support.
"""
msg_type = msg['header']['msg_type']
if msg_type in ('display_data', 'execute_result', 'error'):
session.send(socket, msg_type, msg['content'], parent=parent_header)
else:
self._output_hook_default(msg)
def interactive(self, fn, *args, **kwargs):
    """Invoke the request method *fn*, then block for and return its reply.

    A ``timeout=`` keyword, if present, is both forwarded to *fn* and
    used as the reply-wait budget.
    """
    timeout = kwargs.get('timeout', None)
    if timeout is not None:
        # Express the budget as time remaining until a deadline so the
        # value handed to _recv_reply is never negative.
        deadline = monotonic() + timeout
        timeout = max(0, deadline - monotonic())
    request_id = fn(*args, **kwargs)
    return self._recv_reply(request_id, timeout=timeout)
def execute_interactive(self, code, silent=False, store_history=True,
        user_expressions=None, allow_stdin=None, stop_on_error=True,
        timeout=None, output_hook=None, stdin_hook=None,
        ):
    """Execute code in the kernel interactively
    Output will be redisplayed, and stdin prompts will be relayed as well.
    If an IPython kernel is detected, rich output will be displayed.
    You can pass a custom output_hook callable that will be called
    with every IOPub message that is produced instead of the default redisplay.
    Parameters
    ----------
    code : str
        A string of code in the kernel's language.
    silent : bool, optional (default False)
        If set, the kernel will execute the code as quietly possible, and
        will force store_history to be False.
    store_history : bool, optional (default True)
        If set, the kernel will store command history. This is forced
        to be False if silent is True.
    user_expressions : dict, optional
        A dict mapping names to expressions to be evaluated in the user's
        dict. The expression values are returned as strings formatted using
        :func:`repr`.
    allow_stdin : bool, optional (default self.allow_stdin)
        Flag for whether the kernel can send stdin requests to frontends.
        Some frontends (e.g. the Notebook) do not support stdin requests.
        If raw_input is called from code executed from such a frontend, a
        StdinNotImplementedError will be raised.
    stop_on_error: bool, optional (default True)
        Flag whether to abort the execution queue, if an exception is encountered.
    timeout: float or None (default: None)
        Timeout to use when waiting for a reply
    output_hook: callable(msg)
        Function to be called with output messages.
        If not specified, output will be redisplayed.
    stdin_hook: callable(msg)
        Function to be called with stdin_request messages.
        If not specified, input/getpass will be called.
    Returns
    -------
    reply: dict
        The reply message for this request
    """
    # Both preconditions fail fast with a clear error rather than hanging.
    if not self.iopub_channel.is_alive():
        raise RuntimeError("IOPub channel must be running to receive output")
    if allow_stdin is None:
        allow_stdin = self.allow_stdin
    if allow_stdin and not self.stdin_channel.is_alive():
        raise RuntimeError("stdin channel must be running to allow input")
    # Fire off the (non-blocking) execute request; msg_id identifies it.
    msg_id = self.execute(code,
                          silent=silent,
                          store_history=store_history,
                          user_expressions=user_expressions,
                          allow_stdin=allow_stdin,
                          stop_on_error=stop_on_error,
                          )
    if stdin_hook is None:
        stdin_hook = self._stdin_hook_default
    if output_hook is None:
        # detect IPython kernel
        if 'IPython' in sys.modules:
            from IPython import get_ipython
            ip = get_ipython()
            in_kernel = getattr(ip, 'kernel', False)
            if in_kernel:
                # Running inside a kernel: forward rich output on the
                # kernel's own display publisher.
                output_hook = partial(
                    self._output_hook_kernel,
                    ip.display_pub.session,
                    ip.display_pub.pub_socket,
                    ip.display_pub.parent_header,
                )
    if output_hook is None:
        # default: redisplay plain-text outputs
        output_hook = self._output_hook_default
    # set deadline based on timeout
    if timeout is not None:
        deadline = monotonic() + timeout
    else:
        # NOTE(review): timeout_ms is dead code now that the poll loop
        # below is commented out; it was the poll interval in the
        # original loop.
        timeout_ms = None
    # NOTE(review): this poller registration is also unused while the
    # loop below stays commented out; kept so the loop can be restored.
    poller = zmq.Poller()
    iopub_socket = self.iopub_channel.socket
    poller.register(iopub_socket, zmq.POLLIN)
    if allow_stdin:
        stdin_socket = self.stdin_channel.socket
        poller.register(stdin_socket, zmq.POLLIN)
    else:
        stdin_socket = None
    # wait for output and redisplay it
    # NOTE: Commenting out the following lines so that we can continue to proxy all iopub messages over Wamp
    # while True:
    #     if timeout is not None:
    #         timeout = max(0, deadline - monotonic())
    #         timeout_ms = 1e3 * timeout
    #     events = dict(poller.poll(timeout_ms))
    #     if not events:
    #         raise TimeoutError("Timeout waiting for output")
    #     if stdin_socket in events:
    #         req = self.stdin_channel.get_msg(timeout=0)
    #         stdin_hook(req)
    #         continue
    #     if iopub_socket not in events:
    #         continue
    #
    #     msg = self.iopub_channel.get_msg(timeout=0)
    #
    #     if msg['parent_header'].get('msg_id') != msg_id:
    #         # not from my request
    #         continue
    #     output_hook(msg)
    #
    #     # stop on idle
    #     if msg['header']['msg_type'] == 'status' and \
    #        msg['content']['execution_state'] == 'idle':
    #         break
    #
    # # output is done, get the reply
    # With the loop disabled, skip straight to waiting for the shell reply.
    if timeout is not None:
        timeout = max(0, deadline - monotonic())
    return self._recv_reply(msg_id, timeout=timeout)
| mit |
cchurch/ansible | lib/ansible/modules/cloud/ovirt/ovirt_cluster_facts.py | 55 | 3498 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_cluster_facts
short_description: Retrieve facts about one or more oVirt/RHV clusters
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV clusters."
notes:
- "This module creates a new top-level C(ovirt_clusters) fact, which
contains a list of clusters."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search cluster X from datacenter Y use following pattern:
name=X and datacenter=Y"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all clusters which names start with C<production>:
- ovirt_cluster_facts:
pattern:
name: 'production*'
- debug:
var: ovirt_clusters
'''
RETURN = '''
ovirt_clusters:
description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Ansible module entry point: gather oVirt/RHV cluster facts.

    Queries the engine's clusters service with the optional search
    ``pattern`` and exits the module with an ``ovirt_clusters`` fact
    (a list of cluster dicts).
    """
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)
    # Pre-initialize so the finally clause is safe even if popping the
    # auth params or opening the connection raises: previously a failure
    # in create_connection() made the finally block die with NameError,
    # masking the original error.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        clusters = clusters_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_clusters=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in clusters
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out of connections we actually opened; keep a
        # token-based session alive (logout only when no token was used).
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
GreenRecycleBin/servo | tests/wpt/css-tests/css-fonts-3_dev/html/reference/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
# TTX (XML) template for the shell font; __familyName__ is substituted
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
# temporary compiled shell font, deleted after each build
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
# tab-separated list of OpenType feature tags to generate
featureList = os.path.join(directory, "gsubtest-features.txt")
# generated JS table mapping feature tags to codepoints
javascriptData = os.path.join(directory, "gsubtest-features.js")
# output font path pattern; %d is the GSUB lookup type (1 or 3)
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
# first Private Use Area codepoint; each feature gets a 4-codepoint slot
baseCodepoint = 0xe000
# -------
# Features
# -------
# Read the feature list and collect the feature tags (module is Python 2,
# so the bytes read from the "rb" file are plain str here).
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
    line = line.strip()
    # skip blank lines and '#' comment lines
    if not line:
        continue
    if line.startswith("#"):
        continue
    # parse
    # only the first tab-separated field (the feature tag) is kept;
    # any remaining fields on the line are ignored
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Append one glyph to the CFF table being assembled.

    Builds a T2 charstring from *program* and registers *glyphName* in
    the charstring index, the CharStrings mapping and the charset.
    All container arguments are mutated in place.
    """
    charStringsIndex.append(
        T2CharString(program=program, private=private, globalSubrs=globalSubrs))
    # glyph ID is the charset position *before* the name is appended
    newGlyphID = len(topDict.charset)
    charStrings.charStrings[glyphName] = newGlyphID
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build the gsubtest-lookup1 test font.

    For every feature tag this creates <tag>.pass / <tag>.fail glyphs and
    a GSUB lookup type 1 (single substitution) that swaps them, so a
    renderer applying the feature shows PASS instead of FAIL.

    NOTE: this module is Python 2 only (comma-style raise below).
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    # each feature occupies a 4-codepoint slot starting at baseCodepoint;
    # only the first codepoint (the .pass glyph) is mapped here
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # note: the .fail glyph is deliberately NOT cmap-mapped here
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    # a single DFLT script whose default langsys references every feature
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    # one feature record per tag, each pointing at the lookup of the
    # same index (features and lookups share the sorted tag order)
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        # the substitution is symmetric: pass <-> fail
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build the gsubtest-lookup3 test font.

    For every feature tag this creates <tag>.default/.alt1/.alt2/.alt3
    glyphs plus hidden .pass/.fail glyphs, and a GSUB lookup type 3
    (alternate substitution) so that selecting alternate N turns exactly
    one of the four mapped codepoints into PASS.

    NOTE: this module is Python 2 only (comma-style raise below).
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    # each feature occupies 4 consecutive codepoints:
    # cp=.default, cp+1=.alt1, cp+2=.alt2, cp+3=.alt3
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        # (.pass/.fail exist as substitution targets only: no cmap entry)
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list
    # a single DFLT script whose default langsys references every feature
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    # one feature record per tag, each pointing at the lookup of the
    # same index (features and lookups share the sorted tag order)
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        # alternate N of each glyph is PASS exactly when the glyph is
        # .altN (or never, for .default); everything else is FAIL
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Regenerate the JavaScript feature table (gsubtest-features.js).

    Emits a banner explaining the codepoint layout, then a gFeatures
    object mapping each feature tag to its base codepoint (4 codepoints
    per feature slot).
    """
    features = sorted(mapping)
    lines = []
    push = lines.append
    push("")
    push("/* This file is autogenerated by makegsubfonts.py */")
    push("")
    push("/* ")
    push(" Features defined in gsubtest fonts with associated base")
    push(" codepoints for each feature:")
    push("")
    push(" cp = codepoint for feature featX")
    push("")
    push(" cp default PASS")
    push(" cp featX=1 FAIL")
    push(" cp featX=2 FAIL")
    push("")
    push(" cp+1 default FAIL")
    push(" cp+1 featX=1 PASS")
    push(" cp+1 featX=2 FAIL")
    push("")
    push(" cp+2 default FAIL")
    push(" cp+2 featX=1 FAIL")
    push(" cp+2 featX=2 PASS")
    push("")
    push("*/")
    push("")
    push("var gFeatures = {")
    # one "tag": 0xXXXX entry per feature, 4 codepoints apart
    entries = []
    cp = baseCodepoint
    for tag in features:
        entries.append("\"%s\": 0x%x" % (tag, cp))
        cp += 4
    push(textwrap.fill(", ".join(entries), initial_indent=" ", subsequent_indent=" "))
    push("};")
    push("")
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(lines))
    f.close()
# Script entry point: build both test fonts, then the JS table.
# (Python 2 print statements; this script cannot run under Python 3.)
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
mozilla/kuma | kuma/scrape/tests/test_fixture.py | 1 | 4594 |
import pytest
from constance import config
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from kuma.users.models import User, UserBan
from ..fixture import FixtureLoader
def test_empty_fixtures():
    """An empty specification is OK; loading it does nothing."""
    FixtureLoader({}).load()
@pytest.mark.django_db
def test_load_constance():
    """A fixture without dependencies is loaded and takes effect."""
    spec = {
        'database.constance': [{
            'key': 'KUMASCRIPT_MAX_AGE', 'value': 321
        }]}
    FixtureLoader(spec).load()
    # Drop constance's backend cache so the read below hits the database
    # value the loader just wrote.
    config._backend._cache = None  # Avoid cached config value
    assert config.KUMASCRIPT_MAX_AGE == 321
@pytest.mark.django_db
def test_load_group():
    """A fixture with existing to-many dependencies is loaded."""
    spec = {
        'auth.group': [{
            'name': 'Attachment Moderators',
            'permissions': [
                ['change_attachment']
            ]
        }],
        'auth.permission': [{
            'codename': 'change_attachment',
            'name': 'Can change attachment',
            'content_type': ['attachments', 'attachment']
        }],
        'contenttypes.contenttype': [{
            'app_label': 'attachments',
            'model': 'attachment',
        }]
    }
    # The content type and permission already exist in the test database
    # (presumably created by Django's contenttypes/auth machinery), so
    # the loader should attach to them rather than create duplicates.
    ct_attachment = ContentType.objects.get(
        app_label='attachments', model='attachment')
    perm_attachment = Permission.objects.get(
        content_type=ct_attachment, codename='change_attachment')
    # The group itself must not exist yet; the loader creates it.
    group_query = Group.objects.filter(name='Attachment Moderators')
    assert not group_query.exists()
    FixtureLoader(spec).load()
    group = group_query.get()
    assert list(group.permissions.all()) == [perm_attachment]
def test_underspecified_key_is_error():
    """A fixture must define all the natural key items.

    'username' is part of the user model's natural key, so a user spec
    without it is rejected at FixtureLoader construction time.
    """
    spec = {
        'users.user': [{
            'email': 'email@example.com',
            'password': 'password',
        }]
    }
    with pytest.raises(ValueError) as error:
        FixtureLoader(spec)
    assert str(error.value) == 'users.user 0: Needs key "username"'
@pytest.mark.django_db
def test_relation_as_key():
    """A fixture with a relation as a key can be loaded.

    Relations are expressed as lists holding the target's natural key
    (e.g. ['spammer'] refers to the user with that username).
    """
    spec = {
        'users.user': [{
            'username': 'admin',
            'is_staff': True
        }, {
            'username': 'spammer',
        }],
        'users.userban': [{
            'user': ['spammer'],
            'by': ['admin'],
            'reason': 'Spam'
        }]
    }
    FixtureLoader(spec).load()
    ban = UserBan.objects.get()
    assert ban.user.username == 'spammer'
    assert ban.by.username == 'admin'
@pytest.mark.django_db
def test_update_m2m_of_existing_instance():
    """Many-to-many relations of existing instances are updated."""
    # Pre-create the user outside the fixture so the loader has to update
    # it rather than create it.
    user = User.objects.create(username='ironman')
    assert not user.groups.exists()
    spec = {
        'auth.group': [{
            'name': 'Avengers',
        }, {
            'name': 'Illuminati',
        }],
        'users.user': [{
            'username': 'ironman',
            'is_staff': True,
            'groups': [['Avengers'], ['Illuminati']]
        }]
    }
    FixtureLoader(spec).load()
    user.refresh_from_db()
    new_groups = list(sorted(user.groups.values_list('name', flat=True)))
    assert new_groups == ['Avengers', 'Illuminati']
def test_missing_relation_is_error():
    """An unspecified relation is detected, raises exception.

    The group 'SHIELD' is referenced but never specified, so the loader
    can never resolve the dependency and reports a block.
    """
    spec = {
        'users.user': [{
            'username': 'captain_america',
            'groups': [['SHIELD']]
        }]
    }
    with pytest.raises(RuntimeError) as error:
        FixtureLoader(spec).load()
    assert str(error.value) == 'Dependency block detected.'
@pytest.mark.django_db
def test_missing_key_relation_is_error():
    """A relation pointing at an unspecified natural key raises.

    The ban refers to user 'loki', who is never specified, so the loader
    reports a dependency block.
    """
    spec = {
        'users.user': [{
            'username': 'odin',
        }],
        'users.userban': [{
            'user': ['loki'],
            'by': ['odin'],
            'reason': 'Treason'
        }]
    }
    with pytest.raises(RuntimeError) as error:
        FixtureLoader(spec).load()
    assert str(error.value) == 'Dependency block detected.'
@pytest.mark.django_db
def test_user_password():
    """A user.password is hashed.

    The loader must store the password via Django's hasher, not as the
    raw string, so check_password() is the only valid verification.
    """
    spec = {
        'users.user': [{
            'username': 'wagstaff',
            'password': 'swordfish'
        }]
    }
    FixtureLoader(spec).load()
    user = User.objects.get(username='wagstaff')
    assert user.check_password('swordfish')
| mpl-2.0 |
fras2560/InducedSubgraph | inducer/test/testDcolor.py | 1 | 5492 | '''
@author: Dallas Fraser
@date: 2019-08-10
@summary: Tests for the dcolor class and methods
'''
from inducer.dcolor import Dcolor, combine_color_clique, convert_combo,\
add_list
from inducer.helper import make_diamond, make_claw, make_cycle, join
import networkx as nx
import unittest
class TestDcolor(unittest.TestCase):
    """Unit tests for the Dcolor coloring class and module helpers."""
    def setUp(self):
        # default fixture: a claw (K_{1,3}), which is 2-chromatic
        self.dcolor = Dcolor(make_claw())
    def tearDown(self):
        pass
    def testColoringAux(self):
        # smoke test only: checks color_aux() runs without raising
        self.dcolor.color_aux()
    def testCreateCliqueList(self):
        """create_clique_list returns the expected partition for small graphs."""
        c, k, i = self.dcolor.create_clique_list()
        self.assertEqual([[[0], [1]], [[2]], [[3]]], c)
        self.assertEqual(k, 2)
        self.assertEqual(i, 0)
        self.dcolor = Dcolor(make_diamond())
        c, k, i = self.dcolor.create_clique_list()
        self.assertEqual([[[0], [1], [2]], [[3]]], c)
        self.assertEqual(k, 3)
        self.assertEqual(i, 0)
    def testColor(self):
        """color() produces the expected color classes for basic graphs."""
        result = self.dcolor.color()
        expect = [[0], [1, 2, 3]]
        self.assertEqual(expect, result, "Coloring: Claw Case")
        self.dcolor = Dcolor(make_diamond())
        result = self.dcolor.color()
        expect = [[0], [1], [2, 3]]
        self.assertEqual(expect, result, "Coloring: Diamond Case")
        # edgeless graph: a single color class holds every vertex
        g = nx.Graph()
        g.add_node(0)
        g.add_node(1)
        self.dcolor = Dcolor(g)
        result = self.dcolor.color()
        expect = [[0, 1]]
        self.assertEqual(expect, result, "Coloring: Stable Set")
    def testColoringCritical(self):
        """Critical graphs: C5 needs 3 colors, C5 joined with K1 needs 4."""
        self.dcolor = Dcolor(make_cycle(5))
        color = self.dcolor.color()
        expect = [[0], [1, 3], [2, 4]]
        self.assertEqual(len(color), 3)
        self.assertEqual(color, expect)
        k1 = nx.Graph()
        k1.add_node(0)
        g = join(make_cycle(5), k1)
        self.dcolor = Dcolor(g)
        color = self.dcolor.color()
        self.assertEqual(len(color), 4)
        expect = [[5], [0], [1, 3], [2, 4]]
        self.assertEqual(expect, color)
    def testColoringHardGraph(self):
        """Harder inputs: C5 with attached vertices still colors validly."""
        # C5 + 5 Xi
        g = make_cycle(5)
        index = 0
        for i in range(5, 10):
            g.add_node(i)
            # make it a two vertex
            g.add_edge(i, (index + 0) % 5)  # xi
            g.add_edge(i, (index + 1) % 5)
            index += 1
        g.add_edge(5, 6)
        g.add_edge(5, 8)
        g.add_edge(6, 8)
        g.add_edge(7, 9)
        self.dcolor = Dcolor(g)
        color = self.dcolor.color()
        expect = [[1, 8, 9], [5, 4, 7], [2], [0, 3, 6]]
        self.assertEqual(self.dcolor.valid_coloring(color), True)
        self.assertEqual(color, expect)
        # C5 + 2 Yi
        g = make_cycle(5)
        g.add_node(5)
        g.add_edge(0, 5)
        g.add_edge(1, 5)
        g.add_edge(2, 5)
        g.add_node(6)
        g.add_edge(0, 6)
        g.add_edge(3, 6)
        g.add_edge(4, 6)
        self.dcolor = Dcolor(g)
        color = self.dcolor.color()
        expect = [[0, 3], [1, 4], [5, 6], [2]]
        self.assertEqual(self.dcolor.valid_coloring(color), True)
        self.assertEqual(color, expect)
    def testColorCycle(self):
        """A C5 with a long path attached still colors with 3 colors."""
        g = make_cycle(5)
        for i in range(5, 100):
            g.add_node(i)
            g.add_edge(i - 1, i)
        self.dcolor = Dcolor(g)
        color = self.dcolor.color()
        self.assertEqual(len(color), 3)
        self.assertEqual(self.dcolor.valid_coloring(color), True)
    def testColoringClique(self):
        """A triangle (K3) needs one color per vertex."""
        g = make_cycle(3)
        self.dcolor = Dcolor(g)
        color = self.dcolor.color()
        expect = [[0], [1], [2]]
        self.assertEqual(len(color), 3)
        self.assertEqual(color, expect)
    def testCombineColorClique(self):
        """combine_color_clique yields every clique/coloring merge in order."""
        coloring = [[3], [2]]
        clique = [[0], [1]]
        expect = [
            [[0, 3], [1, 2]],
            [[1, 3], [0, 2]]
        ]
        index = 0
        for combo in combine_color_clique(clique, coloring):
            self.assertEqual(combo, expect[index])
            index += 1
        coloring = [[0, 1]]
        clique = [[2], [3]]
        expect = [
            [[2, 0, 1], [3]],
            [[2], [3, 0, 1]],
            [[3, 0, 1], [2]],
            [[3], [2, 0, 1]]
        ]
        index = 0
        for combo in combine_color_clique(clique, coloring):
            self.assertEqual(combo, expect[index])
            # NOTE(review): duplicated assertion below; harmless but
            # likely unintended
            self.assertEqual(combo, expect[index])
            index += 1
        coloring = [[0], [1], [2]]
        clique = [[3], [4]]
        expect = [
            [[0, 3], [1, 4], [2]],
            [[0], [1, 3], [2, 4]],
            [[0, 4], [1, 3], [2]],
            [[0], [1, 4], [2, 3]]
        ]
        index = 0
        for combo in combine_color_clique(clique, coloring):
            self.assertEqual(combo, expect[index])
            index += 1
    def testAddList(self):
        """add_list merges l2 into l1 starting at the given offset."""
        l1 = [[1], [2]]
        l2 = [[3], [4, 5]]
        result = add_list(l1, l2, 0)
        expect = [[1, 3], [2, 4, 5]]
        self.assertEqual(result, expect)
        l1 = [[1], [2], [6]]
        l2 = [[3], [4, 5]]
        result = add_list(l1, l2, 0)
        expect = [[1, 3], [2, 4, 5], [6]]
        self.assertEqual(result, expect)
        result = add_list(l1, l2, 1)
        expect = [[1], [2, 3], [6, 4, 5]]
        self.assertEqual(result, expect)
    def testConvertCombo(self):
        """convert_combo turns a tuple combination into a list."""
        combo = (4, 1)
        conversion = convert_combo(combo)
        self.assertEqual(type(conversion), list,
                         "Convert Combo: did not return list")
| mit |
mavit/ansible-modules-extras | windows/win_dotnet_ngen.py | 138 | 1769 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Peter Mounce <public@neverrunwithscissors.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_dotnet_ngen
version_added: "2.0"
short_description: Runs ngen to recompile DLLs after .NET updates
description:
- After .NET framework is installed/updated, Windows will probably want to recompile things to optimise for the host.
- This happens via scheduled task, usually at some inopportune time.
- This module allows you to run this task on your own schedule, so you incur the CPU hit at some more convenient and controlled time.
- "http://blogs.msdn.com/b/dotnet/archive/2013/08/06/wondering-why-mscorsvw-exe-has-high-cpu-usage-you-can-speed-it-up.aspx"
notes:
- there are in fact two scheduled tasks for ngen but they have no triggers so aren't a problem
- there's no way to test if they've been completed (?)
- the stdout is quite likely to be several megabytes
author: Peter Mounce
'''
EXAMPLES = '''
# Run ngen tasks
win_dotnet_ngen:
'''
| gpl-3.0 |
devendermishrajio/nova | nova/db/sqlalchemy/migrate_repo/versions/233_add_stats_in_compute_nodes.py | 81 | 1460 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(engine):
    """Replace the compute_node_stats tables with a JSON 'stats' column
    on the compute_nodes tables."""
    metadata = MetaData()
    metadata.bind = engine

    # The stats data is transient, so the old tables are dropped outright --
    # nothing is migrated across.
    for name in ('compute_node_stats', 'shadow_compute_node_stats'):
        Table(name, metadata, autoload=True).drop()

    # Attach the replacement 'stats' column (JSON text, defaulting to an
    # empty object) to both the live and shadow compute_nodes tables.
    for name in ('compute_nodes', 'shadow_compute_nodes'):
        target = Table(name, metadata, autoload=True)
        target.create_column(Column('stats', Text, default='{}'))
| apache-2.0 |
Orav/kbengine | kbe/src/lib/python/Lib/ssl.py | 2 | 35365 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
PROTOCOL_TLSv1_1
PROTOCOL_TLSv1_2
The following constants identify various SSL alert message descriptions as per
http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6
ALERT_DESCRIPTION_CLOSE_NOTIFY
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE
ALERT_DESCRIPTION_BAD_RECORD_MAC
ALERT_DESCRIPTION_RECORD_OVERFLOW
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE
ALERT_DESCRIPTION_HANDSHAKE_FAILURE
ALERT_DESCRIPTION_BAD_CERTIFICATE
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE
ALERT_DESCRIPTION_CERTIFICATE_REVOKED
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN
ALERT_DESCRIPTION_ILLEGAL_PARAMETER
ALERT_DESCRIPTION_UNKNOWN_CA
ALERT_DESCRIPTION_ACCESS_DENIED
ALERT_DESCRIPTION_DECODE_ERROR
ALERT_DESCRIPTION_DECRYPT_ERROR
ALERT_DESCRIPTION_PROTOCOL_VERSION
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY
ALERT_DESCRIPTION_INTERNAL_ERROR
ALERT_DESCRIPTION_USER_CANCELLED
ALERT_DESCRIPTION_NO_RENEGOTIATION
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
ALERT_DESCRIPTION_UNRECOGNIZED_NAME
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
"""
import textwrap
import re
import sys
import os
from collections import namedtuple
from enum import Enum as _Enum
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import (VERIFY_DEFAULT, VERIFY_CRL_CHECK_LEAF, VERIFY_CRL_CHECK_CHAIN,
VERIFY_X509_STRICT)
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_egd, RAND_add, RAND_bytes, RAND_pseudo_bytes
def _import_symbols(prefix):
    """Re-export every ``_ssl`` attribute whose name starts with *prefix*
    into this module's namespace."""
    module_globals = globals()
    for name, value in vars(_ssl).items():
        if name.startswith(prefix):
            module_globals[name] = value

# Pull in the OpenSSL option flags, alert descriptions and error codes.
_import_symbols('OP_')
_import_symbols('ALERT_DESCRIPTION_')
_import_symbols('SSL_ERROR_')
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _ssl import _OPENSSL_API_VERSION
# Map PROTOCOL_* constants to human-readable names (see get_protocol_name).
_PROTOCOL_NAMES = {
    PROTOCOL_TLSv1: "TLSv1",
    PROTOCOL_SSLv23: "SSLv23",
    PROTOCOL_SSLv3: "SSLv3",
}

# SSLv2 support is optional at OpenSSL build time; record its availability.
try:
    from _ssl import PROTOCOL_SSLv2
    _SSLv2_IF_EXISTS = PROTOCOL_SSLv2
except ImportError:
    _SSLv2_IF_EXISTS = None
else:
    _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"

# TLS 1.1/1.2 require OpenSSL >= 1.0.1; register them only when present.
try:
    from _ssl import PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2
except ImportError:
    pass
else:
    _PROTOCOL_NAMES[PROTOCOL_TLSv1_1] = "TLSv1.1"
    _PROTOCOL_NAMES[PROTOCOL_TLSv1_2] = "TLSv1.2"

# Windows exposes the system certificate stores via these helpers.
if sys.platform == "win32":
    from _ssl import enum_certificates, enum_crls

from socket import socket, AF_INET, SOCK_STREAM, create_connection
from socket import SOL_SOCKET, SO_TYPE
import base64        # for DER-to-PEM translation
import errno

socket_error = OSError  # keep that public name in module namespace

# Channel binding (RFC 5929 "tls-unique") is only advertised when the
# underlying _ssl build supports it.
if _ssl.HAS_TLS_UNIQUE:
    CHANNEL_BINDING_TYPES = ['tls-unique']
else:
    CHANNEL_BINDING_TYPES = []
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
#   * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
#   * Prefer ECDHE over DHE for better performance
#   * Prefer any AES-GCM over any AES-CBC for better performance and security
#   * Then Use HIGH cipher suites as a fallback
#   * Then Use 3DES as fallback which is secure but slow
#   * Finally use RC4 as a fallback which is problematic but needed for
#     compatibility some times.
#   * Disable NULL authentication, NULL encryption, and MD5 MACs for security
#     reasons
# NOTE(review): RC4 and 3DES are considered broken/weak by modern standards;
# they are retained here only for compatibility with this vintage of the
# stdlib -- confirm before reusing this string elsewhere.
_DEFAULT_CIPHERS = (
    'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
    'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
    'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
)

# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
#   * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
#   * Prefer ECDHE over DHE for better performance
#   * Prefer any AES-GCM over any AES-CBC for better performance and security
#   * Then Use HIGH cipher suites as a fallback
#   * Then Use 3DES as fallback which is secure but slow
#   * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, and RC4 for
#     security reasons
_RESTRICTED_SERVER_CIPHERS = (
    'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
    'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
    '!eNULL:!MD5:!DSS:!RC4'
)
class CertificateError(ValueError):
    """Raised when a certificate's identity does not match the requested
    hostname (see _dnsname_match / match_hostname)."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
    """Matching according to RFC 6125, section 6.4.3

    http://tools.ietf.org/html/rfc6125#section-6.4.3

    Returns a truthy value (bool or re match object) when the presented
    dNSName *dn* matches *hostname*; raises CertificateError when *dn*
    carries more than *max_wildcards* wildcards in its leftmost label.
    """
    pats = []
    if not dn:
        return False

    # Only the leftmost label may legitimately contain a wildcard.
    leftmost, *remainder = dn.split(r'.')

    wildcards = leftmost.count('*')
    if wildcards > max_wildcards:
        # Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment.  A survey of established
        # policy among SSL implementations showed it to be a
        # reasonable choice.
        raise CertificateError(
            "too many wildcards in certificate DNS name: " + repr(dn))

    # speed up common case w/o wildcards
    if not wildcards:
        return dn.lower() == hostname.lower()

    # RFC 6125, section 6.4.3, subitem 1.
    # The client SHOULD NOT attempt to match a presented identifier in which
    # the wildcard character comprises a label other than the left-most label.
    if leftmost == '*':
        # When '*' is a fragment by itself, it matches a non-empty dotless
        # fragment.
        pats.append('[^.]+')
    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
        # RFC 6125, section 6.4.3, subitem 3.
        # The client SHOULD NOT attempt to match a presented identifier
        # where the wildcard character is embedded within an A-label or
        # U-label of an internationalized domain name.
        pats.append(re.escape(leftmost))
    else:
        # Otherwise, '*' matches any dotless string, e.g. www*
        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

    # add the remaining fragments, ignore any wildcards
    for frag in remainder:
        pats.append(re.escape(frag))

    # Case-insensitive whole-string match against the assembled pattern.
    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    dnsnames = []
    # Prefer subjectAltName dNSName entries; collect the names tried so the
    # failure message can list them.
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
    "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
    "openssl_capath")

def get_default_verify_paths():
    """Return a DefaultVerifyPaths tuple describing OpenSSL's default CA
    bundle file and directory.  Environment variables override the compiled-in
    locations; paths that do not exist are reported as None."""
    cafile_env, cafile_default, capath_env, capath_default = \
        _ssl.get_default_verify_paths()
    # An environment variable, when set, shadows the compiled-in path.
    cafile = os.environ.get(cafile_env, cafile_default)
    capath = os.environ.get(capath_env, capath_default)
    return DefaultVerifyPaths(
        cafile if os.path.isfile(cafile) else None,
        capath if os.path.isdir(capath) else None,
        cafile_env, cafile_default, capath_env, capath_default)
class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")):
    """Lookup helper mapping between OpenSSL NIDs, short/long names and
    dotted-decimal OIDs for ASN.1 object identifiers."""
    __slots__ = ()

    def __new__(cls, oid):
        # Resolve a dotted-decimal OID string via OpenSSL's object table.
        nid, short, long_, dotted = _txt2obj(oid, name=False)
        return super().__new__(cls, nid, short, long_, dotted)

    @classmethod
    def fromnid(cls, nid):
        """Create _ASN1Object from OpenSSL numeric ID
        """
        return super().__new__(cls, *_nid2obj(nid))

    @classmethod
    def fromname(cls, name):
        """Create _ASN1Object from short name, long name or OID
        """
        return super().__new__(cls, *_txt2obj(name, name=True))
class Purpose(_ASN1Object, _Enum):
    """SSLContext purpose flags with X509v3 Extended Key Usage objects
    """
    # id-kp-serverAuth: this side is a client verifying a TLS server.
    SERVER_AUTH = '1.3.6.1.5.5.7.3.1'
    # id-kp-clientAuth: this side is a server verifying a TLS client.
    CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'
class SSLContext(_SSLContext):
    """An SSLContext holds various SSL-related configuration options and
    data, such as certificates and possibly a private key."""

    __slots__ = ('protocol', '__weakref__')
    # Windows system stores searched by load_default_certs().
    _windows_cert_stores = ("CA", "ROOT")

    def __new__(cls, protocol, *args, **kwargs):
        self = _SSLContext.__new__(cls, protocol)
        if protocol != _SSLv2_IF_EXISTS:
            # Install the module-wide default cipher string on every new
            # context; an SSLv2 context (when compiled in) is skipped.
            self.set_ciphers(_DEFAULT_CIPHERS)
        return self

    def __init__(self, protocol):
        self.protocol = protocol

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap an existing socket in an SSLSocket bound to this context."""
        return SSLSocket(sock=sock, server_side=server_side,
                         do_handshake_on_connect=do_handshake_on_connect,
                         suppress_ragged_eofs=suppress_ragged_eofs,
                         server_hostname=server_hostname,
                         _context=self)

    def set_npn_protocols(self, npn_protocols):
        """Encode the protocol names into NPN wire format (one length byte
        followed by 1-255 bytes of ASCII name, concatenated) and install
        them on the context."""
        protos = bytearray()
        for protocol in npn_protocols:
            b = bytes(protocol, 'ascii')
            if len(b) == 0 or len(b) > 255:
                raise SSLError('NPN protocols must be 1 to 255 in length')
            protos.append(len(b))
            protos.extend(b)

        self._set_npn_protocols(protos)

    def _load_windows_store_certs(self, storename, purpose):
        # Concatenate DER certs from a Windows system store that are trusted
        # for `purpose`, then feed them to load_verify_locations.
        certs = bytearray()
        for cert, encoding, trust in enum_certificates(storename):
            # CA certs are never PKCS#7 encoded
            if encoding == "x509_asn":
                if trust is True or purpose.oid in trust:
                    certs.extend(cert)
        self.load_verify_locations(cadata=certs)
        return certs

    def load_default_certs(self, purpose=Purpose.SERVER_AUTH):
        """Load default CA certificates: Windows system stores on win32,
        otherwise OpenSSL's default verify paths."""
        if not isinstance(purpose, _ASN1Object):
            raise TypeError(purpose)
        if sys.platform == "win32":
            for storename in self._windows_cert_stores:
                self._load_windows_store_certs(storename, purpose)
        else:
            self.set_default_verify_paths()
def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None,
                           capath=None, cadata=None):
    """Create a SSLContext object with default settings.

    NOTE: The protocol and settings may change anytime without prior
          deprecation. The values represent a fair balance between maximum
          compatibility and security.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(PROTOCOL_SSLv23)

    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2

    # SSLv3 has problematic security and is only required for really old
    # clients such as IE6 on Windows XP
    context.options |= OP_NO_SSLv3

    # disable compression to prevent CRIME attacks (OpenSSL 1.0+)
    # (the getattr fallback keeps this working on builds lacking the flag)
    context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)

    if purpose == Purpose.SERVER_AUTH:
        # verify certs and host name in client mode
        # NOTE(review): verify_mode is assigned before check_hostname; keep
        # this ordering -- confirm against _SSLContext before reordering.
        context.verify_mode = CERT_REQUIRED
        context.check_hostname = True
    elif purpose == Purpose.CLIENT_AUTH:
        # Prefer the server's ciphers by default so that we get stronger
        # encryption
        context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)

        # Use single use keys in order to improve forward secrecy
        context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
        context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)

        # disallow ciphers with known vulnerabilities
        context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)

    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)

    return context
def _create_stdlib_context(protocol=PROTOCOL_SSLv23, *, cert_reqs=None,
                           check_hostname=False, purpose=Purpose.SERVER_AUTH,
                           certfile=None, keyfile=None,
                           cafile=None, capath=None, cadata=None):
    """Create a SSLContext object for Python stdlib modules

    All Python stdlib modules shall use this function to create SSLContext
    objects in order to keep common settings in one place. The configuration
    is less restrict than create_default_context()'s to increase backward
    compatibility.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(protocol)
    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2

    if cert_reqs is not None:
        context.verify_mode = cert_reqs
    context.check_hostname = check_hostname

    # A key without a certificate is unusable; a certificate alone implies
    # the key lives in the same file (load_cert_chain's keyfile=None case).
    if keyfile and not certfile:
        raise ValueError("certfile must be specified")
    if certfile or keyfile:
        context.load_cert_chain(certfile, keyfile)

    # load CA root certs
    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)

    return context
class SSLSocket(socket):
    """This class implements a subtype of socket.socket that wraps
    the underlying OS socket in an SSL context when necessary, and
    provides read and write methods over that channel."""

    def __init__(self, sock=None, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
                 suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
                 server_hostname=None,
                 _context=None):
        # When no pre-built SSLContext is supplied, synthesize one from the
        # legacy per-call arguments (keyfile/certfile/cert_reqs/...).
        if _context:
            self._context = _context
        else:
            if server_side and not certfile:
                raise ValueError("certfile must be specified for server-side "
                                 "operations")
            if keyfile and not certfile:
                raise ValueError("certfile must be specified")
            if certfile and not keyfile:
                keyfile = certfile
            self._context = SSLContext(ssl_version)
            self._context.verify_mode = cert_reqs
            if ca_certs:
                self._context.load_verify_locations(ca_certs)
            if certfile:
                self._context.load_cert_chain(certfile, keyfile)
            if npn_protocols:
                self._context.set_npn_protocols(npn_protocols)
            if ciphers:
                self._context.set_ciphers(ciphers)
        self.keyfile = keyfile
        self.certfile = certfile
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.ca_certs = ca_certs
        self.ciphers = ciphers
        # Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
        # mixed in.
        # NOTE(review): `sock` is dereferenced here before the
        # `sock is not None` branch below, so constructing from a bare
        # `fileno` would raise AttributeError -- confirm intended call paths.
        if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
            raise NotImplementedError("only stream sockets are supported")
        if server_side and server_hostname:
            raise ValueError("server_hostname can only be specified "
                             "in client mode")
        if self._context.check_hostname and not server_hostname:
            if HAS_SNI:
                raise ValueError("check_hostname requires server_hostname")
            else:
                raise ValueError("check_hostname requires server_hostname, "
                                 "but it's not supported by your OpenSSL "
                                 "library")
        self.server_side = server_side
        self.server_hostname = server_hostname
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        if sock is not None:
            # Take over the OS-level descriptor from `sock`, leaving the
            # original object detached.
            socket.__init__(self,
                            family=sock.family,
                            type=sock.type,
                            proto=sock.proto,
                            fileno=sock.fileno())
            self.settimeout(sock.gettimeout())
            sock.detach()
        elif fileno is not None:
            socket.__init__(self, fileno=fileno)
        else:
            socket.__init__(self, family=family, type=type, proto=proto)

        # See if we are connected
        try:
            self.getpeername()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            connected = False
        else:
            connected = True

        self._closed = False
        self._sslobj = None
        self._connected = connected
        if connected:
            # create the SSL object
            try:
                self._sslobj = self._context._wrap_socket(self, server_side,
                                                          server_hostname)
                if do_handshake_on_connect:
                    timeout = self.gettimeout()
                    if timeout == 0.0:
                        # non-blocking
                        raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
                    self.do_handshake()

            except (OSError, ValueError):
                self.close()
                raise

    @property
    def context(self):
        return self._context

    @context.setter
    def context(self, ctx):
        self._context = ctx
        self._sslobj.context = ctx

    def dup(self):
        # Bug fix: this previously raised ``NotImplemented(...)``.
        # NotImplemented is a non-callable singleton, so calling it raised a
        # confusing TypeError instead of the intended exception.
        raise NotImplementedError("Can't dup() %s instances" %
                                  self.__class__.__name__)

    def _checkClosed(self, msg=None):
        # raise an exception here if you wish to check for spurious closes
        pass

    def _check_connected(self):
        if not self._connected:
            # getpeername() will raise ENOTCONN if the socket is really
            # not connected; note that we can be connected even without
            # _connected being set, e.g. if connect() first returned
            # EAGAIN.
            self.getpeername()

    def read(self, len=0, buffer=None):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        self._checkClosed()
        if not self._sslobj:
            raise ValueError("Read on closed or unwrapped SSL socket.")
        try:
            if buffer is not None:
                v = self._sslobj.read(len, buffer)
            else:
                v = self._sslobj.read(len or 1024)
            return v
        except SSLError as x:
            # A "ragged EOF" (peer closed without close_notify) is reported
            # as ordinary EOF when suppress_ragged_eofs is set.
            if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                if buffer is not None:
                    return 0
                else:
                    return b''
            else:
                raise

    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        self._checkClosed()
        if not self._sslobj:
            raise ValueError("Write on closed or unwrapped SSL socket.")
        return self._sslobj.write(data)

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        self._checkClosed()
        self._check_connected()
        return self._sslobj.peer_certificate(binary_form)

    def selected_npn_protocol(self):
        """Return the protocol negotiated via NPN, or None when NPN is
        unavailable or no SSL object exists yet."""
        self._checkClosed()
        if not self._sslobj or not _ssl.HAS_NPN:
            return None
        else:
            return self._sslobj.selected_npn_protocol()

    def cipher(self):
        """Return (name, protocol, secret_bits) for the active cipher, or
        None before the SSL layer is established."""
        self._checkClosed()
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()

    def compression(self):
        self._checkClosed()
        if not self._sslobj:
            return None
        else:
            return self._sslobj.compression()

    def send(self, data, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            try:
                v = self._sslobj.write(data)
            except SSLError as x:
                # Report "nothing sent" on WANT_READ/WANT_WRITE so the
                # caller retries once the socket is ready again.
                if x.args[0] == SSL_ERROR_WANT_READ:
                    return 0
                elif x.args[0] == SSL_ERROR_WANT_WRITE:
                    return 0
                else:
                    raise
            else:
                return v
        else:
            return socket.send(self, data, flags)

    def sendto(self, data, flags_or_addr, addr=None):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        elif addr is None:
            return socket.sendto(self, data, flags_or_addr)
        else:
            return socket.sendto(self, data, flags_or_addr, addr)

    def sendmsg(self, *args, **kwargs):
        # Ensure programs don't send data unencrypted if they try to
        # use this method.
        raise NotImplementedError("sendmsg not allowed on instances of %s" %
                                  self.__class__)

    def sendall(self, data, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            # Loop until every byte has been handed to the SSL layer.
            amount = len(data)
            count = 0
            while (count < amount):
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)

    def recv(self, buflen=1024, flags=0):
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            return self.read(buflen)
        else:
            return socket.recv(self, buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        self._checkClosed()
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            return self.read(nbytes, buffer)
        else:
            return socket.recv_into(self, buffer, nbytes, flags)

    def recvfrom(self, buflen=1024, flags=0):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, buffer, nbytes, flags)

    def recvmsg(self, *args, **kwargs):
        raise NotImplementedError("recvmsg not allowed on instances of %s" %
                                  self.__class__)

    def recvmsg_into(self, *args, **kwargs):
        raise NotImplementedError("recvmsg_into not allowed on instances of "
                                  "%s" % self.__class__)

    def pending(self):
        """Return the number of already-decrypted bytes buffered inside the
        SSL object (0 when unwrapped)."""
        self._checkClosed()
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0

    def shutdown(self, how):
        self._checkClosed()
        self._sslobj = None
        socket.shutdown(self, how)

    def unwrap(self):
        """Shut the SSL layer down and return the underlying plain socket."""
        if self._sslobj:
            s = self._sslobj.shutdown()
            self._sslobj = None
            return s
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def _real_close(self):
        self._sslobj = None
        socket._real_close(self)

    def do_handshake(self, block=False):
        """Perform a TLS/SSL handshake."""
        self._check_connected()
        timeout = self.gettimeout()
        try:
            # `block=True` forces a blocking handshake even on a socket
            # configured as non-blocking; the timeout is restored after.
            if timeout == 0.0 and block:
                self.settimeout(None)
            self._sslobj.do_handshake()
        finally:
            self.settimeout(timeout)

        if self.context.check_hostname:
            if not self.server_hostname:
                raise ValueError("check_hostname needs server_hostname "
                                 "argument")
            match_hostname(self.getpeercert(), self.server_hostname)

    def _real_connect(self, addr, connect_ex):
        if self.server_side:
            raise ValueError("can't connect in server-side mode")
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call.  We connect it, then wrap it.
        if self._connected:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._sslobj = self.context._wrap_socket(self, False, self.server_hostname)
        try:
            if connect_ex:
                rc = socket.connect_ex(self, addr)
            else:
                rc = None
                socket.connect(self, addr)
            if not rc:
                self._connected = True
                if self.do_handshake_on_connect:
                    self.do_handshake()
            return rc
        except (OSError, ValueError):
            self._sslobj = None
            raise

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        self._real_connect(addr, False)

    def connect_ex(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        return self._real_connect(addr, True)

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        newsock = self.context.wrap_socket(newsock,
                    do_handshake_on_connect=self.do_handshake_on_connect,
                    suppress_ragged_eofs=self.suppress_ragged_eofs,
                    server_side=True)
        return newsock, addr

    def get_channel_binding(self, cb_type="tls-unique"):
        """Get channel binding data for current connection.  Raise ValueError
        if the requested `cb_type` is not supported.  Return bytes of the data
        or None if the data is not available (e.g. before the handshake).
        """
        if cb_type not in CHANNEL_BINDING_TYPES:
            raise ValueError("Unsupported channel binding type")
        if cb_type != "tls-unique":
            raise NotImplementedError(
                            "{0} channel binding type not implemented"
                            .format(cb_type))
        if self._sslobj is None:
            return None
        return self._sslobj.tls_unique_cb()
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True,
                ciphers=None):
    """Module-level convenience wrapper: build an SSLSocket around `sock`
    from the legacy per-call arguments instead of a prepared SSLContext."""
    return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
                     server_side=server_side, cert_reqs=cert_reqs,
                     ssl_version=ssl_version, ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs,
                     ciphers=ciphers)
# some utility functions
def cert_time_to_seconds(cert_time):
    """Convert a certificate timestamp string in standard ASN1_print form
    ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") into seconds past the
    epoch.  The value is interpreted through the local timezone, matching
    time.mktime's semantics."""
    import time
    parsed = time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")
    return time.mktime(parsed)
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"

def DER_cert_to_PEM_cert(der_cert_bytes):
    """Encode a DER certificate as a PEM string: base64 payload wrapped to
    64 columns between the standard BEGIN/END CERTIFICATE armor lines."""
    b64 = base64.standard_b64encode(der_cert_bytes).decode('ASCII', 'strict')
    wrapped = textwrap.fill(b64, 64)
    return "%s\n%s\n%s\n" % (PEM_HEADER, wrapped, PEM_FOOTER)

def PEM_cert_to_DER_cert(pem_cert_string):
    """Decode a PEM certificate string back into DER bytes, validating the
    BEGIN/END CERTIFICATE armor lines first."""
    stripped = pem_cert_string.strip()
    if not pem_cert_string.startswith(PEM_HEADER):
        raise ValueError("Invalid PEM encoding; must start with %s"
                         % PEM_HEADER)
    if not stripped.endswith(PEM_FOOTER):
        raise ValueError("Invalid PEM encoding; must end with %s"
                         % PEM_FOOTER)
    payload = stripped[len(PEM_HEADER):-len(PEM_FOOTER)]
    return base64.decodebytes(payload.encode('ASCII', 'strict'))
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""
    # NOTE(review): the PROTOCOL_SSLv3 default is dated; many servers refuse
    # SSLv3 outright -- callers should pass an explicit TLS version.
    host, port = addr
    # With a CA bundle, require and verify the peer certificate; otherwise
    # accept any certificate (CERT_NONE) just to fetch it.
    if ca_certs is not None:
        cert_reqs = CERT_REQUIRED
    else:
        cert_reqs = CERT_NONE
    context = _create_stdlib_context(ssl_version,
                                     cert_reqs=cert_reqs,
                                     cafile=ca_certs)
    with create_connection(addr) as sock:
        with context.wrap_socket(sock) as sslsock:
            # binary_form=True -> raw DER bytes, re-encoded to PEM below.
            dercert = sslsock.getpeercert(True)
    return DER_cert_to_PEM_cert(dercert)
def get_protocol_name(protocol_code):
    """Return the human-readable name for a PROTOCOL_* constant (e.g.
    "TLSv1"), or '<unknown>' for unrecognised codes."""
    return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
| lgpl-3.0 |
Navdy/lightblue-0.4 | src/series60/_lightbluecommon.py | 179 | 10831 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Defines attributes with common implementations across the different
# platforms.
# public attributes
# Names exported by "from _lightbluecommon import *".
__all__ = ("L2CAP", "RFCOMM", "OBEX", "BluetoothError", "splitclass")

# Protocol/service class types, used for sockets and advertising services
L2CAP, RFCOMM, OBEX = (10, 11, 12)
class BluetoothError(IOError):
    """
    Generic exception raised for Bluetooth errors. This is not raised for
    socket-related errors; socket objects raise the socket.error and
    socket.timeout exceptions from the standard library socket module.

    Note that error codes are currently platform-dependent. In particular,
    the Mac OS X implementation returns IOReturn error values from the IOKit
    framework, and OBEXError codes from <IOBluetooth/OBEX.h> for OBEX operations.
    """
    pass
def splitclass(classofdevice):
    """
    Decompose a Bluetooth class-of-device value and return a 3-item tuple
    of (major service class, major device class, minor device class).

    These components describe the device's advertised services and its type
    (mobile phone, laptop, etc.). Searching for "assigned numbers bluetooth
    baseband" turns up documents describing this bit layout.

    Example:
        >>> splitclass(1057036)
        (129, 1, 3)
        >>>
    """
    if not isinstance(classofdevice, int):
        # Accept anything coercible to int, e.g. a numeric string.
        try:
            classofdevice = int(classofdevice)
        except (TypeError, ValueError):
            raise TypeError("Given device class '%s' cannot be split" % \
                str(classofdevice))

    bits = classofdevice >> 2      # discard the 2 low "format" bits
    return (bits >> 11, (bits >> 6) & 0x1F, bits & 0x3F)
# Compiled lazily by _isbtaddr() on first use.
_validbtaddr = None

def _isbtaddr(address):
    """
    Returns whether the given address is a valid bluetooth address.
    For example, "00:0e:6d:7b:a2:0a" is a valid address.

    Returns False if the argument is None or is not a string.
    """
    # Define validity regex. Accept either ":" or "-" as separators.
    global _validbtaddr
    if _validbtaddr is None:
        import re
        # Six pairs of hex digits separated by ":" or "-", case-insensitive.
        _validbtaddr = re.compile("((\d|[a-f]){2}(:|-)){5}(\d|[a-f]){2}",
            re.IGNORECASE)
    import types
    # NOTE(review): types.StringTypes exists only on Python 2 (this module
    # targets the Python 2 based Series 60 port); under Python 3 this line
    # would raise AttributeError.
    if not isinstance(address, types.StringTypes):
        return False
    return _validbtaddr.match(address) is not None
# --------- other attributes ---------
def _joinclass(codtuple):
    """
    The inverse of splitclass(): packs a (service, major, minor)
    class-of-device tuple back into a single class-of-device integer.
    """
    if not isinstance(codtuple, tuple):
        raise TypeError("argument must be tuple, was %s" % type(codtuple))
    if len(codtuple) != 3:
        raise ValueError("tuple must have 3 items, has %d" % len(codtuple))
    service, major, minor = codtuple
    # All fields sit above the 2-bit format field: service starts at
    # bit 13, major at bit 8, minor at bit 2.
    return (service << 13) | (major << 8) | (minor << 2)
# Docstrings for socket objects.
# Based on std lib socket docs.
_socketdocs = {
"accept":
"""
accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket representing the
connection, and the address of the client. For RFCOMM sockets, the address
info is a pair (hostaddr, channel).
The socket must be bound and listening before calling this method.
""",
"bind":
"""
bind(address)
Bind the socket to a local address. For RFCOMM sockets, the address is a
pair (host, channel); the host must refer to the local host.
A port value of 0 binds the socket to a dynamically assigned port.
(Note that on Mac OS X, the port value must always be 0.)
The socket must not already be bound.
""",
"close":
"""
close()
Close the socket. It cannot be used after this call.
""",
"connect":
"""
connect(address)
Connect the socket to a remote address. The address should be a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
The socket must not be already connected.
""",
"connect_ex":
"""
connect_ex(address) -> errno
This is like connect(address), but returns an error code instead of raising
an exception when an error occurs.
""",
"dup":
"""
dup() -> socket object
Returns a new socket object connected to the same system resource.
""",
"fileno":
"""
fileno() -> integer
Return the integer file descriptor of the socket.
Raises NotImplementedError on Mac OS X and Python For Series 60.
""",
"getpeername":
"""
getpeername() -> address info
Return the address of the remote endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected, socket.error will be raised.
""",
"getsockname":
"""
getsockname() -> address info
Return the address of the local endpoint. The address info is a
(host, channel) pair for RFCOMM sockets, and a (host, PSM) pair for L2CAP
sockets.
If the socket has not been connected nor bound, this returns the tuple
("00:00:00:00:00:00", 0).
""",
"getsockopt":
"""
getsockopt(level, option[, bufsize]) -> value
Get a socket option. See the Unix manual for level and option.
If a nonzero buffersize argument is given, the return value is a
string of that length; otherwise it is an integer.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raises socket.error.
""",
"gettimeout":
"""
gettimeout() -> timeout
Returns the timeout in floating seconds associated with socket
operations. A timeout of None indicates that timeouts on socket
operations are disabled.
Currently not supported on Python For Series 60 implementation, which
will always return None.
""",
"listen":
"""
listen(backlog)
Enable a server to accept connections. The backlog argument must be at
least 1; it specifies the number of unaccepted connection that the system
will allow before refusing new connections.
The socket must not be already listening.
Currently not implemented on Mac OS X.
""",
"makefile":
"""
makefile([mode[, bufsize]]) -> file object
Returns a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function.
""",
"recv":
"""
recv(bufsize[, flags]) -> data
Receive up to bufsize bytes from the socket. For the optional flags
argument, see the Unix manual. When no data is available, block until
at least one byte is available or until the remote end is closed. When
the remote end is closed and all data is read, return the empty string.
Currently the flags argument has no effect on Mac OS X.
""",
"recvfrom":
"""
recvfrom(bufsize[, flags]) -> (data, address info)
Like recv(buffersize, flags) but also return the sender's address info.
""",
"send":
"""
send(data[, flags]) -> count
Send a data string to the socket. For the optional flags
argument, see the Unix manual. Return the number of bytes
sent.
The socket must be connected to a remote socket.
Currently the flags argument has no effect on Mac OS X.
""",
"sendall":
"""
sendall(data[, flags])
Send a data string to the socket. For the optional flags
argument, see the Unix manual. This calls send() repeatedly
until all data is sent. If an error occurs, it's impossible
to tell how much data has been sent.
""",
"sendto":
"""
sendto(data[, flags], address) -> count
Like send(data, flags) but allows specifying the destination address.
For RFCOMM sockets, the address is a pair (hostaddr, channel).
""",
"setblocking":
"""
setblocking(flag)
Set the socket to blocking (flag is true) or non-blocking (false).
setblocking(True) is equivalent to settimeout(None);
setblocking(False) is equivalent to settimeout(0.0).
Initially a socket is in blocking mode. In non-blocking mode, if a
socket operation cannot be performed immediately, socket.error is raised.
The underlying implementation on Python for Series 60 only supports
non-blocking mode for send() and recv(), and ignores it for connect() and
accept().
""",
"setsockopt":
"""
setsockopt(level, option, value)
Set a socket option. See the Unix manual for level and option.
The value argument can either be an integer or a string.
Currently support for socket options are platform independent -- i.e.
depends on the underlying Series 60 or BlueZ socket options support.
The Mac OS X implementation currently does not support any options at
all and automatically raise socket.error.
""",
"settimeout":
"""
settimeout(timeout)
Set a timeout on socket operations. 'timeout' can be a float,
giving in seconds, or None. Setting a timeout of None disables
the timeout feature and is equivalent to setblocking(1).
Setting a timeout of zero is the same as setblocking(0).
If a timeout is set, the connect, accept, send and receive operations will
raise socket.timeout if a timeout occurs.
Raises NotImplementedError on Python For Series 60.
""",
"shutdown":
"""
shutdown(how)
Shut down the reading side of the socket (flag == socket.SHUT_RD), the
writing side of the socket (flag == socket.SHUT_WR), or both ends
(flag == socket.SHUT_RDWR).
"""
}
| gpl-3.0 |
flyfei/python-for-android | python-modules/zope/zope/interface/tests/test_verify.py | 50 | 4612 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interface Verify tests
$Id: test_verify.py 110536 2010-04-06 02:59:44Z tseaver $
"""
import doctest
import unittest
from zope.interface import Interface, implements, classImplements, Attribute
from zope.interface.verify import verifyClass, verifyObject
from zope.interface.exceptions import DoesNotImplement, BrokenImplementation
from zope.interface.exceptions import BrokenMethodImplementation
class Test(unittest.TestCase):
    """Tests for verifyClass(): interface declaration and method-signature
    compatibility checks from zope.interface.verify."""
    def testNotImplemented(self):
        # A class that never declares the interface fails verification
        # until classImplements() adds the declaration.
        class C(object): pass
        class I(Interface): pass
        self.assertRaises(DoesNotImplement, verifyClass, I, C)
        classImplements(C, I)
        verifyClass(I, C)
    def testMissingAttr(self):
        # Declaring the interface is not enough: the attribute f must exist.
        class I(Interface):
            def f(): pass
        class C(object):
            implements(I)
        self.assertRaises(BrokenImplementation, verifyClass, I, C)
        C.f=lambda self: None
        verifyClass(I, C)
    def testMissingAttr_with_Extended_Interface(self):
        # Attributes required by a base interface are checked through
        # the derived interface too.
        class II(Interface):
            def f():
                pass
        class I(II):
            pass
        class C(object):
            implements(I)
        self.assertRaises(BrokenImplementation, verifyClass, I, C)
        C.f=lambda self: None
        verifyClass(I, C)
    def testWrongArgs(self):
        # Parameter *names* need not match, but the argument structure must
        # be compatible with the interface method's signature.
        class I(Interface):
            def f(a): pass
        class C(object):
            def f(self, b): pass
            implements(I)
        # We no longer require names to match.
        #self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
        C.f=lambda self, a: None
        verifyClass(I, C)
        C.f=lambda self, **kw: None
        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
        C.f=lambda self, a, *args: None
        verifyClass(I, C)
        C.f=lambda self, a, *args, **kw: None
        verifyClass(I, C)
        C.f=lambda self, *args: None
        verifyClass(I, C)
    def testExtraArgs(self):
        # Extra required parameters break the contract; extra *optional*
        # parameters are fine.
        class I(Interface):
            def f(a): pass
        class C(object):
            def f(self, a, b): pass
            implements(I)
        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
        C.f=lambda self, a: None
        verifyClass(I, C)
        C.f=lambda self, a, b=None: None
        verifyClass(I, C)
    def testNoVar(self):
        # An interface requiring *args needs an implementation accepting them.
        class I(Interface):
            def f(a, *args): pass
        class C(object):
            def f(self, a): pass
            implements(I)
        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
        C.f=lambda self, a, *foo: None
        verifyClass(I, C)
    def testNoKW(self):
        # Likewise for **kwargs.
        class I(Interface):
            def f(a, **args): pass
        class C(object):
            def f(self, a): pass
            implements(I)
        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
        C.f=lambda self, a, **foo: None
        verifyClass(I, C)
    def testModule(self):
        # verifyObject also works on modules that declare an interface.
        from zope.interface.tests.ifoo import IFoo
        from zope.interface.tests import dummy
        verifyObject(IFoo, dummy)
    def testMethodForAttr(self):
        # A method is an acceptable implementation of a declared Attribute.
        class IFoo(Interface):
             foo = Attribute("The foo Attribute")
        class Foo:
            implements(IFoo)
            def foo(self):
                pass
        verifyClass(IFoo, Foo)
    def testNonMethodForMethod(self):
        # But a plain (non-callable) attribute cannot stand in for a method.
        class IBar(Interface):
            def foo():
                pass
        class Bar:
            implements(IBar)
            foo = 1
        self.assertRaises(BrokenMethodImplementation, verifyClass, IBar, Bar)
def test_suite():
    """Build the suite: the verify.txt doctests plus the TestCase above."""
    doc_tests = doctest.DocFileSuite(
        '../verify.txt',
        optionflags=doctest.NORMALIZE_WHITESPACE)
    unit_tests = unittest.TestLoader().loadTestsFromTestCase(Test)
    return unittest.TestSuite((doc_tests, unit_tests))
if __name__=='__main__':
unittest.TextTestRunner().run(test_suite())
| apache-2.0 |
lcy-seso/Paddle | python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py | 4 | 7851 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest
import numpy as np
import copy
from op_test import OpTest
def iou(box_a, box_b):
    """Apply intersection-over-union overlap between box_a and box_b

    Each box is a [x1, y1, x2, y2] sequence; corner order is normalized
    with min/max first, so flipped boxes are handled. Returns the IoU
    ratio as a float, or 0.0 when both boxes are degenerate (zero area).
    """
    xmin_a = min(box_a[0], box_a[2])
    ymin_a = min(box_a[1], box_a[3])
    xmax_a = max(box_a[0], box_a[2])
    ymax_a = max(box_a[1], box_a[3])
    xmin_b = min(box_b[0], box_b[2])
    ymin_b = min(box_b[1], box_b[3])
    xmax_b = max(box_b[0], box_b[2])
    ymax_b = max(box_b[1], box_b[3])
    area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a)
    area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b)
    if area_a <= 0 and area_b <= 0:
        return 0.0
    xa = max(xmin_a, xmin_b)
    ya = max(ymin_a, ymin_b)
    xb = min(xmax_a, xmax_b)
    yb = min(ymax_a, ymax_b)
    inter_area = max(xb - xa, 0.0) * max(yb - ya, 0.0)
    # Union = A + B - intersection. (The original also computed two
    # box_*_area locals from the raw coordinates that were never used.)
    iou_ratio = inter_area / (area_a + area_b - inter_area)
    return iou_ratio
def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class predscores for the img, Shape:[num_priors].
        score_threshold: (float) The confidence thresh for filtering low
            confidence boxes.
        nms_threshold: (float) The overlap thresh for suppressing unnecessary
            boxes.
        top_k: (int) The maximum number of box preds to consider.
        eta: (float) The parameter for adaptive NMS.
    Return:
        The indices of the kept boxes with respect to num_priors.
    """
    # NOTE(review): ndarray.flatten() already returns a copy, so this
    # deepcopy looks redundant -- confirm before removing.
    all_scores = copy.deepcopy(scores)
    all_scores = all_scores.flatten()
    # Keep only scores above the confidence threshold.
    selected_indices = np.argwhere(all_scores > score_threshold)
    selected_indices = selected_indices.flatten()
    all_scores = all_scores[selected_indices]
    # Sort the surviving scores descending (mergesort keeps ties stable).
    # NOTE(review): sorted_indices index into the *filtered* score array,
    # but are used below to index `boxes` directly. This is only correct
    # when the threshold filtered nothing out; otherwise the kept indices
    # are misaligned with the original priors. Verify against the C++
    # multiclass_nms kernel this reference is meant to mirror.
    sorted_indices = np.argsort(-all_scores, axis=0, kind='mergesort')
    sorted_scores = all_scores[sorted_indices]
    if top_k > -1 and top_k < sorted_indices.shape[0]:
        sorted_indices = sorted_indices[:top_k]
        sorted_scores = sorted_scores[:top_k]
    selected_indices = []
    adaptive_threshold = nms_threshold
    for i in range(sorted_scores.shape[0]):
        idx = sorted_indices[i]
        keep = True
        # Suppress this box if it overlaps any already-kept box too much.
        for k in range(len(selected_indices)):
            if keep:
                kept_idx = selected_indices[k]
                overlap = iou(boxes[idx], boxes[kept_idx])
                keep = True if overlap <= adaptive_threshold else False
            else:
                break
        if keep:
            selected_indices.append(idx)
        # Adaptive NMS: shrink the threshold as boxes are kept (eta < 1).
        if keep and eta < 1 and adaptive_threshold > 0.5:
            adaptive_threshold *= eta
    return selected_indices
def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold,
                   nms_top_k, keep_top_k):
    """Run per-class NMS and cap the total number of detections.

    Args:
        boxes: [num_priors, 4] array of candidate boxes.
        scores: [class_num, num_priors] array of per-class scores.
        background: class index that is skipped entirely.
        score_threshold, nms_threshold, nms_top_k: forwarded to nms().
        keep_top_k: overall cap on kept detections (-1 means no cap).

    Returns:
        (selected_indices, num_det): a dict mapping class id to the list
        of kept prior indices, and the total number of detections.
    """
    class_num = scores.shape[0]
    selected_indices = {}
    num_det = 0
    for c in range(class_num):
        if c == background: continue
        indices = nms(boxes, scores[c], score_threshold, nms_threshold,
                      nms_top_k)
        selected_indices[c] = indices
        num_det += len(indices)
    if keep_top_k > -1 and num_det > keep_top_k:
        # Too many detections overall: keep only the keep_top_k highest
        # scoring (class, index) pairs across all classes.
        score_index = []
        # dict.items() instead of the Python-2-only iteritems(), so this
        # reference implementation also runs under Python 3.
        for c, indices in selected_indices.items():
            for idx in indices:
                score_index.append((scores[c][idx], c, idx))
        sorted_score_index = sorted(
            score_index, key=lambda tup: tup[0], reverse=True)
        sorted_score_index = sorted_score_index[:keep_top_k]
        selected_indices = {}
        for _, c, _ in sorted_score_index:
            selected_indices[c] = []
        for s, c, idx in sorted_score_index:
            selected_indices[c].append(idx)
        num_det = keep_top_k
    return selected_indices, num_det
def batched_multiclass_nms(boxes, scores, background, score_threshold,
                           nms_threshold, nms_top_k, keep_top_k):
    """Run multiclass_nms() over a batch and build LoD-style output.

    Returns:
        (det_outs, lod): det_outs is a flat list of
        [label, score, xmin, ymin, xmax, ymax] rows; lod holds the
        cumulative per-image detection counts (starting at 0).
    """
    batch_size = scores.shape[0]
    det_outs = []
    lod = [0]
    for n in range(batch_size):
        nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background,
                                               score_threshold, nms_threshold,
                                               nms_top_k, keep_top_k)
        lod.append(lod[-1] + nmsed_num)
        if nmsed_num == 0: continue
        # dict.items() instead of the Python-2-only iteritems(), so this
        # reference implementation also runs under Python 3.
        for c, indices in nmsed_outs.items():
            for idx in indices:
                xmin, ymin, xmax, ymax = boxes[n][idx][:]
                det_outs.append([c, scores[n][c][idx], xmin, ymin, xmax, ymax])
    return det_outs, lod
class TestMulticlassNMSOp(OpTest):
    """Compares the multiclass_nms op against the Python reference above
    on random boxes/scores."""
    def set_argument(self):
        # Overridden by subclasses to vary the score threshold.
        self.score_threshold = 0.01
    def setUp(self):
        self.set_argument()
        N = 7       # batch size
        M = 1200    # number of prior boxes per image
        C = 21      # number of classes (including background)
        BOX_SIZE = 4
        background = 0
        nms_threshold = 0.3
        nms_top_k = 400
        keep_top_k = 200
        score_threshold = self.score_threshold
        scores = np.random.random((N * M, C)).astype('float32')
        def softmax(x):
            # NOTE(review): .clip(-64.) lower-bounds the max at -64 --
            # presumably an overflow guard for exp(); confirm intent.
            shiftx = x - np.max(x).clip(-64.)
            exps = np.exp(shiftx)
            return exps / np.sum(exps)
        scores = np.apply_along_axis(softmax, 1, scores)
        scores = np.reshape(scores, (N, M, C))
        # Transpose to [N, C, M], the layout the reference expects.
        scores = np.transpose(scores, (0, 2, 1))
        boxes = np.random.random((N, M, BOX_SIZE)).astype('float32')
        # Force valid boxes: corners in [0, 0.5) and [0.5, 1.0).
        boxes[:, :, 0:2] = boxes[:, :, 0:2] * 0.5
        boxes[:, :, 2:4] = boxes[:, :, 2:4] * 0.5 + 0.5
        nmsed_outs, lod = batched_multiclass_nms(boxes, scores, background,
                                                 score_threshold, nms_threshold,
                                                 nms_top_k, keep_top_k)
        # [-1] is the op's sentinel output when there are no detections.
        nmsed_outs = [-1] if not nmsed_outs else nmsed_outs
        nmsed_outs = np.array(nmsed_outs).astype('float32')
        self.op_type = 'multiclass_nms'
        self.inputs = {'BBoxes': boxes, 'Scores': scores}
        self.outputs = {'Out': (nmsed_outs, [lod])}
        self.attrs = {
            'background_label': 0,
            'nms_threshold': nms_threshold,
            'nms_top_k': nms_top_k,
            'keep_top_k': keep_top_k,
            'score_threshold': score_threshold,
            'nms_eta': 1.0,
        }
    def test_check_output(self):
        self.check_output()
class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp):
    """Variant where the threshold filters out every box (empty output)."""
    def set_argument(self):
        # Here set 2.0 to test the case there is no outputs.
        # In practical use, 0.0 < score_threshold < 1.0
        self.score_threshold = 2.0
class TestIOU(unittest.TestCase):
    """Sanity-checks the pure-Python iou() helper on a hand-worked case."""
    def test_iou(self):
        box_a = np.array([4.0, 3.0, 7.0, 5.0]).astype('float32')
        box_b = np.array([3.0, 4.0, 6.0, 8.0]).astype('float32')
        # Intersection is 2.0, union is 16.0, so IoU = 2/16.
        expected = np.array([2.0 / 16.0]).astype('float32')
        actual = np.array([iou(box_a, box_b)]).astype('float32')
        self.assertTrue(np.allclose(actual, expected))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
windskyer/nova | nova/tests/unit/virt/test_images.py | 67 | 1681 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_concurrency import processutils
from nova import exception
from nova import test
from nova import utils
from nova.virt import images
class QemuTestCase(test.NoDBTestCase):
    """Tests for images.qemu_img_info(): path validation and how
    qemu-img failures/successes are surfaced."""
    def test_qemu_info_with_bad_path(self):
        # A nonexistent path is rejected before qemu-img is ever run.
        self.assertRaises(exception.InvalidDiskInfo,
                          images.qemu_img_info,
                          '/path/that/does/not/exist')
    @mock.patch.object(os.path, 'exists', return_value=True)
    def test_qemu_info_with_errors(self, path_exists):
        # With the path check mocked out, the real qemu-img call fails
        # and the execution error propagates.
        self.assertRaises(processutils.ProcessExecutionError,
                          images.qemu_img_info,
                          '/fake/path')
    @mock.patch.object(os.path, 'exists', return_value=True)
    @mock.patch.object(utils, 'execute',
                       return_value=('stdout', None))
    def test_qemu_info_with_no_errors(self, path_exists,
                                      utils_execute):
        # Mocked qemu-img output is parsed into a truthy, printable object.
        image_info = images.qemu_img_info('/fake/path')
        self.assertTrue(image_info)
        self.assertTrue(str(image_info))
| gpl-2.0 |
googlearchive/appengine-guestbook-namespaces-python | appengine_config.py | 4 | 2515 | # Copyright 2010 Google Inc. All Rights Reserved.
"""
Manages the namespace for the application.
This file presents ways an ISV (Independent Software Vendor) might use
namespaces to distribute the guestbook application to different corporate
clients. The original guestbook.py is left unchanged. Our namespace choosing
hook is run when datastore or memcache attempt to resolve the namespace.
When defined in appengine_config.py the lib_config mechanism substitutes this
function for the default definition which returns None. This hopefully shows how
easy it can be to make an existing app namespace aware.
Setting _NAMESPACE_PICKER has the following effects:
If _USE_SERVER_NAME, we read the server name
foo.guestbook-isv.appspot.com and set the namespace.
If _USE_GOOGLE_APPS_DOMAIN, we allow the namespace manager to infer the
namespace from the request.
If _USE_COOKIE, then the ISV might have a gateway page that sets a cookie called
'namespace' for example, and we read this cookie and set the namespace to its
value. Note this is not a secure use of cookies.
Other possibilities not implemented here include using a mapping from user to
namespace and possibly setting a namespace cookie from this mapping. If the
mapping is stored in datastore, we would probably not wish to look it up on
every query.
"""
__author__ = 'nverne@google.com (Nicholas Verne)'
import Cookie
import os
from google.appengine.api import namespace_manager
_USE_SERVER_NAME = 0
_USE_GOOGLE_APPS_DOMAIN = 1
_USE_COOKIE = 2

# Strategy the hook below uses to choose the namespace for each request.
_NAMESPACE_PICKER = _USE_SERVER_NAME


def namespace_manager_default_namespace_for_request():
  """Determine which namespace is to be used for a request.

  The value of _NAMESPACE_PICKER has the following effects:

  If _USE_SERVER_NAME, we read server name
  foo.guestbook-isv.appspot.com and set the namespace.

  If _USE_GOOGLE_APPS_DOMAIN, we allow the namespace manager to infer
  the namespace from the request.

  If _USE_COOKIE, then the ISV might have a gateway page that sets a
  cookie called 'namespace', and we set the namespace to the cookie's value.

  Returns:
    The namespace name as a string, or None when no namespace applies.
  """
  name = None
  if _NAMESPACE_PICKER == _USE_SERVER_NAME:
    name = os.environ['SERVER_NAME']
  elif _NAMESPACE_PICKER == _USE_GOOGLE_APPS_DOMAIN:
    name = namespace_manager.google_apps_namespace()
  elif _NAMESPACE_PICKER == _USE_COOKIE:
    cookies = os.getenv('HTTP_COOKIE', None)
    if cookies:
      # BaseCookie.get() returns a Morsel object, not the cookie's string
      # value; the original code returned the Morsel itself, which is not
      # a usable namespace name. Extract .value instead.
      morsel = Cookie.BaseCookie(cookies).get('namespace')
      if morsel is not None:
        name = morsel.value
  return name
| apache-2.0 |
schumi2004/NOT_UPDATED_Sick-Beard-Dutch | lib/imdb/parser/http/searchCharacterParser.py | 67 | 2793 | """
parser.http.searchCharacterParser module (imdb package).
This module provides the HTMLSearchCharacterParser class (and the
search_character_parser instance), used to parse the results of a search
for a given character.
E.g., when searching for the name "Jesse James", the parsed page would be:
http://akas.imdb.com/find?s=Characters;mx=20;q=Jesse+James
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCharacterParser(DOMBasicMovieParser):
    """Simply get the name of a character and the imdbID.
    It's used by the DOMHTMLSearchCharacterParser class to return a result
    for a direct match (when a search on IMDb results in a single
    character, the web server sends directly the movie page."""
    # Parse names non-canonically ("Jesse James", not "James, Jesse").
    _titleFunct = lambda self, x: analyze_name(x or u'', canonical=False)
class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser):
    """Parse the list of results of a character search on IMDb
    (e.g. the page for http://akas.imdb.com/find?s=Characters;q=...)."""
    # Fallback parser for direct hits (single-result redirects).
    _BaseParser = DOMBasicCharacterParser
    _notDirectHitTitle = '<title>imdb search'
    _titleBuilder = lambda self, x: build_name(x, canonical=False)
    _linkPrefix = '/character/ch'
    # For each result row: pull the link href and the character name,
    # then post-process into (imdbID, {'name': ...}).
    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'name': "./a[1]/text()"
                        },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            {'name': x.get('name')}
                        ))]
    # Match table cells whose first link points at a character page.
    extractors = [Extractor(label='search',
                            path="//td[3]/a[starts-with(@href, " \
                                    "'/character/ch')]/..",
                            attrs=_attrs)]
# Registry consumed by imdb.parser.http: maps each parser name to its
# class tuple and constructor keyword arguments.
_OBJECTS = {
    'search_character_parser': ((DOMHTMLSearchCharacterParser,),
                {'kind': 'character', '_basic_parser': DOMBasicCharacterParser})
}
| gpl-3.0 |
magicgoose/yowsup | yowsup/layers/protocol_groups/protocolentities/notification_groups_subject.py | 61 | 2304 | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_notifications.protocolentities import NotificationProtocolEntity
from .notification_groups import GroupsNotificationProtocolEntity
class SubjectGroupsNotificationProtocolEntity(GroupsNotificationProtocolEntity):
    '''
    <notification notify="WhatsApp" id="{{id}}" t="{{TIMESTAMP}}" participant="{{PARTICIPANT_JID}}" from="{{GROUP_JID}}" type="w:gp2">
        <subject s_t="{{subject_set_timestamp}}" s_o="{{subject_owner_jid}}" subject="{{SUBJECT}}">
        </subject>
    </notification>
    '''
    def __init__(self, _type, _id, _from, timestamp, notify, participant, subject,
            subjectOwner = None, subjectTimestamp = 0):
        # _type is kept in the signature for backward compatibility but is
        # not forwarded; the notification type is fixed for this entity.
        super(SubjectGroupsNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, participant)
        # Fix: setSubjectData() requires owner and timestamp as well; the
        # original call passed only `subject`, so constructing this entity
        # always raised TypeError. The new parameters default to benign
        # values to stay call-compatible with existing callers.
        self.setSubjectData(subject, subjectOwner, subjectTimestamp)
    def setSubjectData(self, subject, subjectOwner, subjectTimestamp):
        # Store the new subject, who set it, and when (epoch seconds).
        self.subject = subject
        self.subjectOwner = subjectOwner
        self.subjectTimestamp = int(subjectTimestamp)
    def getSubject(self):
        return self.subject
    def getSubjectOwner(self, full = True):
        # full=False strips the server part of the owner JID.
        return self.subjectOwner if full else self.subjectOwner.split('@')[0]
    def getSubjectTimestamp(self):
        return self.subjectTimestamp
    def __str__(self):
        out = super(SubjectGroupsNotificationProtocolEntity, self).__str__()
        out += "New subject: %s\n" % self.getSubject()
        out += "Set by: %s\n" % self.getSubjectOwner()
        return out
    def toProtocolTreeNode(self):
        # Serialize back to the <subject s_t=... s_o=... subject=...> form.
        node = super(SubjectGroupsNotificationProtocolEntity, self).toProtocolTreeNode()
        subjectNode = ProtocolTreeNode("subject", {
            "s_t": str(self.getSubjectTimestamp()),
            "s_o": self.getSubjectOwner(),
            "subject": self.getSubject()
        })
        node.addChild(subjectNode)
        return node
    @staticmethod
    def fromProtocolTreeNode(node):
        # Build the base entity, then re-brand and fill subject data from
        # the embedded <subject> child node.
        entity = super(SubjectGroupsNotificationProtocolEntity, SubjectGroupsNotificationProtocolEntity).fromProtocolTreeNode(node)
        entity.__class__ = SubjectGroupsNotificationProtocolEntity
        subjectNode = node.getChild("subject")
        entity.setSubjectData(subjectNode["subject"], subjectNode["s_o"], subjectNode["s_t"])
        return entity
| gpl-3.0 |
Xeralux/tensorflow | tensorflow/python/framework/ops_test.py | 1 | 111343 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
ops._set_call_cpp_shape_fn(common_shapes.call_cpp_shape_fn)
@test_util.with_c_api
class ResourceTest(test_util.TensorFlowTestCase):
  """Tests resource registration/initialization via test_ops stubs."""

  def testBuildGraph(self):
    # Creating and running a resource-create op must not raise.
    with self.test_session():
      pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
      test_ops.resource_create_op(pt).run()

  def testInitialize(self):
    # A registered resource is reported uninitialized until its create
    # op has been run via initialize_resources().
    with self.test_session():
      handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
      resources.register_resource(
          handle=handle,
          create_op=test_ops.resource_create_op(handle),
          is_initialized_op=test_ops.resource_initialized_op(handle))
      self.assertEquals(
          len(
              resources.report_uninitialized_resources(
                  resources.shared_resources()).eval()), 1)
      resources.initialize_resources(resources.shared_resources()).run()
      self.assertEquals(
          len(
              resources.report_uninitialized_resources(
                  resources.shared_resources()).eval()), 0)
@test_util.with_c_api
class TensorAndShapeTest(test_util.TensorFlowTestCase):
  """Tests Tensor shape inference, set_shape, and iteration behavior."""

  def testShape(self):
    # A fresh op output has an unknown shape until set_shape() is called.
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
    t = op.outputs[0]
    self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
    t.set_shape([1, 2, 3])
    self.assertEqual([1, 2, 3], t.get_shape())

  def testIterable(self):
    # Graph-mode Tensors are not iterable; iterating must raise TypeError.
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
    t = op.outputs[0]
    self.assertTrue(isinstance(t, ops.Tensor))
    with self.assertRaisesRegexp(TypeError, "iter"):
      for _ in t:
        pass

  def testAddShape(self):
    # Broadcasting [2, 3] + [1, 3] infers a [2, 3] result shape.
    with self.test_session():
      a = array_ops.zeros([2, 3])
      b = array_ops.ones([1, 3])
      c = a + b
      self.assertEqual([2, 3], c.shape)

  def testUnknownDim(self):
    # An unknown (None) dimension is preserved through the add.
    with self.test_session():
      a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
      b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
      c = a + b
      self.assertEqual([2, None, 3], c.shape.as_list())

  def testUnknownShape(self):
    # A fully-unknown operand makes the result shape unknown too.
    with self.test_session():
      a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
      b = array_ops.ones([1, 3])
      c = a + b
      self.assertEqual(tensor_shape.unknown_shape(), c.shape)

  def testScalarShape(self):
    # Scalar + scalar yields a scalar shape.
    with self.test_session():
      a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
      b = array_ops.ones([])
      c = a + b
      self.assertEqual(tensor_shape.scalar(), c.shape)

  def testShapeFunctionError(self):
    # Incompatible shapes must fail at graph-construction time with a
    # message naming the op and both input shapes.
    with self.test_session():
      a = array_ops.ones([1, 2, 3])
      b = array_ops.ones([4, 5, 6])
      with self.assertRaisesRegexp(
          ValueError,
          r"Dimensions must be equal, but are 2 and 5 for 'add' \(op: 'Add'\) "
          r"with input shapes: \[1,2,3\], \[4,5,6\]."):
        _ = a + b
@test_util.with_c_api
class IndexedSlicesTest(test_util.TensorFlowTestCase):
  """Tests conversion and arithmetic on ops.IndexedSlices."""

  def testToTensor(self):
    # Densifying fills unreferenced rows (here row 1) with zeros.
    with self.test_session():
      values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
      indices = constant_op.constant([0, 2])
      dense_shape = constant_op.constant([3, 2])
      x = ops.IndexedSlices(values, indices, dense_shape)
      tensor = ops.convert_to_tensor(x, name="tensor")
      self.assertAllEqual(tensor.eval(), [[2, 3], [0, 0], [5, 7]])

  def testNegation(self):
    # Unary minus negates the values and leaves the indices untouched.
    with self.test_session():
      values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
      indices = constant_op.constant([0, 2])
      x = -ops.IndexedSlices(values, indices)
      self.assertAllEqual(x.values.eval(), [[-2, -3], [-5, -7]])
      self.assertAllEqual(x.indices.eval(), [0, 2])

  def testScalarMul(self):
    # scalar_mul scales values only; indices are preserved.
    with self.test_session():
      values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
      indices = constant_op.constant([0, 2])
      x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
      self.assertAllEqual(x.values.eval(), [[-4, -6], [-10, -14]])
      self.assertAllEqual(x.indices.eval(), [0, 2])
@test_util.with_c_api
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
  """Tests the ops._NodeDef helper that builds NodeDef protos."""

  def testNoArgs(self):
    # Minimal form: only op type and node name are set.
    nodedef = ops._NodeDef("None", "bar")
    self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)

  def testArgs(self):
    # The device argument accepts both a string and a DeviceSpec.
    nodedef = ops._NodeDef("foo", "bar", device="/device:baz:*")
    self.assertProtoEquals("op:'foo' name:'bar' device:'/device:baz:*'",
                           nodedef)
    nodedef = ops._NodeDef("foo", "bar", device=pydev.DeviceSpec(job="j"))
    self.assertProtoEquals("op:'foo' name:'bar' device:'/job:j'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
@test_util.with_c_api
class OperationTest(test_util.TensorFlowTestCase):
  """Tests for `ops.Operation`: construction, inputs/outputs, attrs, and
  the private graph-edge mutation helpers (`_update_input`,
  `_add_control_input`, ...).

  Uses the non-deprecated unittest methods throughout (`assertEqual`
  instead of the deprecated `assertEquals` alias, `assertIsInstance`
  instead of `assertTrue(isinstance(...))`) for clearer failure messages.
  """

  def testNoInputs(self):
    op = test_ops.float_output_string_output(name="myop").a.op
    self.assertEqual(2, len(op.values()))
    self.assertEqual(0, len(op.inputs))
    self.assertEqual("myop", op.name)

    float_t, label_str_t = op.values()
    self.assertEqual(dtypes.float32, float_t.dtype)
    self.assertEqual(op, float_t.op)
    self.assertEqual(0, float_t._value_index)
    self.assertEqual(0, len(float_t.consumers()))
    self.assertEqual("myop", float_t._as_node_def_input())

    self.assertEqual(dtypes.string, label_str_t.dtype)
    self.assertEqual(op, label_str_t.op)
    self.assertEqual(1, label_str_t._value_index)
    self.assertEqual(0, len(label_str_t.consumers()))
    self.assertEqual("myop:1", label_str_t._as_node_def_input())

    self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
                           op.node_def)

  def testNoOutputs(self):
    op1 = test_ops.float_output(name="myop1").op
    float_t, = op1.values()
    op2 = test_ops.float_input(float_t, name="myop2")
    self.assertEqual(0, len(op2.values()))
    self.assertEqual(1, len(op2.inputs))
    self.assertIs(float_t, op2.inputs[0])

    self.assertEqual(1, len(float_t.consumers()))
    self.assertEqual(op2, float_t.consumers()[0])

    self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
    self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
                           op2.node_def)

  def testInputsAndOutputs(self):
    op1 = test_ops.float_output(name="myop1").op
    self.assertEqual(1, len(op1.values()))
    float1_t, = op1.values()

    op2 = test_ops.float_output_string_output(name="myop2").a.op
    self.assertEqual(2, len(op2.values()))
    float2_t, label2_str_t = op2.values()

    # Note that we consume label2_str_t twice here.
    op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
    self.assertEqual(2, len(op3.values()))

    self.assertEqual(1, len(float1_t.consumers()))
    self.assertEqual(op3, float1_t.consumers()[0])

    self.assertEqual(0, len(float2_t.consumers()))

    self.assertEqual(2, len(label2_str_t.consumers()))
    self.assertEqual(op3, label2_str_t.consumers()[0])
    self.assertEqual(op3, label2_str_t.consumers()[1])

    self.assertProtoEquals("""
    op:'Foo2' name:'myop3'
    input:'myop1' input:'myop2:1' input:'myop2:1'
    """, op3.node_def)

  def testDeviceObject(self):
    op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
    op._set_device("/job:goo/device:GPU:0")
    self.assertProtoEquals(
        "op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
    op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
    op._set_device(
        pydev.DeviceSpec(
            job="muu", device_type="CPU", device_index=0))
    self.assertProtoEquals(
        "op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)

  def testReferenceInput(self):
    g = ops.Graph()
    op1 = ops.Operation(
        ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
        [dtypes.float32_ref, dtypes.float32])
    g._add_op(op1)
    self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
    self.assertEqual([], list(op1.inputs))
    ref_t, nonref_t = op1.values()
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    op2 = ops.Operation(
        ops._NodeDef("RefInputFloatInput", "op2"),
        g, [ref_t, nonref_t], [],
        input_types=[dtypes.float32_ref, dtypes.float32])
    g._add_op(op2)
    self.assertProtoEquals(
        "op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
        op2.node_def)
    self.assertEqual([ref_t, nonref_t], list(op2.inputs))
    op3 = ops.Operation(
        ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
    g._add_op(op3)
    self.assertProtoEquals(
        "op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
        op3.node_def)

  def testInvalidNames(self):
    g = ops.Graph()
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", ""), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "_invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "-invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "/invalid"), g)
    with self.assertRaises(ValueError):
      ops.Operation(ops._NodeDef("op", "invalid:0"), g)

  def testNoShapeFunction(self):
    op = test_ops.a()
    self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())

  def testConvertToTensorNestedArray(self):
    with self.test_session():
      values = [[2], [3], [5], [7]]
      tensor = ops.convert_to_tensor(values)
      self.assertAllEqual((4, 1), tensor.get_shape().as_list())
      self.assertAllEqual(values, tensor.eval())

  def testShapeTuple(self):
    with self.test_session():
      c = constant_op.constant(1)
      self.assertEqual(c._shape_tuple(), ())  # pylint: disable=protected-access

  def testConvertToTensorEager(self):
    with context.eager_mode():
      t = constant_op.constant(1)
      self.assertIsInstance(t, ops.EagerTensor)
      converted = ops.convert_to_tensor(t)
      self.assertIsInstance(converted, ops.EagerTensor)
      converted = ops.convert_to_tensor(1)
      self.assertIsInstance(converted, ops.EagerTensor)

  def testConvertToTensorNestedTuple(self):
    with self.test_session():
      values = ((2,), (3,), (5,), (7,))
      tensor = ops.convert_to_tensor(values)
      self.assertAllEqual((4, 1), tensor.get_shape().as_list())
      self.assertAllEqual(values, ops.convert_to_tensor(values).eval())

  def testConvertToTensorNestedTensors(self):
    with self.test_session():
      values = ((2,), (3,), (5,), (7,))
      tensor = ops.convert_to_tensor(
          [constant_op.constant(row) for row in values])
      self.assertAllEqual((4, 1), tensor.get_shape().as_list())
      self.assertAllEqual(values, tensor.eval())
      tensor = ops.convert_to_tensor(
          [[constant_op.constant(v) for v in row] for row in values])
      self.assertAllEqual((4, 1), tensor.get_shape().as_list())
      self.assertAllEqual(values, tensor.eval())

  def testConvertToTensorNestedMix(self):
    with self.test_session():
      values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
      tensor = ops.convert_to_tensor(values)
      self.assertAllEqual((4, 1), tensor.get_shape().as_list())
      self.assertAllEqual(((2,), (3,), (5,), (7,)), tensor.eval())

  def testConvertToTensorPreferred(self):
    with self.test_session():
      values = [2, 3, 5, 7]
      tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
      self.assertEqual(dtypes.float32, tensor.dtype)

    with self.test_session():
      # Convert empty tensor to anything.
      values = []
      tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
      self.assertEqual(dtypes.int64, tensor.dtype)

    with self.test_session():
      # The preferred dtype is a type error and will convert to
      # float32 instead.
      values = [1.23]
      tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
      self.assertEqual(dtypes.float32, tensor.dtype)

  def testConvertToInvalidTensorType(self):
    with self.assertRaises(TypeError):
      # Forcing an invalid dtype should fail with a type error.
      values = [1.23]
      _ = ops.convert_to_tensor(values, dtype=dtypes.int64)

  def testNoConvert(self):
    # Operation cannot be converted to Tensor.
    op = control_flow_ops.no_op()
    with self.assertRaisesRegexp(TypeError,
                                 r"Can't convert Operation '.*' to Tensor"):
      ops.convert_to_tensor(op)

  def testStr(self):
    node_def = ops._NodeDef("None", "op1")
    op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
    self.assertEqual(str(node_def), str(op))

  def testRepr(self):
    op = ops.Operation(
        ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
    self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))

  def testGetAttr(self):
    op = test_ops.default_attrs()
    self.assertEqual(op.get_attr("string_val"), b"abc")
    self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
    self.assertEqual(op.get_attr("int_val"), 123)
    self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
    self.assertEqual(op.get_attr("float_val"), 10.0)
    self.assertEqual(op.get_attr("float_list_val"), [10.0])
    self.assertEqual(op.get_attr("bool_val"), True)
    self.assertEqual(op.get_attr("bool_list_val"), [True, False])
    self.assertEqual(op.get_attr("shape_val"),
                     tensor_shape.as_shape([2, 1]).as_proto())
    self.assertEqual(op.get_attr("shape_list_val"),
                     [tensor_shape.as_shape([]).as_proto(),
                      tensor_shape.as_shape([1]).as_proto()])
    self.assertEqual(op.get_attr("tensor_val"),
                     tensor_util.make_tensor_proto(1, dtypes.int32))
    self.assertEqual(op.get_attr("tensor_list_val"),
                     [tensor_util.make_tensor_proto(1, dtypes.int32)])

    type_val = op.get_attr("type_val")
    # First check that type_val is a DType, because the assertEqual will work
    # no matter what since DType overrides __eq__
    self.assertIsInstance(type_val, dtypes.DType)
    self.assertEqual(type_val, dtypes.int32)

    type_list_val = op.get_attr("type_list_val")
    self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
    self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])

    @function.Defun(dtypes.float32, func_name="MyFunc")
    def func(x):
      return x

    op = test_ops.func_attr(func)
    self.assertEqual(op.get_attr("f"),
                     attr_value_pb2.NameAttrList(name="MyFunc"))

    # Try fetching missing attr
    if ops._USE_C_API:
      error_msg = "Operation 'FuncAttr' has no attr named 'FakeAttr'."
    else:
      error_msg = "No attr named 'FakeAttr' in name: \"FuncAttr\""
    with self.assertRaisesRegexp(ValueError, error_msg):
      op.get_attr("FakeAttr")

  # TODO(b/65162920): remove this test when users who are directly mutating the
  # node_def have been updated to proper usage.
  def testSetAttr(self):
    op = test_ops.int_attr().op
    op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
    # TODO(skyewm): add node_def check
    self.assertEqual(op.get_attr("foo"), 2)

  # TODO(nolivia): test all error cases
  def testAddControlInput(self):
    # The C API dedups redundant control edges, pure Python does not
    if ops._USE_C_API: return
    with ops.Graph().as_default():
      x = constant_op.constant(1).op
      y = constant_op.constant(2).op
      z = constant_op.constant(3).op
      z._add_control_input(x)  # pylint: disable=protected-access
      self.assertEqual(z.control_inputs, [x])
      z._add_control_input(x)  # pylint: disable=protected-access
      self.assertEqual(z.control_inputs, [x, x])
      z._add_control_inputs([x, y, y])  # pylint: disable=protected-access
      self.assertEqual(z.control_inputs, [x, x, x, y, y])

  def testAddControlInputC(self):
    # The C API dedups redundant control edges, pure Python does not
    if not ops._USE_C_API: return
    with ops.Graph().as_default():
      x = constant_op.constant(1).op
      y = constant_op.constant(2).op
      z = constant_op.constant(3).op
      z._add_control_input(x)  # pylint: disable=protected-access
      self.assertEqual(z.control_inputs, [x])
      z._add_control_input(x)  # pylint: disable=protected-access
      self.assertEqual(z.control_inputs, [x])
      z._add_control_inputs([x, y, y])  # pylint: disable=protected-access
      self.assertEqual(z.control_inputs, [x, y])

  def testRemoveAllControlInputs(self):
    a = constant_op.constant(1)
    with ops.control_dependencies([a]):
      b = constant_op.constant(2)

    c = constant_op.constant(3)
    d = constant_op.constant(4)
    e = constant_op.constant(5)
    with ops.control_dependencies([a, c]):
      f = d + e

    self.assertEqual(a.op.control_inputs, [])
    self.assertEqual(b.op.control_inputs, [a.op])
    self.assertEqual(f.op.control_inputs, [a.op, c.op])

    a.op._remove_all_control_inputs()  # pylint: disable=protected-access
    self.assertEqual(a.op.control_inputs, [])

    b.op._remove_all_control_inputs()  # pylint: disable=protected-access
    self.assertEqual(b.op.control_inputs, [])

    f.op._remove_all_control_inputs()  # pylint: disable=protected-access
    self.assertEqual(f.op.control_inputs, [])
    # Data inputs are untouched; only control edges are removed.
    self.assertEqual(list(f.op.inputs), [d, e])

  def testControlInputCycle(self):
    # Non-C API path has a different error message
    if not ops._USE_C_API: return
    graph = ops.Graph()
    with graph.as_default():
      z = constant_op.constant(0)
      x = constant_op.constant(1)
      y = constant_op.constant(2)
      y.op._add_control_input(z.op)  # pylint: disable=protected-access
      y.op._add_control_input(x.op)  # pylint: disable=protected-access
      x.op._add_control_input(y.op)  # pylint: disable=protected-access
    with self.test_session(graph=graph) as sess:
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          "Graph is invalid, contains a cycle with 2 nodes"):
        sess.run(x)

  def testUpdateInput(self):
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant(1)
      y = constant_op.constant(2)
      z = x + y

      z.op._update_input(0, y)  # pylint: disable=protected-access
      self.assertEqual(list(z.op.inputs), [y, y])
      self.assertEqual(x.consumers(), [])
      self.assertEqual(y.consumers(), [z.op, z.op])
      with session.Session(graph=g) as sess:
        self.assertEqual(sess.run(z), 4)

      z.op._update_input(0, x)  # pylint: disable=protected-access
      self.assertEqual(list(z.op.inputs), [x, y])
      self.assertEqual(x.consumers(), [z.op])
      self.assertEqual(y.consumers(), [z.op])
      with session.Session(graph=g) as sess:
        self.assertEqual(sess.run(z), 3)

      z.op._update_input(1, y)  # pylint: disable=protected-access
      self.assertEqual(list(z.op.inputs), [x, y])
      self.assertEqual(x.consumers(), [z.op])
      self.assertEqual(y.consumers(), [z.op])
      with session.Session(graph=g) as sess:
        self.assertEqual(sess.run(z), 3)

  def testUpdateInputGraphError(self):
    g_0 = ops.Graph()
    g_1 = ops.Graph()
    with g_0.as_default():
      x = constant_op.constant(1)
    with g_1.as_default():
      y = constant_op.constant(2)
      z = y * 2
      with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
        z.op._update_input(0, x)  # pylint: disable=protected-access

  def testUpdateInputTypeError(self):
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant(0)
      x = constant_op.constant("")
      y = constant_op.constant(1)
      z = y + w
      z.op._update_input(0, x)  # pylint: disable=protected-access
    with session.Session(graph=g) as sess:
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          "Input 0 of node add was passed string from Const_1:0 incompatible "
          "with expected int32"):
        sess.run(z)

  def testUpdateInputShapeError(self):
    # C-API throws the error differently.
    if ops._USE_C_API:
      return
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant(2, shape=[3, 1])
      x = constant_op.constant(0, shape=[3, 1])
      y = constant_op.constant(1, shape=[2, 2])
      z = w + x
      z.op._update_input(0, y)  # pylint: disable=protected-access

    with session.Session(graph=g) as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   r"Incompatible shapes: \[2,2\] vs. \[3,1\]"):
        sess.run(z)

  def testUpdateInputShapeErrorC(self):
    if not ops._USE_C_API:
      return
    g = ops.Graph()
    with g.as_default():
      w = constant_op.constant(2, shape=[3, 1])
      x = constant_op.constant(0, shape=[3, 1])
      y = constant_op.constant(1, shape=[2, 2])
      z = w + x
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
      z.op._update_input(0, y)  # pylint: disable=protected-access

  def testUpdateInputOutOfRange(self):
    # C-API throws the error differently.
    if ops._USE_C_API: return
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant(1)
    with self.assertRaisesRegexp(IndexError, "list index out of range"):
      x.op._update_input(1, x)  # pylint: disable=protected-access

  def testUpdateInputOutOfRangeC(self):
    # C-API throws the error differently.
    if not ops._USE_C_API: return
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant(1)
    with self.assertRaisesRegexp(
        errors.OutOfRangeError,
        r"Cannot update edge. Input index \[1\] is greater than the number of "
        r"total inputs \[0\]."
    ):
      x.op._update_input(1, x)  # pylint: disable=protected-access

  def testOpDef(self):
    x = constant_op.constant(0)
    y = constant_op.constant(1)
    z = x + y

    # Pure Python mode doesn't create OpDefs for constants
    if ops._USE_C_API:
      self.assertEqual(x.op.op_def.name, "Const")
      self.assertEqual(len(x.op.op_def.input_arg), 0)
      self.assertEqual(len(x.op.op_def.output_arg), 1)

    self.assertEqual(z.op.op_def.name, "Add")
    self.assertEqual(len(z.op.op_def.input_arg), 2)
    self.assertEqual(len(z.op.op_def.output_arg), 1)

  def testInputFromDifferentGraphError(self):
    g_0 = ops.Graph()
    g_1 = ops.Graph()
    with g_0.as_default():
      x = constant_op.constant(1)
    with g_1.as_default():
      y = constant_op.constant(2)
      with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
        y * x  # pylint: disable=pointless-statement

  def testInputsAreImmutable(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      op = test_ops.int_input_int_output(x, name="myop").op
    with self.assertRaisesRegexp(
        AttributeError, "'_InputList' object has no attribute 'append'"):
      op.inputs.append(None)
@test_util.with_c_api
class CreateOpTest(test_util.TensorFlowTestCase):
  """Tests for `Graph.create_op`: NodeDef contents, devices, ref inputs,
  and the finalized-graph guard."""

  def testNodeDefArgs(self):
    graph = ops.Graph()
    first = graph.create_op(
        "FloatOutput", [], [dtypes.float32], None, name="myop1")
    with graph.device("/device:GPU:0"):
      second = graph.create_op(
          "FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
          name="myop2")
    third = graph.create_op(
        "Foo3",
        [list(first.values())[0],
         list(second.values())[1],
         list(second.values())[0]],
        [dtypes.float32, dtypes.int32],
        None,
        name="myop3")
    # Only the op created inside the device scope carries a device.
    self.assertDeviceEqual(None, first.device)
    self.assertDeviceEqual("/device:GPU:0", second.device)
    self.assertDeviceEqual(None, third.device)
    self.assertProtoEquals("name:'myop1' op:'FloatOutput'", first.node_def)
    self.assertProtoEquals(
        "name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
        second.node_def)
    self.assertProtoEquals(
        "name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
        third.node_def)

  def testReferenceInput(self):
    graph = ops.Graph()
    first = graph.create_op(
        "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
        name="op1")
    self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
                           first.node_def)
    ref_t, nonref_t = first.values()
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    second = graph.create_op(
        "RefInputFloatInput", [ref_t, nonref_t], [],
        input_types=[dtypes.float32_ref, dtypes.float32],
        name="op2")
    self.assertProtoEquals(
        "op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
        second.node_def)
    third = graph.create_op(
        "TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
    self.assertProtoEquals(
        "op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
        third.node_def)

  def testFinalized(self):
    graph = ops.Graph()
    graph.finalize()
    with self.assertRaises(RuntimeError):
      graph.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")

    # Test unfinalize.
    graph._unsafe_unfinalize()
    graph.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
@test_util.with_c_api
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
  """Tests for `Graph._create_op_from_tf_operation` and
  `Graph._add_new_tf_operations`.

  Each test has two branches on `ops._USE_C_API`: the C-API path creates the
  op via `ops._create_c_op` and wraps it, while the pure-Python path creates
  the equivalent op directly -- both are expected to behave identically.
  """

  def testBasic(self):
    # Wrapped op should expose name/type/inputs/outputs/graph/traceback and be
    # registered in the graph's lookup tables.
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      if ops._USE_C_API:
        c_op = ops._create_c_op(
            g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
        op = g._create_op_from_tf_operation(c_op)
      else:
        # Test pure-Python version to make sure C API has same behavior.
        op = test_ops.int_input_int_output(x, name="myop").op

    self.assertEqual(op.name, "myop")
    self.assertEqual(op.type, "IntInputIntOutput")
    self.assertEqual(len(op.outputs), 1)
    self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
    self.assertEqual(list(op.inputs), [x])
    self.assertEqual(op.control_inputs, [])
    self.assertEqual(op.graph, g)
    self.assertEqual(x.consumers(), [op])
    self.assertIsNotNone(op.traceback)
    self.assertEqual(g.get_operation_by_name("myop"), op)
    self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])

  @test_util.enable_c_shapes
  def testShape(self):
    # Output shape should be inferred for the wrapped op (Identity preserves
    # the 2x3 input shape).
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      if ops._USE_C_API:
        c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
        op = g._create_op_from_tf_operation(c_op)
      else:
        # Test pure-Python version to make sure C API has same behavior.
        op = array_ops.identity(x, name="myop").op

    self.assertEqual(op.name, "myop")
    self.assertEqual(op.type, "Identity")
    self.assertEqual(len(op.outputs), 1)
    self.assertEqual(op.outputs[0].shape, tensor_shape.matrix(2, 3))

  def testUniqueName(self):
    # Names taken by wrapped ops must be uniquified away for later ops.
    g = ops.Graph()
    with g.as_default():
      if ops._USE_C_API:
        c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
        c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
        op = g._create_op_from_tf_operation(c_op)
        op2 = g._create_op_from_tf_operation(c_op2)
      else:
        # Test pure-Python version to make sure C API has same behavior.
        op = test_ops.int_output(name="myop").op
        op2 = test_ops.int_output(name="myop_1").op

      # Create ops with same names as op1 and op2. We expect the new names to be
      # uniquified.
      op3 = test_ops.int_output(name="myop").op
      op4 = test_ops.int_output(name="myop_1").op

    self.assertEqual(op.name, "myop")
    self.assertEqual(op2.name, "myop_1")
    self.assertEqual(op3.name, "myop_2")
    self.assertEqual(op4.name, "myop_1_1")

  def testCond(self):
    # An op added inside a cond branch must get the cond's control flow
    # context and have its input routed through a Switch.
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()

      def true_fn():
        if ops._USE_C_API:
          ops._create_c_op(ops.get_default_graph(),
                           ops._NodeDef("IntInput", "cond/myop"), [x], [])
          new_ops = g._add_new_tf_operations()
          self.assertEqual(len(new_ops), 1)
        else:
          # Test pure-Python version to make sure C API has same behavior.
          test_ops.int_input(x, name="myop")
        return x

      control_flow_ops.cond(x < 10, true_fn, lambda: x)

    op = g.get_operation_by_name("cond/myop")
    self.assertIsNotNone(op)
    self.assertEqual(op.name, "cond/myop")
    self.assertEqual(op.type, "IntInput")
    self.assertEqual(op.outputs, [])
    op_input = op.inputs[0].op
    self.assertEqual(op_input.type, "Switch")
    self.assertEqual(op_input.inputs[0], x)
    self.assertEqual(op.graph, g)
    # pylint: disable=protected-access
    self.assertIsNotNone(op._get_control_flow_context())
    self.assertEqual(op._get_control_flow_context().name,
                     "cond/cond_text")
    # pylint: enable=protected-access

  def testWhileLoop(self):
    # An op added inside a while body must get the loop's control flow
    # context and have its external input routed through an Enter node.
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()

      def body(i):
        if ops._USE_C_API:
          ops._create_c_op(ops.get_default_graph(),
                           ops._NodeDef("IntInput", "myloop/myop"), [x], [])
          new_ops = g._add_new_tf_operations()
          self.assertEqual(len(new_ops), 1)
        else:
          # Test pure-Python version to make sure C API has same behavior.
          test_ops.int_input(x, name="myop")
        return i

      control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")

    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    self.assertEqual(op.name, "myloop/myop")
    self.assertEqual(op.type, "IntInput")
    self.assertEqual(op.outputs, [])
    op_input = op.inputs[0].op
    self.assertEqual(op_input.type, "Enter")
    self.assertEqual(list(op_input.inputs), [x])
    self.assertEqual(op.graph, g)
    # pylint: disable=protected-access
    self.assertIsNotNone(op._get_control_flow_context())
    self.assertEqual(op._get_control_flow_context().name,
                     "myloop/while_context")
    # pylint: enable=protected-access

  def testWhileLoopWithInternalControlDep(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()

      def body(i):
        c = constant_op.constant(1.0, name="c")
        if ops._USE_C_API:
          ops._create_c_op(ops.get_default_graph(),
                           ops._NodeDef("IntInput", "myloop/myop"), [x], [])
          with ops.control_dependencies([c]):
            new_ops = g._add_new_tf_operations()
            self.assertEqual(len(new_ops), 1)
        else:
          with ops.control_dependencies([c]):
            test_ops.int_input(x, name="myop")
        return i

      control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")

    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    c = g.get_operation_by_name("myloop/c")
    self.assertIsNotNone(c)
    # Internal control dep is preserved
    self.assertEqual(op.control_inputs, [c])

  def testWhileLoopWithExternalControlDep(self):
    g = ops.Graph()
    with g.as_default():
      x = test_ops.int_output()
      c = constant_op.constant(1.0)

      def body(i):
        if ops._USE_C_API:
          ops._create_c_op(ops.get_default_graph(),
                           ops._NodeDef("IntInput", "myloop/myop"), [x], [])
          with ops.control_dependencies([c]):
            new_ops = g._add_new_tf_operations()
            self.assertEqual(len(new_ops), 1)
        else:
          with ops.control_dependencies([c]):
            test_ops.int_input(x, name="myop")
        return i

      control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")

    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    # External control dep is removed and replaced with internal control dep
    self.assertNotEqual(op.control_inputs[0], c.op)
    self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
@test_util.with_c_api
class ApplyOpTest(test_util.TensorFlowTestCase):
  """Exercises the `_apply_op` helper: return conventions and NodeDefs."""

  def testNodeDefArgs(self):
    graph = ops.Graph()
    out_a = _apply_op(graph, "FloatOutput", [], [dtypes.float32], name="myop1")
    with graph.device("/device:GPU:0"):
      out_b = _apply_op(
          graph, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32],
          name="myop2")
    out_c = _apply_op(
        graph,
        "Foo1", [out_a, out_b[1], out_b[0]], [dtypes.float32, dtypes.int32],
        name="myop3")
    # One output comes back as a bare Tensor; several come back as a list.
    self.assertTrue(isinstance(out_a, ops.Tensor))
    self.assertTrue(isinstance(out_b, list))
    self.assertTrue(isinstance(out_c, list))
    self.assertTrue(isinstance(out_c[0], ops.Tensor))
    self.assertEqual("myop1", out_a._as_node_def_input())
    self.assertEqual("myop2", out_b[0]._as_node_def_input())
    self.assertEqual("myop2:1", out_b[1]._as_node_def_input())
    self.assertEqual("myop3", out_c[0]._as_node_def_input())
    # Validate that we got the right ops as well
    self.assertProtoEquals("name:'myop1' op:'FloatOutput'", out_a.op.node_def)
    self.assertProtoEquals(
        "name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
        out_b[0].op.node_def)
    self.assertProtoEquals(
        "name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
        out_c[0].op.node_def)

  def testReferenceInput(self):
    graph = ops.Graph()
    ref_out, plain_out = _apply_op(
        graph, "RefOutputFloatOutput", [],
        [dtypes.float32_ref, dtypes.float32], name="op1")
    self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
                           ref_out.op.node_def)
    # NOTE(mrry): Must specify input_types to preserve ref-typed input.
    second_out = _apply_op(
        graph,
        "RefInputFloatInputIntOutput", [ref_out, plain_out], [dtypes.int32],
        input_types=[dtypes.float32_ref, dtypes.float32],
        name="op2")
    self.assertProtoEquals(
        "op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
        second_out.op.node_def)
    third_out = _apply_op(
        graph, "TwoFloatInputsIntOutput", [ref_out, plain_out], [dtypes.int32],
        name="op3")
    self.assertProtoEquals(
        "op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
        third_out.op.node_def)
@test_util.with_c_api
class NameStackTest(test_util.TensorFlowTestCase):
  """Tests for `Graph.unique_name` and the name-scope stack.

  NOTE: `unique_name` is stateful -- every call with the default
  `mark_as_used=True` consumes the returned name, so the exact order of the
  calls below is part of what is being tested.
  """

  def testBasics(self):
    g = ops.Graph()
    # mark_as_used=False only peeks at the next name, so repeating it
    # returns the same candidate.
    self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo_1", g.unique_name("foo"))
    self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo_2", g.unique_name("foo"))
    # Uniquifying an already-suffixed name appends another counter.
    self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
    self.assertEqual("foo_1_1", g.unique_name("foo_1"))
    self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
    self.assertEqual("foo_1_2", g.unique_name("foo_1"))
    self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
    self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
    with g.name_scope("bar"):
      self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
      self.assertEqual("bar/foo", g.unique_name("foo"))
      self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
      self.assertEqual("bar/foo_1", g.unique_name("foo"))
      # name_scope(None) resets to the root scope even when nested.
      with g.name_scope(None):
        self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
        self.assertEqual("foo_3", g.unique_name("foo"))
      with g.name_scope("baz"):
        self.assertEqual(
            "bar/baz/foo", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz/foo", g.unique_name("foo"))
        self.assertEqual(
            "bar/baz/foo_1", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
      # Re-entering a used scope name uniquifies the scope itself.
      with g.name_scope("baz"):
        self.assertEqual(
            "bar/baz_1/foo", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
        self.assertEqual(
            "bar/baz_1/foo_1", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
    with g.name_scope("quux"):
      self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
      self.assertEqual("quux/foo", g.unique_name("foo"))
    with g.name_scope("bar"):
      with g.name_scope("baz"):
        self.assertEqual(
            "bar_1/baz/foo", g.unique_name(
                "foo", mark_as_used=False))
        self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
    self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
    self.assertEqual("foo_4", g.unique_name("foo"))
    self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
    self.assertEqual("bar_2", g.unique_name("bar"))

  def testNameAndVariableScope(self):
    # Graph name scopes and variable scopes compose into one name stack.
    with self.test_session() as sess:
      with sess.graph.name_scope("l0"):
        with variable_scope.variable_scope("l1"):
          with sess.graph.name_scope("l1") as scope:
            self.assertEqual("l0/l1/l1/", scope)
            self.assertEqual(
                "l0/l1/l1/foo",
                sess.graph.unique_name(
                    "foo", mark_as_used=False))
            self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
          with sess.graph.name_scope("l2") as scope:
            self.assertEqual("l0/l1/l2/", scope)
            self.assertEqual(
                "l0/l1/l2/foo",
                sess.graph.unique_name(
                    "foo", mark_as_used=False))
            self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))

  def testOutOfOrderUniqueName(self):
    # Explicitly claiming "foo_2" first: plain "foo" requests skip over it.
    g = ops.Graph()
    self.assertEqual("foo_2", g.unique_name("foo_2"))
    self.assertEqual("foo", g.unique_name("foo"))
    self.assertEqual("foo_1", g.unique_name("foo"))
    self.assertEqual("foo_3", g.unique_name("foo"))

  def testInvalidNameRaisesError(self):
    g = ops.Graph()
    with g.name_scope(""):  # Should not raise
      pass
    with g.name_scope("foo/"):  # Should not raise
      with g.name_scope("_bar"):  # Should not raise
        pass
    with self.assertRaises(ValueError):
      with g.name_scope("foo:0"):
        pass
    with self.assertRaises(ValueError):
      with g.name_scope("_bar"):
        pass
@test_util.with_c_api
class NameTest(test_util.TensorFlowTestCase):
  """Tests for op name generation and `Graph.name_scope` semantics.

  NOTE: name generation is stateful (each created op consumes a name), so
  the order of `create_op` calls below matters.
  """

  def testGenerateName(self):
    g = ops.Graph()
    # With no explicit name, the op type is used and then uniquified.
    op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
    self.assertEqual("TwoFloatOutputs", op0.name)
    self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
    self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)

    op1 = g.create_op("FloatOutput", [], [dtypes.float32])
    self.assertEqual("FloatOutput", op1.name)
    self.assertEqual("FloatOutput:0", op1.outputs[0].name)

    op2 = g.create_op("FloatOutput", [], [dtypes.float32])
    self.assertEqual("FloatOutput_1", op2.name)
    self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)

    op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
    self.assertEqual("my_op", op3.name)
    self.assertEqual("my_op:0", op3.outputs[0].name)

  def testNameScope(self):
    g = ops.Graph()

    with g.name_scope("foo") as foo:
      self.assertEqual("foo/", foo)
      with g.name_scope("foo2") as foo2:
        self.assertEqual("foo/foo2/", foo2)
      # name_scope(None) and name_scope("") both reset to the root scope.
      with g.name_scope(None) as empty1:
        self.assertEqual("", empty1)
        with g.name_scope("foo3") as foo3:
          self.assertEqual("foo3/", foo3)
      with g.name_scope("") as empty2:
        self.assertEqual("", empty2)

    self.assertEqual("FloatOutput",
                     g.create_op("FloatOutput", [], [dtypes.float32]).name)

    with g.name_scope("bar") as scope:
      self.assertEqual("bar/FloatOutput",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
      self.assertEqual("bar/FloatOutput_1",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from "with .. as", that values is used as-is.
      self.assertEqual(
          "bar", g.create_op(
              "FloatOutput", [], [dtypes.float32], name=scope).name)

    with g.name_scope("baz") as scope:
      with g.name_scope("quux"):
        self.assertEqual("baz/quux/FloatOutput",
                         g.create_op("FloatOutput", [], [dtypes.float32]).name)
      # If you use the value from the enclosing "with .. as", nothing is pushed.
      with g.name_scope(scope):
        self.assertEqual("baz/FloatOutput",
                         g.create_op("FloatOutput", [], [dtypes.float32]).name)
        self.assertEqual(
            "baz", g.create_op(
                "FloatOutput", [], [dtypes.float32], name=scope).name)
        # A trailing slash makes the explicit name absolute (no scope prefix).
        self.assertEqual(
            "trailing",
            g.create_op(
                "FloatOutput", [], [dtypes.float32], name="trailing/").name)

    with g.name_scope("bar"):
      self.assertEqual("bar_1/FloatOutput",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)

    with g.name_scope("bar/"):
      self.assertEqual("bar/FloatOutput_2",
                       g.create_op("FloatOutput", [], [dtypes.float32]).name)
@test_util.with_c_api
class DeviceTest(test_util.TensorFlowTestCase):
  """Tests for `Graph.device()`: string/DeviceSpec/function forms, nesting,
  merging, wildcards, clearing with None, and overwriting device functions."""

  def testNoDevice(self):
    """An op created outside any device scope has no device assigned."""
    g = ops.Graph()
    op = g.create_op("FloatOutput", [], [dtypes.float32])
    self.assertDeviceEqual(None, op.device)
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)

  def testDevicePartialString(self):
    """A partial device string is recorded verbatim on the op."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)

  def testDeviceFull(self):
    """A fully-specified DeviceSpec is serialized to its canonical string."""
    g = ops.Graph()
    with g.device(
        pydev.DeviceSpec(
            job="worker", replica=2, task=0, device_type="CPU",
            device_index=3)):
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)

  def testNesting(self):
    """Inner device scopes extend the outer spec; leaving a scope restores
    the enclosing one."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker/replica:3/task:0"):
        g.create_op("FloatOutput", [], [dtypes.float32])
      # Back in the outer scope after the inner `with` exits.
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)

  def testNestingString(self):
    """Same nesting behavior when both scopes are given as plain strings."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker/replica:3/task:0"):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)

  def testNestingOverrideGpuCpu(self):
    """An inner scope may override the device component of the outer one."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2/device:CPU:1"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker/replica:2/device:GPU:2"):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)

  def testNestingWithMergeDeviceFunction(self):
    """pydev.merge_device combines each partial spec with the enclosing one;
    merge_device(None) keeps the current spec unchanged."""
    g = ops.Graph()
    with g.device(pydev.merge_device("/device:GPU:0")):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(pydev.merge_device("/job:worker")):
        g.create_op("FloatOutput", [], [dtypes.float32])
        with g.device(pydev.merge_device("/device:CPU:0")):
          g.create_op("FloatOutput", [], [dtypes.float32])
          with g.device(pydev.merge_device("/job:ps")):
            g.create_op("FloatOutput", [], [dtypes.float32])
            with g.device(pydev.merge_device(None)):
              g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)

  def testNestingWithDeviceStrings(self):
    """Plain strings merge the same way as explicit merge_device calls;
    "" keeps the current spec unchanged."""
    g = ops.Graph()
    with g.device("/device:GPU:0"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:worker"):
        g.create_op("FloatOutput", [], [dtypes.float32])
        with g.device("/device:CPU:0"):
          g.create_op("FloatOutput", [], [dtypes.float32])
          with g.device("/job:ps"):
            g.create_op("FloatOutput", [], [dtypes.float32])
            with g.device(""):
              g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)

  def testNestingWithDeviceStringWildcard(self):
    """A "*" device index merges with (and is absorbed by) a concrete index;
    a new device type replaces the previous one."""
    g = ops.Graph()
    with g.device("/device:GPU:7"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/device:GPU:*"):
        # Wildcard GPU merges with the concrete GPU:7 above.
        g.create_op("FloatOutput", [], [dtypes.float32])
    with g.device("/device:CPU:*"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/device:CPU:5"):
        g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)

  def testNoneClearsDefault(self):
    """device(None) clears the enclosing device for ops created inside it."""
    g = ops.Graph()
    with g.device("/job:worker/replica:2/device:CPU:1"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)

  def testNoneIgnoresOuterDeviceFunction(self):
    """device(None) also disables an enclosing device *function*."""
    g = ops.Graph()
    with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):
        g.create_op("FloatOutput", [], [dtypes.float32])
      g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)

  def _overwritingDeviceFunction(self, unused_op):
    # This device function unconditionally overwrites the device of ops.
    #
    # NOTE(mrry): Writing device functions like this is not
    # recommended. Instead, in most cases you should use
    # `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
    # argument to `tf.device()` and the device component will be merged in.
    return "/job:overwrite"

  def testOverwritingBehavior(self):
    """An outer device *function* takes precedence over inner string or
    merge_device scopes, unless disabled with device(None)."""
    g = ops.Graph()
    with g.device(self._overwritingDeviceFunction):
      g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device("/job:ps"):  # Will be overwritten.
        g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(pydev.merge_device("/job:ps")):  # Will be overwritten.
        g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):  # Disables overwriting device function
        with g.device("/job:ps"):
          g.create_op("FloatOutput", [], [dtypes.float32])
      with g.device(None):  # Disables overwriting device function
        with g.device(pydev.merge_device("/job:ps")):
          g.create_op("FloatOutput", [], [dtypes.float32])
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
@test_util.with_c_api
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
  """Checks that per-thread graph state (device stack, colocation stack,
  control dependencies, name stack) is isolated between threads after
  `Graph.switch_to_thread_local()`.

  Each test starts several threads that each push some graph state, then
  uses a pair of events to interleave them deterministically: every thread
  signals `has_mutated_graph` after pushing its state, and only creates its
  op once the main thread sets `should_continue`. This guarantees all
  threads have mutated the (thread-local) stacks before any op is created.
  """

  class TestThread(threading.Thread):
    """Base worker thread; subclasses override run() per test."""

    def __init__(self, graph, replica_id):
      super(MultithreadedGraphStateTest.TestThread, self).__init__()
      self._graph = graph
      self._replica_id = replica_id
      # This thread sets this event when it mutated the graph. The caller can
      # wait for that.
      self.has_mutated_graph = threading.Event()
      # This thread waits for when it should continue. The caller can set this
      # event.
      self.should_continue = threading.Event()

    def run(self):
      # Mutate a graph's stack, then set `has_mutated_graph`, then wait for
      # `should_continue`, then add an op to the graph affected by the graph's
      # stack.
      raise NotImplementedError("must be implemented in descendants")

  def testDeviceFunctionStack(self):
    """Each thread's device scope applies only to its own ops."""

    class DeviceSettingThread(self.TestThread):

      def run(self):
        with g.device("/job:worker/replica:{}".format(self._replica_id)):
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="FloatOutput_{}".format(self._replica_id))

    g = ops.Graph()
    # If `switch_to_thread_local()` isn't called, then device placement of the
    # ops below is not deterministic.
    g.switch_to_thread_local()
    threads = [DeviceSettingThread(g, i) for i in range(3)]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)

  def testColocateWith(self):
    """Each thread's colocation scope applies only to its own ops."""

    class ColocatingThread(self.TestThread):

      def __init__(self, graph, replica_id, op_to_colocate_with):
        super(ColocatingThread, self).__init__(graph, replica_id)
        self._op_to_colocate_with = op_to_colocate_with

      def run(self):
        with g.colocate_with(self._op_to_colocate_with):
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="FloatOutput_{}".format(self._replica_id))

    g = ops.Graph()
    ops_to_colocate_with = []
    for i in range(3):
      with g.device("/job:worker/replica:{}".format(i)):
        ops_to_colocate_with.append(
            g.create_op(
                "FloatOutput", [], [dtypes.float32],
                name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local()` isn't called, then `device` and `attr`
    # values for the ops below are not deterministic.
    g.switch_to_thread_local()
    threads = [
        ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
    ]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)

  def testControlDependencies(self):
    """Each thread's control-dependency scope applies only to its own ops."""

    class DependingThread(self.TestThread):

      def __init__(self, graph, replica_id, dependency_op):
        super(DependingThread, self).__init__(graph, replica_id)
        self._dependency_op = dependency_op

      def run(self):
        with g.control_dependencies([self._dependency_op]):
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="FloatOutput_{}".format(self._replica_id))

    g = ops.Graph()
    dependency_ops = []
    for i in range(3):
      dependency_ops.append(
          g.create_op(
              "FloatOutput", [], [dtypes.float32],
              name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local()` isn't called, then `input` values for the
    # ops below are not deterministic.
    g.switch_to_thread_local()
    threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    gd = g.as_graph_def()
    self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)

  def testNameStack(self):
    """Name scopes are uniquified across threads ("foo", "foo_1", "foo_2"),
    but op-name uniquification within a scope stays per-thread."""

    class NameSettingThread(self.TestThread):

      def run(self):
        with g.name_scope("foo"):
          op1 = g.create_op("FloatOutput", [], [dtypes.float32])
          self.has_mutated_graph.set()
          self.should_continue.wait()
          self.should_continue.clear()
          op2 = g.create_op("FloatOutput", [], [dtypes.float32])
          self.result = (op1, op2)

    g = ops.Graph()
    threads = [NameSettingThread(g, i) for i in range(3)]
    for t in threads:
      t.start()
      t.has_mutated_graph.wait()
      t.has_mutated_graph.clear()
    for t in threads:
      t.should_continue.set()
      t.join()
    suffixes = ["", "_1", "_2"]
    for t, s in zip(threads, suffixes):
      self.assertEquals("foo" + s + "/FloatOutput", t.result[0].name)
      self.assertEquals("foo" + s + "/FloatOutput_1", t.result[1].name)
@test_util.with_c_api
class ObjectWithName(object):
  """Minimal value object exposing a read-only ``name`` attribute.

  Used by CollectionTest to exercise scope-based collection filtering,
  which matches on an item's ``name``.
  """

  def __init__(self, name):
    # Stored privately and surfaced through the read-only property below.
    self._name = name

  @property
  def name(self):
    """The name given at construction time."""
    return self._name
@test_util.with_c_api
class CollectionTest(test_util.TensorFlowTestCase):
  """Tests for Graph collections: add/get, scope filtering, ref vs. snapshot
  semantics, and the various iterables accepted by add_to_collections."""

  def test_get_collections(self):
    """Graph.collections lists the names of non-empty collections."""
    g = ops.Graph()
    self.assertSequenceEqual(g.collections, [])
    g.add_to_collection("key", 12)
    g.add_to_collection("key", 15)
    # Adding twice to the same collection yields a single collection name.
    self.assertSequenceEqual(g.collections, ["key"])
    g.add_to_collection("other", "foo")
    self.assertSequenceEqual(sorted(g.collections), ["key", "other"])

  def test_add_to_collection(self):
    """get_collection preserves insertion order, supports scope filtering,
    and returns a copy, while get_collection_ref returns the live list."""
    g = ops.Graph()
    g.add_to_collection("key", 12)
    g.add_to_collection("other", "foo")
    g.add_to_collection("key", 34)

    # Note that only blank1 is returned by the scope-filtered
    # get_collection("blah", ...) calls below; 27 has no `name` attribute
    # and blank2's name does not match the scope.
    g.add_to_collection("blah", 27)
    blank1 = ObjectWithName("prefix/foo")
    g.add_to_collection("blah", blank1)
    blank2 = ObjectWithName("junk/foo")
    g.add_to_collection("blah", blank2)

    self.assertEqual([12, 34], g.get_collection("key"))
    self.assertEqual([], g.get_collection("nothing"))
    self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
    self.assertEqual([blank1], g.get_collection("blah", "prefix"))
    # The scope argument is a regular expression.
    self.assertEqual([blank1], g.get_collection("blah", ".*x"))

    # Make sure that get_collection() returns a first-level
    # copy of the collection, while get_collection_ref() returns
    # the original list.
    other_collection_snapshot = g.get_collection("other")
    other_collection_ref = g.get_collection_ref("other")
    self.assertEqual(["foo"], other_collection_snapshot)
    self.assertEqual(["foo"], other_collection_ref)
    g.add_to_collection("other", "bar")
    # The snapshot is unaffected by the later addition; the ref sees it.
    self.assertEqual(["foo"], other_collection_snapshot)
    self.assertEqual(["foo", "bar"], other_collection_ref)
    self.assertEqual(["foo", "bar"], g.get_collection("other"))
    self.assertTrue(other_collection_ref is g.get_collection_ref("other"))

    # Verify that getting an empty collection ref returns a modifiable list.
    empty_coll_ref = g.get_collection_ref("empty")
    self.assertEqual([], empty_coll_ref)
    empty_coll = g.get_collection("empty")
    self.assertEqual([], empty_coll)
    self.assertFalse(empty_coll is empty_coll_ref)
    empty_coll_ref2 = g.get_collection_ref("empty")
    self.assertTrue(empty_coll_ref2 is empty_coll_ref)
    # Add to the collection.
    empty_coll_ref.append("something")
    self.assertEqual(["something"], empty_coll_ref)
    self.assertEqual(["something"], empty_coll_ref2)
    self.assertEqual([], empty_coll)
    self.assertEqual(["something"], g.get_collection("empty"))
    empty_coll_ref3 = g.get_collection_ref("empty")
    self.assertTrue(empty_coll_ref3 is empty_coll_ref)

  def test_add_to_collections_uniquify(self):
    g = ops.Graph()
    g.add_to_collections([1, 2, 1], "key")
    # Make sure "key" is not added twice
    self.assertEqual(["key"], g.get_collection(1))

  def test_add_to_collections_from_list(self):
    g = ops.Graph()
    g.add_to_collections(["abc", "123"], "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_tuple(self):
    g = ops.Graph()
    g.add_to_collections(("abc", "123"), "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_generator(self):
    g = ops.Graph()

    def generator():
      yield "abc"
      yield "123"

    g.add_to_collections(generator(), "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_set(self):
    g = ops.Graph()
    g.add_to_collections(set(["abc", "123"]), "key")
    self.assertEqual(["key"], g.get_collection("abc"))
    self.assertEqual(["key"], g.get_collection("123"))

  def test_add_to_collections_from_string(self):
    # A single string is treated as one collection name, not as an iterable
    # of characters.
    g = ops.Graph()
    g.add_to_collections("abc", "key")
    self.assertEqual(["key"], g.get_collection("abc"))

  def test_default_graph(self):
    """Module-level add_to_collection/get_collection use the default graph."""
    with ops.Graph().as_default():
      ops.add_to_collection("key", 90)
      ops.add_to_collection("key", 100)
      # Collections are ordered.
      self.assertEqual([90, 100], ops.get_collection("key"))
# Register "FloatOutput" as having no gradient; presumably relied on by the
# gradient-registry tests below — TODO(review): confirm against
# ops.get_gradient_function's handling of non-differentiable ops.
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad):  # pylint: disable=invalid-name
  """Gradient for "CopyOp": pass the incoming gradient through unchanged."""
  del op  # Unused by this gradient.
  return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad):  # pylint: disable=invalid-name
  """Override gradient used via gradient_override_map in RegistrationTest."""
  del op  # Unused by this gradient.
  return x_grad
@test_util.with_c_api
class RegistrationTest(test_util.TensorFlowTestCase):
  """Tests for gradient-function registration and lookup."""

  def testRegisterGradients(self):
    """Looking up the gradient of a CopyOp finds the registered _CopyGrad."""
    x = test_ops.float_output()
    y = test_ops.copy_op(x)
    fn = ops.get_gradient_function(y.op)
    self.assertEqual(_CopyGrad, fn)

  def testOverrideGradients(self):
    """gradient_override_map redirects lookup to the override registration."""
    g = ops.Graph()
    with g.as_default():
      x = test_ops.float_output()
      with g.gradient_override_map({"CopyOp": "copy_override"}):
        y = test_ops.copy_op(x)
      fn = ops.get_gradient_function(y.op)
      self.assertEqual(_CopyOverrideGrad, fn)

  def testNonExistentOverride(self):
    """Mapping to an unregistered name raises LookupError at lookup time."""
    g = ops.Graph()
    with g.as_default():
      x = test_ops.float_output()
      with g.gradient_override_map({"CopyOp": "unknown_override"}):
        y = test_ops.copy_op(x)
      with self.assertRaisesRegexp(LookupError, "unknown_override"):
        ops.get_gradient_function(y.op)
@test_util.with_c_api
class ComparisonTest(test_util.TensorFlowTestCase):
  """Checks that Tensors support the `in` membership operator."""

  def testMembershipAllowed(self):
    g = ops.Graph()
    t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
    t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
    self.assertTrue(isinstance(t1, ops.Tensor))
    self.assertTrue(isinstance(t2, ops.Tensor))
    # The literal `in` operator is the thing under test here, so these are
    # deliberately not written with assertIn/assertNotIn.
    self.assertTrue(t1 in [t1])
    self.assertTrue(t1 not in [t2])
@test_util.with_c_api
class ControlDependenciesTest(test_util.TensorFlowTestCase):
  """Tests for Graph.control_dependencies: nesting, clearing with None,
  pruning of dominated/redundant deps, and eager behavior."""

  @test_util.enable_c_api
  def testBasic(self):
    g = ops.Graph()
    with g.as_default():
      # Creating unregistered ops with _apply_op() doesn't work with the C API
      # TODO(skyewm): address this more consistently. Possible solutions are
      # to use registered ops in all tests, create a way to register ops in
      # Python tests, or conditionally disable the op registration check in
      # the C API.
      a = constant_op.constant(1.0)
      b = constant_op.constant(1.0)
      with g.control_dependencies([a]):
        c = constant_op.constant(1.0)
        d = array_ops.identity(b)
        e = array_ops.identity(c)
    self.assertEqual(c.op.control_inputs, [a.op])
    self.assertEqual(d.op.control_inputs, [a.op])
    # e should be dominated by c.
    self.assertEqual(e.op.control_inputs, [])

  @test_util.run_in_graph_and_eager_modes()
  def testEager(self):
    """In eager mode a callable dependency is invoked exactly once; in graph
    mode its result becomes a regular control input."""

    def future():
      future.calls += 1
      return constant_op.constant(2.0)

    future.calls = 0

    if context.executing_eagerly():
      a = constant_op.constant(1.0)
      b = future
      with ops.control_dependencies([a, b]):
        c = constant_op.constant(3.0)
      self.assertEqual(future.calls, 1)
    else:
      g = ops.Graph()
      with g.as_default():
        a = constant_op.constant(1.0)
        b = future()
        with g.control_dependencies([a, b]):
          c = constant_op.constant(3.0)
      self.assertEqual(c.op.control_inputs, [a.op, b.op])
      self.assertEqual(future.calls, 1)

  def testBasicWithConversion(self):
    """Objects with _as_graph_element() are accepted as dependencies."""
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    class ConvertibleObj(object):

      def _as_graph_element(self):
        return a

    with g.control_dependencies([ConvertibleObj()]):
      c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertEqual(c.op.control_inputs, [a.op])

  def testNested(self):
    """One flat list and four nested scopes accumulate the same deps."""
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1, a_2, a_3, a_4]):
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies([a_3]):
          with g.control_dependencies([a_4]):
            b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
                          b_1.op.control_inputs)
    self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)

  def testClear(self):
    """control_dependencies(None) clears outer deps for its scope only."""
    g = ops.Graph()
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with g.control_dependencies(None):
          with g.control_dependencies([a_3]):
            with g.control_dependencies([a_4]):
              # deps [a_3, a_4]
              b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
            # deps = [a_3]
            b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
          # deps back to None
          b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
        # deps back to [a_1, a_2]
        b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      # deps back to [a_1]
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies(None):
        # deps are None again
        b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
    self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
    self.assertItemsEqual([], b_none.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([], b_none2.op.control_inputs)

  def testComplex(self):
    g = ops.Graph()

    # Usage pattern:
    # * Nodes a_i are constants defined at the outermost scope, and are used
    #   as control inputs for the ith nested scope.
    # * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
    # * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
    # * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
    # * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
    a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    with g.control_dependencies([a_1]):
      b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                      [dtypes.float32])
      c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                      [dtypes.float32])
      d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
                      [dtypes.float32])
      e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies([a_2]):
        b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                        [dtypes.float32])
        c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                        [dtypes.float32])
        d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
                        [dtypes.float32])
        e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
                        [dtypes.float32])
        with g.control_dependencies([a_3]):
          b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                          [dtypes.float32])
          c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                          [dtypes.float32])
          d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
                          [dtypes.float32])
          e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
                          [dtypes.float32])
          with g.control_dependencies([a_4]):
            b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
                            [dtypes.float32])
            c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
                            [dtypes.float32])
            d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
                            [dtypes.float32])
            e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
                            [dtypes.float32])

    # Deps already implied by a data input (or by a dominating op) are pruned.
    self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
    self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
    self.assertItemsEqual([], c_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
    self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
    self.assertItemsEqual([], d_1.op.control_inputs)
    self.assertItemsEqual([], d_2.op.control_inputs)
    self.assertItemsEqual([], d_3.op.control_inputs)
    self.assertItemsEqual([], d_4.op.control_inputs)
    self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
    self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
    self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
    self.assertItemsEqual([a_4.op], e_4.op.control_inputs)

  def testRepeatedDependency(self):
    """Two outputs of the same op collapse to one control input on it."""
    g = ops.Graph()
    a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
    a_0, a_1 = a.outputs
    with g.control_dependencies([a_0]):
      b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with g.control_dependencies([a_1]):
        c = _apply_op(g, "FloatOutput", [], [dtypes.float32])

    self.assertEqual(b.op.control_inputs, [a])
    self.assertEqual(c.op.control_inputs, [a])

  def testNoControlDependencyWithDataDependency(self):
    """A dep that is already a data input is not added as a control input."""
    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    with g.control_dependencies([a]):
      b = _apply_op(g, "Identity", [a], [dtypes.float32])

    self.assertEqual(b.op.control_inputs, [])
@test_util.with_c_api
class OpScopeTest(test_util.TensorFlowTestCase):
  """Tests for ops.name_scope(name, default_name, values)."""

  @test_util.run_in_graph_and_eager_modes()
  def testNames(self):
    """Nesting, reset via None/"" , trailing-slash reuse, and "/" in names."""
    with ops.name_scope("foo") as foo:
      self.assertEqual("foo/", foo)
      with ops.name_scope("foo2") as foo2:
        self.assertEqual("foo/foo2/", foo2)
      with ops.name_scope(None) as empty1:
        self.assertEqual("", empty1)
        with ops.name_scope("foo3") as foo3:
          self.assertEqual("foo3/", foo3)
      with ops.name_scope("") as empty2:
        self.assertEqual("", empty2)
      with ops.name_scope("foo/") as outer_foo:
        # A trailing "/" re-enters the existing scope verbatim.
        self.assertEqual("foo/", outer_foo)
        with ops.name_scope("") as empty3:
          self.assertEqual("", empty3)
        with ops.name_scope("foo4") as foo4:
          self.assertEqual("foo/foo4/", foo4)
        with ops.name_scope("foo5//") as foo5:
          # A name already ending in "/" is used as-is.
          self.assertEqual("foo5//", foo5)
          with ops.name_scope("foo6") as foo6:
            self.assertEqual("foo5//foo6/", foo6)
        with ops.name_scope("/") as foo7:
          self.assertEqual("/", foo7)
        with ops.name_scope("//") as foo8:
          self.assertEqual("//", foo8)
        with ops.name_scope("a//b/c") as foo9:
          self.assertEqual("foo/a//b/c/", foo9)
    with ops.name_scope("a//b/c") as foo10:
      self.assertEqual("a//b/c/", foo10)

  @test_util.run_in_graph_and_eager_modes()
  def testEagerDefaultScopeName(self):
    """With name=None, the default_name argument is used."""
    with ops.name_scope(None, "default") as scope:
      self.assertEqual(scope, "default/")
      with ops.name_scope(None, "default2") as scope2:
        self.assertEqual(scope2, "default/default2/")

  def testNoScopeName(self):
    """Both name and default_name missing raises ValueError."""
    g0 = ops.Graph()
    values = [
        g0.create_op("A", [], [dtypes.float32]),
        g0.create_op("B", [], [dtypes.float32])
    ]
    with self.assertRaises(ValueError):
      with ops.name_scope(None, values=values):
        pass
    with self.assertRaises(ValueError):
      with ops.name_scope(None, None, values):
        pass

  def testEmptyScopeName(self):
    """An explicit "" name yields the root scope, ignoring default_name."""
    g0 = ops.Graph()
    a = g0.create_op("A", [], [dtypes.float32])
    b = g0.create_op("B", [], [dtypes.float32])
    with ops.name_scope("", values=[a, b]) as scope:
      self.assertEqual("", scope)
      # `values` installs the ops' graph as the default graph.
      self.assertEqual(g0, ops.get_default_graph())
    with ops.name_scope("", "my_default_scope", [a, b]) as scope:
      self.assertEqual("", scope)
      self.assertEqual(g0, ops.get_default_graph())

  def testDefaultScopeName(self):
    """default_name is only used when name is None."""
    g0 = ops.Graph()
    a = g0.create_op("A", [], [dtypes.float32])
    b = g0.create_op("B", [], [dtypes.float32])
    scope_name = "my_scope"
    default_scope_name = "my_default_scope"
    with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
      self.assertEqual("%s/" % scope_name, scope)
      self.assertEqual(g0, ops.get_default_graph())
    with ops.name_scope(None, default_scope_name, [a, b]) as scope:
      self.assertEqual("%s/" % default_scope_name, scope)
      self.assertEqual(g0, ops.get_default_graph())

  def _testGraphElements(self, graph_elements):
    """Helper: `values` sets the default graph; mixed graphs raise."""
    scope_name = "my_scope"
    with ops.name_scope(scope_name, values=graph_elements) as scope:
      self.assertEqual("%s/" % scope_name, scope)
      self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
    g1 = ops.Graph()
    a = g1.create_op("A", [], [dtypes.float32])
    with self.assertRaises(ValueError):
      with ops.name_scope(scope_name, values=graph_elements + [a]):
        pass

  def testTensor(self):
    g0 = ops.Graph()
    a = g0.create_op("A", [], [dtypes.float32])
    b = g0.create_op("B", [], [dtypes.float32])
    self._testGraphElements([a, b])

  def testSparseTensor(self):
    g0 = ops.Graph()
    a = g0.create_op("A", [], [dtypes.float32])
    b = g0.create_op("B", [], [dtypes.float32])
    sparse = sparse_tensor.SparseTensor(
        _apply_op(g0, "Int64Output", [], [dtypes.int64]),
        _apply_op(g0, "FloatOutput", [], [dtypes.float32]),
        _apply_op(g0, "Int64Output", [], [dtypes.int64]))
    self._testGraphElements([a, sparse, b])

  def testVariable(self):
    g0 = ops.Graph()
    with g0.as_default():
      variable = variables.Variable([1.0])
    a = g0.create_op("A", [], [dtypes.float32])
    b = g0.create_op("B", [], [dtypes.float32])
    self._testGraphElements([a, variable, b])
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
  """ops.init_scope() clears control deps like control_dependencies(None),
  restoring them when the scope exits."""
  g = ops.Graph()
  a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
  a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
  a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
  a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

  with g.as_default():
    with g.control_dependencies([a_1]):
      with g.control_dependencies([a_2]):
        with ops.init_scope():
          with g.control_dependencies([a_3]):
            with g.control_dependencies([a_4]):
              # deps [a_3, a_4]
              b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
            # deps = [a_3]
            b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
          # deps back to None
          b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
        # deps back to [a_1, a_2]
        b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      # deps back to [a_1]
      b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
      with ops.init_scope():
        # deps are None again
        b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])

  self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
  self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
  self.assertItemsEqual([], b_none.op.control_inputs)
  self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
  self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
  self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
  """Ops created under init_scope() inside function-building graphs are
  lifted into the outermost non-function graph (g0)."""
  g0 = ops.Graph()
  g1 = ops.Graph()
  g1._building_function = True  # pylint: disable=protected-access
  g2 = ops.Graph()
  g2._building_function = True  # pylint: disable=protected-access

  with g0.as_default():
    with g1.as_default():
      with g2.as_default():
        with ops.init_scope():
          _ = constant_op.constant(1.0)

  # The constant was lifted past both function-building graphs into g0.
  self.assertEqual(len(g2.get_operations()), 0)
  self.assertEqual(len(g1.get_operations()), 0)
  self.assertEqual(len(g0.get_operations()), 1)
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertEqual(len(g3.get_operations()), 1)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.eager_mode())
self.assertTrue(context.eager_mode())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
v = resource_variable_ops.ResourceVariable(3)
return v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.test_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegexp(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
v = resource_variable_ops.ResourceVariable(1)
return v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
v0 = resource_variable_ops.ResourceVariable(0)
return v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with fn_graph.as_default():
self.assertEqual(len(ops._default_graph_stack.stack), 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertEqual(len(ops._default_graph_stack.stack), 1)
# Note that the global graph is _not_ on the graph stack.
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner"), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner"), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
@test_util.with_c_api
class GraphTest(test_util.TensorFlowTestCase):
  """Core `ops.Graph` behavior: default-graph stack management,
  feed/fetch control, graph-element conversion, and garbage collection."""

  def setUp(self):
    ops.reset_default_graph()

  def _AssertDefault(self, expected):
    # Helper: assert that `expected` is currently the default graph.
    self.assertIs(expected, ops.get_default_graph())

  def testResetDefaultGraphNesting(self):
    # Resetting the default graph while a graph context is active is illegal.
    g0 = ops.Graph()
    with self.assertRaises(AssertionError):
      with g0.as_default():
        ops.reset_default_graph()

  def testGraphContextManager(self):
    # as_default() yields the graph itself.
    g0 = ops.Graph()
    with g0.as_default() as g1:
      self.assertIs(g0, g1)

  def testDefaultGraph(self):
    # The default graph changes only while a context manager is entered,
    # nests correctly, and is restored on exit.
    orig = ops.get_default_graph()
    self._AssertDefault(orig)
    g0 = ops.Graph()
    self._AssertDefault(orig)
    context_manager_0 = g0.as_default()
    self._AssertDefault(orig)
    with context_manager_0 as g0:
      self._AssertDefault(g0)
      with ops.Graph().as_default() as g1:
        self._AssertDefault(g1)
      self._AssertDefault(g0)
    self._AssertDefault(orig)

  def testPreventFeeding(self):
    g = ops.Graph()
    # NOTE(review): `a` is created in the current default graph, not in `g`;
    # prevent_feeding/is_feedable still accept it here — confirm this is the
    # intended usage rather than an oversight.
    a = constant_op.constant(2.0)
    self.assertTrue(g.is_feedable(a))
    g.prevent_feeding(a)
    self.assertFalse(g.is_feedable(a))

  def testPreventFetching(self):
    g = ops.Graph()
    # NOTE(review): same graph-mismatch observation as testPreventFeeding.
    a = constant_op.constant(2.0)
    self.assertTrue(g.is_fetchable(a))
    g.prevent_fetching(a.op)
    self.assertFalse(g.is_fetchable(a))

  def testAsGraphElementConversions(self):
    # Objects exposing _as_graph_element() are resolvable; others raise.

    class ConvertibleObj(object):

      def _as_graph_element(self):
        return "FloatOutput:0"

    class NonConvertibleObj(object):
      pass

    g = ops.Graph()
    a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
    self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
    with self.assertRaises(TypeError):
      g.as_graph_element(NonConvertibleObj())

  # Regression test against creating custom __del__ functions in classes
  # involved in cyclic references, e.g. Graph and Operation. (Python won't gc
  # cycles that require calling a __del__ method, because the __del__ method can
  # theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have be to restored.)
  def testGarbageCollected(self):
    # Create a graph we can delete and a weak reference to monitor if it's gc'd
    g = ops.Graph()
    g_ref = weakref.ref(g)
    # Create some ops
    with g.as_default():
      a = constant_op.constant(2.0)
      b = constant_op.constant(3.0)
      c = math_ops.add(a, b)
    # Create a session we can delete
    with session.Session(graph=g) as sess:
      sess.run(c)
    # Delete all references and trigger gc
    del g
    del a
    del b
    del c
    del sess
    gc.collect()
    self.assertIsNone(g_ref())

  def testRunnableAfterInvalidShape(self):
    # A failed op construction must not corrupt the graph for later ops.
    with ops.Graph().as_default():
      with self.assertRaises(ValueError):
        math_ops.add([1, 2], [1, 2, 3])
      a = constant_op.constant(1)
      with session.Session() as sess:
        sess.run(a)

  def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
    # Same as above, but the failure happens inside a kernel-label-map scope.
    g = ops.Graph()
    with g.as_default():
      with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
        with self.assertRaises(ValueError):
          test_ops.kernel_label_required(1)
      a = constant_op.constant(1)
      with session.Session() as sess:
        sess.run(a)
@test_util.with_c_api
class AttrScopeTest(test_util.TensorFlowTestCase):
  """Tests for Graph._attr_scope: scoped default node attributes that nest,
  override, and are restored on scope exit."""

  def _get_test_attrs(self):
    # Create a no_op and report its ("_A", "_B") attr values, or None for
    # each attr that is absent.
    x = control_flow_ops.no_op()
    try:
      a = compat.as_text(x.get_attr("_A"))
    except ValueError:
      a = None
    try:
      b = compat.as_text(x.get_attr("_B"))
    except ValueError:
      b = None
    return (a, b)

  def testNoLabel(self):
    with self.test_session():
      self.assertAllEqual((None, None), self._get_test_attrs())

  def testLabelMap(self):
    # Nested scopes: inner maps override outer ones; a value of None removes
    # the attribute; exiting a scope restores the previous mapping.
    with self.test_session() as sess:
      a1 = self._get_test_attrs()
      with sess.graph._attr_scope({
          "_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
      }):
        a2 = self._get_test_attrs()
        with sess.graph._attr_scope({
            "_A": None,
            "_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
        }):
          a3 = self._get_test_attrs()
          with sess.graph._attr_scope({
              "_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
          }):
            a4 = self._get_test_attrs()
          a5 = self._get_test_attrs()
        a6 = self._get_test_attrs()
      a7 = self._get_test_attrs()

      self.assertAllEqual((None, None), a1)
      self.assertAllEqual(("foo", None), a2)
      self.assertAllEqual((None, "bar"), a3)
      self.assertAllEqual(("baz", "bar"), a4)
      self.assertAllEqual((None, "bar"), a5)
      self.assertAllEqual(("foo", None), a6)
      self.assertAllEqual((None, None), a7)
# Register a scalar output shape for the test-only "KernelLabel" op.
ops.RegisterShape("KernelLabel")(common_shapes.scalar_shape)
@test_util.with_c_api
class KernelLabelTest(test_util.TensorFlowTestCase):
  """Tests for Graph._kernel_label_map: scoped kernel-overload selection
  that nests and is restored on scope exit."""

  @test_util.enable_c_api
  def testNoLabel(self):
    with self.test_session():
      self.assertAllEqual(b"My label is: default",
                          test_ops.kernel_label().eval())

  def testLabelMap(self):
    # Each scope selects a kernel overload; "" maps back to the default.
    with self.test_session() as sess:
      default_1 = test_ops.kernel_label()
      # pylint: disable=protected-access
      with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
        overload_1_1 = test_ops.kernel_label()
        with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
          overload_2 = test_ops.kernel_label()
          with sess.graph._kernel_label_map({"KernelLabel": ""}):
            default_2 = test_ops.kernel_label()
        overload_1_2 = test_ops.kernel_label()
      # pylint: enable=protected-access
      default_3 = test_ops.kernel_label()

      self.assertAllEqual(b"My label is: default", default_1.eval())
      self.assertAllEqual(b"My label is: default", default_2.eval())
      self.assertAllEqual(b"My label is: default", default_3.eval())
      self.assertAllEqual(b"My label is: overload_1", overload_1_1.eval())
      self.assertAllEqual(b"My label is: overload_1", overload_1_2.eval())
      self.assertAllEqual(b"My label is: overload_2", overload_2.eval())
@test_util.with_c_api
class AsGraphDefTest(test_util.TensorFlowTestCase):
  """Tests for Graph.as_graph_def serialization."""

  def testGraphDefVersion(self):
    """Test that the graphdef version is plumbed through to kernels."""
    with ops.Graph().as_default() as g:
      version = g.graph_def_versions.producer
      with self.test_session(graph=g):
        v = test_ops.graph_def_version().eval()
      self.assertEqual(version, v)

  def testAddShapes(self):
    # as_graph_def(add_shapes=True) attaches an "_output_shapes" attr;
    # unknown rank serializes as unknown_rank, unknown dims as size -1.
    with ops.Graph().as_default() as g:
      t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
                                     [dtypes.float32] * 5)
      t1.set_shape(None)
      t2.set_shape([])
      t3.set_shape([None])
      t4.set_shape([43, 37])
      t5.set_shape([43, None])

      b = constant_op.constant(1.0)  # pylint: disable=unused-variable

      gd = g.as_graph_def(add_shapes=True)
      self.assertProtoEqualsVersion("""
      node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
        attr {
          key: "_output_shapes"
          value {
            list {
              shape { unknown_rank: true }
              shape { }
              shape { dim { size: -1 } }
              shape { dim { size: 43 } dim { size: 37 } }
              shape { dim { size: 43 } dim { size: -1 } }
            }
          }
        }
      }
      node { name: "Const" op: "Const"
        attr {
          key: "_output_shapes"
          value {
            list {
              shape { }
            }
          }
        }
        attr {
          key: "dtype"
          value { type: DT_FLOAT }
        }
        attr {
          key: "value"
          value {
            tensor {
              dtype: DT_FLOAT
              tensor_shape { }
              float_val: 1.0 } } } }
      """, gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
  """Statistics callback for test op type "a".

  Reports a fixed cost of 20 flops for every "a" node, independent of the
  graph or the node's contents.
  """
  fixed_flop_cost = 20
  return ops.OpStats("flops", fixed_flop_cost)
@test_util.with_c_api
class StatisticsTest(test_util.TensorFlowTestCase):
  """Tests for the op-statistics registry (ops.RegisterStatistics /
  ops.get_stats_for_node_def / ops.OpStats accumulation)."""

  def testRegisteredNode(self):
    # Op type "a" has a "flops" statistic registered at module level.
    graph = ops.Graph()
    node = ops._NodeDef("a", "an_a")
    flops = ops.get_stats_for_node_def(graph, node, "flops")
    self.assertEqual(20, flops.value)
    # An unregistered statistic name yields an OpStats with value None.
    missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
    self.assertEqual(None, missing_stat.value)

  def testUnregisteredNode(self):
    # An op type with no registered statistics also yields value None.
    graph = ops.Graph()
    node = ops._NodeDef("b", "a_b")
    weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
    self.assertEqual(None, weight_params.value)

  def testAccumulateStatistics(self):
    # Adding into an empty (None-valued) OpStats adopts the added value.
    flops_total = ops.OpStats("flops")
    self.assertEqual(None, flops_total.value)
    second_flops = ops.OpStats("flops", 3)
    flops_total += second_flops
    self.assertEqual(3, flops_total.value)
@test_util.with_c_api
class ColocationGroupTest(test_util.TensorFlowTestCase):
  """Tests for ops.colocate_with: colocation groups, nesting,
  ignore_existing resets, and interaction with device scopes."""

  def testBasic(self):
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      b = constant_op.constant(3.0)
    c = constant_op.constant(4.0)
    self.assertEqual([b"loc:@a"], a.op.colocation_groups())
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    # c was created outside the scope, so it carries no "_class" attr.
    with self.assertRaises(ValueError):
      c.op.get_attr("_class")

  def testColocationDeviceInteraction(self):
    with ops.device("/cpu:0"):
      with ops.device("/device:GPU:0"):
        a = constant_op.constant([2.0], name="a")
      with ops.colocate_with(a.op):
        # 'b' is created in the scope of /cpu:0, but it is
        # colocated with 'a', which is on '/device:GPU:0'. colocate_with
        # overrides devices because it is a stronger constraint.
        b = constant_op.constant(3.0)
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual(a.op.device, b.op.device)

  def testColocationCanonicalization(self):
    with ops.device("/device:GPU:0"):
      _ = constant_op.constant(2.0)
    with ops.device(lambda op: "/device:GPU:0"):
      b = constant_op.constant(3.0)
    with ops.get_default_graph().colocate_with(b):
      with ops.device("/device:GPU:0"):
        c = constant_op.constant(4.0)

    # A's device will be /device:GPU:0
    # B's device will be /device:GPU:0
    # C's device will be /device:GPU:0 because it
    # inherits B's device name, after canonicalizing the names.
    self.assertEqual(b.op.device, c.op.device)

  def testLocationOverrides(self):
    with ops.device("/cpu:0"):
      with ops.device("/device:GPU:0"):
        a = constant_op.constant([2.0], name="a")
        # Note that this colocation is "redundant", since we are
        # within the scope of "/device:GPU:0". However, we would like to
        # preserve in the GraphDef that these two ops should be
        # colocated in a portable way.
        with ops.colocate_with(a.op):
          b = constant_op.constant(3.0)
        c = constant_op.constant(4.0)
      d = constant_op.constant(5.0)

    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual("/device:GPU:0", a.op.device)
    self.assertEqual(a.op.device, b.op.device)

    # Test that device function stack is restored.
    self.assertEqual("/device:GPU:0", c.op.device)
    self.assertEqual("/device:CPU:0", d.op.device)

  def testNestedColocateWith(self):
    # Colocating with an already-colocated op joins the root group (@a).
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      b = constant_op.constant(3.0)
      with ops.colocate_with(b.op):
        c = constant_op.constant(4.0)
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual([b"loc:@a"], c.op.colocation_groups())

  def testMultiColocationGroups(self):
    # Nested scopes over distinct ops accumulate multiple groups.
    a = constant_op.constant([2.0], name="a")
    b = constant_op.constant(3.0, name="b")
    with ops.colocate_with(a.op):
      with ops.colocate_with(b.op):
        c = constant_op.constant(4.0)
    self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))

  def testColocationIgnoreStack(self):
    # ignore_existing=True discards the outer colocation stack.
    a = constant_op.constant([2.0], name="a")
    b = constant_op.constant(3.0, name="b")
    with ops.colocate_with(a.op):
      with ops.colocate_with(b.op, ignore_existing=True):
        c = constant_op.constant(4.0)
    self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))

  def testColocateWithReset(self):
    # colocate_with(None, ignore_existing=True) clears all colocation,
    # so c becomes the root of its own group.
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      b = constant_op.constant(3.0, name="b")
      with ops.colocate_with(None, ignore_existing=True):
        c = constant_op.constant(4.0, name="c")
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())
    self.assertEqual([b"loc:@c"], c.op.colocation_groups())

  def testColocateWithInitialNoneThenNested(self):
    # After a reset, nested colocation starts a fresh group rooted at b.
    a = constant_op.constant([2.0], name="a")
    with ops.colocate_with(a.op):
      with ops.colocate_with(None, ignore_existing=True):
        b = constant_op.constant(3.0, name="b")
        with ops.colocate_with(b.op):
          c = constant_op.constant(4.0, name="c")
    self.assertEqual([b"loc:@b"], b.op.colocation_groups())
    self.assertEqual([b"loc:@b"], c.op.colocation_groups())

  def testColocateVariables(self):
    a = variables.Variable([2.0], name="a")
    with ops.colocate_with(a.op):
      b = variables.Variable([3.0], name="b")
    self.assertEqual([b"loc:@a"], b.op.colocation_groups())

  def testInconsistentDeviceWithinColocate(self):
    with ops.device("/device:GPU:0"):
      a = constant_op.constant([2.0], name="a")
      with ops.colocate_with(a.op):
        # This is allowed due to legacy but clearly wrong, since we
        # should really be colocating with 'a'. We allow devices to
        # override colocate_with, but we log warnings to suggest that
        # this is probably unintentional or misguided.
        with ops.device("/cpu:0"):
          b = constant_op.constant([3.0], name="b")

    self.assertEqual("/device:CPU:0", b.device)
@test_util.with_c_api
class DeprecatedTest(test_util.TensorFlowTestCase):
  """Tests for GraphDef-version-based op deprecation (the "Old" test op is
  removed at GraphDef version 8)."""

  def testSuccess(self):
    # Building and running "Old" succeeds when the graph claims producer
    # version 7 (before the op's removal).
    # TODO(skyewm): make g.graph_def_versions work with the C API enabled
    if ops._USE_C_API: return

    with ops.Graph().as_default() as g:
      g.graph_def_versions.producer = 7
      old = test_ops.old()
      with self.test_session(graph=g):
        old.run()

  def _error(self):
    # Regex matching the deprecation error for the current GraphDef version.
    return ((r"Op Old is not available in GraphDef version %d\. "
             r"It has been removed in version 8\. For reasons\.") %
            versions.GRAPH_DEF_VERSION)

  def testGraphConstructionFail(self):
    # At the current (>= 8) version, even constructing the op fails.
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(NotImplementedError, self._error()):
        test_ops.old()

  def testGraphExecutionFail(self):
    # Constructed at version 7, but executed after bumping the producer
    # version: the failure surfaces at run time instead.
    # TODO(skyewm): make g.graph_def_versions work with the C API enabled
    if ops._USE_C_API: return

    with ops.Graph().as_default() as g:
      g.graph_def_versions.producer = 7
      old = test_ops.old()
      g.graph_def_versions.producer = versions.GRAPH_DEF_VERSION
      with self.test_session(graph=g):
        with self.assertRaisesRegexp(errors.UnimplementedError, self._error()):
          old.run()
@test_util.with_c_api
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
  """Tests for ops.is_dense_tensor_like / ops.register_dense_tensor_like_type:
  registration requires a `name` property and a `dtype` property."""

  def testSuccess(self):
    # Plain Tensors and Variables both qualify as dense-tensor-like.
    op = ops.Operation(
        ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
    t = op.outputs[0]
    self.assertTrue(ops.is_dense_tensor_like(t))

    v = variables.Variable([17])
    self.assertTrue(ops.is_dense_tensor_like(v))

  # Deliberately malformed candidate classes: each is missing `name` or
  # `dtype`, or defines it as a method instead of a property.
  class BadClassNoName(object):
    pass

  class BadClassBadName(object):

    def name(self):
      pass

  class BadClassNoDtype(object):

    @property
    def name(self):
      pass

  class BadClassBadDtype(object):

    @property
    def name(self):
      pass

    def dtype(self):
      pass

  def testBadClass(self):
    # Registration must reject each malformed class with a TypeError naming
    # the offending attribute.
    with self.assertRaisesRegexp(TypeError, "`name`"):
      ops.register_dense_tensor_like_type(
          DenseTensorLikeTypeTest.BadClassNoName)
    with self.assertRaisesRegexp(TypeError, "`name`"):
      ops.register_dense_tensor_like_type(
          DenseTensorLikeTypeTest.BadClassBadName)
    with self.assertRaisesRegexp(TypeError, "`dtype`"):
      ops.register_dense_tensor_like_type(
          DenseTensorLikeTypeTest.BadClassNoDtype)
    with self.assertRaisesRegexp(TypeError, "`dtype`"):
      ops.register_dense_tensor_like_type(
          DenseTensorLikeTypeTest.BadClassBadDtype)
@test_util.with_c_api
class NameScopeTest(test_util.TensorFlowTestCase):
  """Tests for name-scope string utilities (strip/prepend_name_scope) and
  Graph.get_name_scope."""

  def testStripAndPrependScope(self):
    # Inputs exercise control-input (^) and colocation (loc:@) prefixes as
    # well as non-matching and non-prefix cases.
    strs = [
        "hidden1/hidden1/weights",  # Same prefix. Should strip.
        "hidden1///hidden1/weights",  # Extra "/". Should strip.
        "^hidden1/hidden1/weights",  # Same prefix. Should strip.
        "loc:@hidden1/hidden1/weights",  # Same prefix. Should strip.
        "hhidden1/hidden1/weights",  # Different prefix. Should keep.
        "hidden1"
    ]  # Not a prefix. Should keep.
    expected_striped = [
        "hidden1/weights", "hidden1/weights", "^hidden1/weights",
        "loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
    ]
    expected_prepended = [
        "hidden2/hidden1/weights", "hidden2/hidden1/weights",
        "^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
        "hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
    ]
    name_scope_to_strip = "hidden1"
    name_scope_to_add = "hidden2"
    for es, ep, s in zip(expected_striped, expected_prepended, strs):
      striped = ops.strip_name_scope(s, name_scope_to_strip)
      self.assertEqual(es, striped)
      self.assertEqual(ep, ops.prepend_name_scope(striped, name_scope_to_add))

  def testGetNameScope(self):
    # get_name_scope reflects the current nesting level at each point.
    with ops.Graph().as_default() as g:
      with ops.name_scope("scope1"):
        with ops.name_scope("scope2"):
          with ops.name_scope("scope3"):
            self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
          self.assertEqual("scope1/scope2", g.get_name_scope())
        self.assertEqual("scope1", g.get_name_scope())
      self.assertEqual("", g.get_name_scope())

  def testTwoGraphs(self):
    # Invalid scope names are rejected regardless of graph nesting.

    def f():
      g1 = ops.Graph()
      g2 = ops.Graph()
      with g1.as_default():
        with g2.as_default():
          with ops.name_scope("_"):
            pass

    self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
@test_util.with_c_api
class TracebackTest(test_util.TensorFlowTestCase):
  """Tests for Operation.traceback_with_start_lines."""

  def testTracebackWithStartLines(self):
    # traceback_with_start_lines must mirror traceback frame-for-frame,
    # with one extra trailing element (the start line) per frame.
    with self.test_session() as sess:
      a = constant_op.constant(2.0)
      sess.run(
          a,
          options=config_pb2.RunOptions(
              trace_level=config_pb2.RunOptions.FULL_TRACE))
      self.assertTrue(sess.graph.get_operations())

      # Tests that traceback_with_start_lines is the same as traceback
      # but includes one more element at the end.
      for op in sess.graph.get_operations():
        # Use assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(len(op.traceback), len(op.traceback_with_start_lines))
        for frame, frame_with_start_line in zip(
            op.traceback, op.traceback_with_start_lines):
          self.assertEqual(5, len(frame_with_start_line))
          self.assertEqual(frame, frame_with_start_line[:-1])
@test_util.with_c_api
class OutputTypesTest(test_util.TensorFlowTestCase):
  """Tests Operation._output_types property.

  This test should not exist as _output_types is a private property.
  This property is used by util.copy_elements and its tests would normally
  cover Operation._output_types. However, we can't yet run these tests in C
  API mode because their use _set_device method. This test will be deleted
  once we port _set_device and run the copy tests with C API on.
  """
  # TODO(iga): Remove this test

  def setUp(self):
    # Force the C API on for this test class; restored in tearDown.
    self.prev_use_c_api = ops._USE_C_API  # pylint: disable=protected-access
    ops._USE_C_API = True  # pylint: disable=protected-access

  def tearDown(self):
    ops._USE_C_API = self.prev_use_c_api  # pylint: disable=protected-access

  def testOneOutput(self):
    g = ops.Graph()
    with g.as_default():
      # Using a constant because creating unregistered ops
      # doesn't work with the C API.
      op = constant_op.constant(12, dtype=dtypes.uint16).op
      # pylint: disable=protected-access
      self.assertEqual([types_pb2.DT_UINT16], op._output_types)
      # pylint: enable=protected-access

  def testTwoDifferentOutputs(self):
    # Unique returns (values, indices) with two distinct output dtypes.
    g = ops.Graph()
    with g.as_default():
      x = constant_op.constant([1, 1, 2, 4, 4, 4, 7, 8, 8],
                               dtype=dtypes.double)
      y, _ = gen_array_ops.unique(x)
      self.assertEqual([types_pb2.DT_DOUBLE, types_pb2.DT_INT32],
                       y.op._output_types)  # pylint: disable=protected-access

  def testThreeOutputs(self):
    g = ops.Graph()
    with g.as_default():
      # Using a split operation because creating unregistered ops
      # doesn't work with the C API.
      a = constant_op.constant("abc", dtype=dtypes.string, shape=[5, 30])
      split0, _, _ = array_ops.split(a, [4, 15, 11], 1)
      # pylint: disable=protected-access
      self.assertEqual([types_pb2.DT_STRING] * 3, split0.op._output_types)
      # pylint: enable=protected-access
@test_util.with_c_api
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
  """Argument validation for ops.enable_eager_execution."""

  def testBadArgumentsToEnableEagerExecution(self):
    # A non-ConfigProto config is rejected with a TypeError.
    with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
      ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
    # A ConfigProto passed where a device policy is expected raises.
    with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
      c = config_pb2.ConfigProto()
      ops.enable_eager_execution(c, c)
    # Likewise for an invalid execution_mode value.
    with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
      c = config_pb2.ConfigProto()
      ops.enable_eager_execution(c, execution_mode=c)
# Run the test suite via TensorFlow's googletest wrapper when invoked directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
Xhanim/libgdx | extensions/gdx-freetype/jni/freetype-2.6.2/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys
import string  # NOTE: retained for backward compatibility; no longer used.


def asc_to_mpw(asc_line):
    """Translate one line from escaped-ASCII form to MPW form.

    Escape sequences such as ``\\xA5`` are replaced by the corresponding
    8-bit character. The real newline is converted to CR *before* the
    escaped ``\\n`` is expanded, so the freshly produced LF is not
    re-converted.
    """
    mpw_line = asc_line.replace("\\xA5", "\245")
    mpw_line = mpw_line.replace("\\xB6", "\266")
    mpw_line = mpw_line.replace("\\xC4", "\304")
    mpw_line = mpw_line.replace("\\xC5", "\305")
    mpw_line = mpw_line.replace("\\xFF", "\377")
    mpw_line = mpw_line.replace("\n", "\r")
    mpw_line = mpw_line.replace("\\n", "\n")
    return mpw_line


def mpw_to_asc(mpw_line):
    """Inverse of asc_to_mpw: escape 8-bit MPW characters back to ASCII.

    The LF is escaped before CR is turned back into a line terminator,
    mirroring the ordering in asc_to_mpw.
    """
    asc_line = mpw_line.replace("\n", "\\n")
    asc_line = asc_line.replace("\r", "\n")
    asc_line = asc_line.replace("\245", "\\xA5")
    asc_line = asc_line.replace("\266", "\\xB6")
    asc_line = asc_line.replace("\304", "\\xC4")
    asc_line = asc_line.replace("\305", "\\xC5")
    asc_line = asc_line.replace("\377", "\\xFF")
    return asc_line


if __name__ == "__main__":
    # Filter stdin to stdout. Default direction is ASCII -> MPW;
    # passing "-r" selects the reverse (MPW -> ASCII) translation.
    if len(sys.argv) == 1:
        for line in sys.stdin.readlines():
            sys.stdout.write(asc_to_mpw(line))
    elif sys.argv[1] == "-r":
        for line in sys.stdin.readlines():
            sys.stdout.write(mpw_to_asc(line))
| apache-2.0 |
mikesligo/pieproxy | ref/old-interesting.py | 1 | 3541 | #!/usr/bin/python
import socket # Import socket module
from struct import *
class Packet:
    '''
    Custom packet class.

    Attributes created on initialisation:

    IP
    ============
    - version
    - ihl
    - ttl
    - protocol
    - s_addr
    - d_addr

    TCP
    ============
    - source port
    - dest_port
    - sequence
    - acknowledgement
    - tcph_length
    - data
    '''

    def __init__(self, packet):
        # recvfrom() returns a (data, address) tuple; keep only the raw bytes.
        packet = packet[0]
        self.full = packet
        # Only logs when the data does not start with a GET; parsing proceeds
        # regardless.
        if "GET" not in packet[0:3]:
            print "EXPECTING GET REQUEST"
            print packet

        #parse ethernet header
        eth_length = 14
        eth_header = packet[:eth_length]
        eth = unpack('!6s6sH' , eth_header)
        self.eth_protocol = socket.ntohs(eth[2])
        self.dest_mac = packet[0:6]
        self.src_mac = packet[6:12]

        # get tcp/ip data
        # NOTE(review): the IP header is read from offset 0 even though an
        # Ethernet header was just parsed from the same bytes; and the data
        # comes from a TCP stream socket (see Server), which carries no
        # link-layer or IP header at all. These offsets look inconsistent —
        # verify against the caller.
        ip_header = packet[0:20]
        iph = unpack('!BBHHHBBH4s4s' , ip_header) # Unpack ip header with formatting
        version_ihl = iph[0]
        self.version = version_ihl >> 4  # IP version: high nibble
        self.ihl = version_ihl & 0xF     # header length in 32-bit words
        iph_length = self.ihl * 4
        self.ttl = iph[5]
        self.protocol = iph[6]
        self.s_addr = socket.inet_ntoa(iph[8]);
        self.d_addr = socket.inet_ntoa(iph[9]);

        tcp_header = packet[iph_length:iph_length+20]
        tcph = unpack('!HHLLBBHHH' , tcp_header) # Unpack tcp header with formatting
        self.source_port = tcph[0]
        self.dest_port = tcph[1]
        self.sequence = tcph[2]
        self.acknowledgement = tcph[3]
        doff_reserved = tcph[4]
        self.tcph_length = doff_reserved >> 4  # data offset in 32-bit words
        # NOTE(review): only tcph_length is measured in 32-bit words, yet the
        # whole sum is multiplied by 4 — confirm this size computation.
        h_size = (eth_length + iph_length + self.tcph_length) * 4
        print "H_SIZE " + str(h_size/4)
        # NOTE(review): this keeps the entire packet, not packet[h_size:].
        self.data = packet[0:]

    def printpacket(self):
        # Dump every parsed header field, then the payload and raw bytes.
        print 'Version : ' + str(self.version) + ' IP Header Length : ' + str(self.ihl) + ' TTL : ' + str(self.ttl) + ' Protocol : ' + str(self.protocol) + ' Source Address : ' + str(self.s_addr) + ' Destination Address : ' + str(self.d_addr)
        print 'Source Port : ' + str(self.source_port) + ' Dest Port : ' + str(self.dest_port) + ' Sequence Number : ' + str(self.sequence) + ' Acknowledgement : ' + str(self.acknowledgement) + ' TCP header length : ' + str(self.tcph_length)
        print 'Data: ' + self.data
        print 'All: ' + self.full
class Server:
    """Minimal proxy listener: accepts one connection at a time, parses the
    received bytes as a Packet, and forwards the data to the address the
    parser extracted."""

    def __init__(self, host, port):
        self.port = port;
        self.host = host;
        self.s = socket.socket()
        # Allow quick restarts of the server on the same address.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # don't bind address
        self.s.bind((host,port))
        self.s.listen(5)

    def receive(self):
        # Accept one client, read up to 8 KiB, parse/print it, close the
        # client connection, then forward the parsed packet.
        conn, addr = self.s.accept() # Establish connection with client.
        packet = conn.recvfrom(8192)
        print packet[0]
        packet = Packet(packet)
        packet.printpacket()
        conn.close()
        self.forward_packet(packet)

    def forward_packet(self, packet):
        # NOTE(review): packet.d_addr is derived by interpreting HTTP request
        # bytes as an IP header (see Packet), so this destination is unlikely
        # to be a usable address — verify before relying on it.
        s = socket.socket()
        s.connect((packet.d_addr,80))
        s.sendall(packet.data)
        print s.recvfrom(8192)

    def send_socket(self,socket):
        # NOTE(review): the parameter name shadows the socket module.
        socket.send('Thank you for connecting')

    def close(self):
        # Close the listening socket.
        self.s.close()
if __name__ == '__main__':
    print "Pie Proxy\n=============================\n"
    host = socket.gethostname() # Get local machine name
    port = 8000 # Reserve port
    server = Server(host,port)
    # Handle exactly two consecutive connections, then exit.
    server.receive()
    server.receive()
| gpl-3.0 |
bblacey/FreeCAD-MacOS-CI | src/Mod/Path/PathScripts/PathDrilling.py | 2 | 22812 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2014 Yorik van Havre <yorik@uncreated.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
from __future__ import print_function
import FreeCAD
import Path
import Part
from PySide import QtCore, QtGui
from PathScripts import PathUtils
from PathScripts.PathUtils import fmt
# FreeCADGui is only importable when FreeCAD runs with a GUI; default to None
# so this module also loads in headless (console) mode.
FreeCADGui = None
if FreeCAD.GuiUp:
    import FreeCADGui

"""Path Drilling object and FreeCAD command"""

# Qt translation handling: older Qt exposes QApplication.UnicodeUTF8 and an
# encoding argument to translate(); when the attribute is missing, fall back
# to the encoding-less signature.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def translate(context, text, disambig=None):
        # Wrapper so call sites need not know which Qt signature is in use.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def translate(context, text, disambig=None):
        return QtGui.QApplication.translate(context, text, disambig)
class ObjectDrilling:
    def __init__(self, obj):
        """Attach the drilling properties and this proxy to the document object.

        obj is the Path::FeaturePython object being configured.
        """
        # Geometry selection and general bookkeeping.
        obj.addProperty("App::PropertyLinkSubList", "Base","Path", QtCore.QT_TRANSLATE_NOOP("App::Property","The base geometry of this toolpath"))
        obj.addProperty("App::PropertyBool", "Active", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","Make False, to prevent operation from generating code"))
        obj.addProperty("App::PropertyString", "Comment", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","An optional comment for this profile"))
        obj.addProperty("App::PropertyString", "UserLabel", "Path", QtCore.QT_TRANSLATE_NOOP("App::Property","User Assigned Label"))
        # Depth/height parameters consumed by execute() when emitting G-code.
        obj.addProperty("App::PropertyLength", "PeckDepth", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Incremental Drill depth before retracting to clear chips"))
        obj.addProperty("App::PropertyLength", "StartDepth", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Starting Depth of Tool- first cut depth in Z"))
        obj.addProperty("App::PropertyDistance", "ClearanceHeight", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","The height needed to clear clamps and obstructions"))
        obj.addProperty("App::PropertyDistance", "FinalDepth", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Final Depth of Tool- lowest value in Z"))
        obj.addProperty("App::PropertyDistance", "SafeHeight", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","Height to clear top of materil"))
        obj.addProperty("App::PropertyDistance", "RetractHeight", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","The height where feed starts and height during retract tool when path is finished"))
        obj.addProperty("App::PropertyFloat", "DwellTime", "Depth", QtCore.QT_TRANSLATE_NOOP("App::Property","The time to dwell between peck cycles"))
        # Tool Properties (read-only; filled in by execute() from the tool controller).
        obj.addProperty("App::PropertyIntegerConstraint", "ToolNumber", "Tool", QtCore.QT_TRANSLATE_NOOP("App::Property","The tool number in use"))
        obj.ToolNumber = (0, 0, 1000, 1)
        obj.setEditorMode('ToolNumber', 1)  # make this read only
        obj.addProperty("App::PropertyString", "ToolDescription", "Tool", QtCore.QT_TRANSLATE_NOOP("App::Property","The description of the tool "))
        obj.setEditorMode('ToolDescription', 1)  # make this read only
        obj.Proxy = self
    def __getstate__(self):
        # Nothing beyond the document properties needs to be serialized.
        return None

    def __setstate__(self, state):
        return None

    def onChanged(self, obj, prop):
        # Keep the document label in sync with the user-assigned label.
        if prop == "UserLabel":
            obj.Label = obj.UserLabel + " :" + obj.ToolDescription
def execute(self, obj):
output = ""
if obj.Comment != "":
output += '(' + str(obj.Comment)+')\n'
toolLoad = PathUtils.getLastToolLoad(obj)
if toolLoad is None or toolLoad.ToolNumber == 0:
self.vertFeed = 100
self.horizFeed = 100
self.vertRapid = 100
self.horizRapid = 100
self.radius = 0.25
obj.ToolNumber = 0
obj.ToolDescription = "UNDEFINED"
else:
self.vertFeed = toolLoad.VertFeed.Value
self.horizFeed = toolLoad.HorizFeed.Value
self.vertRapid = toolLoad.VertRapid.Value
self.horizRapid = toolLoad.HorizRapid.Value
tool = PathUtils.getTool(obj, toolLoad.ToolNumber)
if tool.Diameter == 0:
self.radius = 0.25
else:
self.radius = tool.Diameter/2
obj.ToolNumber = toolLoad.ToolNumber
obj.ToolDescription = toolLoad.Name
if obj.UserLabel == "":
obj.Label = obj.Name + " :" + obj.ToolDescription
else:
obj.Label = obj.UserLabel + " :" + obj.ToolDescription
locations = []
output = "(Begin Drilling)\n"
if obj.Base:
for loc in obj.Base:
#print loc
for sub in loc[1]:
#locations.append(self._findDrillingVector(loc))
if "Face" in sub or "Edge" in sub:
s = getattr(loc[0].Shape, sub)
else:
s = loc[0].Shape
if s.ShapeType in ['Wire', 'Edge']:
X = s.Edges[0].Curve.Center.x
Y = s.Edges[0].Curve.Center.y
Z = s.Edges[0].Curve.Center.z
elif s.ShapeType in ['Vertex']:
X = s.Point.x
Y = s.Point.y
Z = s.Point.z
elif s.ShapeType in ['Face']:
#if abs(s.normalAt(0, 0).z) == 1: # horizontal face
X = s.CenterOfMass.x
Y = s.CenterOfMass.y
Z = s.CenterOfMass.z
locations.append(FreeCAD.Vector(X, Y, Z))
output += "G90 G98\n"
# rapid to clearance height
output += "G0 Z" + str(obj.ClearanceHeight.Value) + "F " + PathUtils.fmt(self.vertRapid) + "\n"
# rapid to first hole location, with spindle still retracted:
p0 = locations[0]
output += "G0 X" + fmt(p0.x) + " Y" + fmt(p0.y) + "F " + PathUtils.fmt(self.horizRapid) + "\n"
# move tool to clearance plane
output += "G0 Z" + fmt(obj.ClearanceHeight.Value) + "F " + PathUtils.fmt(self.vertRapid) + "\n"
pword = ""
qword = ""
if obj.PeckDepth.Value > 0:
cmd = "G83"
qword = " Q" + fmt(obj.PeckDepth.Value)
elif obj.DwellTime > 0:
cmd = "G82"
pword = " P" + fmt(obj.DwellTime)
else:
cmd = "G81"
for p in locations:
output += cmd + \
" X" + fmt(p.x) + \
" Y" + fmt(p.y) + \
" Z" + fmt(obj.FinalDepth.Value) + qword + pword + \
" R" + str(obj.RetractHeight.Value) + \
" F" + str(self.vertFeed) + "\n" \
output += "G80\n"
# path = Path.Path(output)
# obj.Path = path
if obj.Active:
path = Path.Path(output)
obj.Path = path
obj.ViewObject.Visibility = True
else:
path = Path.Path("(inactive operation)")
obj.Path = path
obj.ViewObject.Visibility = False
    def checkdrillable(self, obj, sub):
        """Return True when sub-element *sub* of shape *obj* looks drillable.

        Vertexes are always drillable; for solids/compounds, circular faces,
        closed cylinders (including 2-circle/1-seam cylinders), and circular
        edges qualify.
        """
        drillable = False
        if obj.ShapeType == 'Vertex':
            drillable = True
        elif obj.ShapeType in['Solid', 'Compound']:
            if sub[0:4] == 'Face':
                subobj = obj.getElement(sub)
                if isinstance(subobj.Edges[0].Curve, Part.Circle):
                    drillable = True
                if str(subobj.Surface) == "<Cylinder object>":
                    # A closed cylindrical face is a hole wall.
                    drillable = subobj.isClosed()
                    if len(subobj.Edges) == 3:
                        # Cylinder with a seam: two circles plus one line.
                        cedge = []
                        ledge = []
                        for e in subobj.Edges:
                            if isinstance (e.Curve, Part.Circle):
                                cedge.append(e)
                            elif isinstance (e.Curve, Part.LineSegment):
                                ledge.append(e)
                        if len(cedge) == 2 and len(ledge) == 1:
                            drillable = True
                        else:
                            #if len(subobj.Edges[0].Vertexes) > 1:
                            drillable = False
            if sub[0:4] == 'Edge':
                o = obj.getElement(sub)
                if isinstance(o.Curve, Part.Circle):
                    drillable = True
        return drillable
    def addDrillableLocation(self, obj, ss, sub=""):
        """Add (ss, sub) to obj.Base after validating it is drillable.

        On the first addition, height defaults are guessed from the shape's
        bounding box.  For faces/edges, other sub-elements that look like the
        same kind of hole are offered to the user in one go.  Finally the
        operation is re-executed to regenerate the path.
        """
        baselist = obj.Base
        item = (ss, sub)
        if len(baselist) == 0:  # When adding the first base object, guess at heights
            try:
                bb = ss.Shape.BoundBox  # parent boundbox
                subobj = ss.Shape.getElement(sub)
                fbb = subobj.BoundBox  # feature boundbox
                obj.StartDepth = bb.ZMax
                obj.ClearanceHeight = bb.ZMax + 5.0
                obj.SafeHeight = bb.ZMax + 3.0
                obj.RetractHeight = bb.ZMax + 1.0
                if fbb.ZMax < bb.ZMax:
                    obj.FinalDepth = fbb.ZMax
                else:
                    obj.FinalDepth = bb.ZMin
            except:
                # No usable geometry info; fall back to fixed defaults.
                obj.StartDepth = 5.0
                obj.ClearanceHeight = 10.0
                obj.SafeHeight = 8.0
                obj.RetractHeight = 6.0

        if not self.checkdrillable(ss.Shape,sub):
            FreeCAD.Console.PrintError("Selected element is not a drillable location" + "\n")
            return

        if sub[0:4] == 'Face':
            # Check for other drillable faces and give user the option
            drillableFaces = []
            for i in range(len(ss.Shape.Faces)):
                if self.checkdrillable(ss.Shape, "Face" + str(i+1)):
                    drillableFaces.append("Face" + str(i+1))
            if len(drillableFaces) > 1:
                reply = QtGui.QMessageBox.question(None,"","Multiple drillable faces found. Drill them all?",
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
                if reply == QtGui.QMessageBox.Yes:
                    for i in drillableFaces:
                        if i in baselist:
                            FreeCAD.Console.PrintWarning("Drillable location already in the list" + "\n")
                            continue
                        else:
                            newitem = (ss, i)
                            baselist.append(newitem)
                else:
                    if item in baselist:
                        FreeCAD.Console.PrintWarning("Drillable location already in the list" + "\n")
                    else:
                        baselist.append(item)
            else:
                if item in baselist:
                    FreeCAD.Console.PrintWarning("Drillable location already in the list" + "\n")
                else:
                    baselist.append(item)

        if sub[0:4] == 'Edge':
            # Offer every circular edge with the same radius at the same Z
            # (i.e. holes that look identical to the picked one).
            drillableEdges = []
            o = ss.Shape.getElement(sub)
            for i in range(len(ss.Shape.Edges)):
                candidateedge = ss.Shape.getElement("Edge" + str(i+1))
                if self.checkdrillable(ss.Shape, "Edge" + str(i+1)):
                    if candidateedge.Curve.Radius == o.Curve.Radius and candidateedge.Curve.Center.z == o.Curve.Center.z:
                        drillableEdges.append("Edge" + str(i+1))
            if len(drillableEdges) > 1:
                reply = QtGui.QMessageBox.question(None,"","Multiple drillable edges found. Drill them all?",
                    QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
                if reply == QtGui.QMessageBox.Yes:
                    for i in drillableEdges:
                        if i in baselist:
                            FreeCAD.Console.PrintWarning("Drillable location already in the list" + "\n")
                            continue
                        else:
                            newitem = (ss, i)
                            baselist.append(newitem)
                else:
                    if item in baselist:
                        FreeCAD.Console.PrintWarning("Drillable location already in the list" + "\n")
                    else:
                        baselist.append(item)
            else:
                if item in baselist:
                    FreeCAD.Console.PrintWarning("Drillable location already in the list" + "\n")
                else:
                    baselist.append(item)

        print(baselist)
        obj.Base = baselist
        self.execute(obj)
class _ViewProviderDrill:
    """View provider for Drilling objects: icon plus TaskPanel editing."""

    def __init__(self, obj):
        obj.Proxy = self

    def __getstate__(self):
        # Nothing to persist in the document file.
        return None

    def __setstate__(self, state):
        return None

    def getIcon(self):
        return ":/icons/Path-Drilling.svg"

    def onChanged(self, obj, prop):
        # this is executed when a property of the VIEW PROVIDER changes
        pass

    def updateData(self, obj, prop):
        # this is executed when a property of the APP OBJECT changes
        pass

    def setEdit(self, vobj, mode=0):
        """Open the Drilling task dialog for this object."""
        FreeCADGui.Control.closeDialog()
        taskd = TaskPanel()
        taskd.obj = vobj.Object
        FreeCADGui.Control.showDialog(taskd)
        taskd.setupUi()
        return True

    def unsetEdit(self, vobj, mode):
        # this is executed when the user cancels or terminates edit mode
        pass
class CommandPathDrilling:
    """GUI command that creates a new Drilling operation in the active Job."""

    def GetResources(self):
        return {'Pixmap': 'Path-Drilling',
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_Drilling", "Drilling"),
                'Accel': "P, D",
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_Drilling", "Creates a Path Drilling object")}

    def IsActive(self):
        # Only enabled when the document already contains a Path Job.
        if FreeCAD.ActiveDocument is not None:
            for o in FreeCAD.ActiveDocument.Objects:
                if o.Name[:3] == "Job":
                    return True
        return False

    def Activated(self):
        # if everything is ok, execute and register the transaction in the undo/redo stack
        FreeCAD.ActiveDocument.openTransaction(translate("Path_Drilling", "Create Drilling"))
        FreeCADGui.addModule("PathScripts.PathDrilling")
        FreeCADGui.doCommand('obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython", "Drilling")')
        FreeCADGui.doCommand('PathScripts.PathDrilling.ObjectDrilling(obj)')
        FreeCADGui.doCommand('obj.Active = True')
        FreeCADGui.doCommand('PathScripts.PathDrilling._ViewProviderDrill(obj.ViewObject)')
        # Default heights for a fresh operation; refined once geometry is added.
        ztop = 10.0
        zbottom = 0.0
        FreeCADGui.doCommand('obj.ClearanceHeight = ' + str(ztop))
        FreeCADGui.doCommand('obj.RetractHeight= ' + str(ztop))
        FreeCADGui.doCommand('obj.FinalDepth=' + str(zbottom))
        FreeCADGui.doCommand('PathScripts.PathUtils.addToJob(obj)')
        FreeCAD.ActiveDocument.commitTransaction()
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.doCommand('obj.ViewObject.startEditing()')
class TaskPanel:
    """Task dialog for editing a Drilling operation's depths, heights and
    base geometry list."""

    def __init__(self):
        self.form = FreeCADGui.PySideUic.loadUi(":/panels/DrillingEdit.ui")

    def accept(self):
        """OK button: apply the fields, leave edit mode and recompute."""
        self.getFields()
        FreeCADGui.ActiveDocument.resetEdit()
        FreeCADGui.Control.closeDialog()
        FreeCAD.ActiveDocument.recompute()
        # NOTE(review): self.s is only created in open(); confirm open() always
        # runs before accept()/reject().
        FreeCADGui.Selection.removeObserver(self.s)

    def reject(self):
        """Cancel button: close without applying pending field edits."""
        FreeCADGui.Control.closeDialog()
        FreeCAD.ActiveDocument.recompute()
        FreeCADGui.Selection.removeObserver(self.s)

    def getFields(self):
        """Copy the dialog fields into the Drilling object and re-execute it."""
        if self.obj:
            if hasattr(self.obj, "StartDepth"):
                self.obj.StartDepth = FreeCAD.Units.Quantity(self.form.startDepth.text()).Value
            if hasattr(self.obj, "FinalDepth"):
                self.obj.FinalDepth = FreeCAD.Units.Quantity(self.form.finalDepth.text()).Value
            if hasattr(self.obj, "PeckDepth"):
                self.obj.PeckDepth = FreeCAD.Units.Quantity(self.form.peckDepth.text()).Value
            if hasattr(self.obj, "SafeHeight"):
                self.obj.SafeHeight = FreeCAD.Units.Quantity(self.form.safeHeight.text()).Value
            if hasattr(self.obj, "ClearanceHeight"):
                self.obj.ClearanceHeight = FreeCAD.Units.Quantity(self.form.clearanceHeight.text()).Value
            if hasattr(self.obj, "RetractHeight"):
                self.obj.RetractHeight = FreeCAD.Units.Quantity(self.form.retractHeight.text()).Value
            self.obj.Proxy.execute(self.obj)

    def setFields(self):
        """Load the dialog fields and the base-geometry list from the object."""
        self.form.startDepth.setText(FreeCAD.Units.Quantity(self.obj.StartDepth.Value, FreeCAD.Units.Length).UserString)
        self.form.finalDepth.setText(FreeCAD.Units.Quantity(self.obj.FinalDepth.Value, FreeCAD.Units.Length).UserString)
        self.form.peckDepth.setText(FreeCAD.Units.Quantity(self.obj.PeckDepth.Value, FreeCAD.Units.Length).UserString)
        self.form.safeHeight.setText(FreeCAD.Units.Quantity(self.obj.SafeHeight.Value, FreeCAD.Units.Length).UserString)
        self.form.clearanceHeight.setText(FreeCAD.Units.Quantity(self.obj.ClearanceHeight.Value, FreeCAD.Units.Length).UserString)
        self.form.retractHeight.setText(FreeCAD.Units.Quantity(self.obj.RetractHeight.Value, FreeCAD.Units.Length).UserString)
        self.form.baseList.clear()
        for i in self.obj.Base:
            for sub in i[1]:
                self.form.baseList.addItem(i[0].Name + "." + sub)

    def open(self):
        """Install a selection observer restricting picks to drillables."""
        self.s = SelObserver()
        FreeCADGui.Selection.addObserver(self.s)

    def addBase(self):
        """Add the current 3D-view selection to the operation's base list."""
        # check that the selection contains exactly what we want
        selection = FreeCADGui.Selection.getSelectionEx()
        if not len(selection) >= 1:
            FreeCAD.Console.PrintError(translate("PathProject", "Please select at least one Drillable Location\n"))
            return
        for s in selection:
            if s.HasSubObjects:
                for i in s.SubElementNames:
                    self.obj.Proxy.addDrillableLocation(self.obj, s.Object, i)
            else:
                self.obj.Proxy.addDrillableLocation(self.obj, s.Object)
        self.setFields()  # defaults may have changed. Reload.
        self.form.baseList.clear()
        for i in self.obj.Base:
            for sub in i[1]:
                self.form.baseList.addItem(i[0].Name + "." + sub)

    def deleteBase(self):
        """Remove the selected entries from the base list.

        NOTE(review): entries are matched by object name only, so removing one
        sub-element drops every entry of that object — confirm intended.
        """
        dlist = self.form.baseList.selectedItems()
        for d in dlist:
            newlist = []
            for i in self.obj.Base:
                if not i[0].Name == d.text().partition(".")[0]:
                    newlist.append(i)
            self.obj.Base = newlist
            self.form.baseList.takeItem(self.form.baseList.row(d))

    def itemActivated(self):
        """Mirror the list selection into the 3D-view selection."""
        FreeCADGui.Selection.clearSelection()
        slist = self.form.baseList.selectedItems()
        for i in slist:
            objstring = i.text().partition(".")
            obj = FreeCAD.ActiveDocument.getObject(objstring[0])
            if objstring[2] != "":
                FreeCADGui.Selection.addSelection(obj, objstring[2])
            else:
                FreeCADGui.Selection.addSelection(obj)
        FreeCADGui.updateGui()

    def reorderBase(self):
        """Rebuild obj.Base in the order currently shown in the list widget."""
        newlist = []
        for i in range(self.form.baseList.count()):
            s = self.form.baseList.item(i).text()
            objstring = s.partition(".")
            obj = FreeCAD.ActiveDocument.getObject(objstring[0])
            item = (obj, str(objstring[2]))
            newlist.append(item)
        self.obj.Base = newlist
        self.obj.Proxy.execute(self.obj)
        FreeCAD.ActiveDocument.recompute()

    def getStandardButtons(self):
        return int(QtGui.QDialogButtonBox.Ok)

    def setupUi(self):
        # Connect Signals and Slots
        self.form.startDepth.editingFinished.connect(self.getFields)
        self.form.finalDepth.editingFinished.connect(self.getFields)
        # BUGFIX: peckDepth and retractHeight are read by getFields() but were
        # never connected, so edits in those fields were silently ignored
        # until some other field was edited or OK was pressed.
        self.form.peckDepth.editingFinished.connect(self.getFields)
        self.form.retractHeight.editingFinished.connect(self.getFields)
        self.form.safeHeight.editingFinished.connect(self.getFields)
        self.form.clearanceHeight.editingFinished.connect(self.getFields)
        self.form.addBase.clicked.connect(self.addBase)
        self.form.deleteBase.clicked.connect(self.deleteBase)
        self.form.reorderBase.clicked.connect(self.reorderBase)
        self.form.baseList.itemSelectionChanged.connect(self.itemActivated)
        sel = FreeCADGui.Selection.getSelectionEx()
        if len(sel) != 0 and sel[0].HasSubObjects:
            self.addBase()
        self.setFields()
class SelObserver:
    """Selection observer that switches the GUI into drill-selection mode
    for the lifetime of the task dialog."""

    def __init__(self):
        import PathScripts.PathSelection as PST
        PST.drillselect()

    def __del__(self):
        import PathScripts.PathSelection as PST
        PST.clear()

    def addSelection(self, doc, obj, sub, pnt):
        # Echo the pick into the console-driven selection so macros/undo see it.
        FreeCADGui.doCommand('Gui.Selection.addSelection(FreeCAD.ActiveDocument.' + obj + ')')
        FreeCADGui.updateGui()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Drilling', CommandPathDrilling())
FreeCAD.Console.PrintLog("Loading PathDrilling... done\n")
| lgpl-2.1 |
Eigenlabs/EigenD | pi/toggle.py | 3 | 2384 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
from pi import action, atom, bundles, const, domain
import piw
class Toggle(atom.Atom):
    """A boolean atom exposing set/unset/toggle verbs and a status light."""

    def __init__(self,e,d,**kw):
        # e: optional callable invoked with the new state on every change.
        # d: domain passed to the light-output Splitter.
        self.__enable = e
        atom.Atom.__init__(self,domain=domain.Bool(),protocols='set',policy=atom.default_policy(self.__change),**kw)
        self.add_verb2(1,'set([],~a,role(None,[instance(~self)]))', self.__verb_set)
        self.add_verb2(2,'set([un],~a,role(None,[instance(~self)]))', self.__verb_unset)
        self.add_verb2(3,'set([toggle],~a,role(None,[instance(~self)]))', callback=self.__verb_togset, status_action=self.__status)
        self[1]=bundles.Output(1,False,names='status output')
        self.light_output=bundles.Splitter(d,self[1])
        self.lights=piw.lightsource(piw.change_nb(),0,self.light_output.cookie())
        self.lights.set_size(1)
        # Reflect any persisted value on the light immediately.
        self.__set_status(self.get_value())
        # NOTE(review): __state starts False regardless of get_value() —
        # confirm a persisted True value should not also set __state.
        self.__state = False

    def __status(self,*a):
        return 'dsc(~(s)".1","1")'

    def __set_status(self,active):
        # Drive light 1 to mirror the boolean state.
        self.lights.set_status(1,const.status_active if active else const.status_inactive)

    def __change(self,e):
        # Single funnel for all state changes: light, value, enable callback.
        self.__state = e
        self.__set_status(e)
        self.set_value(e)
        if self.__enable:
            self.__enable(e)
        return False

    def __verb_set(self,*a):
        self.__change(True)
        return action.nosync_return()

    def __verb_unset(self,*a):
        self.__change(False)
        return action.nosync_return()

    def __verb_togset(self,*a):
        self.__change(not self.__state)
        return action.nosync_return()

    def notify(self):
        # External reset: force the toggle back to the off state.
        self.__state = False
        self.__set_status(False)
        self.set_value(False)
| gpl-3.0 |
ptisserand/portage | pym/portage/sync/controller.py | 1 | 9315 | # Copyright 2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import sys
import logging
import grp
import pwd
import portage
from portage import os
from portage.progress import ProgressBar
#from portage.emaint.defaults import DEFAULT_OPTIONS
#from portage.util._argparse import ArgumentParser
from portage.util import writemsg_level
from portage.output import create_color_func
good = create_color_func("GOOD")
bad = create_color_func("BAD")
warn = create_color_func("WARN")
from portage.package.ebuild.doebuild import _check_temp_dir
from portage.metadata import action_metadata
from portage import OrderedDict
from portage import _unicode_decode
from portage import util
class TaskHandler(object):
    """Handles the running of the tasks it is given
    """

    def __init__(self, show_progress_bar=True, verbose=True, callback=None):
        self.show_progress_bar = show_progress_bar
        self.verbose = verbose
        self.callback = callback
        # Only draw a progress bar on a real terminal.
        self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
        self.progress_bar = ProgressBar(self.isatty, title="Portage-Sync", max_desc_length=27)

    def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
        """Runs the module tasks"""
        # Ensure we have a task and function
        assert(tasks)
        assert(func)
        for task in tasks:
            inst = task()
            show_progress = self.show_progress_bar and self.isatty
            # check if the function is capable of progressbar
            # and possibly override it off
            if show_progress and hasattr(inst, 'can_progressbar'):
                show_progress = inst.can_progressbar(func)
            if show_progress:
                self.progress_bar.reset()
                self.progress_bar.set_label(func + " " + inst.name())
                onProgress = self.progress_bar.start()
            else:
                onProgress = None
            kwargs = {
                'onProgress': onProgress,
                # pass in a copy of the options so a module can not pollute or change
                # them for other tasks if there is more to do.
                # NOTE(review): options.copy() raises AttributeError when the
                # default options=None is used — confirm callers always pass a dict.
                'options': options.copy()
            }
            result = getattr(inst, func)(**kwargs)
            if show_progress:
                # make sure the final progress is displayed
                self.progress_bar.display()
                print()
                self.progress_bar.stop()
            if self.callback:
                self.callback(result)
def print_results(results):
    """Print a blank-line-delimited block of result strings, if any."""
    if not results:
        return
    print()
    print("\n".join(results))
    print("\n")
class SyncManager(object):
'''Main sync control module'''
    def __init__(self, settings, logger):
        """Collect sync modules and executable post-sync hook scripts."""
        self.settings = settings
        self.logger = logger
        # Similar to emerge, sync needs a default umask so that created
        # files have sane permissions.
        os.umask(0o22)
        self.module_controller = portage.sync.module_controller
        self.module_names = self.module_controller.module_names
        # Map hook-dir name -> OrderedDict of {script path: relative name},
        # keeping only scripts with the executable bit set.
        self.hooks = {}
        for _dir in ["repo.postsync.d", "postsync.d"]:
            postsync_dir = os.path.join(self.settings["PORTAGE_CONFIGROOT"],
                portage.USER_CONFIG_PATH, _dir)
            hooks = OrderedDict()
            for filepath in util._recursive_file_list(postsync_dir):
                name = filepath.split(postsync_dir)[1].lstrip(os.sep)
                if os.access(filepath, os.X_OK):
                    hooks[filepath] = name
                else:
                    writemsg_level(" %s %s hook: '%s' is not executable\n"
                        % (warn("*"), _dir, _unicode_decode(name),),
                        level=logging.WARN, noiselevel=2)
            self.hooks[_dir] = hooks
def get_module_descriptions(self, mod):
desc = self.module_controller.get_func_descriptions(mod)
if desc:
return desc
return []
    def sync(self, emerge_config=None, repo=None, callback=None):
        """Sync *repo* with its configured sync module.

        Returns a (exitcode, message-or-None) tuple; the message is only set
        for an unknown sync type.
        """
        self.emerge_config = emerge_config
        self.callback = callback or self._sync_callback
        self.repo = repo
        self.exitcode = 1
        # module_names[0] is a placeholder; real modules start at index 1.
        if repo.sync_type in self.module_names[1:]:
            tasks = [self.module_controller.get_class(repo.sync_type)]
        else:
            msg = "\n%s: Sync module '%s' is not an installed/known type'\n" \
                % (bad("ERROR"), repo.sync_type)
            return self.exitcode, msg
        rval = self.pre_sync(repo)
        if rval != os.EX_OK:
            return rval, None
        # need to pass the kwargs dict to the modules
        # so they are available if needed.
        task_opts = {
            'emerge_config': emerge_config,
            'logger': self.logger,
            'portdb': self.trees[self.settings['EROOT']]['porttree'].dbapi,
            'repo': repo,
            'settings': self.settings,
            'spawn_kwargs': self.spawn_kwargs,
            'usersync_uid': self.usersync_uid,
            'xterm_titles': self.xterm_titles,
        }
        func = 'sync'
        status = None
        taskmaster = TaskHandler(callback=self.do_callback)
        taskmaster.run_tasks(tasks, func, status, options=task_opts)
        self.perform_post_sync_hook(repo.name, repo.sync_uri, repo.location)
        return self.exitcode, None
def do_callback(self, result):
#print("result:", result, "callback()", self.callback)
exitcode, updatecache_flg = result
self.exitcode = exitcode
if self.callback:
self.callback(exitcode, updatecache_flg)
return
    def perform_post_sync_hook(self, reponame, dosyncuri='', repolocation=''):
        """Run the repo.postsync.d (per-repo) or postsync.d (global) hooks.

        Returns os.EX_OK, or the exit status of the last failing hook.
        """
        succeeded = os.EX_OK
        if reponame:
            _hooks = self.hooks["repo.postsync.d"]
        else:
            _hooks = self.hooks["postsync.d"]
        for filepath in _hooks:
            # NOTE(review): logged at ERROR level with a positive noiselevel
            # although it reads like verbose/debug output — confirm intended.
            writemsg_level("Spawning post_sync hook: %s\n"
                % (_unicode_decode(_hooks[filepath])),
                level=logging.ERROR, noiselevel=4)
            retval = portage.process.spawn([filepath,
                reponame, dosyncuri, repolocation], env=self.settings.environ())
            if retval != os.EX_OK:
                writemsg_level(" %s Spawn failed for: %s, %s\n" % (bad("*"),
                    _unicode_decode(_hooks[filepath]), filepath),
                    level=logging.ERROR, noiselevel=-1)
                succeeded = retval
        return succeeded
def pre_sync(self, repo):
self.settings, self.trees, self.mtimedb = self.emerge_config
self.xterm_titles = "notitles" not in self.settings.features
msg = ">>> Synchronization of repository '%s' located in '%s'..." \
% (repo.name, repo.location)
self.logger(self.xterm_titles, msg)
writemsg_level(msg + "\n")
try:
st = os.stat(repo.location)
except OSError:
st = None
self.usersync_uid = None
spawn_kwargs = {}
spawn_kwargs["env"] = self.settings.environ()
if repo.sync_user is not None:
def get_sync_user_data(sync_user):
user = None
group = None
home = None
spl = sync_user.split(':', 1)
if spl[0]:
username = spl[0]
try:
try:
pw = pwd.getpwnam(username)
except KeyError:
pw = pwd.getpwuid(int(username))
except (ValueError, KeyError):
writemsg("!!! User '%s' invalid or does not exist\n"
% username, noiselevel=-1)
return (user, group, home)
user = pw.pw_uid
group = pw.pw_gid
home = pw.pw_dir
if len(spl) > 1:
groupname = spl[1]
try:
try:
gp = grp.getgrnam(groupname)
except KeyError:
pw = grp.getgrgid(int(groupname))
except (ValueError, KeyError):
writemsg("!!! Group '%s' invalid or does not exist\n"
% groupname, noiselevel=-1)
return (user, group, home)
group = gp.gr_gid
return (user, group, home)
# user or user:group
(uid, gid, home) = get_sync_user_data(repo.sync_user)
if uid is not None:
spawn_kwargs["uid"] = uid
self.usersync_uid = uid
if gid is not None:
spawn_kwargs["gid"] = gid
spawn_kwargs["groups"] = [gid]
if home is not None:
spawn_kwargs["env"]["HOME"] = home
if st is None:
perms = {'mode': 0o755}
# respect sync-user if set
if 'umask' in spawn_kwargs:
perms['mode'] &= ~spawn_kwargs['umask']
if 'uid' in spawn_kwargs:
perms['uid'] = spawn_kwargs['uid']
if 'gid' in spawn_kwargs:
perms['gid'] = spawn_kwargs['gid']
writemsg_level(">>> '%s' not found, creating it."
% _unicode_decode(repo.location))
portage.util.ensure_dirs(repo.location, **perms)
st = os.stat(repo.location)
if (repo.sync_user is None and
'usersync' in self.settings.features and
portage.data.secpass >= 2 and
(st.st_uid != os.getuid() and st.st_mode & 0o700 or
st.st_gid != os.getgid() and st.st_mode & 0o070)):
try:
homedir = pwd.getpwuid(st.st_uid).pw_dir
except KeyError:
pass
else:
# Drop privileges when syncing, in order to match
# existing uid/gid settings.
self.usersync_uid = st.st_uid
spawn_kwargs["uid"] = st.st_uid
spawn_kwargs["gid"] = st.st_gid
spawn_kwargs["groups"] = [st.st_gid]
spawn_kwargs["env"]["HOME"] = homedir
umask = 0o002
if not st.st_mode & 0o020:
umask = umask | 0o020
spawn_kwargs["umask"] = umask
# override the defaults when sync_umask is set
if repo.sync_umask is not None:
spawn_kwargs["umask"] = int(repo.sync_umask, 8)
self.spawn_kwargs = spawn_kwargs
if self.usersync_uid is not None:
# PORTAGE_TMPDIR is used below, so validate it and
# bail out if necessary.
rval = _check_temp_dir(self.settings)
if rval != os.EX_OK:
return rval
os.umask(0o022)
return os.EX_OK
def _sync_callback(self, exitcode, updatecache_flg):
if updatecache_flg and "metadata-transfer" not in self.settings.features:
updatecache_flg = False
if updatecache_flg and \
os.path.exists(os.path.join(
self.repo.location, 'metadata', 'md5-cache')):
# Only update cache for repo.location since that's
# the only one that's been synced here.
action_metadata(self.settings, self.portdb, self.emerge_config.opts,
porttrees=[self.repo.location])
| gpl-2.0 |
github-account-because-they-want-it/django | django/contrib/gis/geos/mutable_list.py | 238 | 10705 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://static.aryehleib.com/oldsite/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from functools import cmp_to_key, total_ordering

from django.utils import six
from django.utils.six.moves import range
@total_ordering
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
Note that if _get_single_internal and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
"""
_minlength = 0
_maxlength = None
# ### Python initialization and special list interface methods ###
    def __init__(self, *args, **kwargs):
        # Fall back to the public accessor / full-rebuild strategies when a
        # subclass does not provide the optional fast-path hooks.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external
        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild
        super(ListMixin, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            return [self._get_single_external(i) for i in range(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)
    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)

        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))

        # Rebuild from the surviving items (lazy generator; _rebuild caches it).
        newLen = origLen - len(indexRange)
        newItems = (self._get_single_internal(i)
                    for i in range(origLen)
                    if i not in indexRange)
        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)
    # ### Special methods for arithmetic operations ###
    # All binary operators build plain lists and hand them to the subclass
    # constructor, so the result has the operand's concrete type.
    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply'
        # n <= 0 empties the sequence, matching list semantics.
        if n <= 0:
            del self[:]
        else:
            cache = list(self)
            for i in range(n - 1):
                self.extend(cache)
        return self
    def __eq__(self, other):
        # Element-wise comparison; an IndexError means self is the shorter
        # sequence, hence unequal.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen

    def __lt__(self, other):
        # Lexicographic ordering; @total_ordering derives the remaining
        # comparison operators from __eq__ and __lt__.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen
# ### Public list interface Methods ###
# ## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i:
count += 1
return count
def index(self, val):
"Standard list index method"
for i in range(0, len(self)):
if self[i] == val:
return i
raise ValueError('%s not found in object' % str(val))
    # ## Mutating ##
    # All mutators are expressed through slice assignment / deletion so the
    # subclass only has to implement the storage hooks.
    def append(self, val):
        "Standard list append method"
        self[len(self):] = [val]

    def extend(self, vals):
        "Standard list extend method"
        self[len(self):] = vals

    def insert(self, index, val):
        "Standard list insert method"
        if not isinstance(index, six.integer_types):
            raise TypeError("%s is not a legal index" % index)
        self[index:index] = [val]

    def pop(self, index=-1):
        "Standard list pop method"
        result = self[index]
        del self[index]
        return result

    def remove(self, val):
        "Standard list remove method"
        del self[self.index(val)]

    def reverse(self):
        "Standard list reverse method"
        self[:] = self[-1::-1]
def sort(self, cmp=None, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v), v) for v in self]
temp.sort(key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
if cmp is not None:
temp.sort(cmp=cmp, reverse=reverse)
else:
temp.sort(reverse=reverse)
self[:] = temp
# ### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
    def _set_single_rebuild(self, index, value):
        # Fallback single-item setter: express the assignment as a
        # one-element slice so _set_slice does all the work.
        self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        # Mirror list semantics: only iterables may be assigned to a slice.
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')
        self._check_allowed(values)
        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)
        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            # Plain slice (e.g. obj[2:5]) -- may grow or shrink the sequence.
            self._assign_simple_slice(start, stop, valueList)
        else:
            # Extended slice (explicit step) -- length must stay unchanged.
            self._assign_extended_slice(start, stop, step, valueList)
    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))
        def newItems():
            # Yield the new sequence: replaced positions come from newVals,
            # everything else from the existing storage.
            for i in range(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        # Normalise an inverted range (e.g. obj[5:2]) to an insertion point.
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)
        def newItems():
            # Yield the new sequence: items before the slice, the new values,
            # then the items after the slice.  The extra iteration at
            # i == origLen only serves the append-at-end case and must not
            # emit an existing item.
            for i in range(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val
                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
| bsd-3-clause |
NaturalGIS/naturalgis_qgis | tests/src/python/test_qgsnetworkcontentfetcher.py | 45 | 4528 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsNetworkContentFetcher
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import chr
from builtins import str
__author__ = 'Matthias Kuhn'
__date__ = '4/28/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import os
from qgis.testing import unittest, start_app
from qgis.core import QgsNetworkContentFetcher
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QUrl
from qgis.PyQt.QtNetwork import QNetworkReply, QNetworkRequest
import socketserver
import threading
import http.server
app = start_app()
class TestQgsNetworkContentFetcher(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        """Bring up a simple HTTP server serving the unit-test data dir."""
        os.chdir(unitTestDataPath())
        handler = http.server.SimpleHTTPRequestHandler
        # Port 0 asks the OS for any free port; read the real one back.
        cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
        cls.port = cls.httpd.server_address[1]
        cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
        # Thread.setDaemon() is deprecated; assign the attribute instead.
        cls.httpd_thread.daemon = True
        cls.httpd_thread.start()

    @classmethod
    def tearDownClass(cls):
        """Shut the HTTP server down again so its port is released."""
        cls.httpd.shutdown()
        cls.httpd.server_close()

    def __init__(self, methodName):
        """Run once on class initialization."""
        unittest.TestCase.__init__(self, methodName)
        self.loaded = False

    def contentLoaded(self):
        # Slot connected to QgsNetworkContentFetcher.finished
        self.loaded = True

    def testFetchEmptyUrl(self):
        """Fetching an empty URL must report an error."""
        fetcher = QgsNetworkContentFetcher()
        self.loaded = False
        fetcher.fetchContent(QUrl())
        fetcher.finished.connect(self.contentLoaded)
        while not self.loaded:
            app.processEvents()
        r = fetcher.reply()
        assert r.error() != QNetworkReply.NoError

    def testFetchBadUrl(self):
        """Fetching an unresolvable URL must report an error."""
        fetcher = QgsNetworkContentFetcher()
        self.loaded = False
        fetcher.fetchContent(QUrl('http://x'))
        fetcher.finished.connect(self.contentLoaded)
        while not self.loaded:
            app.processEvents()
        r = fetcher.reply()
        assert r.error() != QNetworkReply.NoError

    def testFetchUrlContent(self):
        """Fetching a valid URL returns the page content."""
        fetcher = QgsNetworkContentFetcher()
        self.loaded = False
        fetcher.fetchContent(QUrl('http://localhost:' + str(TestQgsNetworkContentFetcher.port) + '/qgis_local_server/index.html'))
        fetcher.finished.connect(self.contentLoaded)
        while not self.loaded:
            app.processEvents()
        r = fetcher.reply()
        assert r.error() == QNetworkReply.NoError, r.error()
        html = fetcher.contentAsString()
        assert 'QGIS' in html

    def testFetchRequestContent(self):
        """fetchContent also accepts a QNetworkRequest."""
        fetcher = QgsNetworkContentFetcher()
        self.loaded = False
        request = QNetworkRequest(QUrl('http://localhost:' + str(TestQgsNetworkContentFetcher.port) + '/qgis_local_server/index.html'))
        fetcher.fetchContent(request)
        fetcher.finished.connect(self.contentLoaded)
        while not self.loaded:
            app.processEvents()
        r = fetcher.reply()
        assert r.error() == QNetworkReply.NoError, r.error()
        html = fetcher.contentAsString()
        assert 'QGIS' in html

    def testDoubleFetch(self):
        """A second fetch interrupts a pending one; the last request wins."""
        fetcher = QgsNetworkContentFetcher()
        self.loaded = False
        fetcher.fetchContent(QUrl('http://www.qgis.org/'))
        # double fetch - this should happen before previous request finishes
        fetcher.fetchContent(QUrl('http://localhost:' + str(TestQgsNetworkContentFetcher.port) + '/qgis_local_server/index.html'))
        fetcher.finished.connect(self.contentLoaded)
        while not self.loaded:
            app.processEvents()
        r = fetcher.reply()
        assert r.error() == QNetworkReply.NoError, r.error()
        html = fetcher.contentAsString()
        assert 'QGIS' in html

    def testFetchEncodedContent(self):
        """Non-default encodings are decoded correctly by contentAsString()."""
        fetcher = QgsNetworkContentFetcher()
        self.loaded = False
        fetcher.fetchContent(QUrl('http://localhost:' + str(TestQgsNetworkContentFetcher.port) + '/encoded_html.html'))
        fetcher.finished.connect(self.contentLoaded)
        while not self.loaded:
            app.processEvents()
        r = fetcher.reply()
        assert r.error() == QNetworkReply.NoError, r.error()
        html = fetcher.contentAsString()
        assert chr(6040) in html
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
Mariusz1970/enigma2 | lib/python/Components/Sources/FrontendInfo.py | 35 | 1642 | from enigma import iPlayableService
from Source import Source
from Components.PerServiceDisplay import PerServiceBase
class FrontendInfo(Source, PerServiceBase):
	"""Source exposing which tuner (slot number and type) feeds the current service."""

	def __init__(self, service_source=None, frontend_source=None, navcore=None):
		self.navcore = None
		Source.__init__(self)
		if navcore:
			# Follow service events so the tuner info stays current.
			# NOTE(review): self.navcore is presumably (re)assigned inside
			# PerServiceBase.__init__ -- confirm against that base class.
			PerServiceBase.__init__(self, navcore,
				{
					iPlayableService.evTunedIn: self.updateFrontendData,
					iPlayableService.evEnd: self.serviceEnd
				})
		self.service_source = service_source
		self.frontend_source = frontend_source
		self.updateFrontendData()

	def serviceEnd(self):
		# Service stopped: forget the cached tuner info.
		self.slot_number = self.frontend_type = None
		self.changed((self.CHANGED_CLEAR, ))

	def updateFrontendData(self):
		"""Refresh slot_number / frontend_type from the active frontend."""
		data = self.getFrontendData()
		if not data:
			self.slot_number = self.frontend_type = None
		else:
			self.slot_number = data.get("tuner_number")
			self.frontend_type = data.get("tuner_type")
		self.changed((self.CHANGED_ALL, ))

	def getFrontendData(self):
		"""Return a dict with tuner data for the active source, or None."""
		if self.frontend_source:
			frontend = self.frontend_source()
			# Renamed from 'dict', which shadowed the builtin of that name.
			frontend_data = {}
			if frontend:
				frontend.getFrontendData(frontend_data)
			return frontend_data
		elif self.service_source:
			service = self.navcore and self.service_source()
			feinfo = service and service.frontendInfo()
			return feinfo and feinfo.getFrontendData()
		elif self.navcore:
			service = self.navcore.getCurrentService()
			feinfo = service and service.frontendInfo()
			return feinfo and feinfo.getFrontendData()
		return None

	def destroy(self):
		# Only unhook from service events when __init__ actually hooked in.
		if not self.frontend_source and not self.service_source:
			PerServiceBase.destroy(self)
		Source.destroy(self)
| gpl-2.0 |
offbye/paparazzi | sw/tools/parrot/ardrone2.py | 25 | 22475 | #!/usr/bin/env python
#
# Copyright (C) 2012-2014 The Paparazzi Team
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import re
import argparse
import os
from time import sleep
import parrot_utils
# Read from config.ini
def read_from_config(name, config=''):
    """Return the value of *name* from the drone's config.ini ('' if absent)."""
    if config == '':
        # No cached config supplied: fetch the file over telnet.
        config = parrot_utils.execute_command(tn, 'cat /data/config.ini')
    match = re.search(name + '[^=]+=[\r\n\t ]([^\r\n\t ]+)', config)
    if match is None:
        return ''
    return match.group(1)
# Write to config
def write_to_config(name, value):
    """Create or update *name* in /data/config.ini on the drone."""
    if read_from_config(name) == '':
        # Key is absent: append it to the file.
        command = 'echo "' + name + ' = ' + value + '\" >> /data/config.ini'
    else:
        # Key already present: rewrite its value in place with sed.
        command = 'sed -i "s/\(' + name + ' *= *\).*/\\1' + value + '/g" /data/config.ini'
    parrot_utils.execute_command(tn, command)
# Check if vision framework is installed
def check_vision_installed():
    """True when any of the vision framework directories exist on the drone."""
    du_opt = parrot_utils.execute_command(tn, 'du -d 2 /data/video/opt')
    markers = ('/data/video/opt/arm/gst',
               '/data/video/opt/arm/lib',
               '/data/video/opt/arm/tidsp-binaries-23.i3.8')
    return any(marker in du_opt for marker in markers)
# Check if the vision framework is running
def check_vision_running():
    """True when all vision framework directories are mounted under /opt."""
    du_opt = parrot_utils.execute_command(tn, 'du -d 2 /opt')
    markers = ('/opt/arm/gst', '/opt/arm/lib', '/opt/arm/tidsp-binaries-23.i3.8')
    return all(marker in du_opt for marker in markers)
# Check if autoboot is installed
def check_autoboot():
    """True when the custom autoboot hooks are present in both boot scripts."""
    check_update = parrot_utils.execute_command(tn, 'grep "START_PAPARAZZI" /bin/check_update.sh')
    wifi_setup = parrot_utils.execute_command(tn, 'grep "BASE_ADRESS" /bin/wifi_setup.sh')
    return "START_PAPARAZZI" in check_update and "BASE_ADRESS" in wifi_setup
# Check if custom wifi_setup script is installed
def check_wifi_setup():
    """True when the custom wifi_setup.sh script is installed on the drone."""
    check_wifi = parrot_utils.execute_command(tn, 'grep "static_ip_address_base" /bin/wifi_setup.sh')
    return "static_ip_address_base" in check_wifi
# Install the vision framework
def ardrone2_install_vision():
    # Upload the prebuilt framework tarball via FTP (lands in /data/video),
    # unpack it over telnet, then start the framework.
    print('Uploading GST')
    parrot_utils.uploadfile(ftp, "arm_light.tgz", file("bin/arm_light.tgz", "rb"))
    print(parrot_utils.execute_command(tn,"cd /data/video && tar -xzf arm_light.tgz"))
    print(parrot_utils.execute_command(tn,"rm -rf /data/video/arm_light.tgz"))
    print('Now Starting Vision')
    ardrone2_start_vision()
# Remove the vision framework
def ardrone2_remove_vision():
    """Delete the vision framework from the drone (mount points and storage)."""
    for path in ("/opt/arm", "/lib/dsp", "/data/video/opt"):
        parrot_utils.execute_command(tn, "rm -rf " + path)
# Start the vision framework
def ardrone2_start_vision():
    # Bind-mount the framework from persistent storage, restart the DSP
    # helpers, and show the resulting gst binaries.  Order matters: the
    # mounts must exist before the DSP base image is loaded.
    # Mount the directories
    parrot_utils.execute_command(tn,"mkdir -p /opt/arm")
    parrot_utils.execute_command(tn,"mkdir -p /lib/dsp")
    parrot_utils.execute_command(tn,"mount --bind /data/video/opt/arm /opt/arm")
    parrot_utils.execute_command(tn,"mount --bind /data/video/opt/arm/lib/dsp /lib/dsp")
    # Start The DSP programs
    parrot_utils.execute_command(tn,"kill -9 `pidof program.elf`")
    parrot_utils.execute_command(tn,"kill -9 `pidof gst-launch-0.10`")
    parrot_utils.execute_command(tn,"export PATH=/opt/arm/gst/bin:$PATH")
    parrot_utils.execute_command(tn,"export DSP_PATH=/opt/arm/tidsp-binaries-23.i3.8/")
    parrot_utils.execute_command(tn,"/bin/dspbridge/cexec.out -T /opt/arm/tidsp-binaries-23.i3.8/baseimage.dof -v")
    parrot_utils.execute_command(tn,"/bin/dspbridge/dynreg.out -r /opt/arm/tidsp-binaries-23.i3.8/m4venc_sn.dll64P -v")
    # Show result
    parrot_utils.execute_command(tn,"ls -altr /opt/arm/gst/bin")
# Install autoboot script
def ardrone2_install_autoboot():
    # Upload via FTP (lands in /data/video), then move into place over telnet.
    print('Uploading autoboot script')
    parrot_utils.uploadfile(ftp, "check_update.sh", file("ardrone2/check_update.sh", "rb"))
    print(parrot_utils.execute_command(tn,"mv /data/video/check_update.sh /bin/check_update.sh"))
    print(parrot_utils.execute_command(tn,"chmod 777 /bin/check_update.sh"))
# Install network script
def ardrone2_install_network_script():
    # Upload via FTP (lands in /data/video), then move into place over telnet.
    print('Uploading Wifi script')
    parrot_utils.uploadfile(ftp, "wifi_setup.sh", file("ardrone2/wifi_setup.sh", "rb"))
    print(parrot_utils.execute_command(tn,"mv /data/video/wifi_setup.sh /bin/wifi_setup.sh"))
    print(parrot_utils.execute_command(tn,"chmod 777 /bin/wifi_setup.sh"))
# Install olsr deamon
def ardrone2_install_olsrd():
    # Upload the daemon binary and its config, then wire up the filesystem.
    print('Uploading olsr deamon')
    parrot_utils.uploadfile(ftp, "olsrd", file("ardrone2/olsrd", "rb"))
    parrot_utils.uploadfile(ftp, "olsrd.conf", file("ardrone2/olsrd.conf", "rb"))
    print(parrot_utils.execute_command(tn,"mv /data/video/olsrd /bin/olsrd"))
    print(parrot_utils.execute_command(tn,"chmod 777 /bin/olsrd"))
    print(parrot_utils.execute_command(tn,"mkdir -p /etc/olsrd"))
    print(parrot_utils.execute_command(tn,"mv /data/video/olsrd.conf /etc/olsrd"))
    print(parrot_utils.execute_command(tn,"rm -f /var/run && ln -s /tmp /var/run")) # olsrd needs /var/run folder, symlinked to /tmp
# Set network SSID
def ardrone2_set_ssid(name):
    # Persist the new SSID; it takes effect on the next boot.
    write_to_config('ssid_single_player', name)
    print('The network ID (SSID) of the ARDrone 2 is changed to ' + name)
# Set IP address
def ardrone2_set_ip_address(address):
    """Persist a new static IP address (base + probe octet) for the drone."""
    octets = address.split(".")
    # config.ini stores the address as "a.b.c." plus a separate last octet.
    write_to_config('static_ip_address_base', '.'.join(octets[:3]) + '.')
    write_to_config('static_ip_address_probe', octets[3])
    print('The IP Address of the ARDrone 2 is changed to ' + address)
# Set wifi mode (0: master, 1: ad-hoc, 2: managed, *: master)
def ardrone2_set_wifi_mode(mode):
    """Persist the wifi mode; unknown modes fall back to 'master'."""
    modes = { 'master' : '0', 'ad-hoc' : '1', 'managed' : '2', 'ad-hoc-olsr' : '3' }
    try:
        val = modes[mode]
    except KeyError:
        # Only a missing key means a bad mode; the previous bare 'except:'
        # would also have hidden unrelated errors (e.g. KeyboardInterrupt).
        print('Unexpected wifi mode, setting to master (default)')
        val = modes['master']
    write_to_config('wifi_mode', val)
    print('The Wifi mode of the ARDrone2 is changed to ' + mode + ' (' + val + ')')
# Set network channel
def ardrone2_set_wifi_channel(chan):
    # Persist the channel ('auto' or 1..11); takes effect on the next boot.
    write_to_config('wifi_channel', chan)
    print('The network channel of the ARDrone 2 is changed to ' + chan)
def ardrone2_status():
    # Print a human-readable status report: firmware version, network setup,
    # motor versions, autoboot state, vision framework and filesystem usage.
    config_ini = parrot_utils.execute_command(tn,'cat /data/config.ini')
    print('======================== ARDrone 2 Status ========================')
    print('Version:\t\t' + parrot_utils.check_version(tn, '/firmware'))
    print('Host:\t\t\t' + args.host + ' (' + read_from_config('static_ip_address_base', config_ini) +
          read_from_config('static_ip_address_probe', config_ini) + ' after boot)')
    print('Currently running:\t' + parrot_utils.check_running(tn))
    print('Serial number:\t\t' + read_from_config('drone_serial', config_ini))
    print('Network id:\t\t' + read_from_config('ssid_single_player', config_ini))
    print('Motor software:\t\t' +
          read_from_config('motor1_soft', config_ini) + '\t' + read_from_config('motor2_soft', config_ini) + '\t' +
          read_from_config('motor3_soft', config_ini) + '\t' + read_from_config('motor4_soft', config_ini))
    print('Motor hardware:\t\t' +
          read_from_config('motor1_hard', config_ini) + '\t' + read_from_config('motor2_hard', config_ini) + '\t' +
          read_from_config('motor3_hard', config_ini) + '\t' + read_from_config('motor4_hard', config_ini))
    # An empty/absent start_paparazzi key counts as native boot.
    autorun = {'': 'Native', '0': 'Native', '1': 'Paparazzi'}
    if check_autoboot():
        print('Autorun at start:\tInstalled booting ' + autorun[read_from_config('start_paparazzi', config_ini)])
    else:
        print('Autorun at start:\tNot installed')
    # Check if the vision framework is installed and running
    vision_framework = ""
    if check_vision_installed():
        vision_framework += "Installed"
    if check_vision_running():
        vision_framework += " and running"
    print('Vision framework:\t' + vision_framework)
    # Request the filesystem status
    print('\n======================== Filesystem Status ========================')
    print(parrot_utils.check_filesystem(tn))
# Parse the arguments
parser = argparse.ArgumentParser(description='ARDrone 2 python helper. Use ardrone2.py -h for help')
parser.add_argument('--host', metavar='HOST', default='192.168.1.1',
                    help='the ip address of ardrone2')
# Each sub-command below becomes a value of args.command.
subparsers = parser.add_subparsers(title='Command to execute', metavar='command', dest='command')
# All the subcommands and arguments
subparsers.add_parser('status', help='Request the status of the ARDrone 2')
subparsers.add_parser('reboot', help='Reboot the ARDrone 2')
subparsers.add_parser('installvision', help='Install the vision framework')
subparser_upload_gst = subparsers.add_parser('upload_gst_module',
                                             help='Upload, configure and move a gstreamer0.10 module libXXX.so')
subparser_upload_gst.add_argument('file', help='Filename of *.so module')
subparser_upload_and_run = subparsers.add_parser('upload_file_and_run', help='Upload and run software (for instance the Paparazzi autopilot)')
subparser_upload_and_run.add_argument('file', help='Filename of an executable')
subparser_upload_and_run.add_argument('folder', help='Destination subfolder (raw for Paparazzi autopilot)')
subparser_upload = subparsers.add_parser('upload_file', help='Upload a file to the ARDrone 2')
subparser_upload.add_argument('file', help='Filename')
subparser_upload.add_argument('folder', help='Destination subfolder (base destination folder is /data/video)')
subparser_download = subparsers.add_parser('download_file', help='Download a file from the ARDrone 2')
subparser_download.add_argument('file', help='Filename (with the path on the local machine)')
subparser_download.add_argument('folder', help='Remote subfolder (base folder is /data/video)')
subparser_download_dir = subparsers.add_parser('download_dir', help='Download all files from a folder from the ARDrone 2')
subparser_download_dir.add_argument('dest', help='destination folder (on the local machine)')
subparser_download_dir.add_argument('folder', help='Remote subfolder (base folder is /data/video)')
subparser_rm_dir = subparsers.add_parser('rm_dir', help='Remove a directory and all its files from the ARDrone 2')
subparser_rm_dir.add_argument('folder', help='Remote subfolder (base folder is /data/video)')
subparser_insmod = subparsers.add_parser('insmod', help='Upload and insert kernel module')
subparser_insmod.add_argument('file', help='Filename of *.ko kernel module')
subparsers.add_parser('startvision', help='Start the vision framework')
subparser_start = subparsers.add_parser('start', help='Start a program on the ARDrone 2')
subparser_start.add_argument('program', help='the program to start')
subparser_kill = subparsers.add_parser('kill', help='Kill a program on the ARDrone 2')
subparser_kill.add_argument('program', help='the program to kill')
subparser_networkid = subparsers.add_parser('networkid', help='Set the network ID(SSID) of the ARDrone 2')
subparser_networkid.add_argument('name', help='the new network ID(SSID)')
subparser_ipaddress = subparsers.add_parser('ipaddress', help='Set the IP address of the ARDrone 2')
subparser_ipaddress.add_argument('address', help='the new IP address')
subparser_wifimode = subparsers.add_parser('wifimode', help='Set the Wifi mode the ARDrone 2')
subparser_wifimode.add_argument('mode', help='the new Wifi mode', choices=['master', 'ad-hoc', 'managed', 'ad-hoc-olsr'])
subparser_configure_network = subparsers.add_parser('configure_network', help='Configure the network on the ARDrone 2')
subparser_configure_network.add_argument('name', help='the new network ID(SSID)')
subparser_configure_network.add_argument('address', help='the new IP address')
subparser_configure_network.add_argument('mode', help='the new Wifi mode', choices=['master', 'ad-hoc', 'managed', 'ad-hoc-olsr'])
subparser_configure_network.add_argument('--channel', help='the wifi channel (auto or 1 to 11)', default='auto')
subparser_install_autostart = subparsers.add_parser('install_autostart', help='Install custom autostart script and set what to start on boot for the ARDrone 2')
subparser_install_autostart.add_argument('type', choices=['native', 'paparazzi'],
                                         help='what to start on boot')
subparser_autostart = subparsers.add_parser('autostart', help='Set what to start on boot for the ARDrone 2')
subparser_autostart.add_argument('type', choices=['native', 'paparazzi'],
                                 help='what to start on boot')
args = parser.parse_args()
# Connect with telnet and ftp
tn, ftp = parrot_utils.connect(args.host)
# Check the ARDrone 2 status
if args.command == 'status':
    ardrone2_status()
# Reboot the drone
elif args.command == 'reboot':
    parrot_utils.reboot(tn)
    print('The ARDrone 2 is rebooting...')
# Kill a program
elif args.command == 'kill':
parrot_utils.execute_command(tn,'killall -9 ' + args.program)
print('Program "' + args.program + '" is now killed')
# Start a program
elif args.command == 'start':
parrot_utils.execute_command(tn,args.start + ' &')
print('Program "' + args.start + '" is now started')
# Change the network ID
elif args.command == 'networkid':
    ardrone2_set_ssid(args.name)
    if raw_input("Shall I restart the ARDrone 2? (y/N) ").lower() == 'y':
        parrot_utils.reboot(tn)
# Change the IP address
elif args.command == 'ipaddress':
    ardrone2_set_ip_address(args.address)
    if raw_input("Shall I restart the ARDrone 2? (y/N) ").lower() == 'y':
        parrot_utils.reboot(tn)
# Change the wifi mode
elif args.command == 'wifimode':
    ardrone2_set_wifi_mode(args.mode)
    if raw_input("Shall I restart the ARDrone 2? (y/N) ").lower() == 'y':
        parrot_utils.reboot(tn)
# Install and configure network
elif args.command == 'configure_network':
    # Show the current on-disk setup before touching anything.
    config_ini = parrot_utils.execute_command(tn,'cat /data/config.ini')
    print('=== Current network setup ===')
    print('Network id:\t' + read_from_config('ssid_single_player', config_ini))
    print('Host:\t\t' + args.host + ' (' + read_from_config('static_ip_address_base', config_ini) +
          read_from_config('static_ip_address_probe', config_ini) + ' after boot)')
    print('Mode:\t\t' + read_from_config('wifi_mode', config_ini))
    print('Channel:\t' + read_from_config('wifi_channel', config_ini))
    print('=============================')
    if check_wifi_setup():
        print('Custom Wifi script already installed')
        if raw_input("Shall I reinstall the Wifi script (y/N) ").lower() == 'y':
            ardrone2_install_network_script()
    else:
        if raw_input("Shall I install custom Wifi script (recommanded) (y/N) ").lower() == 'y':
            ardrone2_install_network_script()
    if raw_input("Shall I install olsrd (ad-hoc wireless mesh routing deamon) (y/N) ").lower() == 'y':
        ardrone2_install_olsrd()
    # Persist the new settings; they only take effect after a reboot.
    ardrone2_set_ssid(args.name)
    ardrone2_set_ip_address(args.address)
    ardrone2_set_wifi_mode(args.mode)
    ardrone2_set_wifi_channel(args.channel)
    # Re-read config.ini to show what will apply on the next boot.
    config_ini = parrot_utils.execute_command(tn,'cat /data/config.ini')
    print('== New network setup after boot ==')
    print('Network id:\t' + read_from_config('ssid_single_player', config_ini))
    print('Host:\t\t' + read_from_config('static_ip_address_base', config_ini) +
          read_from_config('static_ip_address_probe', config_ini))
    print('Mode:\t\t' + read_from_config('wifi_mode', config_ini))
    print('Channel:\t' + read_from_config('wifi_channel', config_ini))
    print('==================================')
    if raw_input("Shall I restart the ARDrone 2? (y/N) ").lower() == 'y':
        parrot_utils.reboot(tn)
# Install and configure autostart
elif args.command == 'install_autostart':
    if check_autoboot():
        print('Custom autostart script already installed')
        if raw_input("Shall I reinstall the autostart script (y/N) ").lower() == 'y':
            ardrone2_install_autoboot()
    else:
        ardrone2_install_autoboot()
    # 0 boots the Parrot firmware, 1 boots the Paparazzi autopilot.
    autorun = {'native': '0', 'paparazzi': '1'}
    write_to_config('start_paparazzi', autorun[args.type])
    print('The autostart on boot is changed to ' + args.type)
    if raw_input("Shall I restart the ARDrone 2? (y/N) ").lower() == 'y':
        parrot_utils.reboot(tn)
# Change the autostart
elif args.command == 'autostart':
    autorun = {'native': '0', 'paparazzi': '1'}
    write_to_config('start_paparazzi', autorun[args.type])
    print('The autostart on boot is changed to ' + args.type)
# Install Vision framework
elif args.command == 'installvision':
if check_vision_installed():
print('Vision framework already installed')
if raw_input("Shall I reinstall the vision framework? (y/N) ").lower() == 'y':
ardrone2_remove_vision()
ardrone2_install_vision()
ardrone2_install_vision()
print('Vision framework installed')
# Start Vision framework
elif args.command == 'startvision':
    if check_vision_running():
        print('Vision framework already started')
    else:
        # Offer to install first when the framework is missing.
        if not check_vision_installed():
            print('No vision framework installed')
            if raw_input("Shall I install the vision framework? (y/N) ").lower() == 'y':
                ardrone2_install_vision()
        if check_vision_installed():
            ardrone2_start_vision()
            print('Vision framework started')
elif args.command == 'upload_gst_module':
    # Upload a gstreamer plugin and move it into the framework's plugin dir.
    print('Uploading ...' + args.file)
    parrot_utils.uploadfile(ftp, args.file, file(args.file, "rb"))
    parrot_utils.execute_command(tn,"chmod 777 /data/video/" + args.file)
    parrot_utils.execute_command(tn,"mv /data/video/" + args.file + " /data/video/opt/arm/gst/lib/gstreamer-0.10")
    if check_vision_running():
        print('Info: Vision framework already started')
    else:
        if not check_vision_installed():
            print('Warning: No vision framework installed')
            if raw_input("Warning: Shall I install the vision framework? (y/N) ").lower() == 'y':
                ardrone2_install_vision()
        if check_vision_installed():
            ardrone2_start_vision()
            print('#pragma message: Vision framework started')
    print('#pragma message: Vision Plugin Uploaded and DSP Started.')
elif args.command == 'insmod':
    # Upload a kernel module via FTP and insert it over telnet.
    modfile = parrot_utils.split_into_path_and_file(args.file)
    print('Uploading \'' + modfile[1])
    parrot_utils.uploadfile(ftp, modfile[1], file(args.file, "rb"))
    print(parrot_utils.execute_command(tn,"insmod /data/video/" + modfile[1]))
elif args.command == 'upload_file_and_run':
    # Split filename and path
    f = parrot_utils.split_into_path_and_file(args.file)
    # Stop a running copy of the executable before overwriting it.
    print("Kill running " + f[1] + " and make folder " + args.folder)
    parrot_utils.execute_command(tn,"killall -9 " + f[1])
    sleep(1)
    parrot_utils.execute_command(tn, "mkdir -p /data/video/" + args.folder)
    print('Uploading \'' + f[1] + "\' from " + f[0] + " to " + args.folder)
    parrot_utils.uploadfile(ftp, args.folder + "/" + f[1], file(args.file, "rb"))
    sleep(0.5)
    # Make it executable and launch it detached from the telnet session.
    parrot_utils.execute_command(tn, "chmod 777 /data/video/" + args.folder + "/" + f[1])
    parrot_utils.execute_command(tn, "/data/video/" + args.folder + "/" + f[1] + " > /dev/null 2>&1 &")
    print("#pragma message: Upload and Start of ap.elf to ARDrone2 Succes!")
elif args.command == 'upload_file':
    # Split filename and path
    f = parrot_utils.split_into_path_and_file(args.file)
    parrot_utils.execute_command(tn,"mkdir -p /data/video/" + args.folder)
    print('Uploading \'' + f[1] + "\' from " + f[0] + " to /data/video/" + args.folder)
    parrot_utils.uploadfile(ftp, args.folder + "/" + f[1], file(args.file, "rb"))
    print("#pragma message: Upload of " + f[1] + " to ARDrone2 Succes!")
elif args.command == 'download_file':
# Split filename and path
f = parrot_utils.split_into_path_and_file(args.file)
# Open file and download
try:
file = open(args.file, 'wb')
print('Downloading \'' + f[1] + "\' from " + args.folder + " to " + f[0])
ftp.retrbinary("RETR " + args.folder + "/" + f[1], file.write)
print("#pragma message: Download of " + f[1] + " from ARDrone2 Succes!")
except IOError:
print("#pragma message: Fail to open file " + args.file)
except:
os.remove(args.file)
print("#pragma message: Download of " + f[1] + " from ARDrone2 Failed!")
else:
file.close()
elif args.command == 'download_dir':
# Split filename and path
files = parrot_utils.execute_command(tn, 'find /data/video/' + args.folder + ' -name \'*.*\'')
# Create dest dir if needed
if not os.path.exists(args.dest):
os.mkdir(args.dest)
# Open file and download
for f in files.split():
file_name = parrot_utils.split_into_path_and_file(f)
file_source = args.folder + '/' + file_name[1]
file_dest = args.dest + '/' + file_name[1]
try:
file = open(file_dest, 'wb')
print('Downloading \'' + f + "\' to " + file_dest)
ftp.retrbinary("RETR " + file_source, file.write)
except IOError:
print("#pragma message: Fail to open file " + file_dest)
except:
os.remove(file_dest)
print("#pragma message: Download of " + f + " from ARDrone2 Failed!")
else:
file.close()
print("#pragma message: End download of folder " + args.folder + " from ARDrone2")
elif args.command == 'rm_dir':
    # Recursively delete a remote folder below /data/video.
    print("Deleting folder /data/video/" + args.folder + " from ARDrone2")
    print(parrot_utils.execute_command(tn, 'rm -r /data/video/' + args.folder))
# Close the telnet and python script
parrot_utils.disconnect(tn, ftp)
exit(0)
| gpl-2.0 |
onceuponatimeforever/oh-mainline | vendor/packages/sqlparse/tests/test_grouping.py | 16 | 13177 | # -*- coding: utf-8 -*-
import pytest
import sqlparse
from sqlparse import sql
from sqlparse import tokens as T
from tests.utils import TestCaseBase
class TestGrouping(TestCaseBase):
    def test_parenthesis(self):
        """Nested parentheses are grouped into nested Parenthesis tokens."""
        s = 'select (select (x3) x2) and (y2) bar'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, str(parsed))
        self.assertEqual(len(parsed.tokens), 9)
        self.assert_(isinstance(parsed.tokens[2], sql.Parenthesis))
        self.assert_(isinstance(parsed.tokens[-3], sql.Parenthesis))
        self.assertEqual(len(parsed.tokens[2].tokens), 7)
        self.assert_(isinstance(parsed.tokens[2].tokens[3], sql.Parenthesis))
        self.assertEqual(len(parsed.tokens[2].tokens[3].tokens), 3)
    def test_comments(self):
        """A multiline comment is grouped into a single token."""
        s = '/*\n * foo\n */ \n bar'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assertEqual(len(parsed.tokens), 2)
    def test_assignment(self):
        """A ':=' statement is grouped as an Assignment, with or without ';'."""
        s = 'foo := 1;'
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 1)
        self.assert_(isinstance(parsed.tokens[0], sql.Assignment))
        s = 'foo := 1'
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 1)
        self.assert_(isinstance(parsed.tokens[0], sql.Assignment))
    def test_identifiers(self):
        """Identifiers, qualified names, aliases and lists group correctly."""
        s = 'select foo.bar from "myscheme"."table" where fail. order'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[2], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[6], sql.Identifier))
        self.assert_(isinstance(parsed.tokens[8], sql.Where))
        s = 'select * from foo where foo.id = 1'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[-1].tokens[-1].tokens[0],
                                sql.Identifier))
        s = 'select * from (select "foo"."id" from foo)'
        parsed = sqlparse.parse(s)[0]
        self.ndiffAssertEqual(s, unicode(parsed))
        self.assert_(isinstance(parsed.tokens[-1].tokens[3], sql.Identifier))
        s = "INSERT INTO `test` VALUES('foo', 'bar');"
        parsed = sqlparse.parse(s)[0]
        # Compare the token types of all non-whitespace tokens.
        types = [l.ttype for l in parsed.tokens if not l.is_whitespace()]
        self.assertEquals(types, [T.DML, T.Keyword, None,
                                  T.Keyword, None, T.Punctuation])
        s = "select 1.0*(a+b) as col, sum(c)/sum(d) from myschema.mytable"
        parsed = sqlparse.parse(s)[0]
        self.assertEqual(len(parsed.tokens), 7)
        self.assert_(isinstance(parsed.tokens[2], sql.IdentifierList))
        self.assertEqual(len(parsed.tokens[2].tokens), 4)
        identifiers = list(parsed.tokens[2].get_identifiers())
        self.assertEqual(len(identifiers), 2)
        self.assertEquals(identifiers[0].get_alias(), u"col")
    def test_identifier_wildcard(self):
        """Wildcard identifiers ('a.*') still group inside identifier lists."""
        p = sqlparse.parse('a.*, b.id')[0]
        self.assert_(isinstance(p.tokens[0], sql.IdentifierList))
        self.assert_(isinstance(p.tokens[0].tokens[0], sql.Identifier))
        self.assert_(isinstance(p.tokens[0].tokens[-1], sql.Identifier))
    def test_identifier_name_wildcard(self):
        """A wildcard identifier reports '*' as its name and is_wildcard()."""
        p = sqlparse.parse('a.*')[0]
        t = p.tokens[0]
        self.assertEqual(t.get_name(), '*')
        self.assertEqual(t.is_wildcard(), True)
def test_identifier_invalid(self):
    """A dangling dot ('a.') still groups as an Identifier with no name."""
    ident = sqlparse.parse('a.')[0].tokens[0]
    self.assertTrue(isinstance(ident, sql.Identifier))
    self.assertEqual(ident.has_alias(), False)
    self.assertEqual(ident.get_name(), None)
    self.assertEqual(ident.get_real_name(), None)
    self.assertEqual(ident.get_parent_name(), 'a')
def test_identifier_as_invalid(self):  # issue8
    """'foo as select *' must not swallow the SELECT into the identifier."""
    p = sqlparse.parse('foo as select *')[0]
    # Bug fix: the original `self.assert_(len(p.tokens), 5)` was vacuous --
    # assert_/assertTrue takes the second argument as the failure *message*,
    # so the check passed for any non-empty token list. Assert the intended
    # count explicitly instead.
    self.assertEqual(len(p.tokens), 5)
    self.assert_(isinstance(p.tokens[0], sql.Identifier))
    # the identifier must consist of 'foo' only, not 'foo as select'
    self.assertEqual(len(p.tokens[0].tokens), 1)
    self.assertEqual(p.tokens[2].ttype, T.Keyword)
def test_identifier_function(self):
    """Function calls with an alias or trailing expression group as Identifier."""
    for text in ('foo() as bar', 'foo()||col2 bar'):
        stmt = sqlparse.parse(text)[0]
        self.assertTrue(isinstance(stmt.tokens[0], sql.Identifier))
        self.assertTrue(isinstance(stmt.tokens[0].tokens[0], sql.Function))
def test_identifier_extended(self):  # issue 15
    """Identifiers followed by arithmetic operators stay grouped."""
    for text in ('foo+100', 'foo + 100', 'foo*100'):
        stmt = sqlparse.parse(text)[0]
        self.assertTrue(isinstance(stmt.tokens[0], sql.Identifier))
def test_identifier_list(self):
    """Comma-separated names group into an IdentifierList, also in parens."""
    stmt = sqlparse.parse('a, b, c')[0]
    self.assertTrue(isinstance(stmt.tokens[0], sql.IdentifierList))
    stmt = sqlparse.parse('(a, b, c)')[0]
    self.assertTrue(isinstance(stmt.tokens[0].tokens[1], sql.IdentifierList))
def test_identifier_list_case(self):
    """An aliased CASE expression is a valid IdentifierList member."""
    stmt = sqlparse.parse('a, case when 1 then 2 else 3 end as b, c')[0]
    self.assertTrue(isinstance(stmt.tokens[0], sql.IdentifierList))
    stmt = sqlparse.parse('(a, case when 1 then 2 else 3 end as b, c)')[0]
    self.assertTrue(isinstance(stmt.tokens[0].tokens[1], sql.IdentifierList))
def test_identifier_list_other(self):  # issue2
    """Literals and wildcards are allowed inside an IdentifierList."""
    stmt = sqlparse.parse("select *, null, 1, 'foo', bar from mytable, x")[0]
    id_list = stmt.tokens[2]
    self.assertTrue(isinstance(id_list, sql.IdentifierList))
    self.assertEqual(len(id_list.tokens), 13)
def test_where(self):
    """WHERE clauses group into sql.Where, also inside subselects."""
    s = 'select * from foo where bar = 1 order by id desc'
    p = sqlparse.parse(s)[0]
    self.ndiffAssertEqual(s, unicode(p))
    # Bug fix: the original `self.assertTrue(len(p.tokens), 16)` was vacuous
    # -- the 16 was taken as the failure *message*, so the check passed for
    # any non-empty token list. Assert the intended count explicitly.
    self.assertEqual(len(p.tokens), 16)
    s = 'select x from (select y from foo where bar = 1) z'
    p = sqlparse.parse(s)[0]
    self.ndiffAssertEqual(s, unicode(p))
    self.assertTrue(isinstance(p.tokens[-3].tokens[-2], sql.Where))
def test_typecast(self):
    """PostgreSQL '::type' casts are exposed via get_typecast()."""
    sql_text = 'select foo::integer from bar'
    stmt = sqlparse.parse(sql_text)[0]
    self.ndiffAssertEqual(sql_text, unicode(stmt))
    self.assertEqual(stmt.tokens[2].get_typecast(), 'integer')
    self.assertEqual(stmt.tokens[2].get_name(), 'foo')
    sql_text = 'select (current_database())::information_schema.sql_identifier'
    stmt = sqlparse.parse(sql_text)[0]
    self.ndiffAssertEqual(sql_text, unicode(stmt))
    self.assertEqual(stmt.tokens[2].get_typecast(),
                     'information_schema.sql_identifier')
def test_alias(self):
s = 'select foo as bar from mytable'
p = sqlparse.parse(s)[0]
self.ndiffAssertEqual(s, unicode(p))
self.assertEqual(p.tokens[2].get_real_name(), 'foo')
self.assertEqual(p.tokens[2].get_alias(), 'bar')
s = 'select foo from mytable t1'
p = sqlparse.parse(s)[0]
self.ndiffAssertEqual(s, unicode(p))
self.assertEqual(p.tokens[6].get_real_name(), 'mytable')
self.assertEqual(p.tokens[6].get_alias(), 't1')
s = 'select foo::integer as bar from mytable'
p = sqlparse.parse(s)[0]
self.ndiffAssertEqual(s, unicode(p))
self.assertEqual(p.tokens[2].get_alias(), 'bar')
s = ('SELECT DISTINCT '
'(current_database())::information_schema.sql_identifier AS view')
p = sqlparse.parse(s)[0]
self.ndiffAssertEqual(s, unicode(p))
self.assertEqual(p.tokens[4].get_alias(), 'view')
def test_alias_case(self):  # see issue46
    """A CASE expression can carry an alias."""
    stmt = sqlparse.parse('CASE WHEN 1 THEN 2 ELSE 3 END foo')[0]
    self.assertEqual(len(stmt.tokens), 1)
    self.assertEqual(stmt.tokens[0].get_alias(), 'foo')
def test_idlist_function(self):  # see issue10 too
    """A function call with an alias participates in an IdentifierList."""
    stmt = sqlparse.parse('foo(1) x, bar')[0]
    self.assertTrue(isinstance(stmt.tokens[0], sql.IdentifierList))
def test_comparison_exclude(self):
    """Bare operators must not be grouped into a Comparison too eagerly."""
    stmt = sqlparse.parse('(=)')[0]
    self.assertTrue(isinstance(stmt.tokens[0], sql.Parenthesis))
    self.assertFalse(isinstance(stmt.tokens[0].tokens[1], sql.Comparison))
    # with operands present the grouping kicks in
    for text in ('(a=1)', '(a>=1)'):
        stmt = sqlparse.parse(text)[0]
        self.assertTrue(isinstance(stmt.tokens[0].tokens[1], sql.Comparison))
def test_function(self):
    """Function calls group into sql.Function; parameters are reachable."""
    stmt = sqlparse.parse('foo()')[0]
    self.assertTrue(isinstance(stmt.tokens[0], sql.Function))
    stmt = sqlparse.parse('foo(null, bar)')[0]
    self.assertTrue(isinstance(stmt.tokens[0], sql.Function))
    self.assertEqual(len(list(stmt.tokens[0].get_parameters())), 2)
def test_varchar(self):
    """Parametrized type names like Varchar(50) parse as a Function."""
    stmt = sqlparse.parse('"text" Varchar(50) NOT NULL')[0]
    self.assertTrue(isinstance(stmt.tokens[2], sql.Function))
class TestStatement(TestCaseBase):
    """Tests for Statement.get_type()."""

    def test_get_type(self):
        def first(sql_text):
            # convenience: first parsed statement of the input
            return sqlparse.parse(sql_text)[0]
        self.assertEqual(first('select * from foo').get_type(), 'SELECT')
        # leading whitespace must not confuse type detection
        for text in ('update foo', ' update foo', '\nupdate foo'):
            self.assertEqual(first(text).get_type(), 'UPDATE')
        self.assertEqual(first('foo').get_type(), 'UNKNOWN')
        # Statements that have whitespace after the closing semicolon are
        # parsed as two statements, where the latter consists only of the
        # trailing whitespace.
        self.assertEqual(first('\n').get_type(), 'UNKNOWN')
def test_identifier_with_operators():  # issue 53
    """'||' concatenation keeps both operands in a single Identifier."""
    # with and without surrounding whitespace
    for text in ('foo||bar', 'foo || bar'):
        stmt = sqlparse.parse(text)[0]
        assert len(stmt.tokens) == 1
        assert isinstance(stmt.tokens[0], sql.Identifier)
def test_identifier_with_op_trailing_ws():
    """Trailing whitespace must not be grouped into the identifier."""
    stmt = sqlparse.parse('foo || bar ')[0]
    assert len(stmt.tokens) == 2
    assert isinstance(stmt.tokens[0], sql.Identifier)
    assert stmt.tokens[1].ttype is T.Whitespace
def test_identifier_with_string_literals():
    """A string literal operand joins the surrounding Identifier."""
    stmt = sqlparse.parse('foo + \'bar\'')[0]
    assert len(stmt.tokens) == 1
    assert isinstance(stmt.tokens[0], sql.Identifier)
# This test seems to be wrong. It was introduced when fixing #53, but #111
# showed that this shouldn't be an identifier at all. I'm leaving this
# commented in the source for a while.
# def test_identifier_string_concat():
# p = sqlparse.parse('\'foo\' || bar')[0]
# assert len(p.tokens) == 1
# assert isinstance(p.tokens[0], sql.Identifier)
def test_identifier_consumes_ordering():  # issue89
    """ORDER BY items keep their ordering keyword on the identifier."""
    stmt = sqlparse.parse('select * from foo order by c1 desc, c2, c3')[0]
    assert isinstance(stmt.tokens[-1], sql.IdentifierList)
    idents = list(stmt.tokens[-1].get_identifiers())
    assert len(idents) == 3
    # first column carries DESC, second has no explicit ordering
    assert idents[0].get_name() == 'c1'
    assert idents[0].get_ordering() == 'DESC'
    assert idents[1].get_name() == 'c2'
    assert idents[1].get_ordering() is None
def test_comparison_with_keywords():  # issue90
    """Comparisons against keywords like NULL still group as Comparison."""
    # in fact these are assignments, but for now we don't distinguish them
    stmt = sqlparse.parse('foo = NULL')[0]
    assert len(stmt.tokens) == 1
    comparison = stmt.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert len(comparison.tokens) == 5
    assert comparison.left.value == 'foo'
    assert comparison.right.value == 'NULL'
    # make sure it's case-insensitive
    stmt = sqlparse.parse('foo = null')[0]
    assert len(stmt.tokens) == 1
    assert isinstance(stmt.tokens[0], sql.Comparison)
def test_comparison_with_parenthesis():  # issue23
    """The left side of a comparison may be a parenthesized expression."""
    stmt = sqlparse.parse('(3 + 4) = 7')[0]
    assert len(stmt.tokens) == 1
    comparison = stmt.tokens[0]
    assert isinstance(comparison, sql.Comparison)
    assert isinstance(comparison.left, sql.Parenthesis)
    assert comparison.right.ttype is T.Number.Integer
@pytest.mark.parametrize('start', ['FOR', 'FOREACH'])
def test_forloops(start):
    """FOR/FOREACH ... LOOP ... END LOOP groups into a single sql.For."""
    stmt = sqlparse.parse('%s foo in bar LOOP foobar END LOOP' % start)[0]
    assert len(stmt.tokens) == 1
    assert isinstance(stmt.tokens[0], sql.For)
def test_nested_for():
    """Nested FOR loops group recursively into sql.For tokens."""
    stmt = sqlparse.parse('FOR foo LOOP FOR bar LOOP END LOOP END LOOP')[0]
    assert len(stmt.tokens) == 1
    outer = stmt.tokens[0]
    inner = outer.tokens[6]
    assert isinstance(inner, sql.For)
    # both loops are delimited by their own FOR / END LOOP markers
    for loop in (outer, inner):
        assert loop.tokens[0].value == 'FOR'
        assert loop.tokens[-1].value == 'END LOOP'
def test_begin():
    """BEGIN ... END groups into a single sql.Begin token."""
    stmt = sqlparse.parse('BEGIN foo END')[0]
    assert len(stmt.tokens) == 1
    assert isinstance(stmt.tokens[0], sql.Begin)
def test_nested_begin():
    """Nested BEGIN ... END blocks group recursively."""
    stmt = sqlparse.parse('BEGIN foo BEGIN bar END END')[0]
    assert len(stmt.tokens) == 1
    outer = stmt.tokens[0]
    inner = outer.tokens[4]
    assert isinstance(inner, sql.Begin)
    # both blocks are delimited by their own BEGIN / END markers
    for block in (outer, inner):
        assert block.tokens[0].value == 'BEGIN'
        assert block.tokens[-1].value == 'END'
| agpl-3.0 |
undoware/neutron-drive | google_appengine/lib/django_1_2/tests/modeltests/validation/test_unique.py | 38 | 6508 | import unittest
import datetime
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import connection
from models import CustomPKModel, UniqueTogetherModel, UniqueFieldsModel, UniqueForDateModel, ModelToValidate, Post, FlexibleDatePost
class GetUniqueCheckTests(unittest.TestCase):
    """Verify which constraints Model._get_unique_checks() collects."""

    def test_unique_fields_get_collected(self):
        model = UniqueFieldsModel()
        expected_unique = [
            (UniqueFieldsModel, ('id',)),
            (UniqueFieldsModel, ('unique_charfield',)),
            (UniqueFieldsModel, ('unique_integerfield',)),
        ]
        self.assertEqual((expected_unique, []), model._get_unique_checks())

    def test_unique_together_gets_picked_up_and_converted_to_tuple(self):
        model = UniqueTogetherModel()
        expected_unique = [
            (UniqueTogetherModel, ('ifield', 'cfield',)),
            (UniqueTogetherModel, ('ifield', 'efield')),
            (UniqueTogetherModel, ('id',)),
        ]
        self.assertEqual((expected_unique, []), model._get_unique_checks())

    def test_primary_key_is_considered_unique(self):
        model = CustomPKModel()
        self.assertEqual(([(CustomPKModel, ('my_pk_field',))], []),
                         model._get_unique_checks())

    def test_unique_for_date_gets_picked_up(self):
        model = UniqueForDateModel()
        expected_date_checks = [
            (UniqueForDateModel, 'date', 'count', 'start_date'),
            (UniqueForDateModel, 'year', 'count', 'end_date'),
            (UniqueForDateModel, 'month', 'order', 'end_date'),
        ]
        self.assertEqual(([(UniqueForDateModel, ('id',))], expected_date_checks),
                         model._get_unique_checks())

    def test_unique_for_date_exclusion(self):
        # excluding 'start_date' drops the check that depends on it
        model = UniqueForDateModel()
        expected_date_checks = [
            (UniqueForDateModel, 'year', 'count', 'end_date'),
            (UniqueForDateModel, 'month', 'order', 'end_date'),
        ]
        self.assertEqual(([(UniqueForDateModel, ('id',))], expected_date_checks),
                         model._get_unique_checks(exclude='start_date'))
class PerformUniqueChecksTest(unittest.TestCase):
    """Integration tests for the unique checks run by Model.full_clean().

    Covers when the primary-key uniqueness query is (not) issued, and the
    unique_for_date/year/month validation messages.
    """

    def setUp(self):
        # Set debug to True to gain access to connection.queries.
        self._old_debug, settings.DEBUG = settings.DEBUG, True
        super(PerformUniqueChecksTest, self).setUp()

    def tearDown(self):
        # Restore old debug value.
        settings.DEBUG = self._old_debug
        super(PerformUniqueChecksTest, self).tearDown()

    def test_primary_key_unique_check_not_performed_when_adding_and_pk_not_specified(self):
        # Regression test for #12560
        # No pk supplied: the db will assign one, so no extra SELECT should
        # be issued to verify pk uniqueness. connection.queries counts them.
        query_count = len(connection.queries)
        mtv = ModelToValidate(number=10, name='Some Name')
        setattr(mtv, '_adding', True)
        mtv.full_clean()
        self.assertEqual(query_count, len(connection.queries))

    def test_primary_key_unique_check_performed_when_adding_and_pk_specified(self):
        # Regression test for #12560
        # Explicit pk on a new object: exactly one extra query is expected
        # (the uniqueness check for the given pk).
        query_count = len(connection.queries)
        mtv = ModelToValidate(number=10, name='Some Name', id=123)
        setattr(mtv, '_adding', True)
        mtv.full_clean()
        self.assertEqual(query_count + 1, len(connection.queries))

    def test_primary_key_unique_check_not_performed_when_not_adding(self):
        # Regression test for #12132
        # Updating an existing object must not re-check pk uniqueness.
        query_count= len(connection.queries)
        mtv = ModelToValidate(number=10, name='Some Name')
        mtv.full_clean()
        self.assertEqual(query_count, len(connection.queries))

    def test_unique_for_date(self):
        # Seed one post; subsequent posts collide on different
        # unique_for_date/year/month constraints.
        p1 = Post.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))

        # Same title on the same posted date -> unique_for_date violation.
        p = Post(title="Django 1.0 is released", posted=datetime.date(2008, 9, 3))
        try:
            p.full_clean()
        except ValidationError, e:
            self.assertEqual(e.message_dict, {'title': [u'Title must be unique for Posted date.']})
        else:
            self.fail('unique_for_date checks should catch this.')

        # Should work without errors
        p = Post(title="Work on Django 1.1 begins", posted=datetime.date(2008, 9, 3))
        p.full_clean()

        # Should work without errors
        p = Post(title="Django 1.0 is released", posted=datetime.datetime(2008, 9,4))
        p.full_clean()

        # Same slug within the same year -> unique_for_year violation.
        p = Post(slug="Django 1.0", posted=datetime.datetime(2008, 1, 1))
        try:
            p.full_clean()
        except ValidationError, e:
            self.assertEqual(e.message_dict, {'slug': [u'Slug must be unique for Posted year.']})
        else:
            self.fail('unique_for_year checks should catch this.')

        # Same subtitle within the same month -> unique_for_month violation.
        p = Post(subtitle="Finally", posted=datetime.datetime(2008, 9, 30))
        try:
            p.full_clean()
        except ValidationError, e:
            self.assertEqual(e.message_dict, {'subtitle': [u'Subtitle must be unique for Posted month.']})
        else:
            self.fail('unique_for_month checks should catch this.')

        # Missing posted date: the DateField itself must fail validation.
        p = Post(title="Django 1.0 is released")
        try:
            p.full_clean()
        except ValidationError, e:
            self.assertEqual(e.message_dict, {'posted': [u'This field cannot be null.']})
        else:
            self.fail("Model validation shouldn't allow an absent value for a DateField without null=True.")

    def test_unique_for_date_with_nullable_date(self):
        # FlexibleDatePost allows posted to be NULL; unique_for_* checks
        # must be skipped entirely (not crash) when the date is missing.
        p1 = FlexibleDatePost.objects.create(title="Django 1.0 is released",
            slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3))

        p = FlexibleDatePost(title="Django 1.0 is released")
        try:
            p.full_clean()
        except ValidationError, e:
            self.fail("unique_for_date checks shouldn't trigger when the associated DateField is None.")
        except:
            self.fail("unique_for_date checks shouldn't explode when the associated DateField is None.")

        p = FlexibleDatePost(slug="Django 1.0")
        try:
            p.full_clean()
        except ValidationError, e:
            self.fail("unique_for_year checks shouldn't trigger when the associated DateField is None.")
        except:
            self.fail("unique_for_year checks shouldn't explode when the associated DateField is None.")

        p = FlexibleDatePost(subtitle="Finally")
        try:
            p.full_clean()
        except ValidationError, e:
            self.fail("unique_for_month checks shouldn't trigger when the associated DateField is None.")
        except:
            self.fail("unique_for_month checks shouldn't explode when the associated DateField is None.")
| bsd-3-clause |
olapaola/olapaola-android-scripting | python/src/Lib/plat-mac/macerrors.py | 33 | 116661 | # -coding=latin1-
from warnings import warnpy3k
warnpy3k("In 3.x, the macerrors module is removed.", stacklevel=2)
svTempDisable = -32768 #svTempDisable
svDisabled = -32640 #Reserve range -32640 to -32768 for Apple temp disables.
fontNotOutlineErr = -32615 #bitmap font passed to routine that does outlines only
kURL68kNotSupportedError = -30788 #kURL68kNotSupportedError
kURLAccessNotAvailableError = -30787 #kURLAccessNotAvailableError
kURLInvalidConfigurationError = -30786 #kURLInvalidConfigurationError
kURLExtensionFailureError = -30785 #kURLExtensionFailureError
kURLFileEmptyError = -30783 #kURLFileEmptyError
kURLInvalidCallError = -30781 #kURLInvalidCallError
kURLUnsettablePropertyError = -30780 #kURLUnsettablePropertyError
kURLPropertyBufferTooSmallError = -30779 #kURLPropertyBufferTooSmallError
kURLUnknownPropertyError = -30778 #kURLUnknownPropertyError
kURLPropertyNotYetKnownError = -30777 #kURLPropertyNotYetKnownError
kURLAuthenticationError = -30776 #kURLAuthenticationError
kURLServerBusyError = -30775 #kURLServerBusyError
kURLUnsupportedSchemeError = -30774 #kURLUnsupportedSchemeError
kURLInvalidURLError = -30773 #kURLInvalidURLError
kURLDestinationExistsError = -30772 #kURLDestinationExistsError
kURLProgressAlreadyDisplayedError = -30771 #kURLProgressAlreadyDisplayedError
kURLInvalidURLReferenceError = -30770 #kURLInvalidURLReferenceError
controlHandleInvalidErr = -30599 #controlHandleInvalidErr
controlInvalidDataVersionErr = -30597 #controlInvalidDataVersionErr
errItemNotControl = -30596 #errItemNotControl
errCantEmbedRoot = -30595 #errCantEmbedRoot
errCantEmbedIntoSelf = -30594 #errCantEmbedIntoSelf
errWindowRegionCodeInvalid = -30593 #errWindowRegionCodeInvalid
errControlHiddenOrDisabled = -30592 #errControlHiddenOrDisabled
errDataSizeMismatch = -30591 #errDataSizeMismatch
errControlIsNotEmbedder = -30590 #errControlIsNotEmbedder
errControlsAlreadyExist = -30589 #errControlsAlreadyExist
errInvalidPartCode = -30588 #errInvalidPartCode
errRootAlreadyExists = -30587 #errRootAlreadyExists
errNoRootControl = -30586 #errNoRootControl
errCouldntSetFocus = -30585 #errCouldntSetFocus
errUnknownControl = -30584 #errUnknownControl
errWindowDoesntSupportFocus = -30583 #errWindowDoesntSupportFocus
errControlDoesntSupportFocus = -30582 #errControlDoesntSupportFocus
errDataNotSupported = -30581 #errDataNotSupported
errMessageNotSupported = -30580 #errMessageNotSupported
themeMonitorDepthNotSupportedErr = -30567 #theme not supported at monitor depth
themeScriptFontNotFoundErr = -30566 #theme font requested for uninstalled script system
themeBadCursorIndexErr = -30565 #themeBadCursorIndexErr
themeHasNoAccentsErr = -30564 #themeHasNoAccentsErr
themeBadTextColorErr = -30563 #themeBadTextColorErr
themeProcessNotRegisteredErr = -30562 #themeProcessNotRegisteredErr
themeProcessRegisteredErr = -30561 #themeProcessRegisteredErr
themeInvalidBrushErr = -30560 #pattern index invalid
qtvrUninitialized = -30555 #qtvrUninitialized
qtvrLibraryLoadErr = -30554 #qtvrLibraryLoadErr
streamingNodeNotReadyErr = -30553 #streamingNodeNotReadyErr
noMemoryNodeFailedInitialize = -30552 #noMemoryNodeFailedInitialize
invalidHotSpotIDErr = -30551 #invalidHotSpotIDErr
invalidNodeFormatErr = -30550 #invalidNodeFormatErr
limitReachedErr = -30549 #limitReachedErr
settingNotSupportedByNodeErr = -30548 #settingNotSupportedByNodeErr
propertyNotSupportedByNodeErr = -30547 #propertyNotSupportedByNodeErr
timeNotInViewErr = -30546 #timeNotInViewErr
invalidViewStateErr = -30545 #invalidViewStateErr
invalidNodeIDErr = -30544 #invalidNodeIDErr
selectorNotSupportedByNodeErr = -30543 #selectorNotSupportedByNodeErr
callNotSupportedByNodeErr = -30542 #callNotSupportedByNodeErr
constraintReachedErr = -30541 #constraintReachedErr
notAQTVRMovieErr = -30540 #notAQTVRMovieErr
kFBCnoSuchHit = -30532 #kFBCnoSuchHit
kFBCbadSearchSession = -30531 #kFBCbadSearchSession
kFBCindexDiskIOFailed = -30530 #kFBCindexDiskIOFailed
kFBCsummarizationCanceled = -30529 #kFBCsummarizationCanceled
kFBCbadIndexFileVersion = -30528 #kFBCbadIndexFileVersion
kFBCanalysisNotAvailable = -30527 #kFBCanalysisNotAvailable
kFBCillegalSessionChange = -30526 #tried to add/remove vols to a session
kFBCsomeFilesNotIndexed = -30525 #kFBCsomeFilesNotIndexed
kFBCsearchFailed = -30524 #kFBCsearchFailed
kFBCindexNotAvailable = -30523 #kFBCindexNotAvailable
kFBCindexFileDestroyed = -30522 #kFBCindexFileDestroyed
kFBCaccessCanceled = -30521 #kFBCaccessCanceled
kFBCindexingCanceled = -30520 #kFBCindexingCanceled
kFBCnoSearchSession = -30519 #kFBCnoSearchSession
kFBCindexNotFound = -30518 #kFBCindexNotFound
kFBCflushFailed = -30517 #kFBCflushFailed
kFBCaddDocFailed = -30516 #kFBCaddDocFailed
kFBCaccessorStoreFailed = -30515 #kFBCaccessorStoreFailed
kFBCindexCreationFailed = -30514 #couldn't create index
kFBCmergingFailed = -30513 #couldn't merge index files
kFBCtokenizationFailed = -30512 #couldn't read from document or query
kFBCmoveFailed = -30511 #V-Twin exception caught
kFBCdeletionFailed = -30510 #V-Twin exception caught
kFBCcommitFailed = -30509 #V-Twin exception caught
kFBCindexingFailed = -30508 #V-Twin exception caught
kFBCvalidationFailed = -30507 #V-Twin exception caught
kFBCcompactionFailed = -30506 #V-Twin exception caught
kFBCbadIndexFile = -30505 #bad FSSpec, or bad data in file
kFBCfileNotIndexed = -30504 #kFBCfileNotIndexed
kFBCbadParam = -30503 #kFBCbadParam
kFBCallocFailed = -30502 #probably low memory
kFBCnoIndexesFound = -30501 #kFBCnoIndexesFound
kFBCvTwinExceptionErr = -30500 #no telling what it was
kDSpStereoContextErr = -30450 #kDSpStereoContextErr
kDSpInternalErr = -30449 #kDSpInternalErr
kDSpConfirmSwitchWarning = -30448 #kDSpConfirmSwitchWarning
kDSpFrameRateNotReadyErr = -30447 #kDSpFrameRateNotReadyErr
kDSpContextNotFoundErr = -30446 #kDSpContextNotFoundErr
kDSpContextNotReservedErr = -30445 #kDSpContextNotReservedErr
kDSpContextAlreadyReservedErr = -30444 #kDSpContextAlreadyReservedErr
kDSpInvalidAttributesErr = -30443 #kDSpInvalidAttributesErr
kDSpInvalidContextErr = -30442 #kDSpInvalidContextErr
kDSpSystemSWTooOldErr = -30441 #kDSpSystemSWTooOldErr
kDSpNotInitializedErr = -30440 #kDSpNotInitializedErr
kISpListBusyErr = -30429 #kISpListBusyErr
kISpDeviceActiveErr = -30428 #kISpDeviceActiveErr
kISpSystemActiveErr = -30427 #kISpSystemActiveErr
kISpDeviceInactiveErr = -30426 #kISpDeviceInactiveErr
kISpSystemInactiveErr = -30425 #kISpSystemInactiveErr
kISpElementNotInListErr = -30424 #kISpElementNotInListErr
kISpElementInListErr = -30423 #kISpElementInListErr
kISpBufferToSmallErr = -30422 #kISpBufferToSmallErr
kISpSystemListErr = -30421 #kISpSystemListErr
kISpInternalErr = -30420 #kISpInternalErr
kNSpJoinFailedErr = -30399 #kNSpJoinFailedErr
kNSpCantBlockErr = -30398 #kNSpCantBlockErr
kNSpMessageTooBigErr = -30397 #kNSpMessageTooBigErr
kNSpSendFailedErr = -30396 #kNSpSendFailedErr
kNSpConnectFailedErr = -30395 #kNSpConnectFailedErr
kNSpGameTerminatedErr = -30394 #kNSpGameTerminatedErr
kNSpTimeoutErr = -30393 #kNSpTimeoutErr
kNSpInvalidProtocolListErr = -30392 #kNSpInvalidProtocolListErr
kNSpInvalidProtocolRefErr = -30391 #kNSpInvalidProtocolRefErr
kNSpInvalidDefinitionErr = -30390 #kNSpInvalidDefinitionErr
kNSpAddPlayerFailedErr = -30389 #kNSpAddPlayerFailedErr
kNSpCreateGroupFailedErr = -30388 #kNSpCreateGroupFailedErr
kNSpNoHostVolunteersErr = -30387 #kNSpNoHostVolunteersErr
kNSpNoGroupsErr = -30386 #kNSpNoGroupsErr
kNSpNoPlayersErr = -30385 #kNSpNoPlayersErr
kNSpInvalidGroupIDErr = -30384 #kNSpInvalidGroupIDErr
kNSpInvalidPlayerIDErr = -30383 #kNSpInvalidPlayerIDErr
kNSpNameRequiredErr = -30382 #kNSpNameRequiredErr
kNSpFeatureNotImplementedErr = -30381 #kNSpFeatureNotImplementedErr
kNSpAddressInUseErr = -30380 #kNSpAddressInUseErr
kNSpRemovePlayerFailedErr = -30379 #kNSpRemovePlayerFailedErr
kNSpFreeQExhaustedErr = -30378 #kNSpFreeQExhaustedErr
kNSpInvalidAddressErr = -30377 #kNSpInvalidAddressErr
kNSpNotAdvertisingErr = -30376 #kNSpNotAdvertisingErr
kNSpAlreadyAdvertisingErr = -30374 #kNSpAlreadyAdvertisingErr
kNSpMemAllocationErr = -30373 #kNSpMemAllocationErr
kNSpOTVersionTooOldErr = -30371 #kNSpOTVersionTooOldErr
kNSpOTNotPresentErr = -30370 #kNSpOTNotPresentErr
kNSpInvalidParameterErr = -30369 #kNSpInvalidParameterErr
kNSpInvalidGameRefErr = -30367 #kNSpInvalidGameRefErr
kNSpProtocolNotAvailableErr = -30366 #kNSpProtocolNotAvailableErr
kNSpHostFailedErr = -30365 #kNSpHostFailedErr
kNSpPipeFullErr = -30364 #kNSpPipeFullErr
kNSpTopologyNotSupportedErr = -30362 #kNSpTopologyNotSupportedErr
kNSpAlreadyInitializedErr = -30361 #kNSpAlreadyInitializedErr
kNSpInitializationFailedErr = -30360 #kNSpInitializationFailedErr
kSSpScaleToZeroErr = -30344 #kSSpScaleToZeroErr
kSSpParallelUpVectorErr = -30343 #kSSpParallelUpVectorErr
kSSpCantInstallErr = -30342 #kSSpCantInstallErr
kSSpVersionErr = -30341 #kSSpVersionErr
kSSpInternalErr = -30340 #kSSpInternalErr
kALMInternalErr = -30049 #kALMInternalErr
kALMGroupNotFoundErr = -30048 #kALMGroupNotFoundErr
kALMNoSuchModuleErr = -30047 #kALMNoSuchModuleErr
kALMModuleCommunicationErr = -30046 #kALMModuleCommunicationErr
kALMDuplicateModuleErr = -30045 #kALMDuplicateModuleErr
kALMInstallationErr = -30044 #kALMInstallationErr
kALMDeferSwitchErr = -30043 #kALMDeferSwitchErr
kALMRebootFlagsLevelErr = -30042 #kALMRebootFlagsLevelErr
kLocalesDefaultDisplayStatus = -30029 #Requested display locale unavailable, used default
kLocalesTableFormatErr = -30002 #kLocalesTableFormatErr
kLocalesBufferTooSmallErr = -30001 #kLocalesBufferTooSmallErr
kFNSNameNotFoundErr = -29589 #The name with the requested paramters was not found
kFNSBadFlattenedSizeErr = -29587 #flattened size didn't match input or was too small
kFNSInsufficientDataErr = -29586 #insufficient data for the operation
kFNSMismatchErr = -29585 #reference didn't match or wasn't found in profile
kFNSDuplicateReferenceErr = -29584 #the ref. being added is already in the profile
kFNSBadProfileVersionErr = -29583 #profile version is out of known range
kFNSInvalidProfileErr = -29582 #profile is NULL or otherwise bad
kFNSBadReferenceVersionErr = -29581 #ref. version is out of known range
kFNSInvalidReferenceErr = -29580 #ref. was NULL or otherwise bad
kCollateInvalidCollationRef = -29507 #kCollateInvalidCollationRef
kCollateBufferTooSmall = -29506 #kCollateBufferTooSmall
kCollateInvalidChar = -29505 #kCollateInvalidChar
kCollatePatternNotFoundErr = -29504 #kCollatePatternNotFoundErr
kCollateUnicodeConvertFailedErr = -29503 #kCollateUnicodeConvertFailedErr
kCollateMissingUnicodeTableErr = -29502 #kCollateMissingUnicodeTableErr
kCollateInvalidOptions = -29501 #kCollateInvalidOptions
kCollateAttributesNotFoundErr = -29500 #kCollateAttributesNotFoundErr
kMPInvalidIDErr = -29299 #kMPInvalidIDErr
kMPInsufficientResourcesErr = -29298 #kMPInsufficientResourcesErr
kMPTaskAbortedErr = -29297 #kMPTaskAbortedErr
kMPTimeoutErr = -29296 #kMPTimeoutErr
kMPDeletedErr = -29295 #kMPDeletedErr
kMPBlueBlockingErr = -29293 #kMPBlueBlockingErr
kMPTaskStoppedErr = -29292 #A convention used with MPThrowException.
kMPTaskBlockedErr = -29291 #kMPTaskBlockedErr
kMPTaskCreatedErr = -29290 #kMPTaskCreatedErr
kMPProcessTerminatedErr = -29289 #kMPProcessTerminatedErr
kMPProcessCreatedErr = -29288 #kMPProcessCreatedErr
kMPPrivilegedErr = -29276 #kMPPrivilegedErr
kMPIterationEndErr = -29275 #kMPIterationEndErr
kUCTextBreakLocatorMissingType = -25341 #Unicode text break error
kUCOutputBufferTooSmall = -25340 #Output buffer too small for Unicode string result
errKCCreateChainFailed = -25318 #errKCCreateChainFailed
errKCDataNotModifiable = -25317 #errKCDataNotModifiable
errKCDataNotAvailable = -25316 #errKCDataNotAvailable
errKCInteractionRequired = -25315 #errKCInteractionRequired
errKCNoPolicyModule = -25314 #errKCNoPolicyModule
errKCNoCertificateModule = -25313 #errKCNoCertificateModule
errKCNoStorageModule = -25312 #errKCNoStorageModule
errKCKeySizeNotAllowed = -25311 #errKCKeySizeNotAllowed
errKCWrongKCVersion = -25310 #errKCWrongKCVersion
errKCReadOnlyAttr = -25309 #errKCReadOnlyAttr
errKCInteractionNotAllowed = -25308 #errKCInteractionNotAllowed
errKCNoDefaultKeychain = -25307 #errKCNoDefaultKeychain
errKCNoSuchClass = -25306 #errKCNoSuchClass
errKCInvalidSearchRef = -25305 #errKCInvalidSearchRef
errKCInvalidItemRef = -25304 #errKCInvalidItemRef
errKCNoSuchAttr = -25303 #errKCNoSuchAttr
errKCDataTooLarge = -25302 #errKCDataTooLarge
errKCBufferTooSmall = -25301 #errKCBufferTooSmall
errKCItemNotFound = -25300 #errKCItemNotFound
errKCDuplicateItem = -25299 #errKCDuplicateItem
errKCInvalidCallback = -25298 #errKCInvalidCallback
errKCDuplicateCallback = -25297 #errKCDuplicateCallback
errKCDuplicateKeychain = -25296 #errKCDuplicateKeychain
errKCInvalidKeychain = -25295 #errKCInvalidKeychain
errKCNoSuchKeychain = -25294 #errKCNoSuchKeychain
errKCAuthFailed = -25293 #errKCAuthFailed
errKCReadOnly = -25292 #errKCReadOnly
errKCNotAvailable = -25291 #errKCNotAvailable
printerStatusOpCodeNotSupportedErr = -25280 #printerStatusOpCodeNotSupportedErr
kTXNOutsideOfFrameErr = -22018 #kTXNOutsideOfFrameErr
kTXNOutsideOfLineErr = -22017 #kTXNOutsideOfLineErr
kTXNATSUIIsNotInstalledErr = -22016 #kTXNATSUIIsNotInstalledErr
kTXNDataTypeNotAllowedErr = -22015 #kTXNDataTypeNotAllowedErr
kTXNCopyNotAllowedInEchoModeErr = -22014 #kTXNCopyNotAllowedInEchoModeErr
kTXNCannotTurnTSMOffWhenUsingUnicodeErr = -22013 #kTXNCannotTurnTSMOffWhenUsingUnicodeErr
kTXNAlreadyInitializedErr = -22012 #kTXNAlreadyInitializedErr
kTXNInvalidRunIndex = -22011 #kTXNInvalidRunIndex
kTXNSomeOrAllTagsInvalidForRunErr = -22010 #kTXNSomeOrAllTagsInvalidForRunErr
kTXNAttributeTagInvalidForRunErr = -22009 #dataValue is set to this per invalid tag
kTXNNoMatchErr = -22008 #kTXNNoMatchErr
kTXNRunIndexOutofBoundsErr = -22007 #kTXNRunIndexOutofBoundsErr
kTXNCannotSetAutoIndentErr = -22006 #kTXNCannotSetAutoIndentErr
kTXNBadDefaultFileTypeWarning = -22005 #kTXNBadDefaultFileTypeWarning
kTXNUserCanceledOperationErr = -22004 #kTXNUserCanceledOperationErr
kTXNIllegalToCrossDataBoundariesErr = -22003 #kTXNIllegalToCrossDataBoundariesErr
kTXNInvalidFrameIDErr = -22002 #kTXNInvalidFrameIDErr
kTXNCannotAddFrameErr = -22001 #kTXNCannotAddFrameErr
kTXNEndIterationErr = -22000 #kTXNEndIterationErr
invalidIndexErr = -20002 #The recordIndex parameter is not valid.
recordDataTooBigErr = -20001 #The record data is bigger than buffer size (1024 bytes).
unknownInsertModeErr = -20000 #There is no such an insert mode.
kModemScriptMissing = -14002 #kModemScriptMissing
kModemPreferencesMissing = -14001 #kModemPreferencesMissing
kModemOutOfMemory = -14000 #kModemOutOfMemory
kHIDBaseError = -13950 #kHIDBaseError
kHIDNullStateErr = -13949 #kHIDNullStateErr
kHIDBufferTooSmallErr = -13948 #kHIDBufferTooSmallErr
kHIDValueOutOfRangeErr = -13947 #kHIDValueOutOfRangeErr
kHIDUsageNotFoundErr = -13946 #kHIDUsageNotFoundErr
kHIDNotValueArrayErr = -13945 #kHIDNotValueArrayErr
kHIDInvalidPreparsedDataErr = -13944 #kHIDInvalidPreparsedDataErr
kHIDIncompatibleReportErr = -13943 #kHIDIncompatibleReportErr
kHIDBadLogPhysValuesErr = -13942 #kHIDBadLogPhysValuesErr
kHIDInvalidReportTypeErr = -13941 #kHIDInvalidReportTypeErr
kHIDInvalidReportLengthErr = -13940 #kHIDInvalidReportLengthErr
kHIDNullPointerErr = -13939 #kHIDNullPointerErr
kHIDBadParameterErr = -13938 #kHIDBadParameterErr
kHIDNotEnoughMemoryErr = -13937 #kHIDNotEnoughMemoryErr
kHIDEndOfDescriptorErr = -13936 #kHIDEndOfDescriptorErr
kHIDUsagePageZeroErr = -13935 #kHIDUsagePageZeroErr
kHIDBadLogicalMinimumErr = -13934 #kHIDBadLogicalMinimumErr
kHIDBadLogicalMaximumErr = -13933 #kHIDBadLogicalMaximumErr
kHIDInvertedLogicalRangeErr = -13932 #kHIDInvertedLogicalRangeErr
kHIDInvertedPhysicalRangeErr = -13931 #kHIDInvertedPhysicalRangeErr
kHIDUnmatchedUsageRangeErr = -13930 #kHIDUnmatchedUsageRangeErr
kHIDInvertedUsageRangeErr = -13929 #kHIDInvertedUsageRangeErr
kHIDUnmatchedStringRangeErr = -13928 #kHIDUnmatchedStringRangeErr
# Generated table of classic Mac OS toolbox error codes: each constant maps a
# symbolic name to its negative OSErr/OSStatus value, with the descriptive
# comment carried over from Apple's C headers (misspelled identifiers are
# preserved as-is because they are part of the public interface).
kHIDUnmatchedDesignatorRangeErr = -13927 #kHIDUnmatchedDesignatorRangeErr
kHIDReportSizeZeroErr = -13926 #kHIDReportSizeZeroErr
kHIDReportCountZeroErr = -13925 #kHIDReportCountZeroErr
kHIDReportIDZeroErr = -13924 #kHIDReportIDZeroErr
kHIDInvalidRangePageErr = -13923 #kHIDInvalidRangePageErr
kHIDDeviceNotReady = -13910 #The device is still initializing, try again later
kHIDVersionIncompatibleErr = -13909 #kHIDVersionIncompatibleErr
debuggingNoMatchErr = -13887 #debugging component or option not found at this index
debuggingNoCallbackErr = -13886 #debugging component has no callback
debuggingInvalidNameErr = -13885 #componentName or optionName is invalid (NULL)
debuggingInvalidOptionErr = -13884 #optionSelectorNum is not registered
debuggingInvalidSignatureErr = -13883 #componentSignature not registered
debuggingDuplicateOptionErr = -13882 #optionSelectorNum already registered
debuggingDuplicateSignatureErr = -13881 #componentSignature already registered
debuggingExecutionContextErr = -13880 #routine cannot be called at this time
kBridgeSoftwareRunningCantSleep = -13038 #kBridgeSoftwareRunningCantSleep
kNoSuchPowerSource = -13020 #kNoSuchPowerSource
kProcessorTempRoutineRequiresMPLib2 = -13014 #kProcessorTempRoutineRequiresMPLib2
kCantReportProcessorTemperatureErr = -13013 #kCantReportProcessorTemperatureErr
kPowerMgtRequestDenied = -13010 #kPowerMgtRequestDenied
kPowerMgtMessageNotHandled = -13009 #kPowerMgtMessageNotHandled
kPowerHandlerNotFoundForProcErr = -13008 #kPowerHandlerNotFoundForProcErr
kPowerHandlerNotFoundForDeviceErr = -13007 #kPowerHandlerNotFoundForDeviceErr
kPowerHandlerExistsForDeviceErr = -13006 #kPowerHandlerExistsForDeviceErr
pmRecvEndErr = -13005 #during receive, pmgr did not finish hs configured for this connection
pmRecvStartErr = -13004 #during receive, pmgr did not start hs
pmSendEndErr = -13003 #during send, pmgr did not finish hs
pmSendStartErr = -13002 #during send, pmgr did not start hs
pmReplyTOErr = -13001 #Timed out waiting for reply
pmBusyErr = -13000 #Power Mgr never ready to start handshake
pictureDataErr = -11005 #the picture data was invalid
colorsRequestedErr = -11004 #the number of colors requested was illegal
cantLoadPickMethodErr = -11003 #unable to load the custom pick proc
pictInfoVerbErr = -11002 #the passed verb was invalid
pictInfoIDErr = -11001 #the internal consistency check for the PictInfoID is wrong
pictInfoVersionErr = -11000 #wrong version of the PictInfo structure
errTaskNotFound = -10780 #no task with that task id exists
telNotEnoughdspBW = -10116 #not enough real-time for allocation
telBadSampleRate = -10115 #incompatible sample rate
telBadSWErr = -10114 #Software not installed properly
telDetAlreadyOn = -10113 #detection is already turned on
telAutoAnsNotOn = -10112 #autoAnswer is not turned on
telValidateFailed = -10111 #telValidate failed
telBadProcID = -10110 #invalid procID
telDeviceNotFound = -10109 #device not found
telBadCodeResource = -10108 #code resource not found
telInitFailed = -10107 #initialization failed
telNoCommFolder = -10106 #Communications/Extensions not found
telUnknownErr = -10103 #unable to set config
telNoSuchTool = -10102 #unable to find tool with name specified
telBadFunction = -10091 #bad msgCode specified
telPBErr = -10090 #parameter block error, bad format
telCANotDeflectable = -10082 #CA not "deflectable"
telCANotRejectable = -10081 #CA not "rejectable"
telCANotAcceptable = -10080 #CA not "acceptable"
telTermNotOpen = -10072 #terminal not opened via TELOpenTerm
telStillNeeded = -10071 #terminal driver still needed by someone else
telAlreadyOpen = -10070 #terminal already open
telNoCallbackRef = -10064 #no call back reference was specified, but is required
telDisplayModeNotSupp = -10063 #display mode not supported by tool
telBadDisplayMode = -10062 #bad display mode specified
telFwdTypeNotSupp = -10061 #forward type not supported by tool
telDNTypeNotSupp = -10060 #DN type not supported by tool
telBadRate = -10059 #bad rate specified
telBadBearerType = -10058 #bad bearerType specified
telBadSelect = -10057 #unable to select or deselect DN
telBadParkID = -10056 #bad park id specified
telBadPickupGroupID = -10055 #bad pickup group ID specified
telBadFwdType = -10054 #bad fwdType specified
telBadFeatureID = -10053 #bad feature ID specified
telBadIntercomID = -10052 #bad intercom ID specified
telBadPageID = -10051 #bad page ID specified
telBadDNType = -10050 #DN type invalid
telConfLimitExceeded = -10047 #attempt to exceed switch conference limits
telCBErr = -10046 #call back feature not set previously
telTransferRej = -10045 #transfer request rejected
telTransferErr = -10044 #transfer not prepared
telConfRej = -10043 #conference request was rejected
telConfErr = -10042 #conference was not prepared
telConfNoLimit = -10041 #no limit was specified but required
telConfLimitErr = -10040 #limit specified is too high for this configuration
telFeatNotSupp = -10033 #feature program call not supported by this tool
telFeatActive = -10032 #feature already active
telFeatNotAvail = -10031 #feature subscribed but not available
telFeatNotSub = -10030 #feature not subscribed
errAEPropertiesClash = -10025 #illegal combination of properties settings for Set Data, make new, or duplicate
errAECantPutThatThere = -10024 #in make new, duplicate, etc. class can't be an element of container
errAENotAnEnumMember = -10023 #enumerated value in SetData is not allowed for this property
telIntExtNotSupp = -10022 #internal external type not supported by this tool
telBadIntExt = -10021 #bad internal external error
telStateNotSupp = -10020 #device state not supported by tool
telBadStateErr = -10019 #bad device state specified
telIndexNotSupp = -10018 #index not supported by this tool
telBadIndex = -10017 #bad index specified
telAPattNotSupp = -10016 #alerting pattern not supported by tool
telBadAPattErr = -10015 #bad alerting pattern specified
telVTypeNotSupp = -10014 #volume type not supported by this tool
telBadVTypeErr = -10013 #bad volume type error
telBadLevelErr = -10012 #bad volume level setting
telHTypeNotSupp = -10011 #hook type not supported by this tool
telBadHTypeErr = -10010 #bad hook type specified
errAECantSupplyType = -10009 #errAECantSupplyType
telNoOpenErr = -10008 #unable to open terminal
telNoMemErr = -10007 #no memory to allocate handle
errOSACantAssign = -10006 #Signaled when an object cannot be set in a container.
telBadProcErr = -10005 #bad msgProc specified
telBadHandErr = -10004 #bad handle specified
OSAIllegalAssign = -10003 #Signaled when an object can never be set in a container
telBadDNErr = -10002 #TELDNHandle not found or invalid
telBadTermErr = -10001 #invalid TELHandle or handle not found
errAEEventFailed = -10000 #errAEEventFailed
cannotMoveAttachedController = -9999 #cannotMoveAttachedController
controllerHasFixedHeight = -9998 #controllerHasFixedHeight
cannotSetWidthOfAttachedController = -9997 #cannotSetWidthOfAttachedController
controllerBoundsNotExact = -9996 #controllerBoundsNotExact
editingNotAllowed = -9995 #editingNotAllowed
badControllerHeight = -9994 #badControllerHeight
deviceCantMeetRequest = -9408 #deviceCantMeetRequest
seqGrabInfoNotAvailable = -9407 #seqGrabInfoNotAvailable
badSGChannel = -9406 #badSGChannel
couldntGetRequiredComponent = -9405 #couldntGetRequiredComponent
notEnoughDiskSpaceToGrab = -9404 #notEnoughDiskSpaceToGrab
notEnoughMemoryToGrab = -9403 #notEnoughMemoryToGrab
cantDoThatInCurrentMode = -9402 #cantDoThatInCurrentMode
grabTimeComplete = -9401 #grabTimeComplete
noDeviceForChannel = -9400 #noDeviceForChannel
kNoCardBusCISErr = -9109 #No valid CIS exists for this CardBus card
kNotZVCapableErr = -9108 #This socket does not support Zoomed Video
kCardPowerOffErr = -9107 #Power to the card has been turned off
kAttemptDupCardEntryErr = -9106 #The Enabler was asked to create a duplicate card entry
kAlreadySavedStateErr = -9105 #The state has been saved on previous call
kTooManyIOWindowsErr = -9104 #device requested more than one I/O window
kNotReadyErr = -9103 #PC Card failed to go ready
kClientRequestDenied = -9102 #CS Clients should return this code in order to
kNoCompatibleNameErr = -9101 #There is no compatible driver name for this device
kNoEnablerForCardErr = -9100 #No Enablers were found that can support the card
kNoCardEnablersFoundErr = -9099 #No Enablers were found
kUnsupportedCardErr = -9098 #Card not supported by generic enabler
kNoClientTableErr = -9097 #The client table has not been initialized yet
kNoMoreInterruptSlotsErr = -9096 #All internal Interrupt slots are in use
kNoMoreTimerClientsErr = -9095 #All timer callbacks are in use
kNoIOWindowRequestedErr = -9094 #Request I/O window before calling configuration
kBadCustomIFIDErr = -9093 #Custom interface ID is invalid
kBadTupleDataErr = -9092 #Data in tuple is invalid
kInvalidCSClientErr = -9091 #Card Services ClientID is not registered
kUnsupportedVsErr = -9090 #Unsupported Voltage Sense
kInvalidDeviceNumber = -9089 #kInvalidDeviceNumber
kPostCardEventErr = -9088 #_PCCSLPostCardEvent failed and dropped an event
kCantConfigureCardErr = -9087 #kCantConfigureCardErr
kPassCallToChainErr = -9086 #kPassCallToChainErr
kCardBusCardErr = -9085 #kCardBusCardErr
k16BitCardErr = -9084 #k16BitCardErr
kBadDeviceErr = -9083 #kBadDeviceErr
kBadLinkErr = -9082 #kBadLinkErr
kInvalidRegEntryErr = -9081 #kInvalidRegEntryErr
kNoCardSevicesSocketsErr = -9080 #kNoCardSevicesSocketsErr
kOutOfResourceErr = -9079 #Card Services has exhausted the resource
kNoMoreItemsErr = -9078 #there are no more of the requested item
kInUseErr = -9077 #requested resource is being used by a client
kConfigurationLockedErr = -9076 #a configuration has already been locked
kWriteProtectedErr = -9075 #media is write-protected
kBusyErr = -9074 #unable to process request at this time - try later
kUnsupportedModeErr = -9073 #mode is not supported
kUnsupportedFunctionErr = -9072 #function is not supported by this implementation
kNoCardErr = -9071 #no PC card in the socket
kGeneralFailureErr = -9070 #an undefined error has occurred
kWriteFailureErr = -9069 #unable to complete write request
kReadFailureErr = -9068 #unable to complete read request
kBadSpeedErr = -9067 #specified speed is unavailable
kBadCISErr = -9066 #CIS on card is invalid
kBadHandleErr = -9065 #clientHandle is invalid
kBadArgsErr = -9064 #values in argument packet are invalid
kBadArgLengthErr = -9063 #ArgLength argument is invalid
kBadWindowErr = -9062 #specified window is invalid
kBadVppErr = -9061 #specified Vpp1 or Vpp2 power level index is invalid
kBadVccErr = -9060 #specified Vcc power level index is invalid
kBadTypeErr = -9059 #specified window or interface type is invalid
kBadSocketErr = -9058 #specified logical or physical socket number is invalid
kBadSizeErr = -9057 #specified size is invalid
kBadPageErr = -9056 #specified page is invalid
kBadOffsetErr = -9055 #specified PC card memory array offset is invalid
kBadIRQErr = -9054 #specified IRQ level is invalid
kBadEDCErr = -9053 #specified EDC generator specified is invalid
kBadBaseErr = -9052 #specified base system memory address is invalid
kBadAttributeErr = -9051 #specified attributes field value is invalid
kBadAdapterErr = -9050 #invalid adapter number
codecOffscreenFailedPleaseRetryErr = -8992 #codecOffscreenFailedPleaseRetryErr
lockPortBitsWrongGDeviceErr = -8991 #lockPortBitsWrongGDeviceErr
directXObjectAlreadyExists = -8990 #directXObjectAlreadyExists
codecDroppedFrameErr = -8989 #returned from ImageCodecDrawBand
codecOffscreenFailedErr = -8988 #codecOffscreenFailedErr
codecNeedAccessKeyErr = -8987 #codec needs password in order to decompress
codecParameterDialogConfirm = -8986 #codecParameterDialogConfirm
lockPortBitsSurfaceLostErr = -8985 #lockPortBitsSurfaceLostErr
lockPortBitsBadPortErr = -8984 #lockPortBitsBadPortErr
lockPortBitsWindowClippedErr = -8983 #lockPortBitsWindowClippedErr
lockPortBitsWindowResizedErr = -8982 #lockPortBitsWindowResizedErr
lockPortBitsWindowMovedErr = -8981 #lockPortBitsWindowMovedErr
lockPortBitsBadSurfaceErr = -8980 #lockPortBitsBadSurfaceErr
codecNeedToFlushChainErr = -8979 #codecNeedToFlushChainErr
codecDisabledErr = -8978 #codec disabled itself -- pass codecFlagReenable to reset
codecNoMemoryPleaseWaitErr = -8977 #codecNoMemoryPleaseWaitErr
codecNothingToBlitErr = -8976 #codecNothingToBlitErr
codecCantQueueErr = -8975 #codecCantQueueErr
codecCantWhenErr = -8974 #codecCantWhenErr
codecOpenErr = -8973 #codecOpenErr
codecConditionErr = -8972 #codecConditionErr
codecExtensionNotFoundErr = -8971 #codecExtensionNotFoundErr
codecDataVersErr = -8970 #codecDataVersErr
codecBadDataErr = -8969 #codecBadDataErr
codecWouldOffscreenErr = -8968 #codecWouldOffscreenErr
codecAbortErr = -8967 #codecAbortErr
codecSpoolErr = -8966 #codecSpoolErr
codecImageBufErr = -8965 #codecImageBufErr
codecScreenBufErr = -8964 #codecScreenBufErr
codecSizeErr = -8963 #codecSizeErr
codecUnimpErr = -8962 #codecUnimpErr
noCodecErr = -8961 #noCodecErr
codecErr = -8960 #codecErr
kIllegalClockValueErr = -8852 #kIllegalClockValueErr
kUTCOverflowErr = -8851 #kUTCOverflowErr
kUTCUnderflowErr = -8850 #kUTCUnderflowErr
kATSULastErr = -8809 #The last ATSUI error code.
kATSULineBreakInWord = -8808 #This is not an error code but is returned by ATSUBreakLine to
kATSUCoordinateOverflowErr = -8807 #Used to indicate the coordinates provided to an ATSUI routine caused
kATSUNoFontScalerAvailableErr = -8806 #Used when no font scaler is available for the font passed
kATSUNoFontCmapAvailableErr = -8805 #Used when no CMAP table can be accessed or synthesized for the
kATSULowLevelErr = -8804 #Used when an error was encountered within the low level ATS
kATSUQuickDrawTextErr = -8803 #Used when QuickDraw Text encounters an error rendering or measuring
kATSUNoStyleRunsAssignedErr = -8802 #Used when an attempt was made to measure, highlight or draw
kATSUNotSetErr = -8801 #Used when the client attempts to retrieve an attribute,
kATSUInvalidCacheErr = -8800 #Used when an attempt was made to read in style data
kATSUInvalidAttributeTagErr = -8799 #Used when an attempt was made to use a tag value that
kATSUInvalidAttributeSizeErr = -8798 #Used when an attempt was made to use an attribute with a
kATSUInvalidAttributeValueErr = -8797 #Used when an attempt was made to use an attribute with
kATSUInvalidFontErr = -8796 #Used when an attempt was made to use an invalid font ID.
kATSUNoCorrespondingFontErr = -8795 #This value is returned by font ID conversion
kATSUFontsNotMatched = -8794 #This value is returned by ATSUMatchFontsToText()
kATSUFontsMatched = -8793 #This is not an error code but is returned by
kATSUInvalidTextRangeErr = -8792 #An attempt was made to extract information
kATSUInvalidStyleErr = -8791 #An attempt was made to use a ATSUStyle which
kATSUInvalidTextLayoutErr = -8790 #An attempt was made to use a ATSUTextLayout
kTECOutputBufferFullStatus = -8785 #output buffer has no room for conversion of next input text element (partial conversion)
kTECNeedFlushStatus = -8784 #kTECNeedFlushStatus
kTECUsedFallbacksStatus = -8783 #kTECUsedFallbacksStatus
kTECItemUnavailableErr = -8771 #item (e.g. name) not available for specified region (& encoding if relevant)
kTECGlobalsUnavailableErr = -8770 #globals have already been deallocated (premature TERM)
unicodeChecksumErr = -8769 #unicodeChecksumErr
unicodeNoTableErr = -8768 #unicodeNoTableErr
unicodeVariantErr = -8767 #unicodeVariantErr
unicodeFallbacksErr = -8766 #unicodeFallbacksErr
unicodePartConvertErr = -8765 #unicodePartConvertErr
unicodeBufErr = -8764 #unicodeBufErr
unicodeCharErr = -8763 #unicodeCharErr
unicodeElementErr = -8762 #unicodeElementErr
unicodeNotFoundErr = -8761 #unicodeNotFoundErr
unicodeTableFormatErr = -8760 #unicodeTableFormatErr
unicodeDirectionErr = -8759 #unicodeDirectionErr
unicodeContextualErr = -8758 #unicodeContextualErr
unicodeTextEncodingDataErr = -8757 #unicodeTextEncodingDataErr
kTECDirectionErr = -8756 #direction stack overflow, etc.
kTECIncompleteElementErr = -8755 #text element may be incomplete or is too long for internal buffers
kTECUnmappableElementErr = -8754 #kTECUnmappableElementErr
kTECPartialCharErr = -8753 #input buffer ends in the middle of a multibyte character, conversion stopped
kTECBadTextRunErr = -8752 #kTECBadTextRunErr
kTECArrayFullErr = -8751 #supplied name buffer or TextRun, TextEncoding, or UnicodeMapping array is too small
kTECBufferBelowMinimumSizeErr = -8750 #output buffer too small to allow processing of first input text element
kTECNoConversionPathErr = -8749 #kTECNoConversionPathErr
kTECCorruptConverterErr = -8748 #invalid converter object reference
kTECTableFormatErr = -8747 #kTECTableFormatErr
kTECTableChecksumErr = -8746 #kTECTableChecksumErr
kTECMissingTableErr = -8745 #kTECMissingTableErr
kTextUndefinedElementErr = -8740 #text conversion errors
kTextMalformedInputErr = -8739 #in DBCS, for example, high byte followed by invalid low byte
kTextUnsupportedEncodingErr = -8738 #specified encoding not supported for this operation
kRANotEnabled = -7139 #kRANotEnabled
kRACallBackFailed = -7138 #kRACallBackFailed
kRADuplicateIPAddr = -7137 #kRADuplicateIPAddr
kRANCPRejectedbyPeer = -7136 #kRANCPRejectedbyPeer
kRAExtAuthenticationFailed = -7135 #kRAExtAuthenticationFailed
kRAATalkInactive = -7134 #kRAATalkInactive
kRAPeerNotResponding = -7133 #kRAPeerNotResponding
kRAPPPPeerDisconnected = -7132 #kRAPPPPeerDisconnected
kRAPPPUserDisconnected = -7131 #kRAPPPUserDisconnected
kRAPPPNegotiationFailed = -7130 #kRAPPPNegotiationFailed
kRAPPPAuthenticationFailed = -7129 #kRAPPPAuthenticationFailed
kRAPPPProtocolRejected = -7128 #kRAPPPProtocolRejected
dcmBufferOverflowErr = -7127 #data is larger than buffer size
kRANotPrimaryInterface = -7126 #when IPCP is not primary TCP/IP intf.
kRATCPIPNotConfigured = -7125 #TCP/IP not configured, could be loaded
kRATCPIPInactive = -7124 #TCP/IP inactive, cannot be loaded
kRARemoteAccessNotReady = -7123 #kRARemoteAccessNotReady
kRAInitOpenTransportFailed = -7122 #kRAInitOpenTransportFailed
dcmProtectedErr = -7121 #need keyword to use dictionary
kRAUserPwdEntryRequired = -7120 #kRAUserPwdEntryRequired
kRAUserPwdChangeRequired = -7119 #kRAUserPwdChangeRequired
dcmBadFindMethodErr = -7118 #no such find method supported
kRAInvalidSerialProtocol = -7117 #kRAInvalidSerialProtocol
kRAInvalidPortState = -7116 #kRAInvalidPortState
dcmBadKeyErr = -7115 #bad key information
kRAPortBusy = -7114 #kRAPortBusy
kRAInstallationDamaged = -7113 #kRAInstallationDamaged
dcmBadFieldTypeErr = -7112 #no such field type supported
dcmBadFieldInfoErr = -7111 #incomplete information
dcmNecessaryFieldErr = -7110 #lack required/identify field
dcmDupRecordErr = -7109 #same record already exist
kRANotConnected = -7108 #kRANotConnected
dcmBlockFullErr = -7107 #dictionary block full
kRAMissingResources = -7106 #kRAMissingResources
dcmDictionaryBusyErr = -7105 #dictionary is busy
dcmDictionaryNotOpenErr = -7104 #dictionary not opened
dcmPermissionErr = -7103 #invalid permission
dcmBadDictionaryErr = -7102 #invalid dictionary
dcmNotDictionaryErr = -7101 #not dictionary
kRAInvalidParameter = -7100 #kRAInvalidParameter
laEngineNotFoundErr = -7000 #can't find the engine
laPropertyErr = -6999 #Error in properties
kUSBUnknownDeviceErr = -6998 #device ref not recognised
laPropertyIsReadOnlyErr = -6997 #the property is read only
laPropertyUnknownErr = -6996 #the property is unknown to this environment
laPropertyValueErr = -6995 #Invalid property value
laDictionaryTooManyErr = -6994 #too many dictionaries
laDictionaryUnknownErr = -6993 #can't use this dictionary with this environment
laDictionaryNotOpenedErr = -6992 #the dictionary is not opened
laTextOverFlowErr = -6991 #text is too long
laFailAnalysisErr = -6990 #analysis failed
laNoMoreMorphemeErr = -6989 #nothing to read
laInvalidPathErr = -6988 #path is not correct
kUSBNotHandled = -6987 #Notification was not handled (same as NotFound)
laEnvironmentNotFoundErr = -6986 #can't find the specified environment
laEnvironmentBusyErr = -6985 #specified environment is used
laTooSmallBufferErr = -6984 #output buffer is too small to store any result
kUSBFlagsError = -6983 #Unused flags not zeroed
kUSBAbortedError = -6982 #Pipe aborted
kUSBNoBandwidthError = -6981 #Not enough bandwidth available
kUSBPipeIdleError = -6980 #Pipe is Idle, it will not accept transactions
kUSBPipeStalledError = -6979 #Pipe has stalled, error needs to be cleared
kUSBUnknownInterfaceErr = -6978 #Interface ref not recognised
kUSBDeviceBusy = -6977 #Device is already being configured
kUSBDevicePowerProblem = -6976 #Device has a power problem
kUSBInvalidBuffer = -6975 #bad buffer, usually nil
kUSBDeviceSuspended = -6974 #Device is suspended
kUSBDeviceNotSuspended = -6973 #device is not suspended for resume
kUSBDeviceDisconnected = -6972 #Disconnected during suspend or reset
kUSBTimedOut = -6971 #Transaction timed out.
kUSBQueueAborted = -6970 #Pipe zero stall cleared.
kUSBPortDisabled = -6969 #The port you are attached to is disabled, use USBDeviceReset.
kUSBBadDispatchTable = -6950 #Improper driver dispatch table
kUSBUnknownNotification = -6949 #Notification type not defined
kUSBQueueFull = -6948 #Internal queue maxed out
kUSBLinkErr = -6916 #kUSBLinkErr
kUSBCRCErr = -6915 #Pipe stall, bad CRC
kUSBBitstufErr = -6914 #Pipe stall, bitstuffing
kUSBDataToggleErr = -6913 #Pipe stall, Bad data toggle
kUSBEndpointStallErr = -6912 #Device didn't understand
kUSBNotRespondingErr = -6911 #Pipe stall, No device, device hung
kUSBPIDCheckErr = -6910 #Pipe stall, PID CRC error
kUSBWrongPIDErr = -6909 #Pipe stall, Bad or wrong PID
kUSBOverRunErr = -6908 #Packet too large or more data than buffer
kUSBUnderRunErr = -6907 #Less data than buffer
kUSBRes1Err = -6906 #kUSBRes1Err
kUSBRes2Err = -6905 #kUSBRes2Err
kUSBBufOvrRunErr = -6904 #Host hardware failure on data in, PCI busy?
kUSBBufUnderRunErr = -6903 #Host hardware failure on data out, PCI busy?
kUSBNotSent1Err = -6902 #Transaction not sent
kUSBNotSent2Err = -6901 #Transaction not sent
kDMFoundErr = -6232 #Did not proceed because we found an item
kDMMainDisplayCannotMoveErr = -6231 #Trying to move main display (or a display mirrored to it)
kDMDisplayAlreadyInstalledErr = -6230 #Attempt to add an already installed display.
kDMDisplayNotFoundErr = -6229 #Could not find item (will someday remove).
kDMDriverNotDisplayMgrAwareErr = -6228 #Video Driver does not support display manager.
kDMSWNotInitializedErr = -6227 #Required software not initialized (eg windowmanager or display mgr).
kSysSWTooOld = -6226 #Missing critical pieces of System Software.
kDMMirroringNotOn = -6225 #Returned by all calls that need mirroring to be on to do their thing.
kDMCantBlock = -6224 #Mirroring is already on, can't Block now (call DMUnMirror() first).
kDMMirroringBlocked = -6223 #DMBlockMirroring() has been called.
kDMWrongNumberOfDisplays = -6222 #Can only handle 2 displays for now.
kDMMirroringOnAlready = -6221 #Returned by all calls that need mirroring to be off to do their thing.
kDMGenErr = -6220 #Unexpected Error
kQTSSUnknownErr = -6150 #kQTSSUnknownErr
collectionVersionErr = -5753 #collectionVersionErr
collectionIndexRangeErr = -5752 #collectionIndexRangeErr
collectionItemNotFoundErr = -5751 #collectionItemNotFoundErr
collectionItemLockedErr = -5750 #collectionItemLockedErr
kNavMissingKindStringErr = -5699 #kNavMissingKindStringErr
kNavInvalidCustomControlMessageErr = -5698 #kNavInvalidCustomControlMessageErr
kNavCustomControlMessageFailedErr = -5697 #kNavCustomControlMessageFailedErr
kNavInvalidSystemConfigErr = -5696 #kNavInvalidSystemConfigErr
kNavWrongDialogClassErr = -5695 #kNavWrongDialogClassErr
kNavWrongDialogStateErr = -5694 #kNavWrongDialogStateErr
dialogNoTimeoutErr = -5640 #dialogNoTimeoutErr
menuInvalidErr = -5623 #menu is invalid
menuItemNotFoundErr = -5622 #specified menu item wasn't found
menuUsesSystemDefErr = -5621 #GetMenuDefinition failed because the menu uses the system MDEF
menuNotFoundErr = -5620 #specified menu or menu ID wasn't found
windowWrongStateErr = -5615 #window is not in a state that is valid for the current action
windowManagerInternalErr = -5614 #something really weird happened inside the window manager
windowAttributesConflictErr = -5613 #passed some attributes that are mutually exclusive
windowAttributeImmutableErr = -5612 #tried to change attributes which can't be changed
errWindowDoesNotFitOnscreen = -5611 #ConstrainWindowToScreen could not make the window fit onscreen
errWindowNotFound = -5610 #returned from FindWindowOfClass
errFloatingWindowsNotInitialized = -5609 #called HideFloatingWindows or ShowFloatingWindows without calling InitFloatingWindows
errWindowsAlreadyInitialized = -5608 #tried to call InitFloatingWindows twice, or called InitWindows and then floating windows
errUserWantsToDragWindow = -5607 #if returned from TrackWindowProxyDrag, you should call DragWindow on the window
errCorruptWindowDescription = -5606 #tried to load a corrupt window description (size or version fields incorrect)
errUnrecognizedWindowClass = -5605 #tried to create a window with a bad WindowClass
errWindowPropertyNotFound = -5604 #tried to get a nonexistent property
errInvalidWindowProperty = -5603 #tried to access a property tag with private creator
errWindowDoesNotHaveProxy = -5602 #tried to do something requiring a proxy to a window which doesn't have a proxy
errUnsupportedWindowAttributesForClass = -5601 #tried to create a window with WindowAttributes not supported by the WindowClass
errInvalidWindowPtr = -5600 #tried to pass a bad WindowRef argument
gestaltLocationErr = -5553 #gestalt function ptr wasn't in sysheap
gestaltDupSelectorErr = -5552 #tried to add an entry that already existed
gestaltUndefSelectorErr = -5551 #undefined selector was passed to Gestalt
gestaltUnknownErr = -5550 #value returned if Gestalt doesn't know the answer
envVersTooBig = -5502 #Version bigger than call can handle
envBadVers = -5501 #Version non-positive
envNotPresent = -5500 #returned by glue.
qtsAddressBusyErr = -5421 #qtsAddressBusyErr
qtsConnectionFailedErr = -5420 #qtsConnectionFailedErr
qtsTimeoutErr = -5408 #qtsTimeoutErr
qtsUnknownValueErr = -5407 #qtsUnknownValueErr
qtsTooMuchDataErr = -5406 #qtsTooMuchDataErr
qtsUnsupportedFeatureErr = -5405 #qtsUnsupportedFeatureErr
qtsUnsupportedRateErr = -5404 #qtsUnsupportedRateErr
qtsUnsupportedDataTypeErr = -5403 #qtsUnsupportedDataTypeErr
qtsBadDataErr = -5402 #something is wrong with the data
qtsBadStateErr = -5401 #qtsBadStateErr
qtsBadSelectorErr = -5400 #qtsBadSelectorErr
errIAEndOfTextRun = -5388 #errIAEndOfTextRun
errIATextExtractionErr = -5387 #errIATextExtractionErr
errIAInvalidDocument = -5386 #errIAInvalidDocument
errIACanceled = -5385 #errIACanceled
errIABufferTooSmall = -5384 #errIABufferTooSmall
errIANoMoreItems = -5383 #errIANoMoreItems
errIAParamErr = -5382 #errIAParamErr
errIAAllocationErr = -5381 #errIAAllocationErr
errIAUnknownErr = -5380 #errIAUnknownErr
hrURLNotHandledErr = -5363 #hrURLNotHandledErr
hrUnableToResizeHandleErr = -5362 #hrUnableToResizeHandleErr
hrMiscellaneousExceptionErr = -5361 #hrMiscellaneousExceptionErr
hrHTMLRenderingLibNotInstalledErr = -5360 #hrHTMLRenderingLibNotInstalledErr
errCannotUndo = -5253 #errCannotUndo
errNonContiuousAttribute = -5252 #errNonContiuousAttribute
errUnknownElement = -5251 #errUnknownElement
errReadOnlyText = -5250 #errReadOnlyText
errEmptyScrap = -5249 #errEmptyScrap
errNoHiliteText = -5248 #errNoHiliteText
errOffsetNotOnElementBounday = -5247 #errOffsetNotOnElementBounday
errInvalidRange = -5246 #errInvalidRange
errIteratorReachedEnd = -5245 #errIteratorReachedEnd
errEngineNotFound = -5244 #errEngineNotFound
errAlreadyInImagingMode = -5243 #errAlreadyInImagingMode
errNotInImagingMode = -5242 #errNotInImagingMode
errMarginWilllNotFit = -5241 #errMarginWilllNotFit
errUnknownAttributeTag = -5240 #errUnknownAttributeTag
afpSameNodeErr = -5063 #An Attempt was made to connect to a file server running on the same machine
afpAlreadyMounted = -5062 #The volume is already mounted
afpCantMountMoreSrvre = -5061 #The Maximum number of server connections has been reached
afpBadDirIDType = -5060 #afpBadDirIDType
afpCallNotAllowed = -5048 #The server knows what you wanted to do, but won't let you do it just now
afpAlreadyLoggedInErr = -5047 #User has been authenticated but is already logged in from another machine (and that's not allowed on this server)
afpPwdPolicyErr = -5046 #Password does not conform to servers password policy
afpPwdNeedsChangeErr = -5045 #The password needs to be changed
afpInsideTrashErr = -5044 #The folder being shared is inside the trash folder OR the shared folder is being moved into the trash folder
afpInsideSharedErr = -5043 #The folder being shared is inside a shared folder OR the folder contains a shared folder and is being moved into a shared folder
afpPwdExpiredErr = -5042 #The password being used is too old: this requires the user to change the password before log-in can continue
afpPwdTooShortErr = -5041 #The password being set is too short: there is a minimum length that must be met or exceeded
afpPwdSameErr = -5040 #Someone tried to change their password to the same password on a mandatory password change
afpBadIDErr = -5039 #afpBadIDErr
afpSameObjectErr = -5038 #afpSameObjectErr
afpCatalogChanged = -5037 #afpCatalogChanged
afpDiffVolErr = -5036 #afpDiffVolErr
afpIDExists = -5035 #afpIDExists
afpIDNotFound = -5034 #afpIDNotFound
afpContainsSharedErr = -5033 #the folder being shared contains a shared folder
afpObjectLocked = -5032 #Object is M/R/D/W inhibited
afpVolLocked = -5031 #Volume is Read-Only
afpIconTypeError = -5030 #Icon size specified different from existing icon size
afpDirNotFound = -5029 #Unknown directory specified
afpCantRename = -5028 #AFPRename cannot rename volume
afpServerGoingDown = -5027 #Server is shutting down
afpTooManyFilesOpen = -5026 #Maximum open file count reached
afpObjectTypeErr = -5025 #File/Directory specified where Directory/File expected
afpCallNotSupported = -5024 #Unsupported AFP call was made
afpUserNotAuth = -5023 #No AFPLogin call has successfully been made for this session
afpSessClosed = -5022 #Session closed
afpRangeOverlap = -5021 #Some or all of range already locked by same user
afpRangeNotLocked = -5020 #Tried to unlock range that was not locked by user
afpParmErr = -5019 #A specified parameter was out of allowable range
afpObjectNotFound = -5018 #Specified file or directory does not exist
afpObjectExists = -5017 #Specified destination file or directory already exists
afpNoServer = -5016 #Server not responding
afpNoMoreLocks = -5015 #Maximum lock limit reached
afpMiscErr = -5014 #Unexpected error encountered during execution
afpLockErr = -5013 #Some or all of requested range is locked by another user
afpItemNotFound = -5012 #Unknown UserName/UserID or missing comment/APPL entry
afpFlatVol = -5011 #Cannot create directory on specified volume
afpFileBusy = -5010 #Cannot delete an open file
afpEofError = -5009 #Read beyond logical end-of-file
afpDiskFull = -5008 #Insufficient free space on volume for operation
afpDirNotEmpty = -5007 #Cannot delete non-empty directory
afpDenyConflict = -5006 #Specified open/deny modes conflict with current open modes
afpCantMove = -5005 #Move destination is offspring of source, or root was specified
afpBitmapErr = -5004 #Bitmap contained bits undefined for call
afpBadVersNum = -5003 #Unknown AFP protocol version number specified
afpBadUAM = -5002 #Unknown user authentication method specified
afpAuthContinue = -5001 #Further information required to complete AFPLogin call
afpAccessDenied = -5000 #Insufficient access privileges for operation
illegalScrapFlavorSizeErr = -4999 #illegalScrapFlavorSizeErr
illegalScrapFlavorTypeErr = -4998 #illegalScrapFlavorTypeErr
illegalScrapFlavorFlagsErr = -4997 #illegalScrapFlavorFlagsErr
scrapFlavorSizeMismatchErr = -4996 #scrapFlavorSizeMismatchErr
scrapFlavorFlagsMismatchErr = -4995 #scrapFlavorFlagsMismatchErr
nilScrapFlavorDataErr = -4994 #nilScrapFlavorDataErr
noScrapPromiseKeeperErr = -4993 #noScrapPromiseKeeperErr
scrapPromiseNotKeptErr = -4992 #scrapPromiseNotKeptErr
processStateIncorrectErr = -4991 #processStateIncorrectErr
badScrapRefErr = -4990 #badScrapRefErr
duplicateScrapFlavorErr = -4989 #duplicateScrapFlavorErr
internalScrapErr = -4988 #internalScrapErr
coreFoundationUnknownErr = -4960 #coreFoundationUnknownErr
badRoutingSizeErr = -4276 #badRoutingSizeErr
routingNotFoundErr = -4275 #routingNotFoundErr
duplicateRoutingErr = -4274 #duplicateRoutingErr
invalidFolderTypeErr = -4273 #invalidFolderTypeErr
noMoreFolderDescErr = -4272 #noMoreFolderDescErr
duplicateFolderDescErr = -4271 #duplicateFolderDescErr
badFolderDescErr = -4270 #badFolderDescErr
cmCantGamutCheckError = -4217 #Gamut checking not supported by this ColorWorld
cmNamedColorNotFound = -4216 #NamedColor not found
cmCantCopyModifiedV1Profile = -4215 #Illegal to copy version 1 profiles that have been modified
cmRangeOverFlow = -4214 #Color conversion warning that some output color values over/underflowed and were clipped
cmInvalidProfileComment = -4213 #Bad Profile comment during drawpicture
cmNoGDevicesError = -4212 #Begin/End Matching -- no gdevices available
cmInvalidDstMap = -4211 #Destination pix/bit map was invalid
cmInvalidSrcMap = -4210 #Source pix/bit map was invalid
cmInvalidColorSpace = -4209 #Profile colorspace does not match bitmap type
cmErrIncompatibleProfile = -4208 #Other ColorSync Errors
cmSearchError = -4207 #cmSearchError
cmInvalidSearch = -4206 #Bad Search Handle
cmInvalidProfileLocation = -4205 #Operation not supported for this profile location
cmInvalidProfile = -4204 #A Profile must contain a 'cs1 ' tag to be valid
cmFatalProfileErr = -4203 #cmFatalProfileErr
cmCantDeleteElement = -4202 #cmCantDeleteElement
cmIndexRangeErr = -4201 #Tag index out of range
kNSLInitializationFailed = -4200 #UNABLE TO INITIALIZE THE MANAGER!!!!! DO NOT CONTINUE!!!!
kNSLNotInitialized = -4199 #kNSLNotInitialized
kNSLInsufficientSysVer = -4198 #kNSLInsufficientSysVer
kNSLInsufficientOTVer = -4197 #kNSLInsufficientOTVer
kNSLNoElementsInList = -4196 #kNSLNoElementsInList
kNSLBadReferenceErr = -4195 #kNSLBadReferenceErr
kNSLBadServiceTypeErr = -4194 #kNSLBadServiceTypeErr
kNSLBadDataTypeErr = -4193 #kNSLBadDataTypeErr
kNSLBadNetConnection = -4192 #kNSLBadNetConnection
kNSLNoSupportForService = -4191 #kNSLNoSupportForService
kNSLInvalidPluginSpec = -4190 #kNSLInvalidPluginSpec
kNSLRequestBufferAlreadyInList = -4189 #kNSLRequestBufferAlreadyInList
kNSLNoContextAvailable = -4188 #(ContinueLookup function ptr invalid)
kNSLBufferTooSmallForData = -4187 #(Client buffer too small for data from plugin)
kNSLCannotContinueLookup = -4186 #(Can't continue lookup; error or bad state)
kNSLBadClientInfoPtr = -4185 #(nil ClientAsyncInfoPtr; no reference available)
kNSLNullListPtr = -4184 #(client is trying to add items to a nil list)
kNSLBadProtocolTypeErr = -4183 #(client is trying to add a null protocol type)
kNSLPluginLoadFailed = -4182 #(manager unable to load one of the plugins)
kNSLNoPluginsFound = -4181 #(manager didn't find any valid plugins to load)
kNSLSearchAlreadyInProgress = -4180 #(you can only have one ongoing search per clientRef)
kNSLNoPluginsForSearch = -4179 #(no plugins will respond to search request; bad protocol(s)?)
kNSLNullNeighborhoodPtr = -4178 #(client passed a null neighborhood ptr)
kNSLSomePluginsFailedToLoad = -4177 #(one or more plugins failed to load, but at least one did load; this error isn't fatal)
kNSLErrNullPtrError = -4176 #kNSLErrNullPtrError
kNSLNotImplementedYet = -4175 #kNSLNotImplementedYet
kNSLUILibraryNotAvailable = -4174 #The NSL UI Library needs to be in the Extensions Folder
kNSLNoCarbonLib = -4173 #kNSLNoCarbonLib
kNSLBadURLSyntax = -4172 #URL contains illegal characters
kNSLSchedulerError = -4171 #A custom thread routine encountered an error
kNSL68kContextNotSupported = -4170 #no 68k allowed
noHelpForItem = -4009 #noHelpForItem
badProfileError = -4008 #badProfileError
colorSyncNotInstalled = -4007 #colorSyncNotInstalled
pickerCantLive = -4006 #pickerCantLive
cantLoadPackage = -4005 #cantLoadPackage
cantCreatePickerWindow = -4004 #cantCreatePickerWindow
cantLoadPicker = -4003 #cantLoadPicker
pickerResourceError = -4002 #pickerResourceError
requiredFlagsDontMatch = -4001 #requiredFlagsDontMatch
firstPickerError = -4000 #firstPickerError
kOTPortLostConnection = -3285 #
kOTUserRequestedErr = -3284 #
kOTConfigurationChangedErr = -3283 #
kOTBadConfigurationErr = -3282 #
kOTPortWasEjectedErr = -3281 #
kOTPortHasDiedErr = -3280 #
kOTClientNotInittedErr = -3279 #
kENOMSGErr = -3278 #
kESRCHErr = -3277 #
kEINPROGRESSErr = -3276 #
kENODATAErr = -3275 #
kENOSTRErr = -3274 #
kECANCELErr = -3273 #
kEBADMSGErr = -3272 #
kENOSRErr = -3271 #
kETIMEErr = -3270 #
kEPROTOErr = -3269 # fill out missing codes
kEHOSTUNREACHErr = -3264 #No route to host
kEHOSTDOWNErr = -3263 #Host is down
kECONNREFUSEDErr = -3260 #Connection refused
kETIMEDOUTErr = -3259 #Connection timed out
kETOOMANYREFSErr = -3258 #Too many references: can't splice
kESHUTDOWNErr = -3257 #Can't send after socket shutdown
kENOTCONNErr = -3256 #Socket is not connected
kEISCONNErr = -3255 #Socket is already connected
kENOBUFSErr = -3254 #No buffer space available
kECONNRESETErr = -3253 #Connection reset by peer
kECONNABORTEDErr = -3252 #Software caused connection abort
kENETRESETErr = -3251 #Network dropped connection on reset
kENETUNREACHErr = -3250 #Network is unreachable
kENETDOWNErr = -3249 #Network is down
kEADDRNOTAVAILErr = -3248 #Can't assign requested address
kEADDRINUSEErr = -3247 #Address already in use
kEOPNOTSUPPErr = -3244 #Operation not supported on socket
kESOCKTNOSUPPORTErr = -3243 #Socket type not supported
kEPROTONOSUPPORTErr = -3242 #Protocol not supported
kENOPROTOOPTErr = -3241 #Protocol not available
kEPROTOTYPEErr = -3240 #Protocol wrong type for socket
kEMSGSIZEErr = -3239 #Message too long
kEDESTADDRREQErr = -3238 #Destination address required
kENOTSOCKErr = -3237 #Socket operation on non-socket
kEALREADYErr = -3236 #
kEWOULDBLOCKErr = -3234 #Call would block, so was aborted
kERANGEErr = -3233 #Message size too large for STREAM
kEPIPEErr = -3231 #Broken pipe
kENOTTYErr = -3224 #Not a character device
kEINVALErr = -3221 #Invalid argument
kENODEVErr = -3218 #No such device
kOTDuplicateFoundErr = -3216 #OT generic duplicate found error
kEBUSYErr = -3215 #Device or resource busy
kEFAULTErr = -3213 #Bad address
kEACCESErr = -3212 #Permission denied
kOTOutOfMemoryErr = -3211 #OT ran out of memory, may be a temporary
kEAGAINErr = -3210 #Try operation again later
kEBADFErr = -3208 #Bad file number
kENXIOErr = -3205 #No such device or address
kEIOErr = -3204 #I/O error
kEINTRErr = -3203 #Interrupted system service
kENORSRCErr = -3202 #No such resource
kOTNotFoundErr = -3201 #OT generic not found error
kEPERMErr = -3200 #Permission denied
kOTCanceledErr = -3180 #XTI2OSStatus(TCANCELED) The command was cancelled
kOTBadSyncErr = -3179 #XTI2OSStatus(TBADSYNC) A synchronous call at interrupt time
kOTProtocolErr = -3178 #XTI2OSStatus(TPROTO) An unspecified provider error occurred
kOTQFullErr = -3177 #XTI2OSStatus(TQFULL)
kOTResAddressErr = -3176 #XTI2OSStatus(TRESADDR)
kOTResQLenErr = -3175 #XTI2OSStatus(TRESQLEN)
kOTProviderMismatchErr = -3174 #XTI2OSStatus(TPROVMISMATCH) Tried to accept on incompatible endpoint
kOTIndOutErr = -3173 #XTI2OSStatus(TINDOUT) Accept failed because of pending listen
kOTAddressBusyErr = -3172 #XTI2OSStatus(TADDRBUSY) Address requested is already in use
kOTBadQLenErr = -3171 #XTI2OSStatus(TBADQLEN) A Bind to an in-use addr with qlen > 0
kOTBadNameErr = -3170 #XTI2OSStatus(TBADNAME) A bad endpoint name was supplied
kOTNoStructureTypeErr = -3169 #XTI2OSStatus(TNOSTRUCTYPE) Bad structure type requested for OTAlloc
kOTStateChangeErr = -3168 #XTI2OSStatus(TSTATECHNG) State is changing - try again later
kOTNotSupportedErr = -3167 #XTI2OSStatus(TNOTSUPPORT) Command is not supported
kOTNoReleaseErr = -3166 #XTI2OSStatus(TNOREL) No orderly release indication available
kOTBadFlagErr = -3165 #XTI2OSStatus(TBADFLAG) A Bad flag value was supplied
kOTNoUDErrErr = -3164 #XTI2OSStatus(TNOUDERR) No Unit Data Error indication available
kOTNoDisconnectErr = -3163 #XTI2OSStatus(TNODIS) No disconnect indication available
kOTNoDataErr = -3162 #XTI2OSStatus(TNODATA) No data available for reading
kOTFlowErr = -3161 #XTI2OSStatus(TFLOW) Provider is flow-controlled
kOTBufferOverflowErr = -3160 #XTI2OSStatus(TBUFOVFLW) Passed buffer not big enough
kOTBadDataErr = -3159 #XTI2OSStatus(TBADDATA) An illegal amount of data was specified
kOTLookErr = -3158 #XTI2OSStatus(TLOOK) An event occurred - call Look()
kOTSysErrorErr = -3157 #XTI2OSStatus(TSYSERR) A system error occurred
kOTBadSequenceErr = -3156 #XTI2OSStatus(TBADSEQ) Sequence specified does not exist
kOTOutStateErr = -3155 #XTI2OSStatus(TOUTSTATE) Call issued in wrong state
kOTNoAddressErr = -3154 #XTI2OSStatus(TNOADDR) No address was specified
kOTBadReferenceErr = -3153 #XTI2OSStatus(TBADF) Bad provider reference
kOTAccessErr = -3152 #XTI2OSStatus(TACCES) Missing access permission
kOTBadOptionErr = -3151 #XTI2OSStatus(TBADOPT) A Bad option was specified
kOTBadAddressErr = -3150 #XTI2OSStatus(TBADADDR) A Bad address was specified
sktClosedErr = -3109 #sktClosedErr
recNotFnd = -3108 #recNotFnd
atpBadRsp = -3107 #atpBadRsp
atpLenErr = -3106 #atpLenErr
readQErr = -3105 #readQErr
extractErr = -3104 #extractErr
ckSumErr = -3103 #ckSumErr
noMPPErr = -3102 #noMPPErr
buf2SmallErr = -3101 #buf2SmallErr
noPrefAppErr = -3032 #noPrefAppErr
badTranslationSpecErr = -3031 #badTranslationSpecErr
noTranslationPathErr = -3030 #noTranslationPathErr
couldNotParseSourceFileErr = -3026 #Source document does not contain source type
invalidTranslationPathErr = -3025 #Source type to destination type not a valid path
retryComponentRegistrationErr = -3005 #retryComponentRegistrationErr
unresolvedComponentDLLErr = -3004 #unresolvedComponentDLLErr
componentDontRegister = -3003 #componentDontRegister
componentNotCaptured = -3002 #componentNotCaptured
validInstancesExist = -3001 #validInstancesExist
invalidComponentID = -3000 #invalidComponentID
cfragLastErrCode = -2899 #The last value in the range of CFM errors.
cfragOutputLengthErr = -2831 #An output parameter is too small to hold the value.
cfragAbortClosureErr = -2830 #Used by notification handlers to abort a closure.
cfragClosureIDErr = -2829 #The closure ID was not valid.
cfragContainerIDErr = -2828 #The fragment container ID was not valid.
cfragNoRegistrationErr = -2827 #The registration name was not found.
cfragNotClosureErr = -2826 #The closure ID was actually a connection ID.
cfragFileSizeErr = -2825 #A file was too large to be mapped.
cfragFragmentUsageErr = -2824 #A semantic error in usage of the fragment.
cfragArchitectureErr = -2823 #A fragment has an unacceptable architecture.
cfragNoApplicationErr = -2822 #No application member found in the cfrg resource.
cfragInitFunctionErr = -2821 #A fragment's initialization routine returned an error.
cfragFragmentCorruptErr = -2820 #A fragment's container was corrupt (known format).
cfragCFMInternalErr = -2819 #An internal inconsistency has been detected.
cfragCFMStartupErr = -2818 #Internal error during CFM initialization.
cfragLibConnErr = -2817 #
cfragInitAtBootErr = -2816 #A boot library has an initialization function. (System 7 only)
cfragInitLoopErr = -2815 #Circularity in required initialization order.
cfragImportTooNewErr = -2814 #An import library was too new for a client.
cfragImportTooOldErr = -2813 #An import library was too old for a client.
cfragInitOrderErr = -2812 #
cfragNoIDsErr = -2811 #No more CFM IDs for contexts, connections, etc.
cfragNoClientMemErr = -2810 #Out of memory for fragment mapping or section instances.
cfragNoPrivateMemErr = -2809 #Out of memory for internal bookkeeping.
cfragNoPositionErr = -2808 #The registration insertion point was not found.
cfragUnresolvedErr = -2807 #A fragment had "hard" unresolved imports.
cfragFragmentFormatErr = -2806 #A fragment's container format is unknown.
cfragDupRegistrationErr = -2805 #The registration name was already in use.
cfragNoLibraryErr = -2804 #The named library was not found.
cfragNoSectionErr = -2803 #The specified section was not found.
cfragNoSymbolErr = -2802 #The specified symbol was not found.
cfragConnectionIDErr = -2801 #The connection ID was not valid.
cfragFirstErrCode = -2800 #The first value in the range of CFM errors.
errASInconsistentNames = -2780 #English errors:
errASNoResultReturned = -2763 #The range -2780 thru -2799 is reserved for dialect specific error codes. (Error codes from different dialects may overlap.)
errASParameterNotForEvent = -2762 #errASParameterNotForEvent
errASIllegalFormalParameter = -2761 #errASIllegalFormalParameter
errASTerminologyNestingTooDeep = -2760 #errASTerminologyNestingTooDeep
OSAControlFlowError = -2755 #Signaled when illegal control flow occurs in an application (no catcher for throw, non-lexical loop exit, etc.)
OSAInconsistentDeclarations = -2754 #Signaled when a variable is declared inconsistently in the same scope, such as both local and global
OSAUndefinedVariable = -2753 #Signaled when a variable is accessed that has no value
OSADuplicateHandler = -2752 #Signaled when more than one handler is defined with the same name in a scope where the language doesn't allow it
OSADuplicateProperty = -2751 #Signaled when a formal parameter, local variable, or instance variable is specified more than once.
OSADuplicateParameter = -2750 #Signaled when a formal parameter, local variable, or instance variable is specified more than once
OSATokenTooLong = -2742 #Signaled when a name or number is too long to be parsed
OSASyntaxTypeError = -2741 #Signaled when another form of syntax was expected. (e.g. "expected a <type> but found <this>")
OSASyntaxError = -2740 #Signaled when a syntax error occurs. (e.g. "Syntax error" or "<this> can't go after <that>")
errASCantCompareMoreThan32k = -2721 #Parser/Compiler errors:
errASCantConsiderAndIgnore = -2720 #errASCantConsiderAndIgnore
errOSACantCreate = -2710 #errOSACantCreate
errOSACantGetTerminology = -2709 #errOSACantGetTerminology
errOSADataBlockTooLarge = -2708 #Signaled when an intrinsic limitation is exceeded for the size of a value or data structure.
errOSAInternalTableOverflow = -2707 #Signaled when a runtime internal data structure overflows
errOSAStackOverflow = -2706 #Signaled when the runtime stack overflows
errOSACorruptTerminology = -2705 #Signaled when an application's terminology resource is not readable
errOSAAppNotHighLevelEventAware = -2704 #Signaled when an application can't respond to AppleEvents
errOSACantLaunch = -2703 #Signaled when application can't be launched or when it is remote and program linking is not enabled
errOSANumericOverflow = -2702 #Signaled when integer or real value is too large to be represented
errOSADivideByZero = -2701 #Signaled when there is an attempt to divide by zero
errOSAGeneralError = -2700 #Signaled by user scripts or applications when no actual error code is to be returned.
noIconDataAvailableErr = -2582 #The necessary icon data is not available
noSuchIconErr = -2581 #The requested icon could not be found
invalidIconRefErr = -2580 #The icon ref is not valid
nrCallNotSupported = -2557 #This call is not available or supported on this machine
nrTransactionAborted = -2556 #transaction was aborted
nrExitedIteratorScope = -2555 #outer scope of iterator was exited
nrIterationDone = -2554 #iteration operation is done
nrPropertyAlreadyExists = -2553 #property already exists
nrInvalidEntryIterationOp = -2552 #invalid entry iteration operation
nrPathBufferTooSmall = -2551 #buffer for path is too small
nrPathNotFound = -2550 #a path component lookup failed
nrResultCodeBase = -2549 #nrResultCodeBase
nrOverrunErr = -2548 #nrOverrunErr
nrNotModifiedErr = -2547 #nrNotModifiedErr
nrTypeMismatchErr = -2546 #nrTypeMismatchErr
nrPowerSwitchAbortErr = -2545 #nrPowerSwitchAbortErr
nrPowerErr = -2544 #nrPowerErr
nrDataTruncatedErr = -2543 #nrDataTruncatedErr
nrNotSlotDeviceErr = -2542 #nrNotSlotDeviceErr
nrNameErr = -2541 #nrNameErr
nrNotCreatedErr = -2540 #nrNotCreatedErr
nrNotFoundErr = -2539 #nrNotFoundErr
nrInvalidNodeErr = -2538 #nrInvalidNodeErr
nrNotEnoughMemoryErr = -2537 #nrNotEnoughMemoryErr
nrLockedErr = -2536 #nrLockedErr
mmInternalError = -2526 #mmInternalError
tsmDefaultIsNotInputMethodErr = -2524 #Current Input source is KCHR or uchr, not Input Method (GetDefaultInputMethod)
tsmNoStem = -2523 #No stem exists for the token
tsmNoMoreTokens = -2522 #No more tokens are available for the source text
tsmNoHandler = -2521 #No Callback Handler exists for callback
tsmInvalidContext = -2520 #Invalid TSMContext specified in call
tsmUnknownErr = -2519 #any other errors
tsmUnsupportedTypeErr = -2518 #unSupported interface type error
tsmScriptHasNoIMErr = -2517 #script has no input method or is using old IM
tsmInputMethodIsOldErr = -2516 #returned by GetDefaultInputMethod
tsmComponentAlreadyOpenErr = -2515 #text service already opened for the document
tsmTSNotOpenErr = -2514 #text service is not open
tsmTSHasNoMenuErr = -2513 #the text service has no menu
tsmUseInputWindowErr = -2512 #not TSM aware because we are using input window
tsmDocumentOpenErr = -2511 #there are open documents
tsmTextServiceNotFoundErr = -2510 #no text service found
tsmCantOpenComponentErr = -2509 #can't open the component
tsmNoOpenTSErr = -2508 #no open text service
tsmDocNotActiveErr = -2507 #document is NOT active
tsmTSMDocBusyErr = -2506 #document is still active
tsmInvalidDocIDErr = -2505 #invalid TSM documentation id
tsmNeverRegisteredErr = -2504 #app never registered error (not TSM aware)
tsmAlreadyRegisteredErr = -2503 #want to register again error
tsmNotAnAppErr = -2502 #not an application error
tsmInputMethodNotFoundErr = -2501 #tsmInputMethodNotFoundErr
tsmUnsupScriptLanguageErr = -2500 #tsmUnsupScriptLanguageErr
kernelUnrecoverableErr = -2499 #kernelUnrecoverableErr
kernelReturnValueErr = -2422 #kernelReturnValueErr
kernelAlreadyFreeErr = -2421 #kernelAlreadyFreeErr
kernelIDErr = -2419 #kernelIDErr
kernelExceptionErr = -2418 #kernelExceptionErr
kernelTerminatedErr = -2417 #kernelTerminatedErr
kernelInUseErr = -2416 #kernelInUseErr
kernelTimeoutErr = -2415 #kernelTimeoutErr
kernelAsyncReceiveLimitErr = -2414 #kernelAsyncReceiveLimitErr
kernelAsyncSendLimitErr = -2413 #kernelAsyncSendLimitErr
kernelAttributeErr = -2412 #kernelAttributeErr
kernelExecutionLevelErr = -2411 #kernelExecutionLevelErr
kernelDeletePermissionErr = -2410 #kernelDeletePermissionErr
kernelExecutePermissionErr = -2409 #kernelExecutePermissionErr
kernelReadPermissionErr = -2408 #kernelReadPermissionErr
kernelWritePermissionErr = -2407 #kernelWritePermissionErr
kernelObjectExistsErr = -2406 #kernelObjectExistsErr
kernelUnsupportedErr = -2405 #kernelUnsupportedErr
kernelPrivilegeErr = -2404 #kernelPrivilegeErr
kernelOptionsErr = -2403 #kernelOptionsErr
kernelCanceledErr = -2402 #kernelCanceledErr
kernelIncompleteErr = -2401 #kernelIncompleteErr
badCallOrderErr = -2209 #Usually due to a status call being called prior to being setup first
noDMAErr = -2208 #Can't do DMA digitizing (i.e. can't go to requested dest
badDepthErr = -2207 #Can't digitize into this depth
notExactSizeErr = -2206 #Can't do exact size requested
noMoreKeyColorsErr = -2205 #all key indexes in use
notExactMatrixErr = -2204 #warning of bad matrix, digitizer did its best
matrixErr = -2203 #bad matrix, digitizer did nothing
qtParamErr = -2202 #bad input parameter (out of range, etc)
digiUnimpErr = -2201 #feature unimplemented
qtXMLApplicationErr = -2159 #qtXMLApplicationErr
qtXMLParseErr = -2158 #qtXMLParseErr
qtActionNotHandledErr = -2157 #qtActionNotHandledErr
notEnoughDataErr = -2149 #notEnoughDataErr
urlDataHFTPURLErr = -2148 #urlDataHFTPURLErr
urlDataHFTPServerDisconnectedErr = -2147 #urlDataHFTPServerDisconnectedErr
urlDataHFTPNoPasswordErr = -2146 #urlDataHFTPNoPasswordErr
urlDataHFTPNeedPasswordErr = -2145 #urlDataHFTPNeedPasswordErr
urlDataHFTPBadNameListErr = -2144 #urlDataHFTPBadNameListErr
urlDataHFTPNoNetDriverErr = -2143 #urlDataHFTPNoNetDriverErr
urlDataHFTPFilenameErr = -2142 #urlDataHFTPFilenameErr
urlDataHFTPPermissionsErr = -2141 #urlDataHFTPPermissionsErr
urlDataHFTPQuotaErr = -2140 #urlDataHFTPQuotaErr
urlDataHFTPNoDirectoryErr = -2139 #urlDataHFTPNoDirectoryErr
urlDataHFTPDataConnectionErr = -2138 #urlDataHFTPDataConnectionErr
urlDataHFTPServerErr = -2137 #urlDataHFTPServerErr
urlDataHFTPBadPasswordErr = -2136 #urlDataHFTPBadPasswordErr
urlDataHFTPBadUserErr = -2135 #urlDataHFTPBadUserErr
urlDataHFTPShutdownErr = -2134 #urlDataHFTPShutdownErr
urlDataHFTPProtocolErr = -2133 #urlDataHFTPProtocolErr
urlDataHHTTPRedirectErr = -2132 #urlDataHHTTPRedirectErr
urlDataHHTTPURLErr = -2131 #urlDataHHTTPURLErr
urlDataHHTTPNoNetDriverErr = -2130 #urlDataHHTTPNoNetDriverErr
urlDataHHTTPProtocolErr = -2129 #urlDataHHTTPProtocolErr
qtNetworkAlreadyAllocatedErr = -2127 #qtNetworkAlreadyAllocatedErr
notAllowedToSaveMovieErr = -2126 #notAllowedToSaveMovieErr
fileOffsetTooBigErr = -2125 #fileOffsetTooBigErr
ASDEntryNotFoundErr = -2124 #ASDEntryNotFoundErr
ASDBadForkErr = -2123 #ASDBadForkErr
ASDBadHeaderErr = -2122 #ASDBadHeaderErr
AAPNotFoundErr = -2121 #AAPNotFoundErr
AAPNotCreatedErr = -2120 #AAPNotCreatedErr
qfcbNotCreatedErr = -2119 #qfcbNotCreatedErr
qfcbNotFoundErr = -2118 #qfcbNotFoundErr
wackBadMetaDataErr = -2117 #wackBadMetaDataErr
wackForkNotFoundErr = -2116 #wackForkNotFoundErr
wackBadFileErr = -2115 #wackBadFileErr
unknownFormatErr = -2114 #unknownFormatErr
pathNotVerifiedErr = -2113 #pathNotVerifiedErr
noPathMappingErr = -2112 #noPathMappingErr
emptyPathErr = -2111 #emptyPathErr
pathTooLongErr = -2110 #pathTooLongErr
cannotBeLeafAtomErr = -2109 #cannotBeLeafAtomErr
invalidAtomTypeErr = -2108 #invalidAtomTypeErr
invalidAtomContainerErr = -2107 #invalidAtomContainerErr
invalidAtomErr = -2106 #invalidAtomErr
duplicateAtomTypeAndIDErr = -2105 #duplicateAtomTypeAndIDErr
atomIndexInvalidErr = -2104 #atomIndexInvalidErr
atomsNotOfSameTypeErr = -2103 #atomsNotOfSameTypeErr
notLeafAtomErr = -2102 #notLeafAtomErr
cannotFindAtomErr = -2101 #cannotFindAtomErr
unsupportedProcessorErr = -2097 #unsupportedProcessorErr
unsupportedOSErr = -2096 #unsupportedOSErr
qtmlUninitialized = -2095 #qtmlUninitialized
qtmlDllEntryNotFoundErr = -2094 #Windows specific errors (when qtml is loading)
qtmlDllLoadErr = -2093 #Windows specific errors (when qtml is loading)
componentDllEntryNotFoundErr = -2092 #Windows specific errors (when component is loading)
componentDllLoadErr = -2091 #Windows specific errors (when component is loading)
videoOutputInUseErr = -2090 #videoOutputInUseErr
noExportProcAvailableErr = -2089 #noExportProcAvailableErr
tuneParseOSErr = -2087 #tuneParseOSErr
tunePlayerFullOSErr = -2086 #tunePlayerFullOSErr
noteChannelNotAllocatedOSErr = -2085 #noteChannelNotAllocatedOSErr
illegalNoteChannelOSErr = -2084 #illegalNoteChannelOSErr
synthesizerOSErr = -2083 #synthesizerOSErr
synthesizerNotRespondingOSErr = -2082 #synthesizerNotRespondingOSErr
midiManagerAbsentOSErr = -2081 #midiManagerAbsentOSErr
illegalControllerOSErr = -2080 #illegalControllerOSErr
illegalInstrumentOSErr = -2079 #illegalInstrumentOSErr
illegalKnobValueOSErr = -2078 #illegalKnobValueOSErr
illegalKnobOSErr = -2077 #illegalKnobOSErr
illegalChannelOSErr = -2076 #illegalChannelOSErr
illegalPartOSErr = -2075 #illegalPartOSErr
illegalVoiceAllocationOSErr = -2074 #illegalVoiceAllocationOSErr
cantReceiveFromSynthesizerOSErr = -2073 #cantReceiveFromSynthesizerOSErr
cantSendToSynthesizerOSErr = -2072 #cantSendToSynthesizerOSErr
notImplementedMusicOSErr = -2071 #notImplementedMusicOSErr
internalComponentErr = -2070 #internalComponentErr
invalidSpriteIDErr = -2069 #invalidSpriteIDErr
invalidImageIndexErr = -2068 #invalidImageIndexErr
invalidSpriteIndexErr = -2067 #invalidSpriteIndexErr
gWorldsNotSameDepthAndSizeErr = -2066 #gWorldsNotSameDepthAndSizeErr
invalidSpritePropertyErr = -2065 #invalidSpritePropertyErr
invalidSpriteWorldPropertyErr = -2064 #invalidSpriteWorldPropertyErr
missingRequiredParameterErr = -2063 #missingRequiredParameterErr
movieTextNotFoundErr = -2062 #movieTextNotFoundErr
sourceNotFoundErr = -2061 #sourceNotFoundErr
noSourceTreeFoundErr = -2060 #noSourceTreeFoundErr
samplesAlreadyInMediaErr = -2059 #samplesAlreadyInMediaErr
auxiliaryExportDataUnavailable = -2058 #auxiliaryExportDataUnavailable
unsupportedAuxiliaryImportData = -2057 #unsupportedAuxiliaryImportData
soundSupportNotAvailableErr = -2056 #QT for Windows error
noSoundTrackInMovieErr = -2055 #QT for Windows error
noVideoTrackInMovieErr = -2054 #QT for Windows error
featureUnsupported = -2053 #featureUnsupported
couldNotUseAnExistingSample = -2052 #couldNotUseAnExistingSample
noDefaultDataRef = -2051 #noDefaultDataRef
badDataRefIndex = -2050 #badDataRefIndex
invalidDataRefContainer = -2049 #invalidDataRefContainer
noMovieFound = -2048 #noMovieFound
dataNoDataRef = -2047 #dataNoDataRef
endOfDataReached = -2046 #endOfDataReached
dataAlreadyClosed = -2045 #dataAlreadyClosed
dataAlreadyOpenForWrite = -2044 #dataAlreadyOpenForWrite
dataNotOpenForWrite = -2043 #dataNotOpenForWrite
dataNotOpenForRead = -2042 #dataNotOpenForRead
invalidSampleDescription = -2041 #invalidSampleDescription
invalidChunkCache = -2040 #invalidChunkCache
invalidSampleDescIndex = -2039 #invalidSampleDescIndex
invalidChunkNum = -2038 #invalidChunkNum
invalidSampleNum = -2037 #invalidSampleNum
invalidRect = -2036 #invalidRect
cantEnableTrack = -2035 #cantEnableTrack
internalQuickTimeError = -2034 #internalQuickTimeError
badEditIndex = -2033 #badEditIndex
timeNotInMedia = -2032 #timeNotInMedia
timeNotInTrack = -2031 #timeNotInTrack
trackNotInMovie = -2030 #trackNotInMovie
trackIDNotFound = -2029 #trackIDNotFound
badTrackIndex = -2028 #badTrackIndex
maxSizeToGrowTooSmall = -2027 #maxSizeToGrowTooSmall
userDataItemNotFound = -2026 #userDataItemNotFound
staleEditState = -2025 #staleEditState
nonMatchingEditState = -2024 #nonMatchingEditState
invalidEditState = -2023 #invalidEditState
cantCreateSingleForkFile = -2022 #happens when file already exists
wfFileNotFound = -2021 #wfFileNotFound
movieToolboxUninitialized = -2020 #movieToolboxUninitialized
progressProcAborted = -2019 #progressProcAborted
mediaTypesDontMatch = -2018 #mediaTypesDontMatch
badEditList = -2017 #badEditList
cantPutPublicMovieAtom = -2016 #cantPutPublicMovieAtom
invalidTime = -2015 #invalidTime
invalidDuration = -2014 #invalidDuration
invalidHandler = -2013 #invalidHandler
invalidDataRef = -2012 #invalidDataRef
invalidSampleTable = -2011 #invalidSampleTable
invalidMovie = -2010 #invalidMovie
invalidTrack = -2009 #invalidTrack
invalidMedia = -2008 #invalidMedia
noDataHandler = -2007 #noDataHandler
noMediaHandler = -2006 #noMediaHandler
badComponentType = -2005 #badComponentType
cantOpenHandler = -2004 #cantOpenHandler
cantFindHandler = -2003 #cantFindHandler
badPublicMovieAtom = -2002 #badPublicMovieAtom
badImageDescription = -2001 #badImageDescription
couldNotResolveDataRef = -2000 #couldNotResolveDataRef
nonDragOriginatorErr = -1862 #illegal attempt at originator only data
badImageErr = -1861 #bad translucent image PixMap
badImageRgnErr = -1860 #bad translucent image region
noSuitableDisplaysErr = -1859 #no displays support translucency
unsupportedForPlatformErr = -1858 #call is for PowerPC only
dragNotAcceptedErr = -1857 #drag was not accepted by receiver
handlerNotFoundErr = -1856 #handler not found
duplicateHandlerErr = -1855 #handler already exists
cantGetFlavorErr = -1854 #error while trying to get flavor data
duplicateFlavorErr = -1853 #flavor type already exists
badDragFlavorErr = -1852 #unknown flavor type
badDragItemErr = -1851 #unknown drag item reference
badDragRefErr = -1850 #unknown drag reference
errEndOfBody = -1813 #errEndOfBody
errEndOfDocument = -1812 #errEndOfDocument
errTopOfBody = -1811 #errTopOfBody
errTopOfDocument = -1810 #errTopOfDocument
errOffsetIsOutsideOfView = -1801 #errOffsetIsOutsideOfView
errOffsetInvalid = -1800 #errOffsetInvalid
errOSACantOpenComponent = -1762 #Can't connect to scripting system with that ID
errOSAComponentMismatch = -1761 #Parameters are from 2 different components
errOSADataFormatTooNew = -1759 #errOSADataFormatTooNew
errOSADataFormatObsolete = -1758 #errOSADataFormatObsolete
errOSANoSuchDialect = -1757 #errOSANoSuchDialect
errOSASourceNotAvailable = -1756 #errOSASourceNotAvailable
errOSABadSelector = -1754 #errOSABadSelector
errOSAScriptError = -1753 #errOSAScriptError
errOSABadStorageType = -1752 #errOSABadStorageType
errOSAInvalidID = -1751 #errOSAInvalidID
errOSASystemError = -1750 #errOSASystemError
errAEBufferTooSmall = -1741 #buffer for AEFlattenDesc too small
errAEBuildSyntaxError = -1740 #AEBuildDesc and friends detected a syntax error
errAEDescIsNull = -1739 #attempting to perform an invalid operation on a null descriptor
errAEStreamAlreadyConverted = -1738 #attempt to convert a stream that has already been converted
errAEStreamBadNesting = -1737 #nesting violation while streaming
errAEDuplicateHandler = -1736 #attempt to install handler in table for identical class and id (1.1 or greater)
errAEEventFiltered = -1735 #event has been filtered, and should not be propagated (1.1 or greater)
errAEReceiveEscapeCurrent = -1734 #break out of only lowest level of AEReceive (1.1 or greater)
errAEReceiveTerminate = -1733 #break out of all levels of AEReceive to the topmost (1.1 or greater)
errAERecordingIsAlreadyOn = -1732 #available only in version 1.0.1 or greater
errAEUnknownObjectType = -1731 #available only in version 1.0.1 or greater
errAEEmptyListContainer = -1730 #Attempt to pass empty list as container to accessor
errAENegativeCount = -1729 #CountProc returned negative value
errAENoSuchObject = -1728 #e.g.,: specifier asked for the 3rd, but there are only 2. Basically, this indicates a run-time resolution error.
errAENotAnObjSpec = -1727 #Param to AEResolve not of type 'obj '
errAEBadTestKey = -1726 #Test is neither typeLogicalDescriptor nor typeCompDescriptor
errAENoSuchLogical = -1725 #Something other than AND, OR, or NOT
errAEAccessorNotFound = -1723 #Accessor proc matching wantClass and containerType or wildcards not found
errAEWrongNumberArgs = -1721 #Logical op kAENOT used with other than 1 term
errAEImpossibleRange = -1720 #A range like 3rd to 2nd, or 1st to all.
errAEIllegalIndex = -1719 #index is out of range in a put operation
errAEReplyNotArrived = -1718 #the contents of the reply you are accessing have not arrived yet
errAEHandlerNotFound = -1717 #no handler in the dispatch tables fits the parameters to AEGetEventHandler or AEGetCoercionHandler
errAEUnknownAddressType = -1716 #the target address type is not known
errAEParamMissed = -1715 #a required parameter was not accessed
errAENotASpecialFunction = -1714 #there is no special function for/with this keyword
errAENoUserInteraction = -1713 #no user interaction is allowed
errAETimeout = -1712 #the AppleEvent timed out
errAEWaitCanceled = -1711 #in AESend, the user cancelled out of wait loop for reply or receipt
errAEUnknownSendMode = -1710 #mode wasn't NoReply, WaitReply, or QueueReply or Interaction level is unknown
errAEReplyNotValid = -1709 #AEResetTimer was passed an invalid reply parameter
errAEEventNotHandled = -1708 #the AppleEvent was not handled by any handler
errAENotAppleEvent = -1707 #the event is not in AppleEvent format
errAENewerVersion = -1706 #need newer version of the AppleEvent manager
errAEBadListItem = -1705 #the specified list item does not exist
errAENotAEDesc = -1704 #errAENotAEDesc
errAEWrongDataType = -1703 #errAEWrongDataType
errAECorruptData = -1702 #errAECorruptData
errAEDescNotFound = -1701 #errAEDescNotFound
errAECoercionFail = -1700 #bad parameter data or unable to coerce the data supplied
errFSIteratorNotSupported = -1424 #The iterator's flags or container are not supported by this call
errFSIteratorNotFound = -1423 #Passed FSIterator is not an open iterator
errFSBadIteratorFlags = -1422 #Flags passed to FSOpenIterator are bad
errFSForkExists = -1421 #Named fork already exists.
errFSRefsDifferent = -1420 #FSCompareFSRefs; refs are for different objects
errFSBadSearchParams = -1419 #Something wrong with CatalogSearch searchParams
errFSBadItemCount = -1418 #maximumItems was zero
errFSNoMoreItems = -1417 #Iteration ran out of items to return
errFSBadAllocFlags = -1413 #Invalid bits set in allocationFlags
errFSBadPosMode = -1412 #Newline bits set in positionMode
errFSMissingName = -1411 #A Unicode name parameter was NULL or nameLength parameter was zero
errFSNameTooLong = -1410 #File/fork name is too long to create/rename
errFSForkNotFound = -1409 #Named fork does not exist
errFSNotAFolder = -1407 #Expected a folder, got a file
errFSMissingCatInfo = -1406 #A CatalogInfo parameter was NULL
errFSBadInfoBitmap = -1405 #A CatalogInfoBitmap or VolumeInfoBitmap has reserved or invalid bits set
errFSBadForkRef = -1404 #A ForkRefNum parameter was bad
errFSBadBuffer = -1403 #A buffer parameter was bad
errFSBadForkName = -1402 #Fork name parameter is bad
errFSBadFSRef = -1401 #FSRef parameter is bad
errFSUnknownCall = -1400 #selector is not recognized by this filesystem
badFCBErr = -1327 #FCBRecPtr is not valid
volVMBusyErr = -1311 #can't eject because volume is in use by VM
fsDataTooBigErr = -1310 #file or volume is too big for system
fileBoundsErr = -1309 #file's EOF, offset, mark or size is too big
notARemountErr = -1308 #when _Mount allows only remounts and doesn't get one
badFidErr = -1307 #file id is dangling or doesn't match with the file number
sameFileErr = -1306 #can't exchange a file with itself
desktopDamagedErr = -1305 #desktop database files are corrupted
catChangedErr = -1304 #the catalog has been modified
diffVolErr = -1303 #files on different volumes
notAFileErr = -1302 #directory specified
fidExists = -1301 #file id already exists
fidNotFound = -1300 #no file thread exists.
errRefNum = -1280 #bad connection refNum
errAborted = -1279 #control call was aborted
errState = -1278 #bad connection state for this operation
errOpening = -1277 #open connection request failed
errAttention = -1276 #attention message too long
errFwdReset = -1275 #read terminated by forward reset
errDSPQueueSize = -1274 #DSP Read/Write Queue Too small
errOpenDenied = -1273 #open connection request was denied
reqAborted = -1105 #reqAborted
noDataArea = -1104 #noDataArea
noSendResp = -1103 #noSendResp
cbNotFound = -1102 #cbNotFound
noRelErr = -1101 #noRelErr
badBuffNum = -1100 #badBuffNum
badATPSkt = -1099 #badATPSkt
tooManySkts = -1098 #tooManySkts
tooManyReqs = -1097 #tooManyReqs
reqFailed = -1096 #reqFailed
aspNoAck = -1075 #No ack on attention request (server err)
aspTooMany = -1074 #Too many clients (server error)
aspSizeErr = -1073 #Command block too big
aspSessClosed = -1072 #Session closed
aspServerBusy = -1071 #Server cannot open another session
aspParamErr = -1070 #Parameter error
aspNoServers = -1069 #No servers at that address
aspNoMoreSess = -1068 #No more sessions on server
aspBufTooSmall = -1067 #Buffer too small
aspBadVersNum = -1066 #Server cannot support this ASP version
nbpNISErr = -1029 #Error trying to open the NIS
nbpNotFound = -1028 #Name not found on remove
nbpDuplicate = -1027 #Duplicate name exists already
nbpConfDiff = -1026 #Name confirmed at different socket
nbpNoConfirm = -1025 #nbpNoConfirm
nbpBuffOvr = -1024 #Buffer overflow in LookupName
noMaskFoundErr = -1000 #Icon Utilties Error
kFMFontContainerAccessErr = -985 #kFMFontContainerAccessErr
kFMFontTableAccessErr = -984 #kFMFontTableAccessErr
kFMIterationScopeModifiedErr = -983 #kFMIterationScopeModifiedErr
kFMInvalidFontErr = -982 #kFMInvalidFontErr
kFMInvalidFontFamilyErr = -981 #kFMInvalidFontFamilyErr
kFMIterationCompleted = -980 #kFMIterationCompleted
guestNotAllowedErr = -932 #destination port requires authentication
badLocNameErr = -931 #location name malformed
badServiceMethodErr = -930 #illegal service type, or not supported
noUserRecErr = -928 #Invalid user reference number
authFailErr = -927 #unable to authenticate user at destination
noInformErr = -926 #PPCStart failed because destination did not have inform pending
networkErr = -925 #An error has occurred in the network, not too likely
noUserRefErr = -924 #unable to create a new userRefNum
notLoggedInErr = -923 #The default userRefNum does not yet exist
noDefaultUserErr = -922 #user hasn't typed in owners name in Network Setup Control Pannel
badPortNameErr = -919 #PPCPortRec malformed
sessClosedErr = -917 #session was closed
portClosedErr = -916 #port was closed
noResponseErr = -915 #unable to contact destination
noToolboxNameErr = -914 #A system resource is missing, not too likely
noMachineNameErr = -913 #user hasn't named his Macintosh in the Network Setup Control Panel
userRejectErr = -912 #Destination rejected the session request
noUserNameErr = -911 #user name unknown on destination machine
portNameExistsErr = -910 #port is already open (perhaps in another app)
badReqErr = -909 #bad parameter or invalid state for operation
noSessionErr = -908 #Invalid session reference number
sessTableErr = -907 #Out of session tables, try again later
destPortErr = -906 #Port does not exist at destination
localOnlyErr = -905 #Network activity is currently disabled
noGlobalsErr = -904 #The system is hosed, better re-boot
noPortErr = -903 #Unable to open port or bad portRefNum. If you're calling
nameTypeErr = -902 #Invalid or inappropriate locationKindSelector in locationName
notInitErr = -900 #PPCToolBox not initialized
notAppropriateForClassic = -877 #This application won't or shouldn't run on Classic (Problem 2481058).
appVersionTooOld = -876 #The application's creator and version are incompatible with the current version of Mac OS.
wrongApplicationPlatform = -875 #The application could not launch because the required platform is not available
hmCloseViewActive = -863 #Returned from HMRemoveBalloon if CloseView was active
hmNoBalloonUp = -862 #Returned from HMRemoveBalloon if no balloon was visible when call was made
hmOperationUnsupported = -861 #Returned from HMShowBalloon call if bad method passed to routine
hmUnknownHelpType = -859 #Returned if help msg record contained a bad type
hmWrongVersion = -858 #Returned if help mgr resource was the wrong version
hmSkippedBalloon = -857 #Returned from calls if helpmsg specified a skip balloon
hmHelpManagerNotInited = -855 #Returned from HMGetHelpMenuHandle if help menu not setup
hmSameAsLastBalloon = -854 #Returned from HMShowMenuBalloon if menu & item is same as last time
hmBalloonAborted = -853 #Returned if mouse was moving or mouse wasn't in window port rect
hmHelpDisabled = -850 #Show Balloons mode was off, call to routine ignored
rcDBPackNotInited = -813 #attempt to call other routine before InitDBPack
rcDBWrongVersion = -812 #incompatible versions
rcDBNoHandler = -811 #no app handler for specified data type
rcDBBadAsyncPB = -810 #tried to kill a bad pb
rcDBAsyncNotSupp = -809 #ddev does not support async calls
rcDBBadDDEV = -808 #bad ddev specified on DBInit
rcDBBadSessNum = -807 #bad session number for DBGetConnInfo
rcDBBadSessID = -806 #rcDBBadSessID
rcDBExec = -805 #rcDBExec
rcDBBreak = -804 #rcDBBreak
rcDBBadType = -803 #rcDBBadType
rcDBError = -802 #rcDBError
rcDBValue = -801 #rcDBValue
rcDBNull = -800 #rcDBNull
icTooManyProfilesErr = -677 #too many profiles in database
icProfileNotFoundErr = -676 #profile not found
icConfigInappropriateErr = -675 #incorrect manufacturer code
icConfigNotFoundErr = -674 #no internet configuration was found
icNoURLErr = -673 #no URL found
icNothingToOverrideErr = -672 #no component for the override component to capture
icNoMoreWritersErr = -671 #you cannot begin a write session because someone else is already doing it
icTruncatedErr = -670 #more data was present than was returned
icInternalErr = -669 #Internet Config internal error
icPrefDataErr = -668 #problem with preference data
icPermErr = -667 #cannot set preference
icPrefNotFoundErr = -666 #Internet preference not found
vmInvalidOwningProcessErr = -648 #current process does not own the BackingFileID or FileViewID
vmAddressNotInFileViewErr = -647 #address is not in a FileView
vmNoMoreFileViewsErr = -646 #no more FileViews were found
vmFileViewAccessErr = -645 #requested FileViewAccess cannot be obtained
vmInvalidFileViewIDErr = -644 #invalid FileViewID
vmNoMoreBackingFilesErr = -643 #no more BackingFiles were found
vmBusyBackingFileErr = -642 #open views found on BackingFile
vmMappingPrivilegesErr = -641 #requested MappingPrivileges cannot be obtained
vmInvalidBackingFileIDErr = -640 #invalid BackingFileID
noMMUErr = -626 #no MMU present
cannotDeferErr = -625 #unable to defer additional functions
interruptsMaskedErr = -624 #don't call with interrupts masked
notLockedErr = -623 #specified range of memory is not locked
cannotMakeContiguousErr = -622 #cannot make specified range contiguous
notHeldErr = -621 #specified range of memory is not held
notEnoughMemoryErr = -620 #insufficient physical memory
threadProtocolErr = -619 #threadProtocolErr
threadNotFoundErr = -618 #threadNotFoundErr
threadTooManyReqsErr = -617 #threadTooManyReqsErr
noUserInteractionAllowed = -610 #no user interaction allowed
connectionInvalid = -609 #connectionInvalid
noOutstandingHLE = -608 #noOutstandingHLE
bufferIsSmall = -607 #error returns from Post and Accept
appIsDaemon = -606 #app is BG-only, and launch flags disallow this
appMemFullErr = -605 #application SIZE not big enough for launch
hardwareConfigErr = -604 #hardware configuration not correct for call
protocolErr = -603 #app made module calls in improper order
appModeErr = -602 #memory mode is 32-bit, but app not 32-bit clean
memFragErr = -601 #not enough room to launch app w/special requirements
procNotFound = -600 #no eligible process with specified descriptor
driverHardwareGoneErr = -503 #disk driver's hardware was disconnected
hwParamErr = -502 #bad selector for _HWPriv
teScrapSizeErr = -501 #scrap item too big for text edit record
rgnTooBigErr = -500 #rgnTooBigErr
exUserBreak = -492 #user debugger break; execute debugger commands on stack
strUserBreak = -491 #user debugger break; display string on stack
userBreak = -490 #user debugger break
notThePublisherWrn = -463 #not the first registered publisher for that container
containerAlreadyOpenWrn = -462 #container already opened by this section
containerNotFoundWrn = -461 #could not find editionContainer at this time
multiplePublisherWrn = -460 #A Publisher is already registered for that container
badSubPartErr = -454 #can not use sub parts in this release
badEditionFileErr = -453 #edition file is corrupt
notRegisteredSectionErr = -452 #not a registered SectionRecord
badSectionErr = -451 #not a valid SectionRecord
editionMgrInitErr = -450 #edition manager not inited by this app
fsmUnknownFSMMessageErr = -438 #unknown message passed to FSM
fsmNoAlternateStackErr = -437 #no alternate stack for HFS CI
fsmBadFSDVersionErr = -436 #FSM version incompatible with FSD
fsmDuplicateFSIDErr = -435 #FSID already exists on InstallFS
fsmBadFSDLenErr = -434 #FSD size incompatible with current FSM vers
fsmBadFFSNameErr = -433 #Name length not 1 <= length <= 31
fsmBusyFFSErr = -432 #File system is busy, cannot be removed
fsmFFSNotFoundErr = -431 #Foreign File system does not exist - new Pack2 could return this error too
btKeyAttrErr = -417 #There is no such a key attribute.
btKeyLenErr = -416 #Maximum key length is too long or equal to zero.
btRecNotFnd = -415 #Record cannot be found.
btDupRecErr = -414 #Record already exists.
btNoSpace = -413 #Can't allocate disk space.
notBTree = -410 #The file is not a dictionary.
gcrOnMFMErr = -400 #gcr format on high density media error
slotNumErr = -360 #invalid slot # error
smRecNotFnd = -351 #Record not found in the SRT.
smSRTOvrFlErr = -350 #SRT over flow.
smNoGoodOpens = -349 #No opens were successful in the loop.
smOffsetErr = -348 #Offset was too big (temporary error
smByteLanesErr = -347 #NumByteLanes was determined to be zero.
smBadsPtrErr = -346 #Bad pointer was passed to sCalcsPointer
smsGetDrvrErr = -345 #Error occurred during _sGetDriver.
smNoMoresRsrcs = -344 #No more sResources
smDisDrvrNamErr = -343 #Error occurred during _sDisDrvrName.
smGetDrvrNamErr = -342 #Error occurred during _sGetDrvrName.
smCkStatusErr = -341 #Status of slot = fail.
smBlkMoveErr = -340 #_BlockMove error
smNewPErr = -339 #_NewPtr error
smSelOOBErr = -338 #Selector out of bounds error
smSlotOOBErr = -337 #Slot out of bounds error
smNilsBlockErr = -336 #Nil sBlock error (Dont allocate and try to use a nil sBlock)
smsPointerNil = -335 #LPointer is nil From sOffsetData. If this error occurs; check sInfo rec for more information.
smCPUErr = -334 #Code revision is wrong
smCodeRevErr = -333 #Code revision is wrong
smReservedErr = -332 #Reserved field not zero
smBadsList = -331 #Bad sList: Id1 < Id2 < Id3 ...format is not followed.
smBadRefId = -330 #Reference Id not found in List
smBusErrTO = -320 #BusError time out.
smBadBoardId = -319 #BoardId was wrong; re-init the PRAM record.
smReservedSlot = -318 #slot is reserved, VM should not use this address space.
smInitTblVErr = -317 #An error occurred while trying to initialize the Slot Resource Table.
smInitStatVErr = -316 #The InitStatusV field was negative after primary or secondary init.
smNoBoardId = -315 #No Board Id.
smGetPRErr = -314 #Error occurred during _sGetPRAMRec (See SIMStatus).
smNoBoardSRsrc = -313 #No Board sResource.
smDisposePErr = -312 #_DisposePointer error
smFHBlkDispErr = -311 #Error occurred during _sDisposePtr (Dispose of FHeader block).
smFHBlockRdErr = -310 #Error occurred during _sGetFHeader.
smBLFieldBad = -309 #ByteLanes field was bad.
smUnExBusErr = -308 #Unexpected BusError
smResrvErr = -307 #Fatal reserved error. Reserved field <> 0.
smNosInfoArray = -306 #No sInfoArray. Memory Mgr error.
smDisabledSlot = -305 #This slot is disabled (-305 use to be smLWTstBad)
smNoDir = -304 #Directory offset is Nil
smRevisionErr = -303 #Wrong revision level
smFormatErr = -302 #FHeader Format is not Apple's
smCRCFail = -301 #CRC check failed for declaration data
smEmptySlot = -300 #No card in slot
nmTypErr = -299 #Notification Manager:wrong queue type
smPriInitErr = -293 #Error; Cards could not be initialized.
smPRAMInitErr = -292 #Error; Slot Resource Table could not be initialized.
smSRTInitErr = -291 #Error; Slot Resource Table could not be initialized.
smSDMInitErr = -290 #Error; SDM could not be initialized.
midiInvalidCmdErr = -261 #command not supported for port type
midiDupIDErr = -260 #duplicate client ID
midiNameLenErr = -259 #name supplied is longer than 31 characters
midiWriteErr = -258 #MIDIWritePacket couldn't write to all connected ports
midiNoConErr = -257 #no connection exists between specified ports
midiVConnectRmvd = -256 #pending virtual connection removed
midiVConnectMade = -255 #pending virtual connection resolved
midiVConnectErr = -254 #pending virtual connection created
midiTooManyConsErr = -253 #too many connections made
midiTooManyPortsErr = -252 #too many ports already installed in the system
midiNoPortErr = -251 #no port with that ID found
midiNoClientErr = -250 #no client with that ID found
badInputText = -247 #badInputText
badDictFormat = -246 #badDictFormat
incompatibleVoice = -245 #incompatibleVoice
voiceNotFound = -244 #voiceNotFound
bufTooSmall = -243 #bufTooSmall
synthNotReady = -242 #synthNotReady
synthOpenFailed = -241 #synthOpenFailed
noSynthFound = -240 #noSynthFound
siUnknownQuality = -232 #invalid quality selector (returned by driver)
siUnknownInfoType = -231 #invalid info type selector (returned by driver)
siInputDeviceErr = -230 #input device hardware failure
siBadRefNum = -229 #invalid input device reference number
siBadDeviceName = -228 #input device could not be opened
siDeviceBusyErr = -227 #input device already in use
siInvalidSampleSize = -226 #invalid sample size
siInvalidSampleRate = -225 #invalid sample rate
siHardDriveTooSlow = -224 #hard drive too slow to record to disk
siInvalidCompression = -223 #invalid compression type
siNoBufferSpecified = -222 #returned by synchronous SPBRecord if nil buffer passed
siBadSoundInDevice = -221 #invalid index passed to SoundInGetIndexedDevice
siNoSoundInHardware = -220 #no Sound Input hardware
siVBRCompressionNotSupported = -213 #vbr audio compression not supported for this operation
noMoreRealTime = -212 #not enough CPU cycles left to add another task
channelNotBusy = -211 #channelNotBusy
buffersTooSmall = -210 #can not operate in the memory allowed
channelBusy = -209 #the Channel is being used for a PFD already
badFileFormat = -208 #was not type AIFF or was of bad format,corrupt
notEnoughBufferSpace = -207 #could not allocate enough memory
badFormat = -206 #Sound Manager Error Returns
badChannel = -205 #Sound Manager Error Returns
resProblem = -204 #Sound Manager Error Returns
queueFull = -203 #Sound Manager Error Returns
notEnoughHardwareErr = -201 #Sound Manager Error Returns
noHardwareErr = -200 #Sound Manager Error Returns
mapReadErr = -199 #map inconsistent with operation
resAttrErr = -198 #attribute inconsistent with operation
rmvRefFailed = -197 #RmveReference failed
rmvResFailed = -196 #RmveResource failed
addRefFailed = -195 #AddReference failed
addResFailed = -194 #AddResource failed
resFNotFound = -193 #Resource file not found
resNotFound = -192 #Resource not found
inputOutOfBounds = -190 #Offset of Count out of bounds
writingPastEnd = -189 #Writing past end of file
resourceInMemory = -188 #Resource already in memory
CantDecompress = -186 #resource bent ("the bends") - can't decompress a compressed resource
badExtResource = -185 #extended resource has a bad format.
cmNoCurrentProfile = -182 #Responder error
cmUnsupportedDataType = -181 #Responder error
cmCantDeleteProfile = -180 #Responder error
cmCantXYZ = -179 #CMM cant handle XYZ space
cmCantConcatenateError = -178 #Profile can't be concatenated
cmProfilesIdentical = -177 #Profiles the same
cmProfileNotFound = -176 #Responder error
cmMethodNotFound = -175 #CMM not present
cmMethodError = -171 #cmMethodError
cmProfileError = -170 #cmProfileError
cDepthErr = -157 #invalid pixel depth
cResErr = -156 #invalid resolution for MakeITable
cDevErr = -155 #invalid type of graphics device
cProtectErr = -154 #colorTable entry protection violation
cRangeErr = -153 #range error on colorTable request
cNoMemErr = -152 #failed to allocate memory for structure
cTempMemErr = -151 #failed to allocate memory for temporary structures
cMatchErr = -150 #Color2Index failed to find an index
insufficientStackErr = -149 #insufficientStackErr
pixMapTooDeepErr = -148 #pixMapTooDeepErr
rgnOverflowErr = -147 #rgnOverflowErr
noMemForPictPlaybackErr = -145 #noMemForPictPlaybackErr
userCanceledErr = -128 #userCanceledErr
hMenuFindErr = -127 #could not find HMenu's parent in MenuKey (wrong error code - obsolete)
mBarNFnd = -126 #system error code for MBDF not found
updPixMemErr = -125 #insufficient memory to update a pixmap
volGoneErr = -124 #Server volume has been disconnected.
wrgVolTypErr = -123 #Wrong volume type error [operation not supported for MFS]
badMovErr = -122 #Move into offspring error
tmwdoErr = -121 #No free WDCB available
dirNFErr = -120 #Directory not found
memLockedErr = -117 #trying to move a locked block (MoveHHi)
memSCErr = -116 #Size Check failed
memBCErr = -115 #Block Check failed
memPCErr = -114 #Pointer Check failed
memAZErr = -113 #Address in zone check failed
memPurErr = -112 #trying to purge a locked or non-purgeable block
memWZErr = -111 #WhichZone failed (applied to free block)
memAdrErr = -110 #address was odd; or out of range
nilHandleErr = -109 #Master Pointer was NIL in HandleZone or other
memFullErr = -108 #Not enough room in heap zone
noTypeErr = -102 #No object of that type in scrap
noScrapErr = -100 #No scrap exists error
memROZWarn = -99 #soft error in ROZ
portNotCf = -98 #driver Open error code (parameter RAM not configured for this connection)
portInUse = -97 #driver Open error code (port is in use)
portNotPwr = -96 #serial port not currently powered
excessCollsns = -95 #excessive collisions on write
lapProtErr = -94 #error in attaching/detaching protocol
noBridgeErr = -93 #no network bridge for non-local send
eLenErr = -92 #Length error ddpLenErr
eMultiErr = -91 #Multicast address error ddpSktErr
breakRecd = -90 #Break received (SCC)
rcvrErr = -89 #SCC receiver error (framing; parity; OR)
prInitErr = -88 #InitUtil found the parameter ram uninitialized
prWrErr = -87 #parameter ram written didn't read-verify
clkWrErr = -86 #time written did not verify
clkRdErr = -85 #unable to read same clock value twice
verErr = -84 #track failed to verify
fmt2Err = -83 #can't get enough sync
fmt1Err = -82 #can't find sector 0 after track format
sectNFErr = -81 #sector number never found on a track
seekErr = -80 #track number wrong on address mark
spdAdjErr = -79 #unable to correctly adjust disk speed
twoSideErr = -78 #tried to read 2nd side on a 1-sided drive
initIWMErr = -77 #unable to initialize IWM
tk0BadErr = -76 #track 0 detect doesn't change
cantStepErr = -75 #step handshake failed
wrUnderrun = -74 #write underrun occurred
badDBtSlp = -73 #bad data mark bit slip nibbles
badDCksum = -72 #bad data mark checksum
noDtaMkErr = -71 #couldn't find a data mark header
badBtSlpErr = -70 #bad addr mark bit slip nibbles
badCksmErr = -69 #addr mark checksum didn't check
dataVerErr = -68 #read verify compare failed
noAdrMkErr = -67 #couldn't find valid addr mark
noNybErr = -66 #couldn't find 5 nybbles in 200 tries
offLinErr = -65 #r/w requested for an off-line drive
fontDecError = -64 #error during font declaration
wrPermErr = -61 #write permissions error
badMDBErr = -60 #bad master directory block
fsRnErr = -59 #file system internal error:during rename the old entry was deleted but could not be restored.
extFSErr = -58 #volume in question belongs to an external fs
noMacDskErr = -57 #not a mac diskette (sig bytes are wrong)
nsDrvErr = -56 #no such drive (tried to mount a bad drive num)
volOnLinErr = -55 #drive volume already on-line at MountVol
permErr = -54 #permissions error (on file open)
volOffLinErr = -53 #volume not on line error (was Ejected)
gfpErr = -52 #get file position error
rfNumErr = -51 #refnum error
paramErr = -50 #error in user parameter list
opWrErr = -49 #file already open with with write permission
dupFNErr = -48 #duplicate filename (rename)
fBsyErr = -47 #File is busy (delete)
vLckdErr = -46 #volume is locked
fLckdErr = -45 #file is locked
wPrErr = -44 #diskette is write protected.
fnfErr = -43 #File not found
tmfoErr = -42 #too many files open
mFulErr = -41 #memory full (open) or file won't fit (load)
posErr = -40 #tried to position to before start of file (r/w)
eofErr = -39 #End of file
fnOpnErr = -38 #File not open
bdNamErr = -37 #there may be no bad names in the final system!
ioErr = -36 #I/O error (bummers)
nsvErr = -35 #no such volume
dskFulErr = -34 #disk full
dirFulErr = -33 #Directory full
dceExtErr = -30 #dce extension error
unitTblFullErr = -29 #unit table has no more entries
notOpenErr = -28 #Couldn't rd/wr/ctl/sts cause driver not opened
iIOAbortErr = -27 #IO abort error (Printing Manager)
dInstErr = -26 #DrvrInstall couldn't find driver in resources
dRemovErr = -25 #tried to remove an open driver
closErr = -24 #I/O System Errors
openErr = -23 #I/O System Errors
unitEmptyErr = -22 #I/O System Errors
badUnitErr = -21 #I/O System Errors
writErr = -20 #I/O System Errors
readErr = -19 #I/O System Errors
statusErr = -18 #I/O System Errors
controlErr = -17 #I/O System Errors
dsExtensionsDisabled = -13 #say Extensions Disabled
dsHD20Installed = -12 #say HD20 Startup
dsDisassemblerInstalled = -11 #say Disassembler Installed
dsMacsBugInstalled = -10 #say MacsBug Installed
seNoDB = -8 #no debugger installed to handle debugger command
SlpTypeErr = -5 #invalid queue element
unimpErr = -4 #unimplemented core routine
corErr = -3 #core routine number out of range
dsNoExtsDisassembler = -2 #not a SysErr, just a placeholder
qErr = -1 #queue element not found during deletion
tsmComponentNoErr = 0 #component result = no error
EPERM = 1 #Operation not permitted
ENOENT = 2 #No such file or directory
ESRCH = 3 #No such process
EINTR = 4 #Interrupted system call
EIO = 5 #Input/output error
ENXIO = 6 #Device not configured
E2BIG = 7 #Argument list too long
ENOEXEC = 8 #Exec format error
EBADF = 9 #Bad file descriptor
ECHILD = 10 #No child processes
EDEADLK = 11 #Resource deadlock avoided
ENOMEM = 12 #Cannot allocate memory
EACCES = 13 #Permission denied
EFAULT = 14 #Bad address
ECANCELED = 15 #Operation cancelled
EBUSY = 16 #Device busy
EEXIST = 17 #File exists
EXDEV = 18 #Cross-device link
ENODEV = 19 #Operation not supported by device
ENOTDIR = 20 #Not a directory
EISDIR = 21 #Is a directory
EINVAL = 22 #Invalid argument
ENFILE = 23 #Too many open files in system
EMFILE = 24 #Too many open files
ENOTTY = 25 #Inappropriate ioctl for device
ESIGPARM = 26 #Signal error
EFBIG = 27 #File too large
ENOSPC = 28 #No space left on device
ESPIPE = 29 #Illegal seek
EROFS = 30 #Read-only file system
EMLINK = 31 #Too many links
EPIPE = 32 #Broken pipe
EDOM = 33 #Numerical argument out of domain
ERANGE = 34 #Result too large
EAGAIN = 35 #Resource temporarily unavailable
EINPROGRESS = 36 #Operation now in progress
EALREADY = 37 #Operation already in progress
ENOTSOCK = 38 #Socket operation on non-socket
EDESTADDRREQ = 39 #Destination address required
EMSGSIZE = 40 #Message too long
EPROTOTYPE = 41 #Protocol wrong type for socket
ENOPROTOOPT = 42 #Protocol not available
EPROTONOSUPPORT = 43 #Protocol not supported
ESOCKTNOSUPPORT = 44 #Socket type not supported
EOPNOTSUPP = 45 #Operation not supported
EPFNOSUPPORT = 46 #Protocol family not supported
EAFNOSUPPORT = 47 #Address family not supported by protocol family
EADDRINUSE = 48 #Address already in use
EADDRNOTAVAIL = 49 #Can't assign requested address
ENETDOWN = 50 #Network is down
ENETUNREACH = 51 #Network is unreachable
ENETRESET = 52 #Network dropped connection on reset
ECONNABORTED = 53 #Software caused connection abort
ECONNRESET = 54 #Connection reset by peer
ENOBUFS = 55 #No buffer space available
EISCONN = 56 #Socket is already connected
ENOTCONN = 57 #Socket is not connected
ESHUTDOWN = 58 #Can't send after socket shutdown
ETOOMANYREFS = 59 #Too many references: can't splice
ETIMEDOUT = 60 #Operation timed out
ECONNREFUSED = 61 #Connection refused
ELOOP = 62 #Too many levels of symbolic links
ENAMETOOLONG = 63 #File name too long
EHOSTDOWN = 64 #Host is down
EHOSTUNREACH = 65 #No route to host
ENOTEMPTY = 66 #Directory not empty
ELOOK = 67 #Internal mapping for kOTLookErr, don't return to client
ENOLCK = 77 #No locks available
ENOSYS = 78 #Function not implemented
EILSEQ = 88 #Wide character encoding error
EUNKNOWN = 99 #Unknown error
| apache-2.0 |
taknevski/tensorflow-xsmm | tensorflow/contrib/distributions/python/ops/distribution_util.py | 16 | 29861 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import math
import numpy as np
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import linalg
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that x and y agree element-wise to within machine epsilon.

  Args:
    x: Floating-point `Tensor`.
    y: Floating-point `Tensor`.
    data: Tensors to print when the condition is `False`. Defaults to the
      error message plus the first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")
  if data is None:
    data = [
        message,
        "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
        y.name, y
    ]
  # Integer tensors carry no rounding error, so demand exact equality.
  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)
  with ops.name_scope(name, "assert_close", [x, y, data]):
    # Machine epsilon for the tensor's dtype is the tolerance.
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    within_tol = math_ops.less_equal(math_ops.abs(x - y), eps)
    all_within_tol = math_ops.reduce_all(within_tol)
    return control_flow_ops.Assert(all_within_tol, data, summarize=summarize)
def assert_integer_form(
    x, data=None, summarize=None, message=None, name="assert_integer_form"):
  """Assert that x has integer components (or floats equal to integers).

  Args:
    x: Floating-point `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if round(x) != x.
  """
  message = message or "x has non-integer components"
  x = ops.convert_to_tensor(x, name="x")
  # Fix: the original compared x against round(to_int64(x)). Rounding a value
  # already truncated to int64 is a no-op, and the intermediate int64 cast
  # truncates toward zero and can silently mangle floats outside the int64
  # range. Rounding `x` itself states the intent directly: x is
  # integer-valued iff x == round(x). (math_ops.round is the identity for
  # integer dtypes, so integer inputs still pass trivially.)
  return check_ops.assert_equal(
      x, math_ops.round(x),
      data=data, summarize=summarize, message=message, name=name)
def assert_symmetric(matrix):
  """Return `matrix` gated on a runtime check that it equals its transpose."""
  transposed = array_ops.matrix_transpose(matrix)
  symmetry_check = check_ops.assert_equal(matrix, transposed)
  return control_flow_ops.with_dependencies([symmetry_check], matrix)
def embed_check_nonnegative_discrete(x, check_integer=True):
  """Assert x is a non-negative tensor, and optionally of integers."""
  # Collect the runtime checks, then attach them all as control dependencies.
  checks = [check_ops.assert_non_negative(
      x, message="x must be non-negative.")]
  if check_integer:
    checks.append(assert_integer_form(
        x, message="x cannot contain fractional components."))
  return control_flow_ops.with_dependencies(checks, x)
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `bool` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")
  # A plain math_ops.equal(a.shape, b.shape) can be defeated by static shape
  # inference, so the dynamic shapes are compared explicitly instead.
  def _shapes_equal():
    # Concatenating the two shapes in both orders and comparing element-wise
    # is equivalent to shape(a) == shape(b) once the ranks agree.
    return math_ops.reduce_all(math_ops.equal(
        array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
        array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))
  # Differing ranks short-circuit to False; equal ranks fall through to the
  # element-wise shape comparison above.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      _shapes_equal,
      lambda: constant_op.constant(False))
def get_logits_and_probs(logits=None,
                         probs=None,
                         multidimensional=False,
                         validate_args=False,
                         name="get_logits_and_probs"):
  """Converts logit to probabilities (or vice-versa), and returns both.

  Args:
    logits: Floating-point `Tensor` representing log-odds.
    probs: Floating-point `Tensor` representing probabilities.
    multidimensional: Python `bool`, default `False`.
      If `True`, represents whether the last dimension of `logits` or `probs`,
      a `[N1, N2, ... k]` dimensional tensor, representing the
      logit or probability of `shape[-1]` classes.
    validate_args: Python `bool`, default `False`. When `True`, either assert
      `0 <= probs <= 1` (if not `multidimensional`) or that the last dimension
      of `probs` sums to one.
    name: A name for this operation (optional).

  Returns:
    logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
      `1`, then the corresponding entry in the returned logit will be `-Inf` and
      `Inf` respectively.

  Raises:
    ValueError: if neither `probs` nor `logits` were passed in, or both were.
  """
  with ops.name_scope(name, values=[probs, logits]):
    # Exactly one of `probs`/`logits` must be supplied.
    if (probs is None) == (logits is None):
      raise ValueError("Must pass probs or logits, but not both.")
    if probs is None:
      # Logits given: derive probabilities (softmax over classes, or
      # element-wise sigmoid in the unidimensional case).
      logits = ops.convert_to_tensor(logits, name="logits")
      if multidimensional:
        return logits, nn.softmax(logits, name="probs")
      return logits, math_ops.sigmoid(logits, name="probs")
    # Probabilities given: optionally validate, then derive logits.
    probs = ops.convert_to_tensor(probs, name="probs")
    if validate_args:
      with ops.name_scope("validate_probs"):
        one = constant_op.constant(1., probs.dtype)
        dependencies = [check_ops.assert_non_negative(probs)]
        if multidimensional:
          # Classes along the last axis must form a distribution.
          dependencies += [assert_close(math_ops.reduce_sum(probs, -1), one,
                                        message="probs does not sum to 1.")]
        else:
          dependencies += [check_ops.assert_less_equal(
              probs, one, message="probs has components greater than 1.")]
        probs = control_flow_ops.with_dependencies(dependencies, probs)
    with ops.name_scope("logits"):
      if multidimensional:
        # Here we don't compute the multidimensional case, in a manner
        # consistent with respect to the unidimensional case. We do so
        # following the TF convention. Typically, you might expect to see
        # logits = log(probs) - log(probs[pivot]). A side-effect of
        # being consistent with the TF approach is that the unidimensional case
        # implicitly handles the second dimension but the multidimensional case
        # explicitly keeps the pivot dimension.
        return math_ops.log(probs), probs
      # Unidimensional: logit(p) = log(p) - log(1 - p), via log1p for accuracy
      # near p == 0.
      return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
def log_combinations(n, counts, name="log_combinations"):
  """Log of the multinomial coefficient.

  Given `n` and `counts`, where `counts` has last dimension `k`, computes
  `log(n! / (counts[0]! * ... * counts[k-1]!))`, i.e., the log of the number
  of distinct orderings of `n` outcomes grouped into the given class counts.

  Args:
    n: Floating-point `Tensor` broadcastable with `counts`. This represents
      `n` outcomes.
    counts: Floating-point `Tensor` broadcastable with `n`. This represents
      counts in `k` classes, where `k` is the last dimension of the tensor.
    name: A name for this operation (optional).

  Returns:
    `Tensor` representing the multinomial coefficient between `n` and `counts`.
  """
  # lgamma(z + 1) == log(z!), so all factorials are evaluated in log-space,
  # which avoids overflow for even moderately large counts. The reduction is
  # over the last ("distribution") dimension of `counts`.
  with ops.name_scope(name, values=[n, counts]):
    n = ops.convert_to_tensor(n, name="n")
    counts = ops.convert_to_tensor(counts, name="counts")
    log_n_factorial = math_ops.lgamma(n + 1)
    log_counts_factorial = math_ops.lgamma(counts + 1)
    return log_n_factorial - math_ops.reduce_sum(
        log_counts_factorial, axis=[-1])
def matrix_diag_transform(matrix, transform=None, name=None):
  """Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.

  Typical use is constructing a trainable Cholesky factor: map the raw
  diagonal of a network-produced matrix through `tf.nn.softplus` so it
  becomes positive while the strict triangle is untouched:

  ```python
  matrix_values = tf.contrib.layers.fully_connected(activations, 4)
  matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
  chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
  operator = OperatorPDCholesky(chol)  # ignores the upper triangle.
  ```

  Args:
    matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
      equal.
    transform: Element-wise function mapping `Tensors` to `Tensors`. Applied
      to the diagonal of `matrix`. If `None`, `matrix` is returned unchanged.
      Defaults to `None`.
    name: A name to give created ops. Defaults to "matrix_diag_transform".

  Returns:
    A `Tensor` with same shape and `dtype` as `matrix`.
  """
  with ops.name_scope(name, "matrix_diag_transform", [matrix]):
    matrix = ops.convert_to_tensor(matrix, name="matrix")
    if transform is None:
      return matrix
    # Pull out the diagonal, transform it element-wise, and splice it back;
    # off-diagonal entries pass through untouched.
    new_diag = transform(array_ops.matrix_diag_part(matrix))
    return array_ops.matrix_set_diag(matrix, new_diag)
def rotate_transpose(x, shift, name="rotate_transpose"):
  """Circularly moves dims left or right.

  Effectively identical to:

  ```python
  numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
  ```

  When the static values of `shift` and the rank of `x` are known, the
  permutation is computed with numpy and no extra graph ops are created;
  otherwise the permutation is assembled with graph-runtime ops.

  Example:

  ```python
  x = ... # Tensor of shape [1, 2, 3, 4].
  rotate_transpose(x, -1) # result shape: [2, 3, 4, 1]
  rotate_transpose(x, -2) # result shape: [3, 4, 1, 2]
  rotate_transpose(x, 1) # result shape: [4, 1, 2, 3]
  rotate_transpose(x, 2) # result shape: [3, 4, 1, 2]
  rotate_transpose(x, 7) == rotate_transpose(x, 3)
  rotate_transpose(x, -7) == rotate_transpose(x, -3)
  ```

  Args:
    x: `Tensor`.
    shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
      transpose right (shift>0).
    name: Python `str`. The name to give this op.

  Returns:
    rotated_x: Input `Tensor` with dimensions circularly rotated by shift.

  Raises:
    TypeError: if shift is not integer type.
  """
  with ops.name_scope(name, values=[x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    shift = ops.convert_to_tensor(shift, name="shift")
    # We do not assign back to preserve constant-ness.
    check_ops.assert_integer(shift)
    shift_value_static = tensor_util.constant_value(shift)
    ndims = x.get_shape().ndims
    if ndims is not None and shift_value_static is not None:
      # Static path: rank and shift are both known at graph-build time, so
      # the permutation is a numpy computation (no runtime ops, no data
      # movement).
      if ndims < 2: return x
      # Reduce shift modulo rank while preserving its sign, so e.g.
      # shift=7 on a rank-4 tensor behaves like shift=3.
      shift_value_static = np.sign(shift_value_static) * (
          abs(shift_value_static) % ndims)
      if shift_value_static == 0: return x
      perm = np.roll(np.arange(ndims), shift_value_static)
      return array_ops.transpose(x, perm=perm)
    else:
      # Dynamic path: build the permutation with graph ops.
      # Consider if we always had a positive shift, and some specified
      # direction.
      # When shifting left we want the new array:
      #   last(x, n-shift) + first(x, shift)
      # and if shifting right then we want:
      #   last(x, shift) + first(x, n-shift)
      # Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
      # Also, we can encode direction and shift as one: direction * shift.
      # Combining these facts, we have:
      #   a = cond(shift<0, -shift, n-shift)
      #   last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
      # Finally, we transform shift by modulo length so it can be specified
      # independently from the array upon which it operates (like python).
      ndims = array_ops.rank(x)
      shift = array_ops.where(math_ops.less(shift, 0),
                              math_ops.mod(-shift, ndims),
                              ndims - math_ops.mod(shift, ndims))
      first = math_ops.range(0, shift)
      last = math_ops.range(shift, ndims)
      perm = array_ops.concat([last, first], 0)
      return array_ops.transpose(x, perm=perm)
def pick_vector(cond,
                true_vector,
                false_vector,
                name="pick_vector"):
  """Picks possibly different length row `Tensor`s based on condition.

  Value `Tensor`s should have exactly one dimension. If `cond` is a python
  Boolean or `tf.constant`, the chosen vector is returned immediately with
  no graph nodes created and no validation performed.

  Example:

  ```python
  pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [10, 11].
  pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))
  # result is tensor: [15, 16, 17].
  ```

  Args:
    cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
    true_vector: `Tensor` of one dimension. Returned when cond is `True`.
    false_vector: `Tensor` of one dimension. Returned when cond is `False`.
    name: Python `str`. The name to give this op.

  Returns:
    true_or_false_vector: `Tensor`.

  Raises:
    TypeError: if `cond.dtype != tf.bool`
    TypeError: if `cond` is not a constant and
      `true_vector.dtype != false_vector.dtype`
  """
  with ops.name_scope(name, values=(cond, true_vector, false_vector)):
    cond = ops.convert_to_tensor(cond, name="cond")
    if cond.dtype != dtypes.bool:
      raise TypeError("%s.dtype=%s which is not %s" %
                      (cond.name, cond.dtype, dtypes.bool))
    static_cond = tensor_util.constant_value(cond)
    if static_cond is not None:
      # Constant condition: short-circuit without touching the graph.
      return true_vector if static_cond else false_vector
    true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
    false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
    if true_vector.dtype != false_vector.dtype:
      raise TypeError(
          "%s.dtype=%s does not match %s.dtype=%s"
          % (true_vector.name, true_vector.dtype,
             false_vector.name, false_vector.dtype))
    # Concatenate both candidates and slice out the chosen half: offset 0 /
    # length n when cond is true, offset n / rest-of-tensor (-1) otherwise.
    num_true = array_ops.shape(true_vector)[0]
    joined = array_ops.concat([true_vector, false_vector], 0)
    begin = [array_ops.where(cond, 0, num_true)]
    size = [array_ops.where(cond, num_true, -1)]
    return array_ops.slice(joined, begin, size)
def gen_new_seed(seed, salt):
  """Derive a deterministic new seed by hashing `seed` together with `salt`.

  Returns `None` when `seed` is `None`; otherwise a non-negative 31-bit
  integer derived from the MD5 digest of `str(seed) + salt`.
  """
  if seed is None:
    return None
  digest = hashlib.md5((str(seed) + salt).encode("utf-8")).hexdigest()
  # First 8 hex chars -> 32-bit int; mask to keep it a positive int32 seed.
  return int(digest[:8], 16) & 0x7FFFFFFF
def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
  """Creates a (batch of) lower triangular matrix from a vector of inputs.

  If `x.get_shape()` is `[b1, b2, ..., bK, d]` then the output shape is `[b1,
  b2, ..., bK, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
  `n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))`.

  Although the non-batch complexity is O(n**2), large constants and sub-optimal
  vectorization means the complexity of this function is 5x slower than zeroing
  out the upper triangular, i.e., `tf.matrix_band_part(X, -1, 0)`. This
  function becomes competitive only when several matmul/cholesky/etc ops can be
  ellided in constructing the input. In most cases it is better to simply build
  a full matrix and zero out the upper triangular elements, e.g.,
  `tril = tf.matrix_band_part(full, -1, 0)`.

  Example:

  ```python
  fill_lower_triangular([1, 2, 3, 4, 5, 6])
  # Returns: [[1, 0, 0],
  #           [2, 3, 0],
  #           [4, 5, 6]]
  ```

  Args:
    x: `Tensor` representing lower triangular elements.
    validate_args: Python `bool`, default `False`. Whether to ensure the shape
      of `x` can be mapped to a lower triangular matrix (controls non-static
      checks only).
    name: Python `str`. The name to give this op.

  Returns:
    tril: `Tensor` with lower triangular elements filled from `x`.

  Raises:
    ValueError: if shape if `x` has static shape which cannot be mapped to a
      lower triangular matrix.
  """
  # TODO(jvdillon): Replace this code with dedicated op when it exists.
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    if (x.get_shape().ndims is not None and
        x.get_shape()[-1].value is not None):
      # Static path: the last dimension is known, so n can be computed (and
      # validated) at graph-construction time.
      d = x.get_shape()[-1].value
      # d = n(n+1)/2 implies n is:
      n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
      # Integer division: n is an int, so n*(n+1) is always even.
      d_inferred = n * (n + 1) // 2
      if d != d_inferred:
        raise ValueError("Input cannot be mapped to a lower triangular; "
                         "n*(n+1)/2 = %d != %d" % (d_inferred, d))
      final_shape = x.get_shape()[:-1].concatenate(
          tensor_shape.TensorShape([n, n]))
    else:
      # Dynamic path: derive n from the runtime shape.
      d = math_ops.cast(array_ops.shape(x)[-1], dtype=dtypes.float32)
      # d = n(n+1)/2 implies n is:
      # Bug fix: `sqrt` lives in math_ops; `dtypes.sqrt` does not exist and
      # raised AttributeError whenever this branch was taken.
      n = math_ops.cast(0.5 * (math_ops.sqrt(1. + 8. * d) - 1.),
                        dtype=dtypes.int32)
      if validate_args:
        is_valid_input_shape = check_ops.assert_equal(
            n * (n + 1) / 2, d,
            message="Input cannot be mapped to a lower triangular.")
        n = control_flow_ops.with_dependencies([is_valid_input_shape], n)
      final_shape = x.get_shape()[:-1].concatenate(
          tensor_shape.TensorShape([None, None]))
    def tril_ids(n):
      """Internal helper to create vector of linear indices into y."""
      # Build the ids statically; chose 512 because it implies 1MiB.
      if not contrib_framework.is_tensor(n) and n <= 512:
        ids = np.arange(n**2, dtype=np.int32)
        rows = (ids / n).astype(np.int32)  # Implicit floor.
        # We need to stop incrementing the index when we encounter
        # upper-triangular elements. The idea here is to compute the
        # lower-right number of zeros then by "symmetry" subtract this from the
        # total number of zeros, n(n-1)/2.
        # Then we note that: n(n-1)/2 - (n-r)*(n-r-1)/2 = r(2n-r-1)/2
        offset = (rows * (2 * n - rows - 1) / 2).astype(np.int32)
        # We could also zero out when (rows < cols) == (rows < ids-n*rows).
        # mask = (ids <= (n + 1) * rows).astype(np.int32)
      else:
        ids = math_ops.range(n**2)
        rows = math_ops.cast(ids / n, dtype=dtypes.int32)
        offset = math_ops.cast(rows * (2 * n - rows - 1) / 2,
                               dtype=dtypes.int32)
      return ids - offset
    # Special-case non-batch case.
    if x.get_shape().ndims == 1:
      y = array_ops.gather(x, array_ops.reshape(tril_ids(n), [n, n]))
      y = array_ops.matrix_band_part(y, -1, 0)
      y.set_shape(y.get_shape().merge_with(final_shape))
      return y
    # Make ids for each batch dim.
    if (x.get_shape().ndims is not None and
        x.get_shape()[:-1].is_fully_defined()):
      batch_shape = np.asarray(x.get_shape()[:-1].as_list(), dtype=np.int32)
      m = np.prod(batch_shape).astype(np.int32)
    else:
      batch_shape = array_ops.shape(x)[:-1]
      # Bug fix: `reduce_prod` lives in math_ops; `array_ops.reduce_prod`
      # does not exist and raised AttributeError in the dynamic-batch case.
      m = math_ops.reduce_prod(array_ops.shape(x)[:-1])
    batch_ids = math_ops.range(m)
    # Assemble the tril_ids into batch,tril_id pairs.
    idx = array_ops.stack([
        array_ops.tile(array_ops.expand_dims(batch_ids, 1), [1, n * n]),
        array_ops.tile(array_ops.expand_dims(tril_ids(n), 0), [m, 1])
    ])
    idx = array_ops.transpose(idx, [1, 2, 0])
    # Gather up, reshape, and return.
    y = array_ops.reshape(x, [-1, d])
    y = array_ops.gather_nd(y, idx)
    y = array_ops.reshape(y, array_ops.concat([batch_shape, [n, n]], 0))
    y = array_ops.matrix_band_part(y, -1, 0)
    y.set_shape(y.get_shape().merge_with(final_shape))
    return y
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
  """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).

  Mathematically this op is equivalent to:

  ```none
  softplus_inverse = log(exp(x) - 1.)
  ```

  Args:
    x: `Tensor`. Non-negative (not enforced), floating-point.
    name: A name for the operation (optional).

  Returns:
    `Tensor`. Has the same type/shape as input `x`.
  """
  with ops.name_scope(name, "softplus_inverse", values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # Derivation of the numerically stable form:
    #   x = softplus(y) = Log[1 + exp{y}]  (so x > 0)
    #   ==> y = Log[exp{x} - 1]                       (naive inverse)
    #         = Log[1 - exp{-x}] + x                  (stable for large x)
    # For tiny x the stable form hits log(0) = -inf since 1 - exp{-x} ~ 0,
    # so we use log(x) there (1 - exp{-x} ~ x for small x > 0). For large x
    # we return x itself. The thresholds mirror the clamping logic in
    # tensorflow/core/kernels/softplus_op.h.
    #
    # Inputs on an extreme branch are additionally overwritten with ones
    # before the main computation: the gradient of `where` mixes both
    # branches (pred*grad_true + (1-pred)*grad_false), so an inf/nan in the
    # *unselected* branch would still poison the gradient. Ones (not zeros)
    # because log(expm1(0.)) = -inf.
    threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
    too_small = math_ops.less(x, np.exp(threshold))
    too_large = math_ops.greater(x, -threshold)
    small_result = math_ops.log(x)
    large_result = x
    safe_x = array_ops.where(math_ops.logical_or(too_small, too_large),
                             array_ops.ones_like(x), x)
    y = safe_x + math_ops.log(-math_ops.expm1(-safe_x))  # == log(expm1(x))
    return array_ops.where(too_small, small_result,
                           array_ops.where(too_large, large_result, y))
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
  """Returns the (statically known, if possible) size of dimension `axis`."""
  # tf.gather is not "constant-in, constant-out", so prefer the static shape
  # (a plain Python int) and only fall back to a graph op when unknown.
  static_size = (None if x.get_shape().ndims is None
                 else x.get_shape()[axis].value)
  if static_size is not None:
    return static_size
  return array_ops.shape(x)[axis]
# TODO(b/35290280): Add unit-tests.
def make_diag_scale(loc, scale_diag, scale_identity_multiplier,
                    validate_args, assert_positive, name=None):
  """Creates a LinOp from `scale_diag`, `scale_identity_multiplier` kwargs.

  Selection logic, based on which arguments are supplied:
  * `scale_diag` given: returns `LinearOperatorDiag` (with the multiplier,
    if any, added to the diagonal).
  * only `scale_identity_multiplier` given: returns
    `LinearOperatorScaledIdentity`.
  * neither given: returns `LinearOperatorIdentity`.
  The identity variants need `loc` to infer the event size; a `ValueError`
  is raised when `loc` is also `None`.
  """
  def _convert_to_tensor(x, name):
    # None-propagating convert_to_tensor: absent kwargs stay None.
    return None if x is None else ops.convert_to_tensor(x, name=name)
  def _maybe_attach_assertion(x):
    # Attaches a runtime validity check to the diagonal/multiplier when
    # validate_args is set; positivity vs. mere non-singularity depends on
    # assert_positive.
    if not validate_args:
      return x
    if assert_positive:
      return control_flow_ops.with_dependencies([
          check_ops.assert_positive(
              x, message="diagonal part must be positive"),
      ], x)
    # TODO(b/35157376): Use `assert_none_equal` once it exists.
    return control_flow_ops.with_dependencies([
        check_ops.assert_greater(
            math_ops.abs(x),
            array_ops.zeros([], x.dtype),
            message="diagonal part must be non-zero"),
    ], x)
  with ops.name_scope(name, "make_diag_scale",
                      values=[loc, scale_diag, scale_identity_multiplier]):
    loc = _convert_to_tensor(loc, name="loc")
    scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
    scale_identity_multiplier = _convert_to_tensor(
        scale_identity_multiplier,
        name="scale_identity_multiplier")
    if scale_diag is not None:
      if scale_identity_multiplier is not None:
        # Fold the scalar multiplier into the diagonal (broadcast over the
        # trailing event dimension).
        scale_diag += scale_identity_multiplier[..., array_ops.newaxis]
      return linalg.LinearOperatorDiag(
          diag=_maybe_attach_assertion(scale_diag),
          is_non_singular=True,
          is_self_adjoint=True,
          is_positive_definite=assert_positive)
    # TODO(b/35290280): Consider inferring shape from scale_perturb_factor.
    if loc is None:
      raise ValueError(
          "Cannot infer `event_shape` unless `loc` is specified.")
    # Event size comes from the last dimension of loc.
    num_rows = dimension_size(loc, -1)
    if scale_identity_multiplier is None:
      return linalg.LinearOperatorIdentity(
          num_rows=num_rows,
          dtype=loc.dtype.base_dtype,
          is_self_adjoint=True,
          is_positive_definite=True,
          assert_proper_shapes=validate_args)
    return linalg.LinearOperatorScaledIdentity(
        num_rows=num_rows,
        multiplier=_maybe_attach_assertion(scale_identity_multiplier),
        is_non_singular=True,
        is_self_adjoint=True,
        is_positive_definite=assert_positive,
        assert_proper_shapes=validate_args)
class AppendDocstring(object):
  """Decorator that appends notes to a (wrapped) function's docstring.

  Typical use is promoting a private subclass method's extra documentation to
  its public counterpart:

  ```python
  class TransformedDistribution(Distribution):
    @distribution_util.AppendDocstring(
      additional_note="A special note!",
      kwargs_dict={"foo": "An extra arg."})
    def _prob(self, y, foo=None):
      pass
  ```

  Here the `additional_note` is appended to the docstring of `prob` (not
  `_prob`), and each `kwargs_dict` entry becomes a bullet-point in a new
  `kwargs` section.
  """

  def __init__(self, additional_note="", kwargs_dict=None):
    """Initializes the AppendDocstring object.

    Args:
      additional_note: Python string added as additional docstring to public
        version of function.
      kwargs_dict: Python string/string dictionary representing
        specific kwargs expanded from the **kwargs input.

    Raises:
      ValueError: if kwargs_dict.key contains whitespace.
      ValueError: if kwargs_dict.value contains newlines.
    """
    self._additional_note = additional_note
    if kwargs_dict:
      bullets = []
      # Iterate in sorted-key order so the rendered section is deterministic.
      for key, value in sorted(kwargs_dict.items()):
        if any(x.isspace() for x in key):
          raise ValueError(
              "Parameter name \"%s\" contains whitespace." % key)
        value = value.lstrip()
        if "\n" in value:
          raise ValueError(
              "Parameter description for \"%s\" contains newlines." % key)
        bullets.append("* `%s`: %s" % (key, value))
      self._additional_note += ("\n\n##### `kwargs`:\n\n" +
                                "\n".join(bullets))

  def __call__(self, fn):
    # Wrap fn so its own docstring can be extended without mutating fn.
    @functools.wraps(fn)
    def _fn(*args, **kwargs):
      return fn(*args, **kwargs)
    if _fn.__doc__ is None:
      _fn.__doc__ = self._additional_note
    else:
      _fn.__doc__ += "\n%s" % self._additional_note
    return _fn
| apache-2.0 |
shanez/django-moneyclip | moneyclip/moneyclip.py | 1 | 1919 | from django.db import models
from .machine import CacheMachine
# Abstract Django model base for models participating in pickled-field
# caching. `_cached_fields` names the fields involved; subclasses are
# expected to override it (empty here). NOTE(review): the caching machinery
# itself lives elsewhere (see CacheMachine import) — confirm against machine.py.
class PickleFieldCache(models.Model):
    class Meta:
        abstract = True  # no DB table is created for this base class
    _cached_fields = []
# ForeignKey variant whose forward accessor goes through the cache-aware
# descriptor instead of Django's default one.
class CachedForeignKey(models.ForeignKey):
    def contribute_to_class(self, cls, name):
        # Let Django wire up the FK normally, then replace the reverse
        # single-related-object descriptor with the caching version.
        super(CachedForeignKey, self).contribute_to_class(cls, name)
        setattr(cls, self.name, CachedReverseSingleRelatedObjectDescriptor(self))
# OneToOneField with the cache-aware forward accessor inherited from
# CachedForeignKey (via contribute_to_class in the MRO). The reverse side of
# the relation keeps Django's stock descriptor.
class CachedOneToOneField(models.OneToOneField, CachedForeignKey):
    ''' Does not cache the reverse relationship '''
    pass
class CachedReverseSingleRelatedObjectDescriptor(models.fields.related.ReverseSingleRelatedObjectDescriptor):
    """Forward FK accessor that consults caches before querying the DB.

    Lookup order on attribute access: the per-instance cache attribute, then
    the related model's cache (when the model exposes `get_cache`/`set_cache`),
    and finally the stock Django descriptor (a DB query), whose result is
    written back to the model-level cache.
    """

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # Bug fix: these were previously bound only inside the
        # `except AttributeError` branch, so the fallback path below raised
        # UnboundLocalError whenever the per-instance cache held a falsy
        # value (i.e. the `try` succeeded but `rel_obj` was empty).
        model = self.field.related.parent_model
        model_has_cache = hasattr(model, 'cache')
        lookup_id = getattr(instance, self.field.name + '_id')
        rel_obj = None
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            # No per-instance cache yet; try the model-level cache.
            if lookup_id and model_has_cache:
                try:
                    rel_obj = model.get_cache(id=lookup_id)
                except model.DoesNotExist:
                    pass
                if rel_obj:
                    setattr(instance, self.cache_name, rel_obj)
        if not rel_obj:
            # Fall back to the standard descriptor (hits the DB) and seed the
            # model-level cache for the next access.
            rel_obj = super(CachedReverseSingleRelatedObjectDescriptor, self).__get__(instance, instance_type=instance_type)
            if model_has_cache:
                model.set_cache(rel_obj, id=lookup_id)
        if rel_obj == CacheMachine.DOES_NOT_EXIST:
            # Negative-cache sentinel: behave exactly like a failed DB lookup.
            raise self.field.related.model.DoesNotExist
        return rel_obj
| gpl-3.0 |
peak6/st2 | st2common/st2common/util/ip_utils.py | 10 | 3535 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import ipaddr
from st2common.log import logging
LOG = logging.getLogger(__name__)
__all__ = [
'is_ipv4',
'is_ipv6',
'split_host_port'
]
# Patterns for the "[addr]:port" bracket notation (required to disambiguate
# an IPv6 address from its port). Raw strings fix the invalid "\[" escape
# sequences (a DeprecationWarning on Python 3) without changing the regexes.
BRACKET_PATTERN = r"^\[.*\]"  # IPv6 bracket pattern to specify port
COMPILED_BRACKET_PATTERN = re.compile(BRACKET_PATTERN)

HOST_ONLY_IN_BRACKET = r"^\[.*\]$"  # bracketed address with no port suffix
COMPILED_HOST_ONLY_IN_BRACKET_PATTERN = re.compile(HOST_ONLY_IN_BRACKET)
def is_ipv6(ip_str):
    """
    Validate whether given string is IPv6.

    :param ip_str: String to validate.
    :type ip_str: ``str``

    :rtype: ``bool``
    """
    try:
        addr = ipaddr.IPAddress(ip_str)
        return addr.version == 6
    except (ValueError, TypeError):
        # ipaddr raises ValueError for malformed addresses. The previous
        # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
        return False
def is_ipv4(ip_str):
    """
    Validate whether given string is IPv4.

    :param ip_str: String to validate.
    :type ip_str: ``str``

    :rtype: ``bool``
    """
    try:
        addr = ipaddr.IPAddress(ip_str)
        return addr.version == 4
    except (ValueError, TypeError):
        # ipaddr raises ValueError for malformed addresses. The previous
        # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
        return False
def split_host_port(host_str):
    """
    Split host_str into host and port.

    Can handle IPv4, IPv6, hostname inside or outside brackets.
    Note: If you want to specify a port with IPv6, you definitely
    should enclose IP address within [].

    :param host_str: Host port string.
    :type host_str: ``str``

    :return: Hostname (string), port (int) tuple. Raises exception on invalid port.
    :rtype: ``tuple`` of ``str`` and ``int``
    """
    hostname = host_str
    port = None
    # A bare IPv6 or IPv4 address can never carry a port; return as-is.
    if is_ipv6(host_str) or is_ipv4(host_str):
        return (hostname, port)
    # Check if it's square bracket style, e.g. "[::1]:80".
    match = COMPILED_BRACKET_PATTERN.match(host_str)
    if match:
        LOG.debug('Square bracket style.')
        # Bracketed address with no port component, e.g. "[::1]".
        match = COMPILED_HOST_ONLY_IN_BRACKET_PATTERN.match(host_str)
        if match:
            hostname = match.group().strip('[]')
            return (hostname, port)
        hostname, separator, port = hostname.rpartition(':')
        try:
            LOG.debug('host_str: %s, hostname: %s port: %s' % (host_str, hostname, port))
            port = int(port)
            hostname = hostname.strip('[]')
            return (hostname, port)
        except (ValueError, TypeError):
            # int() failed; previously a bare ``except:`` which also masked
            # unrelated errors such as KeyboardInterrupt.
            raise Exception('Invalid port %s specified.' % port)
    else:
        LOG.debug('Non-bracket address. host_str: %s' % host_str)
        if ':' in host_str:
            LOG.debug('Non-bracket with port.')
            hostname, separator, port = hostname.rpartition(':')
            try:
                port = int(port)
                return (hostname, port)
            except (ValueError, TypeError):
                raise Exception('Invalid port %s specified.' % port)
    return (hostname, port)
| apache-2.0 |
jessrosenfield/pants | src/python/pants/backend/jvm/repository.py | 17 | 1337 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
class Repository(object):
  """An artifact repository, such as a maven repo."""

  def __init__(self, name=None, url=None, push_db_basedir=None, **kwargs):
    """
    :param string url: Optional URL of the repository.
    :param string push_db_basedir: Push history file base directory.
    """
    self.name = name
    self.url = url
    self.push_db_basedir = push_db_basedir

  def push_db(self, target):
    """Returns the path of the push-history properties file for `target`."""
    provides = target.provides
    return os.path.join(
        self.push_db_basedir, provides.org, provides.name, 'publish.properties')

  def _key(self):
    # Identity tuple shared by equality and hashing so they stay consistent.
    return (self.name, self.url, self.push_db_basedir)

  def __eq__(self, other):
    return isinstance(other, Repository) and self._key() == other._key()

  def __hash__(self):
    return hash(self._key())

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return "{} -> {} ({})".format(self.name, self.url, self.push_db_basedir)
| apache-2.0 |
atuljain/odoo | addons/account_analytic_analysis/__openerp__.py | 62 | 2311 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Contracts Management',
'version': '1.1',
'category': 'Sales Management',
'description': """
This module is for modifying account analytic view to show important data to project manager of services companies.
===================================================================================================================
Adds menu to show relevant information to each manager.You can also view the report of account analytic summary user-wise as well as month-wise.
""",
'author': 'Camptocamp',
'website': 'http://www.camptocamp.com/',
'images': ['images/bill_tasks_works.jpeg','images/overpassed_accounts.jpeg'],
'depends': ['hr_timesheet_invoice', 'sale'], #although sale is technically not required to install this module, all menuitems are located under 'Sales' application
'data': [
'security/ir.model.access.csv',
'security/account_analytic_analysis_security.xml',
'account_analytic_analysis_view.xml',
'account_analytic_analysis_cron.xml',
'res_config_view.xml',
'views/account_analytic_analysis.xml',
],
'demo': ['analytic_account_demo.xml'],
'test': ['test/account_analytic_analysis.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.