repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
Elico-Corp/odoo_OCB | addons/fetchmail/res_config.py | 46 | 4279 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class fetchmail_config_settings(osv.osv_memory):
    """ This wizard can be inherited in conjunction with 'res.config.settings', in order to
        define fields that configure a fetchmail server.

        It relies on the following convention on the object::

            class my_config_settings(osv.osv_memory):
                _name = 'my.settings'
                _inherit = ['res.config.settings', 'fetchmail.config.settings']

                _columns = {
                    'fetchmail_stuff': fields.boolean(...,
                        fetchmail_model='my.stuff', fetchmail_name='Incoming Stuff'),
                }

                def configure_fetchmail_stuff(self, cr, uid, ids, context=None):
                    return self.configure_fetchmail(cr, uid, 'fetchmail_stuff', context)

        and in the form view::

            <field name="fetchmail_stuff"/>
            <button type="object" name="configure_fetchmail_stuff"/>

        The method ``get_default_fetchmail`` determines the value of all fields that start
        with 'fetchmail_'. It looks up fetchmail server configurations that match the given
        model name (``fetchmail_model``) and are active.

        The button action ``configure_fetchmail_stuff`` is caught by the object, and calls
        automatically the method ``configure_fetchmail``; it opens the fetchmail server
        configuration form for the corresponding field.
    """
    _name = 'fetchmail.config.settings'

    def get_default_fetchmail(self, cr, uid, fields, context=None):
        """ determine the value of all fields like 'fetchmail_XXX' """
        ir_model = self.pool.get('ir.model')
        fetchmail_server = self.pool.get('fetchmail.server')
        fetchmail_fields = [f for f in fields if f.startswith('fetchmail_')]
        res = {}
        for f in fetchmail_fields:
            # the target model is declared on the column via fetchmail_model=...
            model_name = self._columns[f].fetchmail_model
            # NOTE(review): assumes an ir.model record always exists for the
            # declared model; [0] raises IndexError otherwise — confirm callers
            # only declare installed models.
            model_id = ir_model.search(cr, uid, [('model', '=', model_name)])[0]
            # a field is True when at least one confirmed ('done') server
            # fetches into that model
            server_ids = fetchmail_server.search(cr, uid, [('object_id', '=', model_id), ('state', '=', 'done')])
            res[f] = bool(server_ids)
        return res

    def set_fetchmail(self, cr, uid, ids, context=None):
        """ deactivate fetchmail servers for all fields like 'fetchmail_XXX' that are False """
        config = self.browse(cr, uid, ids[0], context)
        fetchmail_fields = [f for f in self._columns if f.startswith('fetchmail_')]
        # determine which models should not have active fetchmail servers, and
        # deactivate all active servers for those models
        models = [self._columns[f].fetchmail_model for f in fetchmail_fields if not config[f]]
        if models:
            fetchmail_server = self.pool.get('fetchmail.server')
            server_ids = fetchmail_server.search(cr, uid, [('object_id.model', 'in', models), ('state', '=', 'done')])
            fetchmail_server.set_draft(cr, uid, server_ids, context)

    def configure_fetchmail(self, cr, uid, field, context=None):
        """ open the form view of the fetchmail.server to configure """
        action = {
            'type': 'ir.actions.act_window',
            'res_model': 'fetchmail.server',
            'view_mode': 'form',
            'target': 'current',
        }
        model_name = self._columns[field].fetchmail_model
        model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)])[0]
        server_ids = self.pool.get('fetchmail.server').search(cr, uid, [('object_id', '=', model_id)])
        if server_ids:
            # edit the first existing server configured for that model
            action['res_id'] = server_ids[0]
        else:
            # no server yet: open an empty form pre-filled with defaults
            action['context'] = {
                'default_name': self._columns[field].fetchmail_name,
                'default_object_id': model_id,
            }
        return action

    def __getattr__(self, name):
        """ catch calls to 'configure_fetchmail_XXX' """
        if name.startswith('configure_fetchmail_'):
            # len('configure_') == 10, so name[10:] is the column name
            # 'fetchmail_XXX' that configure_fetchmail() expects
            return (lambda cr, uid, ids, context=None:
                        self.configure_fetchmail(cr, uid, name[10:], context))
        return super(fetchmail_config_settings, self).__getattr__(name)
| agpl-3.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.4/django/contrib/messages/storage/__init__.py | 393 | 1183 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
def get_storage(import_path):
"""
Imports the message storage class described by import_path, where
import_path is the full Python path to the class.
"""
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a Python path." % import_path)
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, classname))
# Callable with the same interface as the storage classes i.e. accepts a
# 'request' object. It is wrapped in a lambda to stop 'settings' being used at
# the module level (settings.MESSAGE_STORAGE is looked up lazily, per call).
default_storage = lambda request: get_storage(settings.MESSAGE_STORAGE)(request)
| mit |
tghosgor/libarmmcu | gyp/pylib/gyp/__init__.py | 19 | 22047 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}  # populated from the -d/--debug flags in gyp_main()

# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print |message| (%-formatted with |args|) when debug mode |mode| is on.

  Output is prefixed with the mode name and the caller's file/line/function,
  recovered from the stack; 'all' in gyp.debug enables every mode.
  """
  if 'all' in gyp.debug or mode in gyp.debug:
    ctx = ('unknown', 0, 'unknown')
    try:
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    except:
      # best-effort caller lookup; fall back to the 'unknown' placeholder
      pass
    if args:
      message %= args
    print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of all '.gyp' files in the current working directory."""
  extension = '.gyp'
  return [name for name in os.listdir(os.getcwd())
          if name.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True, duplicate_basename_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.

  Note: the mutable default arguments are safe here because default_variables
  is copied and includes is sliced (includes[:]) before use.
  """
  if params is None:
    params = {}

  # 'format' may carry a flavor suffix (e.g. 'make-android'); split it off and
  # hand the flavor to the generator via params.  (The dead local
  # 'flavor = None' that used to sit here was never read and has been removed.)
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)

  default_variables = copy.copy(default_variables)

  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format

  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)

    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done. Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format

  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)

  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)

  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)

  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
    'generator_filelist_paths':
        getattr(generator, 'generator_filelist_paths', None),
  }

  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          duplicate_basename_check,
                          params['parallel'], params['root_targets'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
  of the pairs. If a string is simply NAME, then the value in the dictionary
  is set to True. If VALUE can be converted to an integer, it is.
  """
  result = {}
  for item in name_value_list:
    name, sep, value = item.partition('=')
    if not sep:
      # Bare NAME with no '=': treat it as a boolean flag.
      result[name] = True
    else:
      # Prefer an integer value when the text converts cleanly.
      try:
        result[name] = int(value)
      except ValueError:
        result[name] = value
  return result
def ShlexEnv(env_name):
  """Shell-split the environment variable |env_name| into a list of tokens.

  Always returns a list: previously a variable that was set but empty fell
  through and returned the raw empty string instead of a list, which was an
  inconsistent return type for callers that expect a list.
  """
  raw = os.environ.get(env_name)
  if not raw:
    # unset or empty: nothing to split
    return []
  return shlex.split(raw)
def FormatOpt(opt, value):
  """Join a flag and its value as it appears on a command line:
  long options become '--opt=value', short options become '-Ovalue'."""
  separator = '=' if opt.startswith('--') else ''
  return opt + separator + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  The |env_name|, if given, is checked in the environment and used to generate
  an initial list of options, then the options that were specified on the
  command line (given in |values|) are appended. This matches the handling of
  environment variables and command line flags where command line flags override
  the environment, while not requiring the environment to be set when the flags
  are used again.
  """
  flags = []
  if options.use_environment and env_name:
    for env_value in ShlexEnv(env_name):
      formatted = FormatOpt(flag, predicate(env_value))
      # Keep only the last occurrence of a repeated environment flag.
      if formatted in flags:
        flags.remove(formatted)
      flags.append(formatted)
  # Command-line values go last so they take precedence over the environment.
  flags.extend(FormatOpt(flag, predicate(v)) for v in (values or []))
  return flags
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.
  """
  def FixPath(path):
    # Re-express a path relative to options.depth; fall back to '.' for an
    # empty result.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path

  def Noop(value):
    # Identity predicate for non-path options.
    return value

  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    value = getattr(options, name)
    # old-style 'cond and a or b' ternary: FixPath for path-typed options,
    # identity otherwise (safe here because FixPath/Noop are truthy)
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      # Only emit the bare flag when the stored value differs from the default
      # implied by omitting it.
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))

  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """OptionParser that records per-option metadata so RegenerateFlags() can
  later rebuild an equivalent command line."""

  def __init__(self):
    # dest -> {'action', 'type', 'env_name', 'opt'} for regenerable options
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    This accepts the same arguments as OptionParser.add_option, plus the
    following:
      regenerate: can be set to False to prevent this option from being included
        in regeneration.
      env_name: name of environment variable that additional values for this
        option come from.
      type: adds type='path', to tell the regenerator that the values of
        this option need to be made relative to options.depth
    """
    # Pop the custom keywords before handing the rest to optparse, which
    # would otherwise reject them as unknown.
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']

      # The path type is needed for regenerating, for optparse we can just treat
      # it as a string.
      type = kw.get('type')
      if type == 'path':
        kw['type'] = 'string'

      self.__regeneratable_options[dest] = {
          'action': kw.get('action'),
          'type': type,
          'env_name': env_name,
          'opt': args[0],
      }

    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    # Attach the recorded metadata to the values object so RegenerateFlags()
    # can find it later.
    values._regeneration_metadata = self.__regeneratable_options
    return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--config-dir', dest='config_dir', action='store',
env_name='GYP_CONFIG_DIR', default=None,
help='The location for configuration files like '
'include.gypi.')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# --no-duplicate-basename-check disables the check for duplicate basenames
# in a static_library/shared_library project. Visual C++ 2008 generator
# doesn't support this configuration. Libtool on Mac also generates warnings
# when duplicate basenames are passed into Make generator on Mac.
# TODO(yukawa): Remove this option when these legacy generators are
# deprecated.
parser.add_option('--no-duplicate-basename-check',
dest='duplicate_basename_check', action='store_false',
default=True, regenerate=False,
help="don't check for duplicate basenames")
parser.add_option('--no-parallel', action='store_true', default=False,
help='Disable multiprocessing')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('-R', '--root-target', dest='root_targets',
action='append', metavar='TARGET',
help='include only TARGET and its deep dependencies')
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
# Set up the configuration directory (defaults to ~/.gyp)
if not options.config_dir:
home = None
home_dot_gyp = None
if options.use_environment:
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
else:
home_dot_gyp = os.path.expanduser(options.config_dir)
if home_dot_gyp and not os.path.exists(home_dot_gyp):
home_dot_gyp = None
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
options.parallel = not options.no_parallel
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise GypError('Could not automatically locate src directory. This is'
'a temporary Chromium feature that will be removed. Use'
'--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel,
'root_targets': options.root_targets}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(
build_files, format, cmdline_default_variables, includes, options.depth,
params, options.check, options.circular_check,
options.duplicate_basename_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
  """Run gyp_main(args); on GypError print 'gyp: <msg>' to stderr, return 1."""
  try:
    return gyp_main(args)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  """Console-script entry point: forward sys.argv[1:] to main()."""
  return main(sys.argv[1:])

if __name__ == '__main__':
  sys.exit(script_main())
| gpl-3.0 |
AMOboxTV/AMOBox.LegoBuild | plugin.video.salts/scrapers/izlemeyedeger_scraper.py | 1 | 3982 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
import scraper
# Default site root; __init__ re-reads the effective value from the addon
# '<name>-base_url' setting, so this is only the fallback/default.
BASE_URL = 'http://www.izlemeyedeger.com'
class IzlemeyeDeger_Scraper(scraper.Scraper):
    """SALTS scraper for izlemeyedeger.com (movies only, direct streams)."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # the effective site URL can be overridden from the addon settings
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        # this scraper only handles movies
        return frozenset([VIDEO_TYPES.MOVIE])

    @classmethod
    def get_name(cls):
        return 'IzlemeyeDeger'

    def resolve_link(self, link):
        # URLs produced by get_sources() are already playable; nothing to resolve
        return link

    def format_source_label(self, item):
        """Build the human-readable label shown in the source list."""
        label = '[%s] %s' % (item['quality'], item['host'])
        if 'views' in item and item['views']:
            label += ' (%s views)' % item['views']
        return label

    def get_sources(self, video):
        """Scrape the movie page for playable stream URLs.

        :returns: list of hoster dicts ('host', 'quality', 'url', ...);
            empty when no page or no embedded player is found.
        """
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            # the player page is referenced via an itemprop="embedURL" meta tag
            embed_url = dom_parser.parse_dom(html, 'meta', {'itemprop': 'embedURL'}, ret='content')
            if embed_url:
                html = self._http_get(embed_url[0], cache_limit=.5)
                # jwplayer-style sources: {"file": "...", "label": "720p"}
                for match in re.finditer('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?"', html):
                    stream_url, height = match.groups()
                    stream_url = stream_url.replace('\\&', '&')
                    host = self._get_direct_hostname(stream_url)
                    if host == 'gvideo':
                        # Google Video links encode quality in the URL itself
                        quality = scraper_utils.gv_get_quality(stream_url)
                    else:
                        quality = scraper_utils.height_get_quality(height)
                    # append the headers Kodi needs to play the stream
                    stream_url += '|User-Agent=%s&Referer=%s' % (scraper_utils.get_ua(), urllib.quote(embed_url[0]))
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                    hosters.append(hoster)
        return hosters

    def get_url(self, video):
        return self._default_get_url(video)

    def search(self, video_type, title, year, season=''):
        """Search the site; return [{'url':..., 'title':..., 'year':...}]."""
        results = []
        search_url = urlparse.urljoin(self.base_url, '/arama?q=%s')
        search_url = search_url % (urllib.quote_plus(title))
        html = self._http_get(search_url, cache_limit=1)
        fragment = dom_parser.parse_dom(html, 'div', {'class': 'section'})
        if fragment:
            for match in re.finditer('href="([^"]+).*?class="year">\s*(\d+).*?class="video-title">\s*([^<]+)', fragment[0], re.DOTALL):
                url, match_year, match_title = match.groups('')
                match_title = match_title.strip()
                # keep the result when either side lacks a year or the years agree
                if not year or not match_year or year == match_year:
                    result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
                    results.append(result)
        return results
| gpl-2.0 |
atosatto/ansible | lib/ansible/parsing/vault/__init__.py | 14 | 28241 | # (c) 2014, James Tanner <tanner.jc@gmail.com>
# (c) 2016, Adrian Likins <alikins@redhat.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shlex
import shutil
import sys
import tempfile
import random
from io import BytesIO
from subprocess import call
from hashlib import sha256
from binascii import hexlify
from binascii import unhexlify
from hashlib import md5
# Note: Only used for loading obsolete VaultAES files. All files are written
# using the newer VaultAES256 which does not require md5
try:
from Crypto.Hash import SHA256, HMAC
HAS_HASH = True
except ImportError:
HAS_HASH = False
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
from ansible.errors import AnsibleError
from ansible.module_utils.six import PY3, binary_type
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# OpenSSL pbkdf2_hmac
HAS_PBKDF2HMAC = False
try:
from cryptography.hazmat.primitives.hashes import SHA256 as c_SHA256
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
HAS_PBKDF2HMAC = True
except ImportError:
pass
except Exception as e:
display.vvvv("Optional dependency 'cryptography' raised an exception, falling back to 'Crypto'.")
import traceback
display.vvvv("Traceback from import of cryptography was {0}".format(traceback.format_exc()))
HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC
CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform." \
" You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
b_HEADER = b'$ANSIBLE_VAULT'
CIPHER_WHITELIST = frozenset((u'AES', u'AES256'))
CIPHER_WRITE_WHITELIST = frozenset((u'AES256',))
# See also CIPHER_MAPPING at the bottom of the file which maps cipher strings
# (used in VaultFile header) to a cipher class
def check_prereqs():
    """Raise AnsibleError when any required crypto building block is missing.

    The HAS_* flags are set by the guarded imports at the top of this module;
    AES, a CTR counter, a PBKDF2 implementation and SHA256/HMAC are all
    required for vault operation.
    """
    if not HAS_AES or not HAS_COUNTER or not HAS_ANY_PBKDF2HMAC or not HAS_HASH:
        raise AnsibleError(CRYPTO_UPGRADE)
class AnsibleVaultError(AnsibleError):
    """Vault-specific error; subclasses AnsibleError so existing handlers
    that catch AnsibleError keep working."""
    pass
def is_encrypted(data):
    """ Test if this is vault encrypted data blob

    :arg data: a byte or text string to test whether it is recognized as vault
        encrypted data
    :returns: True if it is recognized. Otherwise, False.
    """
    try:
        # Make sure we have a byte string and that it only contains ascii
        # bytes.
        b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
    except (UnicodeError, TypeError):
        # The vault format is pure ascii so if we failed to encode to bytes
        # via ascii we know that this is not vault data.
        # Similarly, if it's not a string, it's not vault data
        return False

    # Vault payloads always begin with the b'$ANSIBLE_VAULT' header.
    if b_data.startswith(b_HEADER):
        return True
    return False
def is_encrypted_file(file_obj, start_pos=0, count=-1):
    """Test if the contents of a file obj are a vault encrypted data blob.

    :arg file_obj: A file object that will be read from.
    :kwarg start_pos: A byte offset in the file to start reading the header
        from.  Defaults to 0, the beginning of the file.
    :kwarg count: Read up to this number of bytes from the file to determine
        if it looks like encrypted vault data.  The default is -1, read to the
        end of file.
    :returns: True if the file looks like a vault file. Otherwise, False.
    """
    # Remember where the caller left the stream so we can put it back.
    saved_pos = file_obj.tell()
    try:
        file_obj.seek(start_pos)
        contents = file_obj.read(count)
    finally:
        file_obj.seek(saved_pos)
    return is_encrypted(contents)
class VaultLib:
    """Manage the vault envelope format around the cipher implementations.

    Handles the ``$ANSIBLE_VAULT;<version>;<cipher>`` header line and the
    80-column hex body used in vault files; the actual encryption work is
    delegated to the classes registered in CIPHER_MAPPING.
    """

    def __init__(self, b_password):
        """:arg b_password: vault password (text or bytes); stored as utf-8 bytes."""
        self.b_password = to_bytes(b_password, errors='strict', encoding='utf-8')
        # cipher_name/b_version are (re)set by encrypt()/_split_header().
        self.cipher_name = None
        self.b_version = b'1.1'

    @staticmethod
    def is_encrypted(data):
        """Test if this is vault encrypted data.

        Deprecated: use the module-level vault.is_encrypted() instead.

        :arg data: a byte or text string to test for whether it is
            recognized as vault encrypted data
        :returns: True if it is recognized. Otherwise, False.
        """
        # This could in the future, check to see if the data is a vault blob and
        # is encrypted with a key associated with this vault
        # instead of just checking the format.
        display.deprecated(u'vault.VaultLib.is_encrypted is deprecated. Use vault.is_encrypted instead', version='2.4')
        return is_encrypted(data)

    @staticmethod
    def is_encrypted_file(file_obj):
        """Deprecated: use the module-level vault.is_encrypted_file() instead."""
        display.deprecated(u'vault.VaultLib.is_encrypted_file is deprecated. Use vault.is_encrypted_file instead', version='2.4')
        return is_encrypted_file(file_obj)

    def encrypt(self, plaintext):
        """Vault encrypt a piece of data.

        :arg plaintext: a text or byte string to encrypt.
        :returns: a utf-8 encoded byte str of encrypted data.  The string
            contains a header identifying this as vault encrypted data and
            formatted to newline terminated lines of 80 characters.  This is
            suitable for dumping as is to a vault file.
        :raises AnsibleError: if the input is already vault data or the
            selected cipher is unknown.

        If the string passed in is a text string, it will be encoded to UTF-8
        before encryption.
        """
        b_plaintext = to_bytes(plaintext, errors='surrogate_or_strict')
        if is_encrypted(b_plaintext):
            raise AnsibleError("input is already encrypted")
        # Only ciphers on the write whitelist may be used for new data.
        if not self.cipher_name or self.cipher_name not in CIPHER_WRITE_WHITELIST:
            self.cipher_name = u"AES256"
        try:
            this_cipher = CIPHER_MAPPING[self.cipher_name]()
        except KeyError:
            raise AnsibleError(u"{0} cipher could not be found".format(self.cipher_name))
        # encrypt, then wrap in the vault file envelope
        b_ciphertext = this_cipher.encrypt(b_plaintext, self.b_password)
        return self._format_output(b_ciphertext)

    def decrypt(self, vaulttext, filename=None):
        """Decrypt a piece of vault encrypted data.

        :arg vaulttext: a string to decrypt.  Since vault encrypted data is an
            ascii text format this can be either a byte str or unicode string.
        :kwarg filename: a filename that the data came from.  This is only
            used to make better error messages in case the data cannot be
            decrypted.
        :returns: a byte string containing the decrypted data
        :raises AnsibleError: if no password is set, the input is not vault
            data, the cipher is not whitelisted, or decryption fails.
        """
        b_vaulttext = to_bytes(vaulttext, errors='strict', encoding='utf-8')
        if self.b_password is None:
            raise AnsibleError("A vault password must be specified to decrypt data")
        if not is_encrypted(b_vaulttext):
            msg = "input is not vault encrypted data"
            if filename:
                # Fixed: the filename clause used to be appended with no
                # separator, producing a garbled run-on error message.
                msg += "; %s is not a vault encrypted file" % filename
            raise AnsibleError(msg)
        # strip the header; this also sets self.cipher_name/self.b_version
        b_vaulttext = self._split_header(b_vaulttext)
        # create the cipher object
        if self.cipher_name not in CIPHER_WHITELIST:
            raise AnsibleError("{0} cipher could not be found".format(self.cipher_name))
        this_cipher = CIPHER_MAPPING[self.cipher_name]()
        # try to unencrypt vaulttext; ciphers return None on HMAC mismatch
        b_plaintext = this_cipher.decrypt(b_vaulttext, self.b_password)
        if b_plaintext is None:
            msg = "Decryption failed"
            if filename:
                msg += " on %s" % filename
            raise AnsibleError(msg)
        return b_plaintext

    def _format_output(self, b_ciphertext):
        """Add the vault header and wrap the body to 80-character columns.

        :arg b_ciphertext: the encrypted and hexlified data as a byte string
        :returns: a byte str that should be dumped into a file.  It's
            formatted to 80 char columns and has the header prepended
        :raises AnsibleError: if no cipher name has been set yet.
        """
        if not self.cipher_name:
            raise AnsibleError("the cipher must be set before adding a header")
        header = b';'.join([b_HEADER, self.b_version,
                            to_bytes(self.cipher_name, 'utf-8', errors='strict')])
        b_vaulttext = [header]
        b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
        b_vaulttext += [b'']  # trailing newline
        return b'\n'.join(b_vaulttext)

    def _split_header(self, b_vaulttext):
        """Retrieve information about the Vault and clean the data.

        When data is saved, it has a header prepended and is formatted into 80
        character lines.  This method extracts the information from the header
        and then removes the header and the inserted newlines.  The string
        returned is suitable for processing by the Cipher classes.

        Side effect: sets self.b_version and self.cipher_name from the header.

        :arg b_vaulttext: byte str containing the data from a save file
        :returns: a byte str suitable for passing to a Cipher class's
            decrypt() function.
        """
        b_tmpdata = b_vaulttext.split(b'\n')
        b_tmpheader = b_tmpdata[0].strip().split(b';')
        self.b_version = b_tmpheader[1].strip()
        self.cipher_name = to_text(b_tmpheader[2].strip())
        # re-join the wrapped body into one continuous hex blob
        b_ciphertext = b''.join(b_tmpdata[1:])
        return b_ciphertext
class VaultEditor:
    """File-level vault operations: create, edit, encrypt, decrypt, rekey."""

    def __init__(self, b_password):
        self.vault = VaultLib(b_password)

    # TODO: mv shred file stuff to it's own class
    def _shred_file_custom(self, tmp_path):
        """Destroy a file, when shred (core-utils) is not available

        Unix `shred' destroys files "so that they can be recovered only with great difficulty with
        specialised hardware, if at all". It is based on the method from the paper
        "Secure Deletion of Data from Magnetic and Solid-State Memory",
        Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).

        We do not go to that length to re-implement shred in Python; instead, overwriting with a block
        of random data should suffice.

        See https://github.com/ansible/ansible/pull/13700 .
        """
        file_len = os.path.getsize(tmp_path)
        if file_len > 0:  # avoid work when file was empty
            max_chunk_len = min(1024*1024*2, file_len)
            passes = 3
            with open(tmp_path, "wb") as fh:
                for _ in range(passes):
                    fh.seek(0, 0)
                    # get a random chunk of data, each pass with other length
                    chunk_len = random.randint(max_chunk_len//2, max_chunk_len)
                    data = os.urandom(chunk_len)
                    # tile the chunk across the whole file, then write the tail
                    for _ in range(0, file_len // chunk_len):
                        fh.write(data)
                    fh.write(data[:file_len % chunk_len])
                    assert(fh.tell() == file_len)  # FIXME remove this assert once we have unittests to check its accuracy
                    # force each pass to physically hit the disk
                    os.fsync(fh)

    def _shred_file(self, tmp_path):
        """Securely destroy a decrypted file

        Note standard limitations of GNU shred apply (For flash, overwriting would have no effect
        due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
        guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks),
        it is a non-issue.

        Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
        a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on
        a custom shredding method.
        """
        if not os.path.isfile(tmp_path):
            # file is already gone
            return
        try:
            r = call(['shred', tmp_path])
        except (OSError, ValueError):
            # shred is not available on this system, or some other error occurred.
            # ValueError caught because OS X El Capitan is raising an
            # exception big enough to hit a limit in python2-2.7.11 and below.
            # Symptom is ValueError: insecure pickle when shred is not
            # installed there.
            r = 1
        if r != 0:
            # we could not successfully execute unix shred; therefore, do custom shred.
            self._shred_file_custom(tmp_path)
        os.remove(tmp_path)

    def _edit_file_helper(self, filename, existing_data=None, force_save=False):
        """Open existing_data in $EDITOR via a tempfile, then encrypt the
        result into filename (no-op if unchanged and not force_save)."""
        # Create a tempfile
        fd, tmp_path = tempfile.mkstemp()
        os.close(fd)
        try:
            if existing_data:
                self.write_data(existing_data, tmp_path, shred=False)
            # drop the user into an editor on the tmp file
            call(self._editor_shell_command(tmp_path))
        except:
            # whatever happens, destroy the decrypted file
            # NOTE(review): bare except is intentional here — even
            # KeyboardInterrupt must not leave plaintext behind; re-raised below.
            self._shred_file(tmp_path)
            raise
        b_tmpdata = self.read_data(tmp_path)
        # Do nothing if the content has not changed
        if existing_data == b_tmpdata and not force_save:
            self._shred_file(tmp_path)
            return
        # encrypt new data and write out to tmp
        # An existing vaultfile will always be UTF-8,
        # so decode to unicode here
        b_ciphertext = self.vault.encrypt(b_tmpdata)
        self.write_data(b_ciphertext, tmp_path)
        # shuffle tmp file into place
        self.shuffle_files(tmp_path, filename)

    def encrypt_bytes(self, b_plaintext):
        """Return b_plaintext encrypted as vault text (no file I/O)."""
        check_prereqs()
        b_ciphertext = self.vault.encrypt(b_plaintext)
        return b_ciphertext

    def encrypt_file(self, filename, output_file=None):
        """Encrypt filename in place, or into output_file when given."""
        check_prereqs()
        # A file to be encrypted into a vaultfile could be any encoding
        # so treat the contents as a byte string.
        # follow the symlink
        filename = os.path.realpath(filename)
        b_plaintext = self.read_data(filename)
        b_ciphertext = self.vault.encrypt(b_plaintext)
        self.write_data(b_ciphertext, output_file or filename)

    def decrypt_file(self, filename, output_file=None):
        """Decrypt filename in place, or into output_file when given."""
        check_prereqs()
        # follow the symlink
        filename = os.path.realpath(filename)
        ciphertext = self.read_data(filename)
        try:
            plaintext = self.vault.decrypt(ciphertext)
        except AnsibleError as e:
            raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename)))
        self.write_data(plaintext, output_file or filename, shred=False)

    def create_file(self, filename):
        """ create a new encrypted file """
        check_prereqs()
        # FIXME: If we can raise an error here, we can probably just make it
        # behave like edit instead.
        if os.path.isfile(filename):
            raise AnsibleError("%s exists, please use 'edit' instead" % filename)
        self._edit_file_helper(filename)

    def edit_file(self, filename):
        """Decrypt filename, open it in $EDITOR, and re-encrypt the result."""
        check_prereqs()
        # follow the symlink
        filename = os.path.realpath(filename)
        ciphertext = self.read_data(filename)
        try:
            plaintext = self.vault.decrypt(ciphertext)
        except AnsibleError as e:
            raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename)))
        if self.vault.cipher_name not in CIPHER_WRITE_WHITELIST:
            # we want to get rid of files encrypted with the AES cipher
            self._edit_file_helper(filename, existing_data=plaintext, force_save=True)
        else:
            self._edit_file_helper(filename, existing_data=plaintext, force_save=False)

    def plaintext(self, filename):
        """Return the decrypted contents of filename without writing anything."""
        check_prereqs()
        ciphertext = self.read_data(filename)
        try:
            plaintext = self.vault.decrypt(ciphertext)
        except AnsibleError as e:
            raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename)))
        return plaintext

    def rekey_file(self, filename, b_new_password):
        """Re-encrypt filename with b_new_password, preserving mode/ownership."""
        check_prereqs()
        # follow the symlink
        filename = os.path.realpath(filename)
        prev = os.stat(filename)
        ciphertext = self.read_data(filename)
        try:
            plaintext = self.vault.decrypt(ciphertext)
        except AnsibleError as e:
            raise AnsibleError("%s for %s" % (to_bytes(e),to_bytes(filename)))
        # This is more or less an assert, see #18247
        if b_new_password is None:
            raise AnsibleError('The value for the new_password to rekey %s with is not valid' % filename)
        new_vault = VaultLib(b_new_password)
        new_ciphertext = new_vault.encrypt(plaintext)
        self.write_data(new_ciphertext, filename)
        # preserve permissions
        os.chmod(filename, prev.st_mode)
        os.chown(filename, prev.st_uid, prev.st_gid)

    def read_data(self, filename):
        """Read raw bytes from filename, or from stdin when filename == '-'."""
        try:
            if filename == '-':
                data = sys.stdin.read()
            else:
                with open(filename, "rb") as fh:
                    data = fh.read()
        except Exception as e:
            raise AnsibleError(str(e))
        return data

    # TODO: add docstrings for arg types since this code is picky about that
    def write_data(self, data, filename, shred=True):
        """write data to given path

        :arg data: the encrypted and hexlified data as a utf-8 byte string
        :arg filename: filename to save 'data' to.
        :arg shred: if shred==True, make sure that the original data is first shredded so
            that is cannot be recovered.
        """
        # FIXME: do we need this now? data_bytes should always be a utf-8 byte string
        b_file_data = to_bytes(data, errors='strict')
        if filename == '-':
            sys.stdout.write(b_file_data)
        else:
            if os.path.isfile(filename):
                # shred only when the old contents were plaintext
                if shred:
                    self._shred_file(filename)
                else:
                    os.remove(filename)
            with open(filename, "wb") as fh:
                fh.write(b_file_data)

    def shuffle_files(self, src, dest):
        """Move src over dest, carrying over dest's previous mode/ownership."""
        prev = None
        # overwrite dest with src
        if os.path.isfile(dest):
            prev = os.stat(dest)
            # old file 'dest' was encrypted, no need to _shred_file
            os.remove(dest)
        shutil.move(src, dest)
        # reset permissions if needed
        if prev is not None:
            # TODO: selinux, ACLs, xattr?
            os.chmod(dest, prev.st_mode)
            os.chown(dest, prev.st_uid, prev.st_gid)

    def _editor_shell_command(self, filename):
        """Build the argv list for launching $EDITOR (default 'vi') on filename."""
        EDITOR = os.environ.get('EDITOR','vi')
        editor = shlex.split(EDITOR)
        editor.append(filename)
        return editor
########################################
# CIPHERS #
########################################
class VaultAES:
    """Legacy AES-CBC vault cipher; decrypt-only, kept so old files can be rekeyed."""
    # this version has been obsoleted by the VaultAES256 class
    # which uses encrypt-then-mac (fixing order) and also improving the KDF used
    # code remains for upgrade purposes only
    # http://stackoverflow.com/a/16761459

    # Note: strings in this class should be byte strings by default.

    def __init__(self):
        if not HAS_AES:
            raise AnsibleError(CRYPTO_UPGRADE)

    def _aes_derive_key_and_iv(self, b_password, b_salt, key_length, iv_length):
        """ Create a key and an initialization vector """
        # Chain md5 digests over (previous_digest || password || salt) until
        # enough key+iv material has accumulated.
        b_d = b_di = b''
        while len(b_d) < key_length + iv_length:
            b_text = b''.join([b_di, b_password, b_salt])
            b_di = to_bytes(md5(b_text).digest(), errors='strict')
            b_d += b_di
        b_key = b_d[:key_length]
        b_iv = b_d[key_length:key_length+iv_length]
        return b_key, b_iv

    def encrypt(self, b_plaintext, b_password, key_length=32):
        """ Read plaintext data from in_file and write encrypted to out_file """
        # Deliberately disabled: new data must never be written in this format.
        raise AnsibleError("Encryption disabled for deprecated VaultAES class")

    def decrypt(self, b_vaulttext, b_password, key_length=32):
        """ Decrypt the given data and return it
        :arg b_data: A byte string containing the encrypted data
        :arg b_password: A byte string containing the encryption password
        :arg key_length: Length of the key
        :returns: A byte string containing the decrypted data
        :raises AnsibleError: if the embedded sha256 check fails.
        """
        display.deprecated(u'The VaultAES format is insecure and has been'
                           ' deprecated since Ansible-1.5. Use vault rekey FILENAME to'
                           ' switch to the newer VaultAES256 format', version='2.3')
        # http://stackoverflow.com/a/14989032
        b_ciphertext = unhexlify(b_vaulttext)
        in_file = BytesIO(b_ciphertext)
        in_file.seek(0)
        out_file = BytesIO()
        bs = AES.block_size
        # first block holds b'Salted__' followed by the salt
        b_tmpsalt = in_file.read(bs)
        b_salt = b_tmpsalt[len(b'Salted__'):]
        b_key, b_iv = self._aes_derive_key_and_iv(b_password, b_salt, key_length, bs)
        cipher = AES.new(b_key, AES.MODE_CBC, b_iv)
        b_next_chunk = b''
        finished = False
        # decrypt in 1024-block chunks, one chunk behind so the final chunk's
        # padding can be stripped before writing it
        while not finished:
            b_chunk, b_next_chunk = b_next_chunk, cipher.decrypt(in_file.read(1024 * bs))
            if len(b_next_chunk) == 0:
                if PY3:
                    padding_length = b_chunk[-1]
                else:
                    padding_length = ord(b_chunk[-1])
                b_chunk = b_chunk[:-padding_length]
                finished = True
            out_file.write(b_chunk)
            out_file.flush()
        # reset the stream pointer to the beginning
        out_file.seek(0)
        b_out_data = out_file.read()
        out_file.close()
        # split out sha and verify decryption
        b_split_data = b_out_data.split(b"\n", 1)
        b_this_sha = b_split_data[0]
        b_plaintext = b_split_data[1]
        b_test_sha = to_bytes(sha256(b_plaintext).hexdigest())
        if b_this_sha != b_test_sha:
            raise AnsibleError("Decryption failed")
        return b_plaintext
class VaultAES256:
    """
    Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
    Keys are derived using PBKDF2
    """
    # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html

    # Note: strings in this class should be byte strings by default.

    def __init__(self):
        check_prereqs()

    @staticmethod
    def _create_key(b_password, b_salt, keylength, ivlength):
        """PBKDF2 via pycrypto; fallback when 'cryptography' is unavailable."""
        hash_function = SHA256
        # make two keys and one iv
        pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
        b_derivedkey = PBKDF2(b_password, b_salt, dkLen=(2 * keylength) + ivlength,
                              count=10000, prf=pbkdf2_prf)
        return b_derivedkey

    @classmethod
    def _gen_key_initctr(cls, b_password, b_salt):
        """Derive (cipher key, hmac key, hexlified counter iv) from password+salt."""
        # 16 for AES 128, 32 for AES256
        keylength = 32
        # match the size used for counter.new to avoid extra work
        ivlength = 16
        if HAS_PBKDF2HMAC:
            backend = default_backend()
            kdf = PBKDF2HMAC(
                algorithm=c_SHA256(),
                length=2 * keylength + ivlength,
                salt=b_salt,
                iterations=10000,
                backend=backend)
            b_derivedkey = kdf.derive(b_password)
        else:
            b_derivedkey = cls._create_key(b_password, b_salt, keylength, ivlength)
        # split the derived material: key1 (cipher), key2 (hmac), iv
        b_key1 = b_derivedkey[:keylength]
        b_key2 = b_derivedkey[keylength:(keylength * 2)]
        b_iv = b_derivedkey[(keylength * 2):(keylength * 2) + ivlength]
        return b_key1, b_key2, hexlify(b_iv)

    def encrypt(self, b_plaintext, b_password):
        """Encrypt b_plaintext; returns hexlified 'salt\\nhmac\\nciphertext'."""
        b_salt = os.urandom(32)
        b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
        # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
        bs = AES.block_size
        padding_length = (bs - len(b_plaintext) % bs) or bs
        b_plaintext += to_bytes(padding_length * chr(padding_length), encoding='ascii', errors='strict')
        # COUNTER.new PARAMETERS
        # 1) nbits (integer) - Length of the counter, in bits.
        # 2) initial_value (integer) - initial value of the counter. "iv" from _gen_key_initctr
        ctr = Counter.new(128, initial_value=int(b_iv, 16))
        # AES.new PARAMETERS
        # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from _gen_key_initctr
        # 2) MODE_CTR, is the recommended mode
        # 3) counter=<CounterObject>
        cipher = AES.new(b_key1, AES.MODE_CTR, counter=ctr)
        # ENCRYPT PADDED DATA
        b_ciphertext = cipher.encrypt(b_plaintext)
        # COMBINE SALT, DIGEST AND DATA (encrypt-then-MAC)
        hmac = HMAC.new(b_key2, b_ciphertext, SHA256)
        b_vaulttext = b'\n'.join([hexlify(b_salt), to_bytes(hmac.hexdigest()), hexlify(b_ciphertext)])
        b_vaulttext = hexlify(b_vaulttext)
        return b_vaulttext

    def decrypt(self, b_vaulttext, b_password):
        """Decrypt b_vaulttext; returns plaintext bytes, or None on HMAC mismatch."""
        # SPLIT SALT, DIGEST, AND DATA
        b_vaulttext = unhexlify(b_vaulttext)
        b_salt, b_cryptedHmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
        b_salt = unhexlify(b_salt)
        b_ciphertext = unhexlify(b_ciphertext)
        b_key1, b_key2, b_iv = self._gen_key_initctr(b_password, b_salt)
        # EXIT EARLY IF DIGEST DOESN'T MATCH
        hmacDecrypt = HMAC.new(b_key2, b_ciphertext, SHA256)
        if not self._is_equal(b_cryptedHmac, to_bytes(hmacDecrypt.hexdigest())):
            return None
        # SET THE COUNTER AND THE CIPHER
        ctr = Counter.new(128, initial_value=int(b_iv, 16))
        cipher = AES.new(b_key1, AES.MODE_CTR, counter=ctr)
        # DECRYPT PADDED DATA
        b_plaintext = cipher.decrypt(b_ciphertext)
        # UNPAD DATA
        if PY3:
            padding_length = b_plaintext[-1]
        else:
            padding_length = ord(b_plaintext[-1])
        b_plaintext = b_plaintext[:-padding_length]
        return b_plaintext

    @staticmethod
    def _is_equal(b_a, b_b):
        """
        Comparing 2 byte arrays in constant time
        to avoid timing attacks.

        It would be nice if there was a library for this but
        hey.
        """
        # NOTE(review): stdlib hmac.compare_digest could replace this loop
        # where available — confirm minimum supported Python before switching.
        if not (isinstance(b_a, binary_type) and isinstance(b_b, binary_type)):
            raise TypeError('_is_equal can only be used to compare two byte strings')
        # http://codahale.com/a-lesson-in-timing-attacks/
        if len(b_a) != len(b_b):
            return False
        result = 0
        for b_x, b_y in zip(b_a, b_b):
            if PY3:
                result |= b_x ^ b_y
            else:
                result |= ord(b_x) ^ ord(b_y)
        return result == 0
# Keys could be made bytes later if the code that gets the data is more
# naturally byte-oriented
# Maps the cipher name stored in the vault header to its implementation class.
CIPHER_MAPPING = {
    u'AES': VaultAES,
    u'AES256': VaultAES256,
}
| gpl-3.0 |
runekaagaard/django-contrib-locking | django/contrib/auth/handlers/modwsgi.py | 115 | 1344 | from django.contrib import auth
from django import db
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
    """
    Authenticates against Django's auth database

    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.
    """
    UserModel = auth.get_user_model()
    # db connection state is managed similarly to the wsgi handler
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()
    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            user = None
        if user is None or not user.is_active:
            # unknown or deactivated account: mod_wsgi treats None as "no user"
            return None
        return user.check_password(password)
    finally:
        db.close_old_connections()
def groups_for_user(environ, username):
    """
    Authorizes a user based on groups
    """
    UserModel = auth.get_user_model()
    db.reset_queries()
    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            user = None
        if user is None or not user.is_active:
            # unknown or deactivated account: no group membership
            return []
        return [force_bytes(group.name) for group in user.groups.all()]
    finally:
        db.close_old_connections()
| bsd-3-clause |
jabesq/home-assistant | homeassistant/components/sensibo/climate.py | 1 | 12181 | """Support for Sensibo wifi-enabled home thermostats."""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
import pysensibo
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT_COOL, HVAC_MODE_COOL, HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_STATE, ATTR_TEMPERATURE, CONF_API_KEY, CONF_ID,
STATE_ON, TEMP_CELSIUS, TEMP_FAHRENHEIT)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
from .const import DOMAIN as SENSIBO_DOMAIN
_LOGGER = logging.getLogger(__name__)

# Sentinel CONF_ID value meaning "set up every discovered device".
ALL = ['all']
# Seconds allowed for any single Sensibo API call.
TIMEOUT = 10
SERVICE_ASSUME_STATE = 'assume_state'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
})

ASSUME_STATE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Required(ATTR_STATE): cv.string,
})

# GraphQL-style field list requested from the Sensibo API on each poll;
# the initial fetch additionally asks for the device id.
_FETCH_FIELDS = ','.join([
    'room{name}', 'measurements', 'remoteCapabilities',
    'acState', 'connectionStatus{isAlive}', 'temperatureUnit'])
_INITIAL_FETCH_FIELDS = 'id,' + _FETCH_FIELDS

# Maps acState keys to the Home Assistant supported-feature flag they imply.
FIELD_TO_FLAG = {
    'fanLevel': SUPPORT_FAN_MODE,
    'swing': SUPPORT_SWING_MODE,
    'targetTemperature': SUPPORT_TARGET_TEMPERATURE,
}

# Translation between Sensibo mode names and Home Assistant HVAC modes.
SENSIBO_TO_HA = {
    "cool": HVAC_MODE_COOL,
    "heat": HVAC_MODE_HEAT,
    "fan": HVAC_MODE_FAN_ONLY,
    "auto": HVAC_MODE_HEAT_COOL,
    "dry": HVAC_MODE_DRY
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up Sensibo devices."""
    client = pysensibo.SensiboClient(
        config[CONF_API_KEY], session=async_get_clientsession(hass),
        timeout=TIMEOUT)
    devices = []
    try:
        # Create an entity for every device matching the configured id filter.
        for dev in (
                await client.async_get_devices(_INITIAL_FETCH_FIELDS)):
            if config[CONF_ID] == ALL or dev['id'] in config[CONF_ID]:
                devices.append(SensiboClimate(
                    client, dev, hass.config.units.temperature_unit))
    except (aiohttp.client_exceptions.ClientConnectorError,
            asyncio.TimeoutError, pysensibo.SensiboError):
        _LOGGER.exception('Failed to connect to Sensibo servers.')
        # PlatformNotReady makes Home Assistant retry the platform setup later.
        raise PlatformNotReady
    if not devices:
        return
    async_add_entities(devices)

    async def async_assume_state(service):
        """Set state according to external service call."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            target_climate = [device for device in devices
                              if device.entity_id in entity_ids]
        else:
            # no entity filter given: apply to every configured device
            target_climate = devices
        update_tasks = []
        for climate in target_climate:
            await climate.async_assume_state(
                service.data.get(ATTR_STATE))
            update_tasks.append(climate.async_update_ha_state(True))
        if update_tasks:
            await asyncio.wait(update_tasks)
    hass.services.async_register(
        SENSIBO_DOMAIN, SERVICE_ASSUME_STATE, async_assume_state,
        schema=ASSUME_STATE_SCHEMA)
class SensiboClimate(ClimateDevice):
    """Representation of a Sensibo device."""

    def __init__(self, client, data, units):
        """Build SensiboClimate.

        client: aiohttp session.
        data: initially-fetched data.
        units: Home Assistant's configured temperature unit, used as a
            fallback when the device payload carries no unit.
        """
        self._client = client
        self._id = data['id']
        self._external_state = None
        self._units = units
        self._available = False
        self._do_update(data)

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._supported_features

    def _do_update(self, data):
        # Refresh all cached state from a device payload returned by the API.
        self._name = data['room']['name']
        self._measurements = data['measurements']
        self._ac_states = data['acState']
        self._available = data['connectionStatus']['isAlive']
        capabilities = data['remoteCapabilities']
        self._operations = [SENSIBO_TO_HA[mode] for mode
                            in capabilities['modes']]
        self._operations.append(HVAC_MODE_OFF)
        # capabilities specific to the currently-active mode
        self._current_capabilities = \
            capabilities['modes'][self._ac_states['mode']]
        temperature_unit_key = data.get('temperatureUnit') or \
            self._ac_states.get('temperatureUnit')
        if temperature_unit_key:
            self._temperature_unit = TEMP_CELSIUS if \
                temperature_unit_key == 'C' else TEMP_FAHRENHEIT
            self._temperatures_list = self._current_capabilities[
                'temperatures'].get(temperature_unit_key, {}).get('values', [])
        else:
            self._temperature_unit = self._units
            self._temperatures_list = []
        # Derive supported-feature flags from which acState fields exist.
        self._supported_features = 0
        for key in self._ac_states:
            if key in FIELD_TO_FLAG:
                self._supported_features |= FIELD_TO_FLAG[key]

    @property
    def state(self):
        """Return the current state."""
        # an assumed (externally-set) state overrides the reported one
        return self._external_state or super().state

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {'battery': self.current_battery}

    @property
    def temperature_unit(self):
        """Return the unit of measurement which this thermostat uses."""
        return self._temperature_unit

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._ac_states.get('targetTemperature')

    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        if self.temperature_unit == self.hass.config.units.temperature_unit:
            # We are working in same units as the a/c unit. Use whole degrees
            # like the API supports.
            return 1
        # Unit conversion is going on. No point to stick to specific steps.
        return None

    @property
    def hvac_mode(self):
        """Return current operation ie. heat, cool, idle."""
        if not self._ac_states['on']:
            return HVAC_MODE_OFF
        return SENSIBO_TO_HA.get(self._ac_states['mode'])

    @property
    def current_humidity(self):
        """Return the current humidity."""
        return self._measurements['humidity']

    @property
    def current_battery(self):
        """Return the current battery voltage."""
        return self._measurements.get('batteryVoltage')

    @property
    def current_temperature(self):
        """Return the current temperature."""
        # This field is not affected by temperatureUnit.
        # It is always in C
        return convert_temperature(
            self._measurements['temperature'],
            TEMP_CELSIUS,
            self.temperature_unit)

    @property
    def hvac_modes(self):
        """List of available operation modes."""
        return self._operations

    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._ac_states.get('fanLevel')

    @property
    def fan_modes(self):
        """List of available fan modes."""
        return self._current_capabilities.get('fanLevels')

    @property
    def swing_mode(self):
        """Return the fan setting."""
        return self._ac_states.get('swing')

    @property
    def swing_modes(self):
        """List of available swing modes."""
        return self._current_capabilities.get('swing')

    @property
    def name(self):
        """Return the name of the entity."""
        return self._name

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self._temperatures_list[0] \
            if self._temperatures_list else super().min_temp

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self._temperatures_list[-1] \
            if self._temperatures_list else super().max_temp

    @property
    def unique_id(self):
        """Return unique ID based on Sensibo ID."""
        return self._id

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        temperature = int(temperature)
        if temperature not in self._temperatures_list:
            # Requested temperature is not supported.
            if temperature == self.target_temperature:
                return
            # snap to the next supported value in the requested direction
            index = self._temperatures_list.index(self.target_temperature)
            if temperature > self.target_temperature and index < len(
                    self._temperatures_list) - 1:
                temperature = self._temperatures_list[index + 1]
            elif temperature < self.target_temperature and index > 0:
                temperature = self._temperatures_list[index - 1]
            else:
                return
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id, 'targetTemperature', temperature, self._ac_states)

    async def async_set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id, 'fanLevel', fan_mode, self._ac_states)

    async def async_set_hvac_mode(self, hvac_mode):
        """Set new target operation mode."""
        if hvac_mode == HVAC_MODE_OFF:
            with async_timeout.timeout(TIMEOUT):
                await self._client.async_set_ac_state_property(
                    self._id, 'on', False, self._ac_states)
            return
        # Turn on if not currently on.
        if not self._ac_states['on']:
            with async_timeout.timeout(TIMEOUT):
                await self._client.async_set_ac_state_property(
                    self._id, 'on', True, self._ac_states)
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id, 'mode', HA_TO_SENSIBO[hvac_mode],
                self._ac_states)

    async def async_set_swing_mode(self, swing_mode):
        """Set new target swing operation."""
        with async_timeout.timeout(TIMEOUT):
            await self._client.async_set_ac_state_property(
                self._id, 'swing', swing_mode, self._ac_states)

    async def async_assume_state(self, state):
        """Set external state."""
        # Only toggle the device when the assumed state disagrees with the
        # reported on/off state.
        change_needed = \
            (state != HVAC_MODE_OFF and not self._ac_states['on']) \
            or (state == HVAC_MODE_OFF and self._ac_states['on'])
        if change_needed:
            with async_timeout.timeout(TIMEOUT):
                await self._client.async_set_ac_state_property(
                    self._id,
                    'on',
                    state != HVAC_MODE_OFF,  # value
                    self._ac_states,
                    True  # assumed_state
                )
        if state in [STATE_ON, HVAC_MODE_OFF]:
            self._external_state = None
        else:
            self._external_state = state

    async def async_update(self):
        """Retrieve latest state."""
        try:
            with async_timeout.timeout(TIMEOUT):
                data = await self._client.async_get_device(
                    self._id, _FETCH_FIELDS)
                self._do_update(data)
        except (aiohttp.client_exceptions.ClientError,
                pysensibo.SensiboError):
            _LOGGER.warning('Failed to connect to Sensibo servers.')
            self._available = False
| apache-2.0 |
Shao-Feng/crosswalk-test-suite | cordova/cordova-webapp-android-tests/webapp/webapp_uninstall.py | 18 | 2009 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Lin, Wanming <wanming.lin@intel.com>
import unittest
import os
import sys
import commands
import comm
class TestSWebAppUninstall(unittest.TestCase):
def test_uninstall(self):
comm.setUp()
app_name = "helloworld"
pkg_name = "com.example." + app_name.lower()
if not comm.check_app_installed(pkg_name, self):
comm.app_install(app_name, pkg_name, self)
comm.app_uninstall(pkg_name, self)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
CongSmile/virt-test | virttest/libvirt_xml/nwfilter_protocols/esp.py | 26 | 5664 | """
esp protocl support class(es)
http://libvirt.org/formatnwfilter.html#nwfelemsRulesProtoMisc
"""
from virttest.libvirt_xml import accessors, xcepts
from virttest.libvirt_xml.nwfilter_protocols import base
class Esp(base.TypedDeviceBase):
"""
Create new Esp xml instances
Properties:
attrs: libvirt_xml.nwfilter_protocols.Esp.Attr instance
"""
__slots__ = ('attrs',)
def __init__(self, type_name='file', virsh_instance=base.base.virsh):
accessors.XMLElementNest('attrs', self, parent_xpath='/',
tag_name='esp', subclass=self.Attr,
subclass_dargs={
'virsh_instance': virsh_instance})
super(Esp, self).__init__(protocol_tag='esp', type_name=type_name,
virsh_instance=virsh_instance)
def new_attr(self, **dargs):
"""
Return a new Attr instance and set properties from dargs
:param dargs: dict of attributes
:return: new Attr instance
"""
new_one = self.Attr(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def get_attr(self):
"""
Return esp attribute dict
:return: None if no esp in xml, dict of esp's attributes.
"""
try:
esp_node = self.xmltreefile.reroot('/esp')
except KeyError, detail:
raise xcepts.LibvirtXMLError(detail)
node = esp_node.getroot()
esp_attr = dict(node.items())
return esp_attr
class Attr(base.base.LibvirtXMLBase):
"""
Esp attribute XML class
Properties:
srcmacaddr: string, MAC address of sender
srcmacmask: string, Mask applied to MAC address of sender
dstmacaddr: string, MAC address of destination
dstmacmask: string, Mask applied to MAC address of destination
srcipaddr: string, Source IP address
srcipmask: string, Mask applied to source IP address
dstipaddr: string, Destination IP address
dstipmask: string, Mask applied to destination IP address
srcipfrom: string, Start of range of source IP address
srcipto: string, End of range of source IP address
dstipfrom: string, Start of range of destination IP address
dstipto: string, End of range of destination IP address
comment: string, text with max. 256 characters
state: string, comma separated list of NEW,ESTABLISHED,RELATED,INVALID or NONE
ipset: The name of an IPSet managed outside of libvirt
ipsetflags: flags for the IPSet; requires ipset attribute
"""
__slots__ = ('srcmacaddr', 'srcmacmask', 'dstmacaddr', 'dstmacmask',
'srcipaddr', 'srcipmask', 'dstipaddr', 'dstipmask',
'srcipfrom', 'srcipto', 'dstipfrom', 'dstipto',
'dscp', 'comment', 'state', 'ipset', 'ipsetflags')
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute('srcmacaddr', self, parent_xpath='/',
tag_name='esp', attribute='srcmacaddr')
accessors.XMLAttribute('srcmacmask', self, parent_xpath='/',
tag_name='esp', attribute='srcmacmask')
accessors.XMLAttribute('dstmacaddr', self, parent_xpath='/',
tag_name='esp', attribute='dstmacaddr')
accessors.XMLAttribute('dstmacmask', self, parent_xpath='/',
tag_name='esp', attribute='dstmacmask')
accessors.XMLAttribute('srcipaddr', self, parent_xpath='/',
tag_name='esp', attribute='srcipaddr')
accessors.XMLAttribute('srcipmask', self, parent_xpath='/',
tag_name='esp', attribute='srcipmask')
accessors.XMLAttribute('dstipaddr', self, parent_xpath='/',
tag_name='esp', attribute='dstipaddr')
accessors.XMLAttribute('dstipmask', self, parent_xpath='/',
tag_name='esp', attribute='dstipmask')
accessors.XMLAttribute('srcipfrom', self, parent_xpath='/',
tag_name='esp', attribute='srcipfrom')
accessors.XMLAttribute('srcipto', self, parent_xpath='/',
tag_name='esp', attribute='srcipto')
accessors.XMLAttribute('dstipfrom', self, parent_xpath='/',
tag_name='esp', attribute='dstipfrom')
accessors.XMLAttribute('dstipto', self, parent_xpath='/',
tag_name='esp', attribute='dstipto')
accessors.XMLAttribute('dscp', self, parent_xpath='/',
tag_name='esp', attribute='dscp')
accessors.XMLAttribute('comment', self, parent_xpath='/',
tag_name='esp', attribute='comment')
accessors.XMLAttribute('state', self, parent_xpath='/',
tag_name='esp', attribute='state')
accessors.XMLAttribute('ipset', self, parent_xpath='/',
tag_name='esp', attribute='ipset')
accessors.XMLAttribute('ipsetflags', self, parent_xpath='/',
tag_name='esp', attribute='ipsetflags')
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<esp/>'
| gpl-2.0 |
dbertha/odoo | openerp/report/report_sxw.py | 91 | 27265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import StringIO
import cStringIO
import base64
from datetime import datetime
import os
import re
import time
from interface import report_rml
import preprocess
import logging
import openerp.tools as tools
import zipfile
import common
import openerp
from openerp import SUPERUSER_ID
from openerp.osv.fields import float as float_field, function as function_field, datetime as datetime_field
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
rml_parents = {
'tr':1,
'li':1,
'story': 0,
'section': 0
}
rml_tag="para"
sxw_parents = {
'table-row': 1,
'list-item': 1,
'body': 0,
'section': 0,
}
html_parents = {
'tr' : 1,
'body' : 0,
'div' : 0
}
sxw_tag = "p"
rml2sxw = {
'para': 'p',
}
def get_date_length(date_format=DEFAULT_SERVER_DATE_FORMAT):
return len((datetime.now()).strftime(date_format))
class rml_parse(object):
def __init__(self, cr, uid, name, parents=rml_parents, tag=rml_tag, context=None):
if not context:
context={}
self.cr = cr
self.uid = uid
self.pool = openerp.registry(cr.dbname)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
self.localcontext = {
'user': user,
'setCompany': self.setCompany,
'repeatIn': self.repeatIn,
'setLang': self.setLang,
'setTag': self.setTag,
'removeParentNode': self.removeParentNode,
'format': self.format,
'formatLang': self.formatLang,
'lang' : user.company_id.partner_id.lang,
'translate' : self._translate,
'setHtmlImage' : self.set_html_image,
'strip_name' : self._strip_name,
'time' : time,
'display_address': self.display_address,
# more context members are setup in setCompany() below:
# - company_id
# - logo
}
self.setCompany(user.company_id)
self.localcontext.update(context)
self.name = name
self._node = None
self.parents = parents
self.tag = tag
self._lang_cache = {}
self.lang_dict = {}
self.default_lang = {}
self.lang_dict_called = False
self._transl_regex = re.compile('(\[\[.+?\]\])')
def setTag(self, oldtag, newtag, attrs=None):
return newtag, attrs
def _ellipsis(self, char, size=100, truncation_str='...'):
if not char:
return ''
if len(char) <= size:
return char
return char[:size-len(truncation_str)] + truncation_str
def setCompany(self, company_id):
if company_id:
self.localcontext['company'] = company_id
self.localcontext['logo'] = company_id.logo
self.rml_header = company_id.rml_header
self.rml_header2 = company_id.rml_header2
self.rml_header3 = company_id.rml_header3
self.logo = company_id.logo
def _strip_name(self, name, maxlen=50):
return self._ellipsis(name, maxlen)
def format(self, text, oldtag=None):
return text.strip()
def removeParentNode(self, tag=None):
raise GeneratorExit('Skip')
def set_html_image(self,id,model=None,field=None,context=None):
if not id :
return ''
if not model:
model = 'ir.attachment'
try :
id = int(id)
res = self.pool[model].read(self.cr,self.uid,id)
if field :
return res[field]
elif model =='ir.attachment' :
return res['datas']
else :
return ''
except Exception:
return ''
def setLang(self, lang):
self.localcontext['lang'] = lang
self.lang_dict_called = False
# re-evaluate self.objects in a different environment
env = self.objects.env(self.cr, self.uid, self.localcontext)
self.objects = self.objects.with_env(env)
def _get_lang_dict(self):
pool_lang = self.pool['res.lang']
lang = self.localcontext.get('lang', 'en_US') or 'en_US'
lang_ids = pool_lang.search(self.cr,self.uid,[('code','=',lang)])
if not lang_ids:
lang_ids = pool_lang.search(self.cr,self.uid,[('code','=','en_US')])
lang_obj = pool_lang.browse(self.cr,self.uid,lang_ids[0])
self.lang_dict.update({'lang_obj':lang_obj,'date_format':lang_obj.date_format,'time_format':lang_obj.time_format})
self.default_lang[lang] = self.lang_dict.copy()
return True
def digits_fmt(self, obj=None, f=None, dp=None):
digits = self.get_digits(obj, f, dp)
return "%%.%df" % (digits, )
def get_digits(self, obj=None, f=None, dp=None):
d = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = self.pool['decimal.precision']
d = decimal_precision_obj.precision_get(self.cr, self.uid, dp)
elif obj and f:
res_digits = getattr(obj._columns[f], 'digits', lambda x: ((16, DEFAULT_DIGITS)))
if isinstance(res_digits, tuple):
d = res_digits[1]
else:
d = res_digits(self.cr)[1]
elif (hasattr(obj, '_field') and\
isinstance(obj._field, (float_field, function_field)) and\
obj._field.digits):
d = obj._field.digits[1]
if not d and d is not 0:
d = DEFAULT_DIGITS
return d
def formatLang(self, value, digits=None, date=False, date_time=False, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
if dp:
digits = self.get_digits(dp=dp)
else:
digits = self.get_digits(value)
if isinstance(value, (str, unicode)) and not value:
return ''
if not self.lang_dict_called:
self._get_lang_dict()
self.lang_dict_called = True
if date or date_time:
if not value:
return ''
date_format = self.lang_dict['date_format']
parse_format = DEFAULT_SERVER_DATE_FORMAT
if date_time:
value = value.split('.')[0]
date_format = date_format + " " + self.lang_dict['time_format']
parse_format = DEFAULT_SERVER_DATETIME_FORMAT
if isinstance(value, basestring):
# FIXME: the trimming is probably unreliable if format includes day/month names
# and those would need to be translated anyway.
date = datetime.strptime(value[:get_date_length(parse_format)], parse_format)
elif isinstance(value, time.struct_time):
date = datetime(*value[:6])
else:
date = datetime(*value.timetuple()[:6])
if date_time:
# Convert datetime values to the expected client/context timezone
date = datetime_field.context_timestamp(self.cr, self.uid,
timestamp=date,
context=self.localcontext)
return date.strftime(date_format.encode('utf-8'))
res = self.lang_dict['lang_obj'].format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj:
if currency_obj.position == 'after':
res='%s %s'%(res,currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res='%s %s'%(currency_obj.symbol, res)
return res
def display_address(self, address_record, without_company=False):
# FIXME handle `without_company`
return address_record.contact_address
def repeatIn(self, lst, name,nodes_parent=False):
ret_lst = []
for id in lst:
ret_lst.append({name:id})
return ret_lst
def _translate(self,text):
lang = self.localcontext['lang']
if lang and text and not text.isspace():
transl_obj = self.pool['ir.translation']
piece_list = self._transl_regex.split(text)
for pn in range(len(piece_list)):
if not self._transl_regex.match(piece_list[pn]):
source_string = piece_list[pn].replace('\n', ' ').strip()
if len(source_string):
translated_string = transl_obj._get_source(self.cr, self.uid, self.name, ('report', 'rml'), lang, source_string)
if translated_string:
piece_list[pn] = piece_list[pn].replace(source_string, translated_string)
text = ''.join(piece_list)
return text
def _add_header(self, rml_dom, header='external'):
if header=='internal':
rml_head = self.rml_header2
elif header=='internal landscape':
rml_head = self.rml_header3
else:
rml_head = self.rml_header
head_dom = etree.XML(rml_head)
for tag in head_dom:
found = rml_dom.find('.//'+tag.tag)
if found is not None and len(found):
if tag.get('position'):
found.append(tag)
else :
found.getparent().replace(found,tag)
return True
def set_context(self, objects, data, ids, report_type = None):
self.localcontext['data'] = data
self.localcontext['objects'] = objects
self.localcontext['digits_fmt'] = self.digits_fmt
self.localcontext['get_digits'] = self.get_digits
self.datas = data
self.ids = ids
self.objects = objects
if report_type:
if report_type=='odt' :
self.localcontext.update({'name_space' :common.odt_namespace})
else:
self.localcontext.update({'name_space' :common.sxw_namespace})
# WARNING: the object[0].exists() call below is slow but necessary because
# some broken reporting wizards pass incorrect IDs (e.g. ir.ui.menu ids)
if objects and len(objects) == 1 and \
objects[0].exists() and 'company_id' in objects[0] and objects[0].company_id:
# When we print only one record, we can auto-set the correct
# company in the localcontext. For other cases the report
# will have to call setCompany() inside the main repeatIn loop.
self.setCompany(objects[0].company_id)
class report_sxw(report_rml, preprocess.report):
"""
The register=True kwarg has been added to help remove the
openerp.netsvc.LocalService() indirection and the related
openerp.report.interface.report_int._reports dictionary:
report_sxw registered in XML with auto=False are also registered in Python.
In that case, they are registered in the above dictionary. Since
registration is automatically done upon instanciation, and that
instanciation is needed before rendering, a way was needed to
instanciate-without-register a report. In the future, no report
should be registered in the above dictionary and it will be dropped.
"""
def __init__(self, name, table, rml=False, parser=rml_parse, header='external', store=False, register=True):
report_rml.__init__(self, name, table, rml, '', register=register)
self.name = name
self.parser = parser
self.header = header
self.store = store
self.internal_header=False
if header=='internal' or header=='internal landscape':
self.internal_header=True
def getObjects(self, cr, uid, ids, context):
table_obj = openerp.registry(cr.dbname)[self.table]
return table_obj.browse(cr, uid, ids, context=context)
def create(self, cr, uid, ids, data, context=None):
context = dict(context or {})
if self.internal_header:
context.update(internal_header=self.internal_header)
# skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
context.update(bin_raw=True)
registry = openerp.registry(cr.dbname)
ir_obj = registry['ir.actions.report.xml']
registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
report_xml_ids = ir_obj.search(cr, uid,
[('report_name', '=', self.name[7:])], context=context)
if report_xml_ids:
report_xml = ir_obj.browse(cr, uid, report_xml_ids[0], context=context)
else:
title = ''
report_file = tools.file_open(self.tmpl, subdir=None)
try:
rml = report_file.read()
report_type= data.get('report_type', 'pdf')
class a(object):
def __init__(self, *args, **argv):
for key,arg in argv.items():
setattr(self, key, arg)
report_xml = a(title=title, report_type=report_type, report_rml_content=rml, name=title, attachment=False, header=self.header)
finally:
report_file.close()
# We add an attribute on the ir.actions.report.xml instance.
# This attribute 'use_global_header' will be used by
# the create_single_XXX function of the report engine.
# This change has been done to avoid a big change of the API.
setattr(report_xml, 'use_global_header', self.header if report_xml.header else False)
report_type = report_xml.report_type
if report_type in ['sxw','odt']:
fnct = self.create_source_odt
elif report_type in ['pdf','raw','txt','html']:
fnct = self.create_source_pdf
elif report_type=='html2html':
fnct = self.create_source_html2html
elif report_type=='mako2html':
fnct = self.create_source_mako2html
else:
raise NotImplementedError(_('Unknown report type: %s') % report_type)
fnct_ret = fnct(cr, uid, ids, data, report_xml, context)
if not fnct_ret:
return False, False
return fnct_ret
def create_source_odt(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_odt(cr, uid, ids, data, report_xml, context or {})
def create_source_html2html(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_html2html(cr, uid, ids, data, report_xml, context or {})
def create_source_mako2html(self, cr, uid, ids, data, report_xml, context=None):
return self.create_single_mako2html(cr, uid, ids, data, report_xml, context or {})
def create_source_pdf(self, cr, uid, ids, data, report_xml, context=None):
if not context:
context={}
registry = openerp.registry(cr.dbname)
attach = report_xml.attachment
if attach:
objs = self.getObjects(cr, uid, ids, context)
results = []
for obj in objs:
aname = eval(attach, {'object':obj, 'time':time})
result = False
if report_xml.attachment_use and aname and context.get('attachment_use', True):
aids = registry['ir.attachment'].search(cr, uid, [('datas_fname','=',aname+'.pdf'),('res_model','=',self.table),('res_id','=',obj.id)])
if aids:
brow_rec = registry['ir.attachment'].browse(cr, uid, aids[0])
if not brow_rec.datas:
continue
d = base64.decodestring(brow_rec.datas)
results.append((d,'pdf'))
continue
result = self.create_single_pdf(cr, uid, [obj.id], data, report_xml, context)
if not result:
return False
if aname:
try:
name = aname+'.'+result[1]
# Remove the default_type entry from the context: this
# is for instance used on the account.account_invoices
# and is thus not intended for the ir.attachment type
# field.
ctx = dict(context)
ctx.pop('default_type', None)
registry['ir.attachment'].create(cr, uid, {
'name': aname,
'datas': base64.encodestring(result[0]),
'datas_fname': name,
'res_model': self.table,
'res_id': obj.id,
}, context=ctx
)
except Exception:
#TODO: should probably raise a proper osv_except instead, shouldn't we? see LP bug #325632
_logger.error('Could not create saved report attachment', exc_info=True)
results.append(result)
if results:
if results[0][1]=='pdf':
from pyPdf import PdfFileWriter, PdfFileReader
output = PdfFileWriter()
for r in results:
reader = PdfFileReader(cStringIO.StringIO(r[0]))
for page in range(reader.getNumPages()):
output.addPage(reader.getPage(page))
s = cStringIO.StringIO()
output.write(s)
return s.getvalue(), results[0][1]
return self.create_single_pdf(cr, uid, ids, data, report_xml, context)
def create_single_pdf(self, cr, uid, ids, data, report_xml, context=None):
if not context:
context={}
logo = None
context = context.copy()
title = report_xml.name
rml = report_xml.report_rml_content
# if no rml file is found
if not rml:
return False
rml_parser = self.parser(cr, uid, self.name2, context=context)
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, report_xml.report_type)
processed_rml = etree.XML(rml)
if report_xml.use_global_header:
rml_parser._add_header(processed_rml, self.header)
processed_rml = self.preprocess_rml(processed_rml,report_xml.report_type)
if rml_parser.logo:
logo = base64.decodestring(rml_parser.logo)
create_doc = self.generators[report_xml.report_type]
pdf = create_doc(etree.tostring(processed_rml),rml_parser.localcontext,logo,title.encode('utf8'))
return pdf, report_xml.report_type
def create_single_odt(self, cr, uid, ids, data, report_xml, context=None):
context = dict(context or {})
context['parents'] = sxw_parents
report_type = report_xml.report_type
binary_report_content = report_xml.report_sxw_content
if isinstance(report_xml.report_sxw_content, unicode):
# if binary content was passed as unicode, we must
# re-encode it as a 8-bit string using the pass-through
# 'latin1' encoding, to restore the original byte values.
# See also osv.fields.sanitize_binary_value()
binary_report_content = report_xml.report_sxw_content.encode("latin1")
sxw_io = StringIO.StringIO(binary_report_content)
sxw_z = zipfile.ZipFile(sxw_io, mode='r')
rml = sxw_z.read('content.xml')
meta = sxw_z.read('meta.xml')
mime_type = sxw_z.read('mimetype')
if mime_type == 'application/vnd.sun.xml.writer':
mime_type = 'sxw'
else :
mime_type = 'odt'
sxw_z.close()
rml_parser = self.parser(cr, uid, self.name2, context=context)
rml_parser.parents = sxw_parents
rml_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, mime_type)
rml_dom_meta = node = etree.XML(meta)
elements = node.findall(rml_parser.localcontext['name_space']["meta"]+"user-defined")
for pe in elements:
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name"):
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 3":
pe[0].text=data['id']
if pe.get(rml_parser.localcontext['name_space']["meta"]+"name") == "Info 4":
pe[0].text=data['model']
meta = etree.tostring(rml_dom_meta, encoding='utf-8',
xml_declaration=True)
rml_dom = etree.XML(rml)
elements = []
key1 = rml_parser.localcontext['name_space']["text"]+"p"
key2 = rml_parser.localcontext['name_space']["text"]+"drop-down"
for n in rml_dom.iterdescendants():
if n.tag == key1:
elements.append(n)
if mime_type == 'odt':
for pe in elements:
e = pe.findall(key2)
for de in e:
pp=de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de:
if cnd.text or cnd.tail:
if pe.text:
pe.text += cnd.text or cnd.tail
else:
pe.text = cnd.text or cnd.tail
pp.remove(de)
else:
for pe in elements:
e = pe.findall(key2)
for de in e:
pp = de.getparent()
if de.text or de.tail:
pe.text = de.text or de.tail
for cnd in de:
text = cnd.get("{http://openoffice.org/2000/text}value",False)
if text:
if pe.text and text.startswith('[['):
pe.text += text
elif text.startswith('[['):
pe.text = text
if de.getparent():
pp.remove(de)
rml_dom = self.preprocess_rml(rml_dom, mime_type)
create_doc = self.generators[mime_type]
odt = etree.tostring(create_doc(rml_dom, rml_parser.localcontext),
encoding='utf-8', xml_declaration=True)
sxw_contents = {'content.xml':odt, 'meta.xml':meta}
if report_xml.use_global_header:
#Add corporate header/footer
rml_file = tools.file_open(os.path.join('base', 'report', 'corporate_%s_header.xml' % report_type))
try:
rml = rml_file.read()
rml_parser = self.parser(cr, uid, self.name2, context=context)
rml_parser.parents = sxw_parents
rml_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
rml_parser.set_context(objs, data, ids, report_xml.report_type)
rml_dom = self.preprocess_rml(etree.XML(rml),report_type)
create_doc = self.generators[report_type]
odt = create_doc(rml_dom,rml_parser.localcontext)
if report_xml.use_global_header:
rml_parser._add_header(odt)
odt = etree.tostring(odt, encoding='utf-8',
xml_declaration=True)
sxw_contents['styles.xml'] = odt
finally:
rml_file.close()
#created empty zip writing sxw contents to avoid duplication
sxw_out = StringIO.StringIO()
sxw_out_zip = zipfile.ZipFile(sxw_out, mode='w')
sxw_template_zip = zipfile.ZipFile (sxw_io, 'r')
for item in sxw_template_zip.infolist():
if item.filename not in sxw_contents:
buffer = sxw_template_zip.read(item.filename)
sxw_out_zip.writestr(item.filename, buffer)
for item_filename, buffer in sxw_contents.iteritems():
sxw_out_zip.writestr(item_filename, buffer)
sxw_template_zip.close()
sxw_out_zip.close()
final_op = sxw_out.getvalue()
sxw_io.close()
sxw_out.close()
return final_op, mime_type
def create_single_html2html(self, cr, uid, ids, data, report_xml, context=None):
context = dict(context or {})
context['parents'] = html_parents
report_type = 'html'
html = report_xml.report_rml_content
html_parser = self.parser(cr, uid, self.name2, context=context)
html_parser.parents = html_parents
html_parser.tag = sxw_tag
objs = self.getObjects(cr, uid, ids, context)
html_parser.set_context(objs, data, ids, report_type)
html_dom = etree.HTML(html)
html_dom = self.preprocess_rml(html_dom,'html2html')
create_doc = self.generators['html2html']
html = etree.tostring(create_doc(html_dom, html_parser.localcontext))
return html.replace('&','&').replace('<', '<').replace('>', '>').replace('</br>',''), report_type
def create_single_mako2html(self, cr, uid, ids, data, report_xml, context=None):
mako_html = report_xml.report_rml_content
html_parser = self.parser(cr, uid, self.name2, context)
objs = self.getObjects(cr, uid, ids, context)
html_parser.set_context(objs, data, ids, 'html')
create_doc = self.generators['makohtml2html']
html = create_doc(mako_html,html_parser.localcontext)
return html,'html'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
foyzur/gpdb | gpMgmt/bin/gppylib/test/behave_utils/utils.py | 4 | 64021 | #!/usr/bin/env python
import re, os, signal, time, filecmp, stat, fileinput
import yaml
from gppylib.commands.gp import GpStart, chk_local_db_running
from gppylib.commands.base import Command, ExecutionError, REMOTE
from gppylib.db import dbconn
from gppylib.gparray import GpArray, MODE_SYNCHRONIZED
from gppylib.operations.backup_utils import pg, escapeDoubleQuoteInSQLString
PARTITION_START_DATE = '2010-01-01'
PARTITION_END_DATE = '2013-01-01'
GET_APPENDONLY_DATA_TABLE_INFO_SQL ="""SELECT ALL_DATA_TABLES.oid, ALL_DATA_TABLES.schemaname, ALL_DATA_TABLES.tablename, OUTER_PG_CLASS.relname as tupletable FROM(
SELECT ALLTABLES.oid, ALLTABLES.schemaname, ALLTABLES.tablename FROM
(SELECT c.oid, n.nspname AS schemaname, c.relname AS tablename FROM pg_class c, pg_namespace n
WHERE n.oid = c.relnamespace) as ALLTABLES,
(SELECT n.nspname AS schemaname, c.relname AS tablename
FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
WHERE c.relkind = 'r'::"char" AND c.oid > 16384 AND (c.relnamespace > 16384 or n.nspname = 'public')
EXCEPT
((SELECT x.schemaname, x.partitiontablename FROM
(SELECT distinct schemaname, tablename, partitiontablename, partitionlevel FROM pg_partitions) as X,
(SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel FROM pg_partitions group by (tablename, schemaname))
as Y
WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel != Y.maxlevel)
UNION (SELECT distinct schemaname, tablename FROM pg_partitions))) as DATATABLES
WHERE ALLTABLES.schemaname = DATATABLES.schemaname and ALLTABLES.tablename = DATATABLES.tablename AND ALLTABLES.oid not in (select reloid from pg_exttable)
) as ALL_DATA_TABLES, pg_appendonly, pg_class OUTER_PG_CLASS
WHERE ALL_DATA_TABLES.oid = pg_appendonly.relid
AND OUTER_PG_CLASS.oid = pg_appendonly.segrelid
"""
GET_ALL_AO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 'f'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
GET_ALL_CO_DATATABLES_SQL = """
%s AND pg_appendonly.columnstore = 't'
""" % GET_APPENDONLY_DATA_TABLE_INFO_SQL
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if master_data_dir is None:
raise Exception('MASTER_DATA_DIRECTORY is not set')
def execute_sql(dbname, sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
dbconn.execSQL(conn, sql)
conn.commit()
def execute_sql_singleton(dbname, sql):
result = None
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
result = dbconn.execSQLForSingleton(conn, sql)
if result is None:
raise Exception("error running query: %s" % sql)
return result
def has_exception(context):
if not hasattr(context, 'exception'):
return False
if context.exception:
return True
else:
return False
def run_command(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_cmd(command):
cmd = Command(name='run %s' % command, cmdStr='%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
print 'caught exception %s'%e
result = cmd.get_results()
return (result.rc, result.stdout, result.stderr)
def run_command_remote(context,command, host, source_file, export_mdd):
cmd = Command(name='run command %s'%command,
cmdStr='gpssh -h %s -e \'source %s; %s; %s\''%(host, source_file,export_mdd, command))
cmd.run(validateAfter=True)
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def run_gpcommand(context, command):
context.exception = None
cmd = Command(name='run %s' % command, cmdStr='$GPHOME/bin/%s' % command)
try:
cmd.run(validateAfter=True)
except ExecutionError, e:
context.exception = e
result = cmd.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
def check_stdout_msg(context, msg):
pat = re.compile(msg)
if not pat.search(context.stdout_message):
err_str = "Expected stdout string '%s' and found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_string_not_present_stdout(context, msg):
pat = re.compile(msg)
if pat.search(context.stdout_message):
err_str = "Did not expect stdout string '%s' but found: '%s'" % (msg, context.stdout_message)
raise Exception(err_str)
def check_err_msg(context, err_msg):
if not hasattr(context, 'exception'):
raise Exception('An exception was not raised and it was expected')
pat = re.compile(err_msg)
if not pat.search(context.error_message):
err_str = "Expected error string '%s' and found: '%s'" % (err_msg, context.error_message)
raise Exception(err_str)
def check_return_code(context, ret_code):
if context.ret_code != int(ret_code):
emsg = ""
if context.error_message:
emsg += context.error_message
raise Exception("expected return code '%s' does not equal actual return code '%s' %s" % (ret_code, context.ret_code, emsg))
def check_database_is_running(context):
if not 'PGPORT' in os.environ:
raise Exception('PGPORT should be set')
pgport = int(os.environ['PGPORT'])
running_status = chk_local_db_running(master_data_dir, pgport)
gpdb_running = running_status[0] and running_status[1] and running_status[2] and running_status[3]
return gpdb_running
def start_database_if_not_started(context):
if not check_database_is_running(context):
start_database(context)
def start_database(context):
run_gpcommand(context, 'gpstart -a')
if context.exception:
raise context.exception
def stop_database_if_started(context):
if check_database_is_running(context):
stop_database(context)
def stop_database(context):
run_gpcommand(context, 'gpstop -M fast -a')
if context.exception:
raise context.exception
def getRows(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
results = curs.fetchall()
return results
def getRow(dbname, exec_sql):
with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
curs = dbconn.execSQL(conn, exec_sql)
result = curs.fetchone()
return result
def check_db_exists(dbname, host=None, port=0, user=None):
    """Return True when a database named *dbname* exists on the target server."""
    LIST_DATABASE_SQL = 'select datname from pg_database'
    url = dbconn.DbURL(hostname=host, username=user, port=port, dbname='template1')
    with dbconn.connect(url) as conn:
        rows = dbconn.execSQL(conn, LIST_DATABASE_SQL).fetchall()
    return any(row[0] == dbname for row in rows)
def create_database_if_not_exists(context, dbname, host=None, port=0, user=None):
    """Create *dbname* only when it does not already exist."""
    if check_db_exists(dbname, host, port, user):
        return
    create_database(context, dbname, host, port, user)
def create_database(context, dbname=None, host=None, port=0, user=None):
    """Create *dbname*, retrying up to 10 times; raise if it still does not exist.

    Uses plain ``createdb`` locally, or ``psql`` against template1 when a
    remote host/port/user triple is supplied.
    """
    attempts = 10
    if host is None or port == 0 or user is None:
        createdb_cmd = 'createdb %s' % dbname
    else:
        createdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "create database %s"' % (host,
                                                                                        port, user, dbname)
    for _ in range(attempts):
        context.exception = None
        run_command(context, createdb_cmd)
        if context.exception:
            time.sleep(1)
            continue
        if check_db_exists(dbname, host, port, user):
            return
        time.sleep(1)
    if context.exception:
        raise context.exception
    raise Exception("create database for '%s' failed after %d attempts" % (dbname, attempts))
def clear_all_saved_data_verify_files(context):
    """Delete every saved backup/restore verification file under gppylib/test/data."""
    data_dir = os.path.join(os.getcwd(), './gppylib/test/data')
    run_command(context, 'rm %s/*' % data_dir)
def get_table_data_to_file(filename, tablename, dbname):
    """COPY the contents of *tablename* (prefixed with gp_segment_id) into a
    file under ./gppylib/test/data for later backup/restore diffing.

    The ORDER BY column list is derived from the table's column count so the
    dump is deterministic.  Failures are printed, not raised (best-effort).
    """
    current_dir = os.getcwd()
    filename = os.path.join(current_dir, './gppylib/test/data', filename)
    # Builds the string "1,2,...,relnatts+1": one ordinal per output column
    # (gp_segment_id plus every table column), used as the ORDER BY list.
    order_sql = """
                select string_agg(a, ',')
                    from (
                        select generate_series(1,c.relnatts+1) as a
                            from pg_class as c
                                inner join pg_namespace as n
                                on c.relnamespace = n.oid
                            where (n.nspname || '.' || c.relname = E'%s')
                                or c.relname = E'%s'
                    ) as q;
                """ % (pg.escape_string(tablename), pg.escape_string(tablename))
    query = order_sql
    conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
    try:
        res = dbconn.execSQLForSingleton(conn, query)
        # check if tablename is fully qualified <schema_name>.<table_name>
        if '.' in tablename:
            schema_name, table_name = tablename.split('.')
            data_sql = '''COPY (select gp_segment_id, * from "%s"."%s" order by %s) TO '%s' ''' % (escapeDoubleQuoteInSQLString(schema_name, False),
                       escapeDoubleQuoteInSQLString(table_name, False), res, filename)
        else:
            data_sql = '''COPY (select gp_segment_id, * from "%s" order by %s) TO '%s' ''' %(escapeDoubleQuoteInSQLString(tablename, False), res, filename)
        query = data_sql
        dbconn.execSQL(conn, query)
        conn.commit()
    except Exception as e:
        # NOTE(review): errors are swallowed after printing — callers only
        # notice via a later file diff mismatch.
        print "Cannot execute the query '%s' on the connection %s" % (query, str(dbconn.DbURL(dbname=dbname)))
        print "Exception: %s" % str(e)
    conn.close()
def diff_backup_restore_data(context, backup_file, restore_file):
    """Raise unless the backup and restore dump files compare equal."""
    files_match = filecmp.cmp(backup_file, restore_file)
    if not files_match:
        raise Exception('%s and %s do not match' % (backup_file, restore_file))
def validate_restore_data(context, tablename, dbname, backedup_table=None):
    """Dump *tablename* post-restore and diff it against its pre-backup dump.

    When *backedup_table* is given, the backup dump of that table is used as
    the comparison baseline instead.
    """
    get_table_data_to_file(tablename.strip() + "_restore", tablename, dbname)
    data_dir = os.path.join(os.getcwd(), './gppylib/test/data')
    baseline = backedup_table if backedup_table is not None else tablename
    backup_file = os.path.join(data_dir, baseline.strip() + "_backup")
    restore_file = os.path.join(data_dir, tablename.strip() + "_restore")
    diff_backup_restore_data(context, backup_file, restore_file)
def validate_restore_data_in_file(context, tablename, dbname, file_name, backedup_table=None):
    """Like validate_restore_data, but with dump files named after *file_name*."""
    get_table_data_to_file(file_name + "_restore", tablename, dbname)
    data_dir = os.path.join(os.getcwd(), './gppylib/test/data')
    if backedup_table is not None:
        backup_file = os.path.join(data_dir, backedup_table.strip() + "_backup")
    else:
        backup_file = os.path.join(data_dir, file_name + "_backup")
    restore_file = os.path.join(data_dir, file_name + "_restore")
    diff_backup_restore_data(context, backup_file, restore_file)
def validate_db_data(context, dbname, expected_table_count):
    """Verify the db holds exactly *expected_table_count* user tables and each matches its backup."""
    tables = get_table_names(dbname)
    if len(tables) != expected_table_count:
        raise Exception("db %s does not have expected number of tables %d != %d"
                        % (dbname, expected_table_count, len(tables)))
    for schema, table in tables:
        validate_restore_data(context, "%s.%s" % (schema, table), dbname)
def get_segment_hostnames(context, dbname):
    """Return the distinct hostnames of all non-master segments."""
    query = "select distinct(hostname) from gp_segment_configuration where content != -1;"
    return getRows(dbname, query)
def backup_db_data(context, dbname):
    """Snapshot every user table in *dbname* to its *_backup dump file."""
    for schema, table in get_table_names(dbname):
        backup_data(context, "%s.%s" % (schema, table), dbname)
def backup_data(context, tablename, dbname):
    """Dump *tablename* to <tablename>_backup for later comparison."""
    get_table_data_to_file(tablename + "_backup", tablename, dbname)
def backup_data_to_file(context, tablename, dbname, filename):
    """Dump *tablename* to <filename>_backup for later comparison."""
    get_table_data_to_file(filename + "_backup", tablename, dbname)
def check_partition_table_exists(context, dbname, schemaname, table_name, table_type=None, part_level=1, part_number=1):
    """Return True when the given partition of *table_name* exists (optionally of *table_type*)."""
    parts = get_partition_names(schemaname, table_name, dbname, part_level, part_number)
    if not parts:
        return False
    first_partition = parts[0][0].strip()
    return check_table_exists(context, dbname, first_partition, table_type)
def check_table_exists(context, dbname, table_name, table_type=None, host=None, port=0, user=None):
    """Return True when *table_name* exists in *dbname*.

    When *table_type* is given ('ao', 'co', 'heap', 'external' or 'view') the
    relation's pg_class.relstorage code must map to that type as well.
    On catalog-lookup failure the exception is stashed on context.exception
    and False is returned.
    """
    if '.' in table_name:
        # Schema-qualified name: restrict the lookup to that namespace.
        schemaname, tablename = table_name.split('.')
        SQL = """
              select c.oid, c.relkind, c.relstorage, c.reloptions
              from pg_class c, pg_namespace n
              where c.relname = E'%s' and n.nspname = E'%s' and c.relnamespace = n.oid;
              """ % (pg.escape_string(tablename), pg.escape_string(schemaname))
    else:
        SQL = """
              select oid, relkind, relstorage, reloptions \
              from pg_class \
              where relname = E'%s'; \
              """ % pg.escape_string(table_name)
    table_row = None
    with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
        try:
            table_row = dbconn.execSQLForSingletonRow(conn, SQL)
        except Exception as e:
            # Missing table (or non-singleton result) is reported as "absent".
            context.exception = e
            return False
    if table_type is None:
        return True
    # Map pg_class.relstorage codes to the caller-facing type names.
    if table_row[2] == 'a':
        original_table_type = 'ao'
    elif table_row[2] == 'c':
        original_table_type = 'co'
    elif table_row[2] == 'h':
        original_table_type = 'heap'
    elif table_row[2] == 'x':
        original_table_type = 'external'
    elif table_row[2] == 'v':
        original_table_type = 'view'
    else:
        raise Exception('Unknown table type %s' % table_row[2])
    if original_table_type != table_type.strip():
        return False
    return True
def check_pl_exists(context, dbname, lan_name):
    """Return True when procedural language *lan_name* is installed in *dbname*."""
    SQL = """select count(*) from pg_language where lanname='%s';""" % lan_name
    return getRows(dbname, SQL)[0][0] != 0
def check_constraint_exists(context, dbname, conname):
    """Return True when a constraint named *conname* exists in *dbname*."""
    SQL = """select count(*) from pg_constraint where conname='%s';""" % conname
    return getRows(dbname, SQL)[0][0] != 0
def drop_external_table_if_exists(context, table_name, dbname):
    """Drop the external table *table_name* only when it exists."""
    exists = check_table_exists(context, table_name=table_name, dbname=dbname, table_type='external')
    if exists:
        drop_external_table(context, table_name=table_name, dbname=dbname)
def drop_table_if_exists(context, table_name, dbname, host=None, port=0, user=None):
    """Issue 'drop table if exists' for *table_name* (no-op when absent)."""
    url = dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)
    with dbconn.connect(url) as conn:
        dbconn.execSQL(conn, 'drop table if exists %s' % table_name)
        conn.commit()
def drop_external_table(context, table_name, dbname, host=None, port=0, user=None):
    """Drop external table *table_name* and verify it is really gone."""
    url = dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)
    with dbconn.connect(url) as conn:
        dbconn.execSQL(conn, 'drop external table %s' % table_name)
        conn.commit()
    still_there = check_table_exists(context, table_name=table_name, dbname=dbname,
                                     table_type='external', host=host, port=port, user=user)
    if still_there:
        raise Exception('Unable to successfully drop the table %s' % table_name)
def drop_table(context, table_name, dbname, host=None, port=0, user=None):
    """Drop table *table_name* and verify it is really gone."""
    url = dbconn.DbURL(hostname=host, username=user, port=port, dbname=dbname)
    with dbconn.connect(url) as conn:
        dbconn.execSQL(conn, 'drop table %s' % table_name)
        conn.commit()
    still_there = check_table_exists(context, table_name=table_name, dbname=dbname,
                                     host=host, port=port, user=user)
    if still_there:
        raise Exception('Unable to successfully drop the table %s' % table_name)
def check_schema_exists(context, schema_name, dbname):
    """Return True when schema *schema_name* exists in *dbname*."""
    schema_check_sql = "select * from pg_namespace where nspname='%s';" % schema_name
    rows = getRows(dbname, schema_check_sql)
    return len(rows) >= 1
def drop_schema_if_exists(context, schema_name, dbname):
    """Drop schema *schema_name* only when it exists."""
    if not check_schema_exists(context, schema_name, dbname):
        return
    drop_schema(context, schema_name, dbname)
def drop_schema(context, schema_name, dbname):
    """Drop schema *schema_name* (cascade) and verify it is really gone."""
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, 'drop schema %s cascade' % schema_name)
        conn.commit()
    if check_schema_exists(context, schema_name, dbname):
        raise Exception('Unable to successfully drop the schema %s' % schema_name)
def validate_table_data_on_segments(context, tablename, dbname):
    """Raise when any segment reports a zero row count for *tablename*."""
    seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % tablename
    for seg_id, count in getRows(dbname, seg_data_sql):
        # NOTE(review): compares against the string '0'; if the driver returns
        # ints this check can never fire — confirm driver result typing.
        if count == '0':
            raise Exception('Data not present in segment %s' % seg_id)
def get_table_names(dbname):
    """Return (schemaname, tablename) rows for every user table in *dbname*.

    oid > 16384 excludes system catalogs; temporary schemas are filtered out.
    """
    sql = """
            SELECT n.nspname AS schemaname, c.relname AS tablename\
            FROM pg_class c\
            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\
            LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace\
            WHERE c.relkind = 'r'::"char" AND c.oid > 16384 AND (c.relnamespace > 16384 or n.nspname = 'public')
            AND n.nspname NOT LIKE 'pg_temp_%'
          """
    return getRows(dbname, sql)
def get_partition_tablenames(tablename, dbname, part_level=1):
    """Return the child partition table names of *tablename* at *part_level*."""
    child_part_sql = "select partitiontablename from pg_partitions where tablename='%s' and partitionlevel=%s;" % (tablename, part_level)
    return getRows(dbname, child_part_sql)
def get_partition_names(schemaname, tablename, dbname, part_level, part_number):
    """Return schema-qualified partition name(s) at the given level and position."""
    part_num_sql = """select partitionschemaname || '.' || partitiontablename from pg_partitions
                             where schemaname='%s' and tablename='%s'
                             and partitionlevel=%s and partitionposition=%s;""" % (schemaname, tablename, part_level, part_number)
    return getRows(dbname, part_num_sql)
def validate_part_table_data_on_segments(context, tablename, part_level, dbname):
    """Raise when any segment reports zero rows for any child partition of *tablename*."""
    # Distinct names for the two result sets (the original shadowed 'rows').
    partitions = get_partition_tablenames(tablename, dbname, part_level)
    for part_row in partitions:
        seg_data_sql = "select gp_segment_id, count(*) from gp_dist_random('%s') group by gp_segment_id;" % part_row[0]
        for seg_id, count in getRows(dbname, seg_data_sql):
            # NOTE(review): string '0' comparison — confirm driver result typing.
            if count == '0':
                raise Exception('Data not present in segment %s' % seg_id)
def validate_mixed_partition_storage_types(context, tablename, dbname):
    """Verify each level-1 partition of *tablename* has the storage type expected for its position."""
    CO_POSITIONS = (0, 2, 5, 7)
    AO_POSITIONS = (1, 3, 6, 8)
    partition_names = get_partition_tablenames(tablename, dbname, part_level=1)
    for position, partname in enumerate(partition_names):
        if position in CO_POSITIONS:
            expected = 'c'
        elif position in AO_POSITIONS:
            expected = 'a'
        else:
            expected = 'h'
        for part in partname:
            validate_storage_type(context, part, expected, dbname)
def validate_storage_type(context, partname, storage_type, dbname):
    """Raise unless pg_class reports *storage_type* for relation *partname*."""
    storage_type_sql = "select oid::regclass, relstorage from pg_class where oid = '%s'::regclass;" % (partname)
    for row in getRows(dbname, storage_type_sql):
        actual = row[1].strip()
        if actual != storage_type.strip():
            raise Exception("The storage type of the partition %s is not as expected %s "% (row[1], storage_type))
def create_mixed_storage_partition(context, tablename, dbname):
    """Create a list/range-subpartitioned table whose subpartitions mix CO,
    AO and heap storage (with and without zlib compression), then load data.
    """
    table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
    # Subpartitions s_1..s_4 alternate column/row AO storage; s_5 defaults to heap.
    create_table_str = "Create table %s (%s) Distributed randomly \
                        Partition by list(Column2) \
                        Subpartition by range(Column3) Subpartition Template ( \
                        subpartition s_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
                        subpartition s_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
                        subpartition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
                        subpartition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
                        subpartition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
                        (partition p1 values('backup') , partition p2 values('restore')) \
                        ;" % (tablename, table_definition)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, create_table_str)
        conn.commit()
    populate_partition(tablename, '2010-01-01', dbname, 0)
def create_external_partition(context, tablename, dbname, port, filename):
    """Create a range-partitioned table and swap partition p_2 for a readable
    external (gpfdist) table served from the master host, then load data.
    """
    table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
    create_table_str = "Create table %s (%s) Distributed randomly \
                        Partition by range(Column3) ( \
                        partition p_1 start(date '2010-01-01') end(date '2011-01-01') with (appendonly=true, orientation=column, compresstype=zlib, compresslevel=1), \
                        partition p_2 start(date '2011-01-01') end(date '2012-01-01') with (appendonly=true, orientation=row, compresstype=zlib, compresslevel=1), \
                        partition s_3 start(date '2012-01-01') end(date '2013-01-01') with (appendonly=true, orientation=column), \
                        partition s_4 start(date '2013-01-01') end(date '2014-01-01') with (appendonly=true, orientation=row), \
                        partition s_5 start(date '2014-01-01') end(date '2015-01-01') ) \
                        ;" % (tablename, table_definition)
    master_hostname = get_master_hostname();
    # The external table is served by gpfdist on the master host at *port*.
    create_ext_table_str = "Create readable external table %s_ret (%s) \
                            location ('gpfdist://%s:%s/%s') \
                            format 'csv' encoding 'utf-8' \
                            log errors segment reject limit 1000 \
                            ;" % (tablename, table_definition, master_hostname[0][0].strip(), port, filename)
    # Exchange swaps the heap partition for the external table; the scratch
    # _ret name can then be dropped.
    alter_table_str = "Alter table %s exchange partition p_2 \
                       with table %s_ret without validation \
                       ;" % (tablename, tablename)
    drop_table_str = "Drop table %s_ret;" % (tablename)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, create_table_str)
        dbconn.execSQL(conn, create_ext_table_str)
        dbconn.execSQL(conn, alter_table_str)
        dbconn.execSQL(conn, drop_table_str)
        conn.commit()
    populate_partition(tablename, '2010-01-01', dbname, 0, 100)
def modify_partition_data(context, tablename, dbname, partitionnum):
    """Append 10 identical rows into partition 1, 2 or 3 of *tablename*
    (the row's year selects the target range partition)."""
    year_for_partition = {1: '2010', 2: '2011', 3: '2012'}
    if partitionnum not in year_for_partition:
        raise Exception("BAD PARAM to modify_partition_data %s" % partitionnum)
    year = year_for_partition[partitionnum]
    cmdStr = """ echo "90,backup,%s-12-30" | psql -d %s -c "copy %s from stdin delimiter ',';" """ % (year, dbname, tablename)
    for _ in range(10):
        Command(name='insert data into %s' % tablename, cmdStr=cmdStr).run(validateAfter=True)
def modify_data(context, tablename, dbname):
    """Duplicate every row of *tablename* by piping COPY OUT back into COPY IN."""
    cmdStr = 'psql -d %s -c "copy %s to stdout;" | psql -d %s -c "copy %s from stdin;"' % (dbname, tablename, dbname, tablename)
    Command(name='insert data into %s' % tablename, cmdStr=cmdStr).run(validateAfter=True)
def add_partition(context, partitionnum, tablename, dbname):
    """Add a default partition p<partitionnum> to *tablename* and fill it with 'update' rows."""
    sql = "alter table %s add default partition p%s; insert into %s select i+%d, 'update', i + date '%s' from generate_series(0,1094) as i"
    alter_table_str = sql % (tablename, partitionnum, tablename, int(partitionnum), PARTITION_START_DATE)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, alter_table_str)
        conn.commit()
def drop_partition(context, partitionnum, tablename, dbname):
    """Drop partition p<partitionnum> from *tablename*."""
    drop_sql = "alter table %s drop partition p%s;" % (tablename, partitionnum)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, drop_sql)
        conn.commit()
def create_partition(context, tablename, storage_type, dbname, compression_type=None, partition=True, rowcount=1094, with_data=True, host=None, port=0, user=None):
    """Create a three-column test table of the given storage type ('heap',
    'ao' or 'co'), optionally list/range-partitioned and optionally
    compressed, then populate it unless with_data is False.
    """
    interval = '1 year'
    table_definition = 'Column1 int, Column2 varchar(20), Column3 date'
    create_table_str = "Create table " + tablename + "(" + table_definition + ")"
    # Maps the caller-facing type to the WITH(...) orientation value.
    storage_type_dict = {'ao':'row', 'co':'column'}
    part_table = " Distributed Randomly Partition by list(Column2) \
                   Subpartition by range(Column3) Subpartition Template \
                   (start (date '%s') end (date '%s') every (interval '%s')) \
                   (Partition p1 values('backup') , Partition p2 values('restore')) " \
                   %(PARTITION_START_DATE, PARTITION_END_DATE, interval)
    if storage_type == "heap":
        create_table_str = create_table_str
        if partition:
            create_table_str = create_table_str + part_table
    elif storage_type == "ao" or storage_type == "co":
        create_table_str = create_table_str + " WITH(appendonly = true, orientation = %s) " % storage_type_dict[storage_type]
        if compression_type is not None:
            # [:-2] peels off the trailing ") " so compresstype can be appended
            # inside the WITH(...) clause.
            create_table_str = create_table_str[:-2] + ", compresstype = " + compression_type + ") "
        if partition:
            create_table_str = create_table_str + part_table
    create_table_str = create_table_str + ";"
    with dbconn.connect(dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)) as conn:
        dbconn.execSQL(conn, create_table_str)
        conn.commit()
    if with_data:
        populate_partition(tablename, PARTITION_START_DATE, dbname, 0, rowcount, host, port, user)
def populate_partition_diff_data_same_eof(tablename, dbname):
    """Load rows offset by +1: same data size as populate_partition, different values."""
    populate_partition(tablename, PARTITION_START_DATE, dbname, 1)
def populate_partition_same_data(tablename, dbname):
    """Load exactly the same rows as a default populate_partition call."""
    populate_partition(tablename, PARTITION_START_DATE, dbname, 0)
def populate_partition(tablename, start_date, dbname, data_offset, rowcount=1094, host=None, port=0, user=None):
    """Insert rowcount+1 'backup' rows and rowcount+1 'restore' rows,
    values offset by *data_offset*, dates counting up from *start_date*."""
    backup_half = "insert into %s select i+%d, 'backup', i + date '%s' from generate_series(0,%d) as i" %(tablename, data_offset, start_date, rowcount)
    restore_half = "; insert into %s select i+%d, 'restore', i + date '%s' from generate_series(0,%d) as i" %(tablename, data_offset, start_date, rowcount)
    url = dbconn.DbURL(hostname=host, port=port, username=user, dbname=dbname)
    with dbconn.connect(url) as conn:
        dbconn.execSQL(conn, backup_half + restore_half)
        conn.commit()
def create_indexes(context, table_name, indexname, dbname):
    """Create one btree and one bitmap index on *table_name*, then validate both exist."""
    btree_sql = "create index btree_%s on %s using btree(column1);" % (indexname, table_name)
    bitmap_sql = "create index bitmap_%s on %s using bitmap(column3);" % (indexname, table_name)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, btree_sql + bitmap_sql)
        conn.commit()
    validate_index(context, table_name, dbname)
def validate_index(context, table_name, dbname):
    """Check that *table_name* has exactly the two indexes created by create_indexes.

    Raises Exception when the catalog does not report exactly 2 index rows.
    """
    index_sql = "select count(indexrelid::regclass) from pg_index, pg_class where indrelid = '%s'::regclass group by indexrelid;" % table_name
    rows = getRows(dbname, index_sql)
    if len(rows) != 2:
        # BUG FIX: the original raised with the undefined name 'result', which
        # turned every validation failure into a NameError instead of the
        # intended message.
        raise Exception('Index creation was not successful. Expected 2 rows does not match %d rows' % len(rows))
def create_schema(context, schema_name, dbname):
    """Create schema *schema_name* unless it already exists."""
    if check_schema_exists(context, schema_name, dbname):
        return
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, "create schema %s" % schema_name)
        conn.commit()
def create_int_table(context, table_name, table_type='heap', dbname='testdb'):
    """Create a 1000-row single-int-column table of the given storage type
    ('heap', 'ao' or 'co') and verify the row count."""
    NROW = 1000
    sql_by_type = {
        'AO': 'create table %s WITH(APPENDONLY=TRUE) as select generate_series(1,%d) as c1' % (table_name, NROW),
        'CO': 'create table %s WITH(APPENDONLY=TRUE, orientation=column) as select generate_series(1, %d) as c1' % (table_name, NROW),
        'HEAP': 'create table %s as select generate_series(1, %d) as c1' % (table_name, NROW),
    }
    CREATE_TABLE_SQL = sql_by_type.get(table_type.upper())
    if CREATE_TABLE_SQL is None:
        raise Exception('Invalid table type specified')
    SELECT_TABLE_SQL = 'select count(*) from %s' % table_name
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, CREATE_TABLE_SQL)
        conn.commit()
        result = dbconn.execSQLForSingleton(conn, SELECT_TABLE_SQL)
    if result != NROW:
        raise Exception('Integer table creation was not successful. Expected %d does not match %d' %(NROW, result))
def drop_database(context, dbname, host=None, port=0, user=None):
    """Drop *dbname*, retrying up to 10 times; raise if it still exists.

    Uses plain ``dropdb`` locally, or ``psql`` against template1 when a
    remote host/port/user triple is supplied.
    """
    attempts = 10
    if host is None or port == 0 or user is None:
        dropdb_cmd = 'dropdb %s' % dbname
    else:
        dropdb_cmd = 'psql -h %s -p %d -U %s -d template1 -c "drop database %s"' % (host,
                                                                                    port, user, dbname)
    for _ in range(attempts):
        context.exception = None
        run_gpcommand(context, dropdb_cmd)
        if context.exception:
            time.sleep(1)
            continue
        if not check_db_exists(dbname):
            return
        time.sleep(1)
    if context.exception:
        raise context.exception
    raise Exception('db exists after dropping: %s' % dbname)
def drop_database_if_exists(context, dbname=None, host=None, port=0, user=None):
    """Drop *dbname* only when it exists."""
    if not check_db_exists(dbname, host=host, port=port, user=user):
        return
    drop_database(context, dbname, host=host, port=port, user=user)
def run_on_all_segs(context, dbname, query):
    """Execute *query* in utility mode directly on every primary segment."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    for seg in gparray.getDbList():
        if not seg.isSegmentPrimary():
            continue
        url = dbconn.DbURL(dbname=dbname, hostname=seg.getSegmentHostName(), port=seg.getSegmentPort())
        with dbconn.connect(url, utility=True) as conn:
            dbconn.execSQL(conn, query)
            conn.commit()
def get_nic_up(hostname, nic):
    """Return True when ifconfig on <hostname>-cm reports *nic* as UP."""
    cmd = Command(name='ifconfig nic', cmdStr='sudo /sbin/ifconfig %s' % nic,
                  remoteHost=hostname + '-cm', ctxt=REMOTE)
    cmd.run(validateAfter=True)
    return 'UP' in cmd.get_results().stdout
def bring_nic_down(hostname, nic):
    """Take down *nic* on <hostname>-cm and verify it actually went down."""
    cmd = Command(name='bring down nic', cmdStr='sudo /sbin/ifdown %s' % nic,
                  remoteHost=hostname + '-cm', ctxt=REMOTE)
    cmd.run(validateAfter=True)
    if get_nic_up(hostname, nic):
        raise Exception('Unable to bring down nic %s on host %s' % (nic, hostname))
def bring_nic_up(hostname, nic):
    """Bring up *nic* on <hostname>-cm and verify it actually came up."""
    cmd = Command(name='bring up nic', cmdStr='sudo /sbin/ifup %s' % nic,
                  remoteHost=hostname + '-cm', ctxt=REMOTE)
    cmd.run(validateAfter=True)
    if not get_nic_up(hostname, nic):
        raise Exception('Unable to bring up nic %s on host %s' % (nic, hostname))
def are_segments_synchronized():
    """Return True when every segment in the catalog reports MODE_SYNCHRONIZED."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    return all(seg.mode == MODE_SYNCHRONIZED for seg in gparray.getDbList())
def get_distribution_policy(dbname):
    """Snapshot every table's distribution policy to <dbname>_dist_policy_backup."""
    get_dist_policy_to_file(dbname.strip() + "_dist_policy_backup", dbname)
def get_dist_policy_to_file(filename, dbname):
    """COPY each non-external table's distribution-policy column numbers to a
    file under ./gppylib/test/data, ordered by table name for stable diffs.
    """
    dist_policy_sql = " \
            SELECT \
                c.relname as tablename, p.attrnums as distribution_policy \
            FROM \
                pg_class c \
                INNER JOIN \
                gp_distribution_policy p \
                ON (c.relfilenode = p.localoid) \
                AND \
                c.relstorage != 'x' \
            ORDER BY c.relname"
    current_dir = os.getcwd()
    filename = os.path.join(current_dir, './gppylib/test/data', filename)
    data_sql = "COPY (%s) TO '%s'" %(dist_policy_sql, filename)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, data_sql)
        conn.commit()
def validate_distribution_policy(context, dbname):
    """Dump current distribution policies and diff against the earlier backup snapshot."""
    get_dist_policy_to_file(dbname.strip() + "_dist_policy_restore", dbname)
    data_dir = os.path.join(os.getcwd(), './gppylib/test/data')
    backup_file = os.path.join(data_dir, dbname.strip() + "_dist_policy_backup")
    restore_file = os.path.join(data_dir, dbname.strip() + "_dist_policy_restore")
    diff_backup_restore_data(context, backup_file, restore_file)
def check_row_count(tablename, dbname, nrows):
    """Raise unless *tablename* holds exactly *nrows* rows.

    The connection error is deliberately not caught: a missing table must
    fail the test.
    """
    NUM_ROWS_QUERY = 'select count(*) from %s' % tablename
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        actual = dbconn.execSQLForSingleton(conn, NUM_ROWS_QUERY)
        if actual != nrows:
            raise Exception('%d rows in table %s.%s, expected row count = %d' % (actual, dbname, tablename, nrows))
def check_empty_table(tablename, dbname):
    """Raise unless *tablename* contains zero rows."""
    check_row_count(tablename, dbname, 0)
def match_table_select(context, src_tablename, src_dbname, dest_tablename, dest_dbname, orderby=None, options=''):
    """Compare the full psql SELECT output of a table on the gptransfer source
    system (connection details from GPTRANSFER_SOURCE_* env vars) with a table
    on the local destination system; raise on any difference.

    *orderby* makes the comparison row-order independent; *options* is passed
    through verbatim to both psql invocations.
    """
    if orderby != None :
        dest_tbl_qry = 'psql -d %s -c \'select * from %s order by %s\' %s' % (dest_dbname, dest_tablename, orderby, options)
        src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s order by %s\' %s''' % (
                          os.environ.get('GPTRANSFER_SOURCE_PORT'),
                          os.environ.get('GPTRANSFER_SOURCE_HOST'),
                          os.environ.get('GPTRANSFER_SOURCE_USER'),
                          src_dbname, src_tablename, orderby, options)
    else:
        dest_tbl_qry = 'psql -d %s -c \'select * from %s\' %s' % (dest_dbname, dest_tablename, options)
        src_tbl_qry = '''psql -p %s -h %s -U %s -d %s -c \'select * from %s\' %s''' % (
                          os.environ.get('GPTRANSFER_SOURCE_PORT'),
                          os.environ.get('GPTRANSFER_SOURCE_HOST'),
                          os.environ.get('GPTRANSFER_SOURCE_USER'),
                          src_dbname, src_tablename, options)
    # Raw stdout text comparison — formatting must match exactly on both sides.
    (_, dest_content, _) = run_cmd(dest_tbl_qry)
    (_, src_content, _) = run_cmd(src_tbl_qry)
    if src_content != dest_content:
        raise Exception('''table %s in database %s of source system does not match rows with table %s in database %s of destination system.\n
                        destination table content:\n%s\n
                        source table content:\n%s\n''' % (
                            src_tablename,src_dbname, dest_tablename, dest_dbname, dest_content, src_content))
def get_master_hostname(dbname='template1'):
    """Return the hostname row(s) of the primary master."""
    query = "select distinct hostname from gp_segment_configuration where content=-1 and role='p'"
    return getRows(dbname, query)
def get_hosts_and_datadirs(dbname='template1'):
    """Return (hostname, data-directory) rows for every primary."""
    query = "select hostname, fselocation from gp_segment_configuration, pg_filespace_entry where fsedbid = dbid and role='p';"
    return getRows(dbname, query)
def get_hosts(dbname='template1'):
    """Return the distinct hostnames of every primary (master included)."""
    query = "select distinct hostname from gp_segment_configuration where role='p';"
    return getRows(dbname, query)
def get_backup_dirs_for_hosts(dbname='template1'):
    """Map each primary hostname to the list of its filespace directories."""
    get_backup_dir_sql = "select hostname,f.fselocation from pg_filespace_entry f inner join gp_segment_configuration g on f.fsedbid=g.dbid and g.role='p'"
    dir_map = {}
    for hostname, directory in getRows(dbname, get_backup_dir_sql):
        dir_map.setdefault(hostname, []).append(directory)
    return dir_map
def cleanup_backup_files(context, dbname, location=None):
    """Remove db_dumps directories and gpcrondump.pid from every backup dir on every host.

    With *location* set, only that directory is cleaned on each host;
    otherwise every filespace directory of the host is swept.
    """
    dir_map = get_backup_dirs_for_hosts(dbname)
    for host in dir_map:
        if location:
            cmd = "ssh %s 'DIR=%s;if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi'" % (host, location)
        else:
            cmd = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/db_dumps/\" ]; then rm -rf $DIR/db_dumps $DIR/gpcrondump.pid; fi; done'" % (host, " ".join(dir_map[host]))
        run_command(context, cmd)
        if context.exception:
            raise context.exception
def cleanup_report_files(context, master_data_dir):
    """Delete gp_*.rpt report files from *master_data_dir*.

    Refuses to run on an empty path or on '/', since the rm pattern
    would otherwise sweep the root directory.
    """
    if not master_data_dir:
        raise Exception("master_data_dir not specified in cleanup_report_files")
    if master_data_dir.strip() == '/':
        raise Exception("Can't call cleanup_report_files on root directory")
    run_command(context, "rm -f %s/%s" % (master_data_dir, "gp_*.rpt"))
    if context.exception:
        raise context.exception
def truncate_table(dbname, tablename):
    """Remove all rows from *tablename* in *dbname*."""
    execute_sql(dbname, 'TRUNCATE %s' % tablename)
def verify_truncate_in_pg_stat_last_operation(context, dbname, oid):
    """Assert pg_stat_last_operation records a TRUNCATE for relation *oid*.

    Expects the catalog row to have 7 columns, actionname at index 2 and
    subtype at index 5 (which must be empty for TRUNCATE).
    """
    VERIFY_TRUNCATE_SQL = """SELECT *
                             FROM pg_stat_last_operation
                             WHERE objid = %d and staactionname = 'TRUNCATE' """ % oid
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        row = dbconn.execSQLForSingletonRow(conn, VERIFY_TRUNCATE_SQL)
        if len(row) != 7:
            raise Exception('Invalid number of colums %d' % len(row))
        if row[2] != 'TRUNCATE':
            raise Exception('Actiontype not expected TRUNCATE "%s"' % row[2])
        if row[5]:
            raise Exception('Subtype for TRUNCATE operation is not empty %s' % row[5])
def verify_truncate_not_in_pg_stat_last_operation(context, dbname, oid):
    """Assert pg_stat_last_operation records NO TRUNCATE for relation *oid*."""
    VERIFY_TRUNCATE_SQL = """SELECT count(*)
                             FROM pg_stat_last_operation
                             WHERE objid = %d and staactionname = 'TRUNCATE' """ % oid
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        thecount = dbconn.execSQLForSingleton(conn, VERIFY_TRUNCATE_SQL)
        if thecount != 0:
            raise Exception("Found %s rows from query '%s' should be 0" % (thecount, VERIFY_TRUNCATE_SQL))
def get_table_oid(context, dbname, schema, tablename):
    """Return the pg_class oid of *schema*.*tablename* in *dbname*."""
    OID_SQL = """SELECT c.oid
                 FROM pg_class c, pg_namespace n
                 WHERE c.relnamespace = n.oid AND c.relname = '%s' AND n.nspname = '%s'""" % (tablename, schema)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        oid = dbconn.execSQLForSingleton(conn, OID_SQL)
    return oid
def insert_numbers(dbname, tablename, lownum, highnum):
    """Insert the integer series lownum..highnum into *tablename*."""
    series_sql = "insert into %s select generate_series(%s, %s)" % (tablename, lownum, highnum)
    execute_sql(dbname, series_sql)
def verify_integer_tuple_counts(context, filename):
    """Raise if any line's trailing comma-separated tupcount field looks like a float."""
    float_pat = re.compile(r"^\d+?\.\d+?$")
    with open(filename, 'r') as fp:
        for line in fp:
            tupcount = line.split(',')[-1].strip()
            if float_pat.match(tupcount) is not None:
                raise Exception('Expected an integer tuplecount in file %s found float' % filename)
def create_fake_pg_aoseg_table(context, table, dbname):
    """Create a user table mimicking the pg_aoseg segment-catalog layout
    (for tests that need tupcount-style rows without a real AO table)."""
    sql = """CREATE TABLE %s(segno int,
                             eof double precision,
                             tupcount double precision,
                             modcount bigint,
                             varblockcount double precision,
                             eofuncompressed double precision)""" % table
    execute_sql(dbname, sql)
def insert_row(context, row_values, table, dbname):
    """Insert one literal row (*row_values* is the comma-separated SQL value list)."""
    execute_sql(dbname, """INSERT INTO %s values(%s)""" % (table, row_values))
def copy_file_to_all_db_hosts(context, filename):
    """gpscp *filename* to the same path on every primary-segment host."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    hosts_set = set(seg.getSegmentAddress()
                    for seg in gparray.getDbList() if seg.isSegmentPrimary())
    hostfile = '/tmp/copy_host_file.behave'
    with open(hostfile, 'w') as fd:
        for h in hosts_set:
            fd.write('%s\n' % h)
    cmd = 'gpscp -f %s %s =:%s' % (hostfile, filename, filename)
    run_command(context, cmd)
    if context.exception:
        raise Exception("FAIL: '%s' '%s'" % (cmd, context.exception.__str__()))
    os.remove(hostfile)
def create_large_num_partitions(table_type, table_name, db_name, num_partitions=None):
    """Create a heavily partitioned test table of the given storage type
    ('ao', 'co' or heap) and verify exactly one matching pg_class entry.

    With num_partitions=None a 74x74 two-level range-partitioned table is
    created; otherwise a single level with num_partitions-1 partitions.
    """
    if table_type == "ao":
        condition = "with(appendonly=true)"
    elif table_type == "co":
        condition = "with(appendonly=true, orientation=column)"
    else:
        condition = ""
    if num_partitions is None:
        create_large_partitions_sql = """
                                        create table %s (column1 int, column2 int) %s partition by range(column1) subpartition by range(column2) subpartition template(start(1) end(75) every(1)) (start(1) end(75) every(1))
                                      """ % (table_name, condition)
    else:
        create_large_partitions_sql = """
                                        create table %s (column1 int, column2 int) %s partition by range(column1) (start(1) end(%d) every(1))
                                      """ % (table_name, condition, num_partitions)
    execute_sql(db_name, create_large_partitions_sql)
    if '.' in table_name:
        # Schema-qualified: verify within that namespace only.
        schema, table = table_name.split('.')
        verify_table_exists_sql = """select count(*) from pg_class c, pg_namespace n
                                     where c.relname = E'%s' and n.nspname = E'%s' and c.relnamespace = n.oid;
                                  """ % (table, schema)
    else:
        verify_table_exists_sql = """select count(*) from pg_class where relname = E'%s'""" % table_name
    num_rows = getRows(db_name, verify_table_exists_sql)[0][0]
    if num_rows != 1:
        raise Exception('Creation of table "%s:%s" failed. Num rows in pg_class = %s' % (db_name, table_name, num_rows))
def validate_num_restored_tables(context, num_tables, dbname):
    """Validate each non-empty restored table against its backup, and check
    the count of non-empty tables equals *num_tables*."""
    count_query = """select count(*) from %s"""
    validated = 0
    for schema, table in get_table_names(dbname):
        name = '%s.%s' % (schema, table)
        if getRows(dbname, count_query % name)[0][0] == 0:
            continue
        validate_restore_data(context, name, dbname)
        validated += 1
    if validated != int(num_tables.strip()):
        raise Exception('Invalid number of tables were restored. Expected "%s", Actual "%s"' % (num_tables, validated))
def get_partition_list(partition_type, dbname):
    """Return the 4-column rows describing all AO ('ao') or CO ('co') data
    tables in *dbname*.

    Raises Exception for an unrecognized *partition_type* or malformed rows.
    """
    if partition_type == 'ao':
        sql = GET_ALL_AO_DATATABLES_SQL
    elif partition_type == 'co':
        sql = GET_ALL_CO_DATATABLES_SQL
    else:
        # BUG FIX: previously any other value fell through with 'sql' unbound,
        # producing a NameError instead of a meaningful failure.
        raise Exception("Invalid partition type '%s': expected 'ao' or 'co'" % partition_type)
    partition_list = getRows(dbname, sql)
    for line in partition_list:
        if len(line) != 4:
            raise Exception('Invalid results from query to get all AO tables: [%s]' % (','.join(line)))
    return partition_list
def verify_stats(dbname, partition_info):
    """For each AO/CO partition, compare summed pg_aoseg tupcount with the live row count.

    *partition_info* rows are (oid, schemaname, partition_name, tupletable).
    A NULL sum (no segment rows) is treated as a count of '0'.
    """
    for (oid, schemaname, partition_name, tupletable) in partition_info:
        stat_sql = "select to_char(sum(tupcount::bigint), '999999999999999999999') from pg_aoseg.%s" % tupletable
        raw_count = getRows(dbname, stat_sql)[0][0]
        tuple_count = raw_count.strip() if raw_count else '0'
        validate_tuple_count(dbname, schemaname, partition_name, tuple_count)
def validate_tuple_count(dbname, schemaname, partition_name, tuple_count):
    """Raise unless count(*) of schemaname.partition_name equals *tuple_count*."""
    actual = getRows(dbname, 'select count(*) from %s.%s' % (schemaname, partition_name))[0][0]
    if int(actual) != int(tuple_count):
        raise Exception('Stats for the table %s.%s does not match. Stat count "%s" does not match the actual tuple count "%s"' % (schemaname, partition_name, tuple_count, actual))
def validate_aoco_stats(context, dbname, table, expected_tupcount):
    """Check pg_aoseg tupcount stats of a schema-qualified AO/CO *table*.

    Resolves the table's pg_aoseg segment relation through pg_appendonly,
    sums its tupcount, and raises if it differs from *expected_tupcount*.
    """
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        schema, table = table.split('.')
        # Find the pg_aoseg.<segrel> name for this table within its schema.
        sql = "SELECT relname FROM pg_class \
              WHERE oid in (SELECT segrelid FROM pg_appendonly \
              WHERE relid in (SELECT oid FROM pg_class \
              WHERE relname = '%s' AND relnamespace = (SELECT oid FROM pg_namespace \
              WHERE nspname = '%s')))" % (table, schema)
        tname = dbconn.execSQLForSingleton(conn, sql)
        sql = "select sum(tupcount) from pg_aoseg.%s" % tname.strip()
        rows = getRows(dbname, sql)
        tupcount = int(rows[0][0])
        if tupcount != int(expected_tupcount):
            raise Exception("%s has stats of %d rows in %s table and should have %s" % (table, tupcount, tname, expected_tupcount))
def validate_no_aoco_stats(context, dbname, table):
    """Check that an AO/CO table's pg_aoseg segment table contains no rows.

    NOTE(review): unlike validate_aoco_stats, *table* is not schema-qualified
    here, so a duplicate relname in another schema could match the wrong
    relation -- confirm callers only pass unique table names.
    """
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        sql = "select relname from pg_class where oid in (select segrelid from pg_appendonly where relid in (select oid from pg_class where relname = '%s'))" % table
        tname = dbconn.execSQLForSingleton(conn, sql)
    sql = "select tupcount from pg_aoseg.%s" % tname.strip()
    rows = getRows(dbname, sql)
    if len(rows) != 0:
        raise Exception("%s has stats of %d rows in %s table and should be 0" % (table, int(rows[0][0]), tname))
def get_all_hostnames_as_list(context, dbname):
    """Return stripped hostnames of every segment plus the master/standby rows."""
    hosts = [seg[0].strip() for seg in get_segment_hostnames(context, dbname)]
    hosts.extend(row[0].strip() for row in get_master_hostname(dbname))
    return hosts
def get_pid_for_segment(seg_data_dir, seg_host):
    """Return the pid of a process on *seg_host* whose 'ps -eaf' line mentions *seg_data_dir*.

    Filters out the grep process itself. If several processes match, the pid
    of the LAST matching line wins. Returns None when nothing matches.
    """
    cmd = Command(name='get list of postmaster processes',
                  cmdStr='ps -eaf | grep %s' % seg_data_dir,
                  ctxt=REMOTE,
                  remoteHost=seg_host)
    cmd.run(validateAfter=True)
    pid = None
    results = cmd.get_results().stdout.strip().split('\n')
    for res in results:
        if 'grep' not in res:
            # ps -eaf: column 1 is the pid.
            pid = res.split()[1]
    if pid is None:
        return None
    return int(pid)
def install_gppkg(context):
    """Install the gppkg named by $GPPKG_NAME from $GPPKG_PATH via 'gppkg --install'.

    Both environment variables must be set; the command result is left on
    *context* by run_command and echoed for the test log.
    """
    if 'GPPKG_PATH' not in os.environ:
        raise Exception('GPPKG_PATH needs to be set in the environment to install gppkg')
    if 'GPPKG_NAME' not in os.environ:
        raise Exception('GPPKG_NAME needs to be set in the environment to install gppkg')
    gppkg_path = os.environ['GPPKG_PATH']
    gppkg_name = os.environ['GPPKG_NAME']
    command = "gppkg --install %s/%s.gppkg" % (gppkg_path, gppkg_name)
    run_command(context, command)
    print "Install gppkg command: '%s', stdout: '%s', stderr: '%s'" % (command, context.stdout_message, context.error_message)
def enable_postgis_and_load_test_data_for_postgis_1(context):
    """Install the PostGIS 1.x gppkg and load the NYC sample data into 'opengeo'."""
    if 'GPHOME' not in os.environ:
        raise Exception('GPHOME needs to be set in the environment')
    install_gppkg(context)
    contrib_dir = "%s/share/postgresql/contrib" % os.environ['GPHOME']
    data_dir = "%s/../behave/mgmt_utils/steps/data/postgis" % os.path.dirname(os.path.realpath(__file__))
    # Order matters: PostGIS types first, then spatial refs, then the data.
    sql_files = [
        "%s/postgis.sql" % contrib_dir,
        "%s/spatial_ref_sys.sql" % contrib_dir,
        "%s/nyc_census_blocks_1.sql" % data_dir,
        "%s/nyc_neighborhoods_1.sql" % data_dir,
        "%s/nyc_subway_stations_1.sql" % data_dir,
        "%s/nyc_census_sociodata.sql" % data_dir,
        "%s/nyc_streets_1.sql" % data_dir,
    ]
    for sql_file in sql_files:
        run_command(context, "psql -d opengeo -f %s" % sql_file)
def enable_postgis_and_load_test_data(context):
    """Install the PostGIS 2.0 gppkg and load the NYC sample data into 'opengeo'."""
    if 'GPHOME' not in os.environ:
        raise Exception('GPHOME needs to be set in the environment')
    install_gppkg(context)
    contrib_dir = "%s/share/postgresql/contrib/postgis-2.0" % os.environ['GPHOME']
    data_dir = "%s/../behave/mgmt_utils/steps/data/postgis" % os.path.dirname(os.path.realpath(__file__))
    # Order matters: PostGIS types first, then spatial refs, then the data.
    sql_files = [
        "%s/postgis.sql" % contrib_dir,
        "%s/spatial_ref_sys.sql" % contrib_dir,
        "%s/nyc_census_blocks.sql" % data_dir,
        "%s/nyc_neighborhoods.sql" % data_dir,
        "%s/nyc_subway_stations.sql" % data_dir,
        "%s/nyc_census_sociodata.sql" % data_dir,
        "%s/nyc_streets.sql" % data_dir,
    ]
    for sql_file in sql_files:
        run_command(context, "psql -d opengeo -f %s" % sql_file)
def kill_process(pid, host=None, sig=signal.SIGTERM):
    """Send signal *sig* to *pid*, locally via os.kill or remotely via a gp Command."""
    if host is None:
        os.kill(pid, sig)
    else:
        remote_kill = Command('kill process on a given host',
                              cmdStr='kill -%d %d' % (sig, pid),
                              ctxt=REMOTE,
                              remoteHost=host)
        remote_kill.run(validateAfter=True)
def get_num_segments(primary=True, mirror=True, master=True, standby=True):
    """Count cluster members, selected by the boolean flags."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    total = 0
    if primary:
        total += sum(1 for seg in gparray.getDbList() if seg.isSegmentPrimary())
    if mirror:
        total += sum(1 for seg in gparray.getDbList() if seg.isSegmentMirror())
    if master and gparray.master is not None:
        total += 1
    if standby and gparray.standbyMaster is not None:
        total += 1
    return total
def check_user_permissions(file_name, access_mode):
    """Return True if the file owner has the requested permission on *file_name*.

    :param access_mode: one of 'read', 'write', 'execute'
    :raises Exception: for any other mode (after the file has been stat'ed)
    """
    st = os.stat(file_name)
    mode_bits = {'read': stat.S_IRUSR, 'write': stat.S_IWUSR, 'execute': stat.S_IXUSR}
    if access_mode not in mode_bits:
        raise Exception('Invalid mode specified, should be read, write or execute only')
    return bool(st.st_mode & mode_bits[access_mode])
def get_change_tracking_segment_info():
    """Return (port, hostname) of the first segment in change-tracking mode.

    Implicitly returns None when no segment is in change tracking.
    """
    db_list = GpArray.initFromCatalog(dbconn.DbURL()).getDbList()
    for seg in db_list:
        if not seg.isSegmentModeInChangeLogging():
            continue
        return seg.getSegmentPort(), seg.getSegmentHostName()
def are_segments_running():
    """Return True iff every segment in the catalog reports status 'u' (up)."""
    gparray = GpArray.initFromCatalog(dbconn.DbURL())
    return all(seg.status == 'u' for seg in gparray.getDbList())
def modify_sql_file(file, hostport):
    """Rewrite *file* in place, replacing any ip:port after 'gpfdist' with *hostport*.

    fileinput with inplace=1 redirects stdout into the file, so the print
    below writes each (possibly substituted) line back; the trailing newline
    is stripped first because print adds its own.
    """
    if os.path.isfile(file):
        for line in fileinput.FileInput(file,inplace=1):
            if line.find("gpfdist")>=0:
                line = re.sub('(\d+)\.(\d+)\.(\d+)\.(\d+)\:(\d+)',hostport, line)
            print str(re.sub('\n','',line))
def create_gpfilespace_config(host, port, user,fs_name, config_file, working_dir='/tmp'):
    """Write a gpfilespace config file for every cluster member and create the dirs.

    Queries pg_filespace_entry/gp_segment_configuration for the master,
    standby, primaries and mirrors in turn, appends 'hostname:dbid:location'
    lines to *config_file*, then (re)creates the per-role filespace
    directories under *working_dir* on each host.
    """
    mirror_hosts = []
    primary_hosts = []
    standby_host = ''
    master_host = ''
    fspath_master = working_dir + '/fs_master'
    fspath_standby = working_dir + '/fs_standby'
    fspath_primary = working_dir + '/fs_primary'
    fspath_mirror = working_dir + '/fs_mirror'
    # Master entry (preferred_role 'p', content -1): starts the config file.
    get_master_filespace_entry = 'psql -t -h %s -p %s -U %s -d template1 -c \" select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'p\' and content=-1;\"'%(host, port, user)
    (rc, out, err) = run_cmd(get_master_filespace_entry)
    if rc != 0:
        raise Exception('Exception from executing psql query: %s'% get_master_filespace_entry)
    else:
        file = open(config_file,'w')
        file.write('filespace:%s\n'%fs_name)
        result = out.split('\n')
        for line in result:
            if line.strip():
                row = line.split('|')
                row = [col.strip() for col in row]
                hostname = row[0]
                master_host = hostname
                dbid = row[1]
                fs_loc = os.path.join(fspath_master,os.path.split(row[2])[1])
                file.write(hostname+':'+dbid+':'+fs_loc)
                file.write('\n')
        file.close()
    # Standby entry (preferred_role 'm', content -1).
    get_standby_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'m\' and content=-1;\"'%(host, port, user)
    (rc, out, err) = run_cmd(get_standby_filespace_entry)
    if rc != 0:
        raise Exception('Exception from executing psql query: %s'% get_standby_filespace_entry)
    else:
        result = out.split('\n')
        file = open(config_file,'a')
        for line in result:
            if line.strip():
                row = line.strip().split('|')
                row = [col.strip() for col in row]
                hostname = row[0]
                standby_host= hostname
                dbid = row[1]
                fs_loc = os.path.join(fspath_standby,os.path.split(row[2])[1])
                file.write(hostname+':'+dbid+':'+fs_loc)
                file.write('\n')
        file.close()
    # Primary segments (preferred_role 'p', content > -1).
    get_primary_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'p\' and content>-1;\"'%(host, port, user)
    (rc, out, err) = run_cmd(get_primary_filespace_entry)
    if rc != 0:
        raise Exception('Exception from executing psql query: %s'% get_primary_filespace_entry)
    else:
        result = out.split('\n')
        file = open(config_file,'a')
        for line in result:
            if line.strip():
                row = line.strip().split('|')
                row = [col.strip() for col in row]
                hostname = row[0]
                primary_hosts.append(hostname)
                dbid = row[1]
                fs_loc = os.path.join(fspath_primary,os.path.split(row[2])[1])
                file.write(hostname+':'+dbid+':'+fs_loc)
                file.write('\n')
        file.close()
    # Mirror segments (preferred_role 'm', content > -1).
    get_mirror_filespace_entry= 'psql -t -h %s -p %s -U %s -d template1 -c \"select hostname, dbid, fselocation from pg_filespace_entry, gp_segment_configuration where dbid=fsedbid and preferred_role =\'m\' and content>-1;\"'%(host, port, user)
    (rc, out, err) = run_cmd(get_mirror_filespace_entry)
    if rc != 0:
        raise Exception('Exception from executing psql query: %s'% get_mirror_filespace_entry)
    else:
        result = out.split('\n')
        file = open(config_file,'a')
        for line in result:
            if line.strip():
                row = line.strip().split('|')
                row = [col.strip() for col in row]
                hostname = row[0]
                mirror_hosts.append(hostname)
                dbid = row[1]
                fs_loc = os.path.join(fspath_mirror,os.path.split(row[2])[1])
                file.write(hostname+':'+dbid+':'+fs_loc)
                file.write('\n')
        file.close()
    # Recreate the filespace directories on every host so they start empty.
    for host in primary_hosts:
        remove_dir(host,fspath_primary)
        create_dir(host,fspath_primary)
    for host in mirror_hosts:
        remove_dir(host,fspath_mirror)
        create_dir(host,fspath_mirror)
    remove_dir(master_host,fspath_master)
    remove_dir(standby_host,fspath_standby)
    create_dir(master_host,fspath_master)
    create_dir(standby_host,fspath_standby)
def remove_dir(host, directory):
    """Remove *directory* (recursively) on *host* via gpssh."""
    run_cmd("gpssh -h %s -e 'rm -rf %s'" % (host, directory))
def create_dir(host, directory):
    """Create *directory* (with parents) on *host* via gpssh."""
    run_cmd("gpssh -h %s -e 'mkdir -p %s'" % (host, directory))
def wait_till_change_tracking_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
    """Poll every 30s (up to ~40 min) until at least one segment is in change tracking.

    Returns (True, num_change_tracking_segments); raises on psql failure or
    timeout.
    """
    num_ct_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode =\'c\';"'%(host, port, user)
    (rc, out, err) = run_cmd(num_ct_nodes)
    if rc != 0:
        raise Exception('Exception from executing psql query: %s'%num_ct_nodes)
    else:
        num_cl = int(out.strip())
        count = 0
        while(num_cl == 0):
            time.sleep(30)
            (rc, out, err) = run_cmd(num_ct_nodes)
            num_cl = int(out.strip())
            count = count + 1
            if (count > 80):
                raise Exception("Timed out: cluster not in change tracking")
        return (True,num_cl)
def wait_till_insync_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
    """Poll every 30s (up to ~40 min) until every segment is synchronized and up.

    Returns True; raises on psql failure or timeout.
    """
    num_unsync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode <> \'s\' or status<> \'u\';"'%(host, port, user)
    (rc, out, err) = run_cmd(num_unsync_nodes)
    if rc != 0:
        raise Exception('Exception from executing psql query: %s'%num_unsync_nodes)
    else:
        num_unsync = int(out.strip())
        count = 0
        while(num_unsync > 0):
            time.sleep(30)
            (rc, out, err) = run_cmd(num_unsync_nodes)
            num_unsync = int(out.strip())
            count = count + 1
            if (count > 80):
                raise Exception("Timed out: cluster not in sync transition")
        return True
def wait_till_resync_transition(host='localhost', port=os.environ.get('PGPORT'), user=os.environ.get('USER')):
    """Poll every 30s (up to ~40 min) until all unsynchronized segments are resyncing.

    Waits until count(mode='r') equals count(mode<>'s'). Returns True;
    raises on psql failure or timeout.
    """
    num_resync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode =\'r\';"'%(host, port, user)
    num_insync_nodes = 'psql -t -h %s -p %s -U %s -d template1 -c "select count(*) from gp_segment_configuration where mode <>\'s\';"'%(host, port, user)
    (rc1, out1, err1) = run_cmd(num_resync_nodes)
    (rc2, out2, err2) = run_cmd(num_insync_nodes)
    if rc1 != 0 or rc2 != 0:
        # BUG FIX: previously referenced the undefined name 'num_unsync_nodes'
        # here, raising a NameError instead of the intended error message.
        raise Exception('Exception from executing psql queries: %s ; %s' % (num_resync_nodes, num_insync_nodes))
    num_resync = int(out1.strip())
    num_insync = int(out2.strip())
    count = 0
    while num_resync != num_insync:
        time.sleep(30)
        (rc1, out1, err1) = run_cmd(num_resync_nodes)
        (rc2, out2, err2) = run_cmd(num_insync_nodes)
        num_resync = int(out1.strip())
        num_insync = int(out2.strip())
        count = count + 1
        if count > 80:
            raise Exception("Timed out: cluster not in resync transition")
    return True
def check_dump_dir_exists(context, dbname):
    """Raise if any backup directory on any host still contains a db_dumps/ dir."""
    cmd_template = "ssh %s 'for DIR in %s; do if [ -d \"$DIR/db_dumps/\" ]; then echo \"$DIR EXISTS\"; else echo \"$DIR NOT FOUND\"; fi; done'"
    dir_map = get_backup_dirs_for_hosts(dbname)
    for host in dir_map:
        run_command(context, cmd_template % (host, " ".join(dir_map[host])))
        if context.exception:
            raise context.exception
        if 'EXISTS' in context.stdout_message:
            raise Exception("db_dumps directory is present in master/segments.")
def verify_restored_table_is_analyzed(context, table_name, dbname):
    """Return True iff pg_class.reltuples for the table equals its live row count."""
    row_count_sql = """SELECT count(*) FROM %s""" % table_name
    if '.' in table_name:
        schema_name, table_name = table_name.split(".")
    else:
        schema_name = 'public'
    schema_name = pg.escape_string(schema_name)
    table_name = pg.escape_string(table_name)
    reltuples_sql = ("SELECT reltuples FROM pg_class WHERE relname = '%s' "
                     "AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '%s')"
                     % (table_name, schema_name))
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        actual_rows = dbconn.execSQL(conn, row_count_sql).fetchall()
        stats_rows = dbconn.execSQL(conn, reltuples_sql).fetchall()
        return actual_rows == stats_rows
def analyze_database(context, dbname):
    """Run ANALYZE over the whole database to refresh optimizer statistics."""
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, "analyze")
def delete_rows_from_table(context, dbname, table_name, column_name, info):
    """Delete rows where *column_name* equals *info*, committing the change."""
    delete_sql = """DELETE FROM %s WHERE %s = %s""" % (table_name, column_name, info)
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        dbconn.execSQL(conn, delete_sql)
        conn.commit()
def validate_parse_email_file(context, email_file_path):
    """Validate an email-details YAML file and load it onto context.email_details.

    Raises Exception when the file is missing, not a .yaml file, empty,
    unparseable, or contains keys other than DBNAME/FROM/SUBJECT.
    """
    if os.path.isfile(email_file_path) is False:
        raise Exception("\'%s\' file does not exist." % email_file_path)
    # BUG FIX: the old check used split('.')[1], which rejected valid paths
    # containing extra dots (e.g. '/tmp/a.b/mail.yaml') and raised IndexError
    # for paths with no dot at all; test the suffix instead.
    if not email_file_path.endswith(".yaml"):
        raise Exception("\'%s\' is not \'.yaml\' file. File containing email details should be \'.yaml\' file." % email_file_path)
    if (os.path.getsize(email_file_path) > 0) is False:
        raise Exception("\'%s\' file is empty." % email_file_path)
    email_key_list = ["DBNAME","FROM", "SUBJECT"]
    try:
        with open(email_file_path, 'r') as f:
            doc = yaml.load(f)
        context.email_details = doc['EMAIL_DETAILS']
        for email in context.email_details:
            for key in email.keys():
                if key not in email_key_list:
                    raise Exception(" %s not present" % key)
    except Exception as e:
        # Any parse/shape problem is reported uniformly to the caller.
        raise Exception("\'%s\' file is not formatted properly." % email_file_path)
def check_count_for_specific_query(dbname, query, nrows):
    """Run a single-value *query* against *dbname* and raise unless it returns *nrows*."""
    # We want to bubble up the exception so that if table does not exist, the test fails
    with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn:
        result = dbconn.execSQLForSingleton(conn, query)
        if result != nrows:
            # BUG FIX: the old message referenced an undefined 'tablename',
            # raising a NameError that masked the real failure; report the
            # query instead.
            raise Exception('%d rows returned by query "%s" on db %s, expected row count = %d' % (result, query, dbname, nrows))
| apache-2.0 |
sadanandb/pmt | src/pyasm/search/upgrade/sql_parser.py | 6 | 10354 | #!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['TableData', 'SqlParser']
import sys, os, re
class Postgres(object):
    """SQL keyword snippets used when emitting PostgreSQL/Greenplum DDL."""

    def get_create_table(self):
        """Prefix for CREATE TABLE statements."""
        return "CREATE TABLE"

    def get_alter_table(self):
        """Prefix for ALTER TABLE statements."""
        return "ALTER TABLE ONLY"

    def get_add_constraint(self):
        """Clause that introduces a named constraint."""
        return "ADD CONSTRAINT"

    def get_create_index(self):
        """Prefix for CREATE INDEX statements."""
        return "CREATE INDEX"

    def get_create_unique_index(self):
        """Prefix for CREATE UNIQUE INDEX statements."""
        return "CREATE UNIQUE INDEX"
class TableData:
    """Parsed definition of a single table: columns, constraints, indexes, rows."""
    def __init__(my, table):
        my.table = table
        my.database = Postgres()
        my.columns = {}          # column name -> definition string
        my.columns_order = []    # column names in declaration order
        my.constraints = []      # raw ALTER TABLE ... lines
        my.indexes = []          # raw CREATE [UNIQUE] INDEX lines
        my.rows = {}             # primary value -> {column: value}
    def add_column(my, column, create):
        """Record a column and preserve its declaration order."""
        my.columns[column] = create
        my.columns_order.append(column)
    def get_create_table(my):
        """Render the full CREATE TABLE statement, plus constraints and indexes."""
        create = ""
        create += "--\n"
        create += "-- Create table: %s\n" % my.table
        create += "--\n"
        create += "%s %s (\n" % (my.database.get_create_table(), my.table)
        tmp = []
        for column in my.columns_order:
            value = my.columns[column]
            tmp.append(" %s %s" % (column,value) )
        create += ",\n".join(tmp)
        create += "\n);\n"
        for constraint in my.constraints:
            create += "%s %s\n" % (my.database.get_alter_table(), my.table)
            create += " %s\n" % constraint
        for index in my.indexes:
            #create += "%s %s\n" % (my.database.get_create_unique_index(), my.table)
            create += "%s\n" % index
        return create
    def get_alter_table(my, column):
        """Render an ALTER TABLE ... ADD COLUMN statement for *column*."""
        alter = "%s %s\n" % (my.database.get_alter_table(), my.table)
        alter += " ADD COLUMN %s %s;" % (column, my.columns[column])
        return alter
    def get_diff(my,standard_data):
        '''gets the sql difference between the two tables'''
        # print add columns
        extra_columns = [x for x in standard_data.columns if x not in my.columns]
        for column in extra_columns:
            print standard_data.get_alter_table(column)
        #for column, definition in my.columns.items():
        #    if column in extra_columns:
        #        continue
        #    definition2 = data.columns[column]
        #    if definition != definition2:
        #        print definition2
    def compare(my, data):
        """Print column and constraint differences between this table and *data*."""
        missing_columns = [x for x in my.columns if x not in data.columns]
        extra_columns = [x for x in data.columns if x not in my.columns]
        if missing_columns:
            print "missing columns: ", missing_columns
        if extra_columns:
            print "extra columns: ", extra_columns
            for extra_column in extra_columns:
                print data.get_alter_table(extra_column)
        missing_constraints = [x for x in my.constraints if x not in data.constraints]
        extra_constraints = [x for x in data.constraints if x not in my.constraints]
        if missing_constraints:
            print "missing constraints: ", missing_constraints
        if extra_constraints:
            print "extra constraints: ", extra_constraints
class SqlParser:
def __init__(my):
my.tables = {}
my.database = Postgres()
def get_tables(my):
tables = my.tables.keys()
tables.sort()
return tables
def get_data(my, table):
if my.tables.has_key(table):
return my.tables[table]
else:
return TableData(table)
def _extract_values(my, expr, line):
p = re.compile(expr, re.DOTALL)
m = p.search(line)
if not m:
return []
values = m.groups()
return values
def _extract_value(my, expr, line):
values = my._extract_values(expr,line)
if not values:
return None
return values[0]
def _extract_table(my, expr, line):
table = my._extract_value(expr, line)
table = table.replace('"','')
return table
def parse(my, file_path):
# open file and read
file = open(file_path)
lines = file.readlines()
file.close()
create_table = None
alter_table = None
alter_table_line = None
tmp_line = ""
for line in lines:
line = line.rstrip()
line = line.rstrip(",")
line = line.lstrip()
# handle create
if line.startswith(my.database.get_create_table()):
expr = '%s "?(\w+)"? \(' % my.database.get_create_table()
create_table = my._extract_table(expr, line)
data = TableData(create_table)
my.tables[create_table] = data
continue
if create_table:
if line == ");":
create_table = None
continue
# handle the line
tmp = line.split()
column = tmp[0]
column = column.replace('"','')
create = " ".join(tmp[1:])
my.tables[create_table].add_column(column, create)
continue
# handle alter table
if line.startswith(my.database.get_alter_table()):
expr = '%s "?(\w+)"?' % my.database.get_alter_table()
alter_table = my._extract_table(expr, line)
alter_table_line = line
continue
if alter_table:
if line == "":
alter_table = None
continue
alter_table_line += " %s" % line
my.tables[alter_table].constraints.append(alter_table_line)
# handle create index
if line.startswith(my.database.get_create_index()):
expr = '%s \w+ ON "?(\w+)"?' % (my.database.get_create_index())
index_table = my._extract_table(expr, line)
if index_table:
my.tables[index_table].indexes.append(line)
# handle create index
if line.startswith(my.database.get_create_unique_index()):
expr = '%s \w+ ON "?(\w+)"?' % (my.database.get_create_unique_index())
index_table = my._extract_table(expr, line)
if index_table:
my.tables[index_table].indexes.append(line)
# handle inserts
if tmp_line != "" or line.startswith("INSERT INTO"):
# have to figure out is this is the complete line
if not line.endswith(";"):
tmp_line += "\n"+line
continue
if tmp_line != "":
line = tmp_line + "\n" + line
tmp_line = ""
expr = '%s "?(\w+)"? \(' % ("INSERT INTO")
data_table = my._extract_table(expr, line)
expr = '\((.*)\) VALUES \((.*)\);$'
info = my._extract_values(expr, line)
if not info:
print "Error: "
print line
raise Exception("Improper INSERT statement")
columns = info[0].split(", ")
columns = [ x.lstrip('"') for x in columns ]
columns = [ x.rstrip('"') for x in columns ]
values = info[1].split(", ")
values = [ x.lstrip("'") for x in values ]
values = [ x.rstrip("'") for x in values ]
rows = {}
for i in range(0, len(columns)):
rows[columns[i]] = values[i]
# ensure that the data object exists
if not my.tables.has_key(data_table):
data = TableData(create_table)
my.tables[data_table] = data
# store the data by the unique identifier
if data_table == "search_object":
primary_index = 1
elif "code" in columns:
primary_index = columns.index('code')
elif "id" in columns:
primary_index = columns.index('id')
else:
primary_index = 1
primary_value = values[primary_index]
my.tables[data_table].rows[primary_value] = rows
def compare(my, data, data2):
columns = data.columns
columns2 = data2.columns
diffs = []
pattern = "%-18s%-45s%-45s"
for column, schema in columns.items():
if not columns2.has_key(column):
database = "x"
else:
database = columns2[column]
if schema != database:
database = database
else:
#schema = "-"
#database = "-"
continue
diffs.append( pattern % (column,schema,database) )
for column2, database in columns2.items():
if not columns.has_key(column2):
schema = "x"
diffs.append( pattern % (column2,schema,database) )
if diffs:
print
print
print "Table: ", table
print "-"*20
print pattern % ("column","standard schema","target schema")
print "-"*110
for diff in diffs:
print diff
print "-"*110
# print constraints
constraints = data.constraints
constraints2 = data2.constraints
if constraints != constraints2:
print constraints
print constraints2
# print indexes
indexes = data.indexes
indexes2 = data2.indexes
if indexes != indexes:
print indexes
print indexes2
if __name__ == '__main__':
    # Ad-hoc smoke test: parse the SQL dump file given on the command line.
    # test parser
    import sys
    db_parser = SqlParser()
    db_parser.parse( sys.argv[1] )
| epl-1.0 |
zaccoz/odoo | addons/account/__openerp__.py | 190 | 7542 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'eInvoicing',
'version' : '1.1',
'author' : 'OpenERP SA',
'category' : 'Accounting & Finance',
'description' : """
Accounting and Financial Management.
====================================
Financial and accounting module that covers:
--------------------------------------------
* General Accounting
* Cost/Analytic accounting
* Third party accounting
* Taxes management
* Budgets
* Customer and Supplier Invoices
* Bank statements
* Reconciliation process by partner
Creates a dashboard for accountants that includes:
--------------------------------------------------
* List of Customer Invoices to Approve
* Company Analysis
* Graph of Treasury
Processes like maintaining general ledgers are done through the defined Financial Journals (entry move line or grouping is maintained through a journal)
for a particular financial year and for preparation of vouchers there is a module named account_voucher.
""",
'website': 'https://www.odoo.com/page/billing',
'depends' : ['base_setup', 'product', 'analytic', 'board', 'edi', 'report'],
'data': [
'security/account_security.xml',
'security/ir.model.access.csv',
'account_menuitem.xml',
'report/account_invoice_report_view.xml',
'report/account_entries_report_view.xml',
'report/account_treasury_report_view.xml',
'report/account_report_view.xml',
'report/account_analytic_entries_report_view.xml',
'wizard/account_move_bank_reconcile_view.xml',
'wizard/account_use_model_view.xml',
'account_installer.xml',
'wizard/account_period_close_view.xml',
'wizard/account_reconcile_view.xml',
'wizard/account_unreconcile_view.xml',
'wizard/account_statement_from_invoice_view.xml',
'account_view.xml',
'account_report.xml',
'account_financial_report_data.xml',
'wizard/account_report_common_view.xml',
'wizard/account_invoice_refund_view.xml',
'wizard/account_fiscalyear_close_state.xml',
'wizard/account_chart_view.xml',
'wizard/account_tax_chart_view.xml',
'wizard/account_move_line_reconcile_select_view.xml',
'wizard/account_open_closed_fiscalyear_view.xml',
'wizard/account_move_line_unreconcile_select_view.xml',
'wizard/account_vat_view.xml',
'wizard/account_report_print_journal_view.xml',
'wizard/account_report_general_journal_view.xml',
'wizard/account_report_central_journal_view.xml',
'wizard/account_subscription_generate_view.xml',
'wizard/account_fiscalyear_close_view.xml',
'wizard/account_state_open_view.xml',
'wizard/account_journal_select_view.xml',
'wizard/account_change_currency_view.xml',
'wizard/account_validate_move_view.xml',
'wizard/account_report_general_ledger_view.xml',
'wizard/account_invoice_state_view.xml',
'wizard/account_report_partner_balance_view.xml',
'wizard/account_report_account_balance_view.xml',
'wizard/account_report_aged_partner_balance_view.xml',
'wizard/account_report_partner_ledger_view.xml',
'wizard/account_reconcile_partner_process_view.xml',
'wizard/account_automatic_reconcile_view.xml',
'wizard/account_financial_report_view.xml',
'wizard/pos_box.xml',
'project/wizard/project_account_analytic_line_view.xml',
'account_end_fy.xml',
'account_invoice_view.xml',
'data/account_data.xml',
'data/data_account_type.xml',
'data/configurable_account_chart.xml',
'account_invoice_workflow.xml',
'project/project_view.xml',
'project/project_report.xml',
'project/wizard/account_analytic_balance_report_view.xml',
'project/wizard/account_analytic_cost_ledger_view.xml',
'project/wizard/account_analytic_inverted_balance_report.xml',
'project/wizard/account_analytic_journal_report_view.xml',
'project/wizard/account_analytic_cost_ledger_for_journal_report_view.xml',
'project/wizard/account_analytic_chart_view.xml',
'partner_view.xml',
'product_view.xml',
'account_assert_test.xml',
'ir_sequence_view.xml',
'company_view.xml',
'edi/invoice_action_data.xml',
'account_bank_view.xml',
'res_config_view.xml',
'account_pre_install.yml',
'views/report_vat.xml',
'views/report_invoice.xml',
'views/report_trialbalance.xml',
'views/report_centraljournal.xml',
'views/report_overdue.xml',
'views/report_generaljournal.xml',
'views/report_journal.xml',
'views/report_salepurchasejournal.xml',
'views/report_partnerbalance.xml',
'views/report_agedpartnerbalance.xml',
'views/report_partnerledger.xml',
'views/report_partnerledgerother.xml',
'views/report_financial.xml',
'views/report_generalledger.xml',
'project/views/report_analyticbalance.xml',
'project/views/report_analyticjournal.xml',
'project/views/report_analyticcostledgerquantity.xml',
'project/views/report_analyticcostledger.xml',
'project/views/report_invertedanalyticbalance.xml',
'views/account.xml',
],
'qweb' : [
"static/src/xml/account_move_reconciliation.xml",
"static/src/xml/account_move_line_quickadd.xml",
"static/src/xml/account_bank_statement_reconciliation.xml",
],
'demo': [
'demo/account_demo.xml',
'project/project_demo.xml',
'project/analytic_account_demo.xml',
'demo/account_minimal.xml',
'demo/account_invoice_demo.xml',
'demo/account_bank_statement.xml',
'account_unit_test.xml',
],
'test': [
'test/account_test_users.yml',
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_change_currency.yml',
'test/chart_of_account.yml',
'test/account_period_close.yml',
'test/account_use_model.yml',
'test/account_validate_account_move.yml',
'test/test_edi_invoice.yml',
'test/account_report.yml',
'test/analytic_hierarchy.yml',
'test/account_fiscalyear_close.yml', #last test, as it will definitively close the demo fiscalyear
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rlr/fjord | vendor/packages/requests-2.7.0/requests/status_codes.py | 926 | 3200 | # -*- coding: utf-8 -*-
from .structures import LookupDict
# Maps each HTTP status code to a tuple of snake_case aliases; the first
# alias is the canonical name, the rest are convenience synonyms.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
}
# Expose every alias on a LookupDict, e.g. codes.ok == codes.OK == 200.
codes = LookupDict(name='status_codes')

for (code, titles) in list(_codes.items()):
    for title in titles:
        setattr(codes, title, code)
        if not title.startswith('\\'):
            # Symbolic aliases like '\\o/' have no meaningful uppercase form.
            setattr(codes, title.upper(), code)
| bsd-3-clause |
lreis2415/AutoFuzSlpPos | autofuzslppos/Util.py | 2 | 2315 | # -*- coding: utf-8 -*-
"""Utility Classes and Functions
@author : Liangjun Zhu
@changelog:
- 15-07-31 lj - initial implementation
- 17-07-21 lj - reorganize and incorporate with pygeoc
"""
from __future__ import absolute_import, unicode_literals
import os
from io import open
import numpy
from pygeoc.raster import RasterUtilClass
def rpi_calculation(distdown, distup, rpi_outfile):
    """Calculate Relative Position Index (RPI).

    RPI = distdown / (distdown + distup), written as a GeoTIFF.
    Cells with a negative down-slope distance are written as NoData
    (negative values presumably mark NoData here -- TODO confirm).
    """
    dist_down = RasterUtilClass.read_raster(distdown)
    dist_up = RasterUtilClass.read_raster(distup)
    invalid = dist_down.data < 0
    rpi_data = numpy.where(invalid, dist_down.noDataValue,
                           dist_down.data / (dist_down.data + dist_up.data))
    RasterUtilClass.write_gtiff_file(rpi_outfile, dist_down.nRows, dist_down.nCols,
                                     rpi_data, dist_down.geotrans, dist_down.srs,
                                     dist_down.noDataValue, dist_down.dataType)
def slope_rad_to_deg(tanslp, slp):
    """Convert a tangent-of-slope raster to slope in degrees.

    Cells equal to the source raster's NoData value pass through unchanged.
    """
    src = RasterUtilClass.read_raster(tanslp)
    nodata_mask = src.data == src.noDataValue
    out = numpy.where(nodata_mask, src.noDataValue,
                      numpy.arctan(src.data) * 180. / numpy.pi)
    RasterUtilClass.write_gtiff_file(slp, src.nRows, src.nCols, out, src.geotrans,
                                     src.srs, src.noDataValue, src.dataType)
def write_log(logfile, contentlist):
    """Append a string, or each item of a list/tuple of strings, to a log file.

    Each item is written on its own line, UTF-8 encoded.  Open mode 'a'
    creates the file if it does not already exist, so the old
    os.path.exists()/'w' branch was redundant (and race-prone); the
    'with' block guarantees the file is flushed and closed.

    Args:
        logfile: path of the log file to append to.
        contentlist: a string, or a list/tuple of strings.
    """
    with open(logfile, 'a', encoding='utf-8') as log_status:
        if isinstance(contentlist, (list, tuple)):
            for content in contentlist:
                log_status.write('%s\n' % content)
        else:
            log_status.write('%s\n' % contentlist)
def main():
    """TEST CODE"""
    # NOTE(review): ad-hoc debug driver with a hard-coded local Windows
    # path -- it only runs on the original author's machine and is not
    # part of the public API.
    inf = r'C:\z_data_m\SEIMS2017\fuzslppos_ywz10m\slope_position_units\SLOPPOSITION.tif'
    # inr = RasterUtilClass.read_raster(inf)
    # inr.data[inr.data > 0] = 1.
    # RasterUtilClass.write_gtiff_file(inf, inr.nRows, inr.nCols, inr.data,
    #                                  inr.geotrans, inr.srs, inr.noDataValue,
    #                                  inr.dataType)
    # Rewrites the file in place as GeoTIFF (see pygeoc for flag meanings).
    RasterUtilClass.raster_to_gtiff(inf, inf, True, True)
if __name__ == '__main__':
    main()
| gpl-2.0 |
foodszhang/kbengine | kbe/src/lib/python/Lib/http/cookies.py | 63 | 20859 | ####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
# Pre-bound join helpers used throughout this module when assembling
# cookie strings.
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
    """Raised for illegal cookie keys or attribute names (see Morsel)."""
    pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
class Morsel(dict):
    """A class to hold ONE (key, value) pair.
    In a cookie, each such pair may have several attributes, so this class is
    used to keep the attributes associated with the appropriate key,value pair.
    This class also includes a coded_value attribute, which is used to hold
    the network representation of the value. This is most useful when Python
    objects are pickled for network transit.
    """
    # RFC 2109 lists these attributes as reserved:
    #   path       comment       domain
    #   max-age    secure        version
    #
    # For historical reasons, these attributes are also reserved:
    #   expires
    #
    # This is an extension from Microsoft:
    #   httponly
    #
    # This dictionary provides a mapping from the lowercase
    # variant on the left to the appropriate traditional
    # formatting on the right.
    _reserved = {
        "expires" : "expires",
        "path" : "Path",
        "comment" : "Comment",
        "domain" : "Domain",
        "max-age" : "Max-Age",
        "secure" : "secure",
        "httponly" : "httponly",
        "version" : "Version",
        }
    # Attributes that are boolean flags: emitted without an "=value" part.
    _flags = {'secure', 'httponly'}
    def __init__(self):
        """Create an empty morsel: no key/value yet, and every reserved
        attribute present with an empty-string value."""
        # Set defaults
        self.key = self.value = self.coded_value = None
        # Set default attributes
        for key in self._reserved:
            dict.__setitem__(self, key, "")
    def __setitem__(self, K, V):
        """Set cookie attribute K to V; only reserved attribute names
        (case-insensitive) are accepted, otherwise CookieError is raised."""
        K = K.lower()
        if not K in self._reserved:
            raise CookieError("Invalid Attribute %s" % K)
        dict.__setitem__(self, K, V)
    def isReservedKey(self, K):
        """Return True if K (case-insensitive) is a reserved attribute name."""
        return K.lower() in self._reserved
    def set(self, key, val, coded_val, LegalChars=_LegalChars):
        """Set this morsel's (key, value, coded_value) triple.
        Raises CookieError if key is a reserved attribute name or contains
        characters outside LegalChars."""
        # First we verify that the key isn't a reserved word
        # Second we make sure it only contains legal characters
        if key.lower() in self._reserved:
            raise CookieError("Attempt to set a reserved key: %s" % key)
        if any(c not in LegalChars for c in key):
            raise CookieError("Illegal key value: %s" % key)
        # It's a good key, so save it.
        self.key = key
        self.value = val
        self.coded_value = coded_val
    def output(self, attrs=None, header="Set-Cookie:"):
        """Return this morsel formatted as an HTTP header line."""
        return "%s %s" % (header, self.OutputString(attrs))
    __str__ = output
    def __repr__(self):
        return '<%s: %s=%s>' % (self.__class__.__name__,
                                self.key, repr(self.value))
    def js_output(self, attrs=None):
        """Return a <script> snippet that sets this cookie via JavaScript."""
        # Print javascript
        return """
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = \"%s\";
        // end hiding -->
        </script>
        """ % (self.OutputString(attrs).replace('"', r'\"'))
    def OutputString(self, attrs=None):
        """Return 'key=value' followed by every non-empty attribute listed
        in attrs (default: all reserved attributes), joined with '; '."""
        # Build up our result
        #
        result = []
        append = result.append
        # First, the key=value pair
        append("%s=%s" % (self.key, self.coded_value))
        # Now add any defined attributes
        if attrs is None:
            attrs = self._reserved
        items = sorted(self.items())
        for key, value in items:
            if value == "":
                continue
            if key not in attrs:
                continue
            if key == "expires" and isinstance(value, int):
                # An integer "expires" is an offset in seconds from now.
                append("%s=%s" % (self._reserved[key], _getdate(value)))
            elif key == "max-age" and isinstance(value, int):
                append("%s=%d" % (self._reserved[key], value))
            elif key == "secure":
                # Boolean flag: name only, no value.
                append(str(self._reserved[key]))
            elif key == "httponly":
                append(str(self._reserved[key]))
            else:
                append("%s=%s" % (self._reserved[key], value))
        # Return the result
        return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
\s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
    """A container class for a set of Morsels."""
    def value_decode(self, val):
        """real_value, coded_value = value_decode(STRING)
        Called prior to setting a cookie's value from the network
        representation.  The VALUE is the value read from HTTP
        header.
        Override this function to modify the behavior of cookies.
        """
        return val, val
    def value_encode(self, val):
        """real_value, coded_value = value_encode(VALUE)
        Called prior to setting a cookie's value from the dictionary
        representation.  The VALUE is the value being assigned.
        Override this function to modify the behavior of cookies.
        """
        strval = str(val)
        return strval, strval
    def __init__(self, input=None):
        """Optionally load cookies from *input* (a string or a dict)."""
        if input:
            self.load(input)
    def __set(self, key, real_value, coded_value):
        """Private method for setting a cookie's value"""
        # Reuse an existing morsel for this key if present.
        M = self.get(key, Morsel())
        M.set(key, real_value, coded_value)
        dict.__setitem__(self, key, M)
    def __setitem__(self, key, value):
        """Dictionary style assignment."""
        rval, cval = self.value_encode(value)
        self.__set(key, rval, cval)
    def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
        """Return a string suitable for HTTP."""
        result = []
        items = sorted(self.items())
        for key, value in items:
            result.append(value.output(attrs, header))
        return sep.join(result)
    __str__ = output
    def __repr__(self):
        l = []
        items = sorted(self.items())
        for key, value in items:
            l.append('%s=%s' % (key, repr(value.value)))
        return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
    def js_output(self, attrs=None):
        """Return a string suitable for JavaScript."""
        result = []
        items = sorted(self.items())
        for key, value in items:
            result.append(value.js_output(attrs))
        return _nulljoin(result)
    def load(self, rawdata):
        """Load cookies from a string (presumably HTTP_COOKIE) or
        from a dictionary.  Loading cookies from a dictionary 'd'
        is equivalent to calling:
            map(Cookie.__setitem__, d.keys(), d.values())
        """
        if isinstance(rawdata, str):
            self.__parse_string(rawdata)
        else:
            # self.update() wouldn't call our custom __setitem__
            for key, value in rawdata.items():
                self[key] = value
        return
    def __parse_string(self, str, patt=_CookiePattern):
        """Parse a cookie header string and store the morsels found."""
        i = 0  # Our starting point
        n = len(str)  # Length of string
        M = None  # current morsel
        while 0 <= i < n:
            # Start looking for a cookie
            match = patt.match(str, i)
            if not match:
                # No more cookies
                break
            key, value = match.group("key"), match.group("val")
            i = match.end(0)
            # Parse the key, value in case it's metainfo
            if key[0] == "$":
                # We ignore attributes which pertain to the cookie
                # mechanism as a whole. See RFC 2109.
                # (Does anyone care?)
                if M:
                    M[key[1:]] = value
            elif key.lower() in Morsel._reserved:
                # A reserved attribute of the *current* morsel (e.g. Path).
                if M:
                    if value is None:
                        # Valueless attributes are only legal for boolean
                        # flags such as "secure" and "httponly".
                        if key.lower() in Morsel._flags:
                            M[key] = True
                    else:
                        M[key] = _unquote(value)
            elif value is not None:
                # A new key=value pair: start a new morsel.
                rval, cval = self.value_decode(value)
                self.__set(key, rval, cval)
                M = self[key]
class SimpleCookie(BaseCookie):
    """A BaseCookie that stores values as plain strings.

    Values assigned dictionary-style are converted with str() and quoted
    for the wire; values received from HTTP are unquoted but kept as
    strings.
    """
    def value_decode(self, val):
        """Return (real_value, coded_value) for a value read from HTTP."""
        return _unquote(val), val
    def value_encode(self, val):
        """Return (real_value, coded_value) for a value being assigned."""
        coded = str(val)
        return coded, _quote(coded)
| lgpl-3.0 |
nhazekam/cctools | chirp/src/bindings/python/chirp.binding.py | 1 | 22645 | ## @package ChirpPython
#
# Python Chirp bindings.
#
# The objects and methods provided by this package correspond to the native
# C API in @ref chirp_reli.h and chirp_swig_wrap.h
#
# The SWIG-based Python bindings provide a higher-level interface that
# revolves around:
#
# - @ref Chirp.Client
# - @ref Chirp.Stat
import os
import time
import json
import binascii
##
# Python Client object
#
# This class is used to create a chirp client
class Client(object):
##
# Create a new chirp client
#
# @param self Reference to the current task object.
# @param hostport The host:port of the server.
# @param timeout The time to wait for a server response on every request.
# @param authentication A list of prefered authentications. E.g., ['tickets', 'unix']
# @param debug Generate client debug output.
def __init__(self, hostport, timeout=60, authentication=None, tickets=None, debug=False):
self.hostport = hostport
self.timeout = timeout
if debug:
cctools_debug_config('chirp_python_client')
cctools_debug_flags_set('chirp')
if tickets and (authentication is None):
authentication = ['ticket']
self.__set_tickets(tickets)
if authentication is None:
auth_register_all()
else:
for auth in authentication:
auth_register_byname(auth)
self.identity = self.whoami()
if self.identity is '':
raise AuthenticationFailure(authentication)
def __exit__(self):
chirp_reli_disconnect(self.hostport)
def __del__(self):
chirp_reli_disconnect(self.hostport)
def __stoptime(self, absolute_stop_time=None, timeout=None):
if timeout is None:
timeout = self.timeout
if absolute_stop_time is None:
absolute_stop_time = time.time() + timeout
return absolute_stop_time
def __set_tickets(self, tickets):
tickets_str = None
if tickets is None:
try:
tickets_str = os.environ['CHIRP_CLIENT_TICKETS']
except KeyError:
tickets_str = None
else:
tickets_str = ','.join(tickets)
if tickets_str is not None:
auth_ticket_load(tickets_str)
##
# Returns a string with identity of the client according to the server.
#
# @param self Reference to the current task object.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def whoami(self, absolute_stop_time=None, timeout=None):
return chirp_wrap_whoami(self.hostport, self.__stoptime(absolute_stop_time, timeout))
##
# Returns a string with the ACL of the given directory.
# Throws an IOError on error (no such directory).
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def listacl(self, path='/', absolute_stop_time=None, timeout=None):
acls = chirp_wrap_listacl(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if acls is None:
raise IOError(path)
return acls.split('\n')
##
# Returns a string with the ACL of the given directory.
# Throws a GeneralError on error.
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param subject Target subject.
# @param rights Permissions to be granted.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def setacl(self, path, subject, rights, absolute_stop_time=None, timeout=None):
result = chirp_reli_setacl(self.hostport, path, subject, rights, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('setacl', result, [path, subject, rights])
return result
##
# Set the ACL for the given directory to be only for the rights to the calling user.
# Throws a GeneralError on error.
#
# @param self Reference to the current task object.
# @param path Target directory.
# @param rights Permissions to be granted.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def resetacl(self, path, rights, absolute_stop_time=None, timeout=None):
result = chirp_wrap_resetacl(self.hostport, path, rights, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('resetacl', result, [path, subject, rights])
return result
##
# Returns a list with the names of the files in the path.
# Throws an IOError on error (no such directory).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def ls(self, path, absolute_stop_time=None, timeout=None):
dr = chirp_reli_opendir(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
files = []
if dir is None:
raise IOError(path)
while True:
d = chirp_reli_readdir(dr)
if d is None: break
files.append(Stat(d.name, d.info))
return files
##
# Returns a Chirp.Stat object with information on path.
# Throws an IOError on error (e.g., no such path or insufficient permissions).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def stat(self, path, absolute_stop_time=None, timeout=None):
info = chirp_wrap_stat(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if info is None:
raise IOError(path)
return Stat(path, info)
##
# Changes permissions on path.
# Throws a GeneralFailure on error (e.g., no such path or insufficient permissions).
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param mode Desired permissions (e.g., 0755)
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def chmod(self, path, mode, absolute_stop_time=None, timeout=None):
result = chirp_reli_chmod(self.hostport, path, mode, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise GeneralFailure('chmod', result)
return result
##
# Copies local file/directory source to the chirp server as file/directory destination.
# If destination is not given, source name is used.
# Raises Chirp.TransferFailure on error.
#
# @param self Reference to the current task object.
# @param source A local file or directory.
# @param destination File or directory name to use in the server (defaults to source).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def put(self, source, destination=None, absolute_stop_time=None, timeout=None):
if destination is None:
destination = source
result = chirp_recursive_put(self.hostport,
source, destination,
self.__stoptime(absolute_stop_time, timeout))
if(result > -1):
return result
raise TransferFailure('put', result, source, destination)
##
# Copies server file/directory source to the local file/directory destination.
# If destination is not given, source name is used.
# Raises Chirp.TransferFailure on error.
#
# @param self Reference to the current task object.
# @param source A server file or directory.
# @param destination File or directory name to be used locally (defaults to source).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def get(self, source, destination=None, absolute_stop_time=None, timeout=None):
if destination is None:
destination = source
result = chirp_recursive_get(self.hostport,
source, destination,
self.__stoptime(absolute_stop_time, timeout))
if(result > -1):
return result
raise TransferFailure('get', result, source, destination)
##
# Removes the given file or directory from the server.
# Raises OSError on error.
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def rm(self, path, absolute_stop_time=None, timeout=None):
status = chirp_reli_rmall(self.hostport, path, self.__stoptime(absolute_stop_time, timeout))
if status < 0:
raise OSError
##
# Recursively create the directories in path.
# Raises OSError on error.
#
# @param self Reference to the current task object.
# @param path Target file/directory.
# @param mode Unix permissions for the created directory.
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def mkdir(self, path, mode=493, absolute_stop_time=None, timeout=None):
result = chirp_reli_mkdir_recursive(self.hostport, path, mode, self.__stoptime(absolute_stop_time, timeout))
if result < 0:
raise OSError
return result
##
# Computes the checksum of path.
# Raises IOError on error.
#
# @param self Reference to the current task object.
# @param path Target file.
# @param algorithm One of 'md5' or 'sha1' (default).
# @param absolute_stop_time If given, maximum number of seconds since
# epoch to wait for a server response.
# (Overrides any timeout.)
# @param timeout If given, maximum number of seconds to
# wait for a server response.
def hash(self, path, algorithm='sha1', absolute_stop_time=None, timeout=None):
hash_hex = chirp_wrap_hash(self.hostport, path, algorithm, self.__stoptime(absolute_stop_time, timeout))
if hash_hex is None:
raise IOError
return hash_hex
##
# Creates a chirp job. See http://ccl.cse.nd.edu/software/manuals/chirp.html for details.
#
# @param job_description A dictionary with a job chirp description.
#
# @code
# job_description = {
# 'executable': "/bin/tar",
# 'arguments': [ 'tar', '-cf', 'archive.tar', 'a', 'b' ],
# 'files': { 'task_path': 'a',
# 'serv_path': '/users/magrat/a.txt'
# 'type': 'INPUT' },
# { 'task_path': 'b',
# 'serv_path': '/users/magrat/b.txt'
# 'type': 'INPUT' },
# { 'task_path': 'archive.tar',
# 'serv_path': '/users/magrat/archive.tar'
# 'type': 'OUTPUT' }
# }
# job_id = client.job_create(job_description);
# @endcode
def job_create(self, job_description):
job_json = json.dumps(job_description)
job_id = chirp_wrap_job_create(self.hostport, job_json, self.__stoptime())
if job_id < 0:
raise ChirpJobError('create', job_id, job_json)
return job_id;
##
# Kills the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be killed.
#
def job_kill(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_kill(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('kill', result, ids_str)
return result;
##
# Commits (starts running) the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be committed.
#
def job_commit(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_commit(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('commit', result, ids_str)
return result;
##
# Reaps the jobs identified with the different job ids.
#
# @param job_ids Job ids of the chirp jobs to be reaped.
#
def job_reap(self, *job_ids):
ids_str = json.dumps(job_ids)
result = chirp_wrap_job_reap(self.hostport, ids_str, self.__stoptime())
if result < 0:
raise ChirpJobError('reap', result, ids_str)
return result;
##
# Obtains the current status for each job id. The value returned is a
# list which contains a dictionary reference per job id.
#
# @param job_ids Job ids of the chirp jobs to be reaped.
#
def job_status(self, *job_ids):
ids_str = json.dumps(job_ids)
status = chirp_wrap_job_status(self.hostport, ids_str, self.__stoptime())
if status is None:
raise ChirpJobError('status', None, ids_str)
return json.loads(status);
##
# Waits up to waiting_time seconds for the job with the given id to
# terminate. The return value is the same as for job_status. If the call
# times out, an empty string is returned. If job_id is omitted, job_wait
# waits for any of the user's jobs.
#
# @param waiting_time maximum number of seconds to wait for a job to finish.
# @param job_id id of the job to wait.
def job_wait(self, waiting_time, job_id = 0):
status = chirp_wrap_job_wait(self.hostport, job_id, waiting_time, self.__stoptime())
if status is None:
raise ChirpJobError('status', None, job_id)
return json.loads(status);
##
# Python Stat object
#
# This class is used to record stat information for files/directories of a chirp server.
class Stat(object):
    """Stat information for a file or directory on a chirp server.

    Wraps the raw cstat structure returned by the chirp bindings and
    exposes each field as a read-only property, for example::

        >>> print s.path
    """

    def __init__(self, path, cstat):
        self._path = path
        self._info = cstat

    @property
    def path(self):
        """Target path."""
        return self._path

    @property
    def device(self):
        """ID of device containing file."""
        return self._info.cst_dev

    @property
    def inode(self):
        """inode number."""
        return self._info.cst_ino

    @property
    def mode(self):
        """File mode permissions."""
        return self._info.cst_mode

    @property
    def nlink(self):
        """Number of hard links."""
        return self._info.cst_nlink

    @property
    def uid(self):
        """User ID of owner."""
        return self._info.cst_uid

    @property
    def gid(self):
        """Group ID of owner."""
        return self._info.cst_gid

    @property
    def rdev(self):
        """Device ID if special file."""
        return self._info.cst_rdev

    @property
    def size(self):
        """Total size, in bytes."""
        return self._info.cst_size

    @property
    def block_size(self):
        """Block size for file system I/O."""
        return self._info.cst_blksize

    @property
    def blocks(self):
        """Number of 512B blocks allocated."""
        return self._info.cst_blocks

    @property
    def atime(self):
        """Seconds since epoch of last access."""
        return self._info.cst_atime

    @property
    def mtime(self):
        """Seconds since epoch of last modification."""
        return self._info.cst_mtime

    @property
    def ctime(self):
        """Seconds since epoch of last status change."""
        return self._info.cst_ctime

    def __repr__(self):
        return "%s uid:%d gid:%d size:%d" % (self.path, self.uid, self.gid, self.size)
class AuthenticationFailure(Exception):
    """Raised when the client cannot authenticate to the chirp server."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class GeneralFailure(Exception):
    """Raised when a chirp operation fails for a generic reason."""

    def __init__(self, action, status, value):
        self.action = action
        self.status = status
        self.value = value

    def __str__(self):
        return "%s(%s) %s" % (self.action, self.status, self.value)
class TransferFailure(Exception):
    """Raised when a file transfer between client and server fails."""

    def __init__(self, action, status, source, dest):
        self.action = action
        self.status = status
        self.source = source
        self.dest = dest

    def __str__(self):
        return "Error with %s(%s) %s %s" % (self.action, self.status, self.source, self.dest)
class ChirpJobError(Exception):
    """Raised when a chirp job operation (create/kill/commit/...) fails."""

    def __init__(self, action, status, value):
        self.action = action
        self.status = status
        self.value = value

    def __str__(self):
        return "%s(%s) %s" % (self.action, self.status, self.value)
# @endcode
| gpl-2.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-1024-transformer/dataset_preproc/data_generators/dna_encoder.py | 7 | 4020 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for DNA data.
* DNAEncoder: ACTG strings to ints and back
* DelimitedDNAEncoder: for delimited subsequences
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from six.moves import range # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import text_encoder
class DNAEncoder(text_encoder.TextEncoder):
  """ACTG strings to ints and back. Optionally chunks bases into single ids.

  Bases are grouped into chunks of `chunk_size` and every distinct chunk is
  assigned its own id. To use a different character set, subclass and set
  BASES to the char set. UNK and PAD must not appear in the char set, but can
  also be reset.

  Uses 'N' as an unknown base.
  """
  BASES = list("ACTG")
  UNK = "N"
  PAD = "0"

  def __init__(self,
               chunk_size=1,
               num_reserved_ids=text_encoder.NUM_RESERVED_TOKENS):
    super(DNAEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
    # Build a vocabulary of chunks of size chunk_size; tokens are sorted so
    # id assignment is deterministic across runs.
    self._chunk_size = chunk_size
    tokens = self._tokens()
    tokens.sort()
    ids = range(self._num_reserved_ids, len(tokens) + self._num_reserved_ids)
    self._ids_to_tokens = dict(zip(ids, tokens))
    self._tokens_to_ids = dict(zip(tokens, ids))

  def _tokens(self):
    """Return every base/UNK combination of length 1..chunk_size as tuples,
    right-padded with PAD up to chunk_size."""
    chunks = []
    for size in range(1, self._chunk_size + 1):
      c = itertools.product(self.BASES + [self.UNK], repeat=size)
      num_pad = self._chunk_size - size
      padding = (self.PAD,) * num_pad
      c = [el + padding for el in c]
      chunks.extend(c)
    return chunks

  @property
  def vocab_size(self):
    """Total number of ids, including the reserved ones."""
    return len(self._ids_to_tokens) + self._num_reserved_ids

  def encode(self, s):
    """Encode a string of bases into a list of chunk ids.

    The input is right-padded with PAD so its length becomes a multiple of
    chunk_size.

    Raises:
      ValueError: if a chunk is not in the vocabulary.
    """
    bases = list(s)
    extra = len(bases) % self._chunk_size
    if extra > 0:
      pad = [self.PAD] * (self._chunk_size - extra)
      bases.extend(pad)
    assert (len(bases) % self._chunk_size) == 0
    num_chunks = len(bases) // self._chunk_size
    ids = []
    for chunk_idx in range(num_chunks):
      start_idx = chunk_idx * self._chunk_size
      end_idx = start_idx + self._chunk_size
      chunk = tuple(bases[start_idx:end_idx])
      if chunk not in self._tokens_to_ids:
        # Bug fix: wrap chunk in a 1-tuple. `"%s" % chunk` would try to
        # unpack the chunk tuple itself, so any unrecognized chunk with
        # chunk_size > 1 raised TypeError instead of the intended ValueError.
        raise ValueError("Unrecognized token %s" % (chunk,))
      ids.append(self._tokens_to_ids[chunk])
    return ids

  def decode(self, ids, strip_extraneous=False):
    """Decode a list of ids back into a base string.

    PAD bases are stripped from decoded chunks. Reserved ids are rendered as
    their reserved-token text unless strip_extraneous is True.
    """
    bases = []
    for idx in ids:
      if idx >= self._num_reserved_ids:
        chunk = self._ids_to_tokens[idx]
        if self.PAD in chunk:
          # Padding only ever occurs at the tail of a chunk; drop it.
          chunk = chunk[:chunk.index(self.PAD)]
      else:
        if strip_extraneous:
          continue
        chunk = [text_encoder.RESERVED_TOKENS[idx]]
      bases.extend(chunk)
    return "".join(bases)
class DelimitedDNAEncoder(DNAEncoder):
  """DNAEncoder for subsequences separated by a delimiter.

  Uses ',' as the default delimiter.
  """

  def __init__(self, delimiter=",", **kwargs):
    self._delimiter = delimiter
    # The delimiter participates in the vocabulary as a chunk tuple.
    self._delimiter_key = tuple(self._delimiter)
    super(DelimitedDNAEncoder, self).__init__(**kwargs)

  @property
  def delimiter(self):
    """The subsequence separator character."""
    return self._delimiter

  def _tokens(self):
    return super(DelimitedDNAEncoder, self)._tokens() + [self._delimiter_key]

  def encode(self, s):
    """Encode each delimited part and join the parts with the delimiter id."""
    ids = []
    for part in s.split(self.delimiter):
      ids.extend(super(DelimitedDNAEncoder, self).encode(part))
      ids.append(self._tokens_to_ids[self._delimiter_key])
    # Drop the trailing delimiter id appended after the final part.
    return ids[:-1]
| apache-2.0 |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-32-transformer/dataset_preproc/data_generators/translate_test.py | 7 | 2761 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translate generators test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tarfile
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import translate
import tensorflow as tf
class TranslateTest(tf.test.TestCase):
  """Tests translate.compile_data against synthetic tarball datasets."""

  # Each entry: [tarball name, (source-language file, target-language file)].
  DATASETS = [
      ["data1.tgz", ("train1.en", "train1.de")],
      ["data2.tgz", ("train2.en", "train2.de")],
      ["data3.tgz", ("train3.en", "train3.de")],
  ]

  @classmethod
  def setUpClass(cls):
    # Build three .tgz datasets of 10 parallel lines each under a fresh
    # temp dir; compile_data will later concatenate them in DATASETS order.
    tmp_dir = tf.test.get_temp_dir()
    compressed_dir = os.path.join(tmp_dir, "compressed")
    shutil.rmtree(tmp_dir)  # start from a clean temp dir
    tf.gfile.MakeDirs(compressed_dir)
    # 30 parallel (en, de) line pairs: "10".."39" paired with "100".."129".
    en_data = [str(i) for i in range(10, 40)]
    de_data = [str(i) for i in range(100, 130)]
    data = list(zip(en_data, de_data))
    for i, dataset in enumerate(cls.DATASETS):
      tar_file = dataset[0]
      en_file, de_file = [
          os.path.join(compressed_dir, name) for name in dataset[1]
      ]
      with tf.gfile.Open(en_file, "w") as en_f:
        with tf.gfile.Open(de_file, "w") as de_f:
          # Dataset i gets pairs [i*10, i*10+10).
          start = i * 10
          end = start + 10
          for en_line, de_line in data[start:end]:
            en_f.write(en_line)
            en_f.write("\n")
            de_f.write(de_line)
            de_f.write("\n")
      with tarfile.open(os.path.join(tmp_dir, tar_file), "w:gz") as tar_f:
        tar_f.add(en_file, os.path.basename(en_file))
        tar_f.add(de_file, os.path.basename(de_file))
    cls.tmp_dir = tmp_dir
    cls.data = data

  def testCompileData(self):
    """compile_data should concatenate all datasets, preserving pair order."""
    filename = "out"
    filepath = os.path.join(self.tmp_dir, filename)
    translate.compile_data(self.tmp_dir, self.DATASETS, filename)
    count = 0
    for i, example in enumerate(
        text_problems.text2text_txt_iterator(filepath + ".lang1",
                                             filepath + ".lang2")):
      expected = self.data[i]
      self.assertEqual(list(expected), [example["inputs"], example["targets"]])
      count += 1
    # Every pair must appear exactly once.
    self.assertEqual(count, len(self.data))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
ahamilton55/ansible | lib/ansible/modules/network/fortios/fortios_ipv4_policy.py | 28 | 11209 | #!/usr/bin/python
#
# Ansible module to manage IPv4 policy objects in fortigate devices
# (c) 2017, Benjamin Jolivot <bjolivot@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: fortios_ipv4_policy
version_added: "2.3"
author: "Benjamin Jolivot (@bjolivot)"
short_description: Manage IPv4 policy objects on Fortinet FortiOS firewall devices
description:
- This module provides management of firewall IPv4 policies on FortiOS devices.
extends_documentation_fragment: fortios
options:
id:
description:
- "Policy ID.
Warning: policy ID number is different than Policy sequence number.
The policy ID is the number assigned at policy creation.
The sequence number represents the order in which the Fortigate will evaluate the rule for policy enforcement,
and also the order in which rules are listed in the GUI and CLI.
These two numbers do not necessarily correlate: this module is based off policy ID.
TIP: policy ID can be viewed in the GUI by adding 'ID' to the display columns"
required: true
state:
description:
- Specifies if policy I(id) need to be added or deleted.
choices: ['present', 'absent']
default: present
src_intf:
description:
- Specifies source interface name.
default: any
dst_intf:
description:
- Specifies destination interface name.
default: any
src_addr:
description:
- Specifies source address (or group) object name(s). Required when I(state=present).
src_addr_negate:
description:
- Negate source address param.
default: false
choices: ["true", "false"]
dst_addr:
description:
- Specifies destination address (or group) object name(s). Required when I(state=present).
dst_addr_negate:
description:
- Negate destination address param.
default: false
choices: ["true", "false"]
policy_action:
description:
- Specifies accept or deny action policy. Required when I(state=present).
choices: ['accept', 'deny']
aliases: ['action']
service:
description:
- "Specifies policy service(s), could be a list (ex: ['MAIL','DNS']). Required when I(state=present)."
aliases:
- services
service_negate:
description:
- Negate policy service(s) defined in service value.
default: false
choices: ["true", "false"]
schedule:
description:
- defines policy schedule.
default: 'always'
nat:
description:
- Enable or disable Nat.
default: false
choices: ["true", "false"]
fixedport:
description:
- Use fixed port for nat.
default: false
choices: ["true", "false"]
poolname:
description:
- Specifies NAT pool name.
av_profile:
description:
- Specifies Antivirus profile name.
webfilter_profile:
description:
- Specifies Webfilter profile name.
ips_sensor:
description:
- Specifies IPS Sensor profile name.
application_list:
description:
- Specifies Application Control name.
logtraffic:
version_added: "2.4"
description:
- Logs sessions that matched policy.
default: utm
choices: ['disable', 'utm', 'all']
logtraffic_start:
version_added: "2.4"
description:
- Logs begining of session as well.
default: false
choices: ["true", "false"]
comment:
description:
- free text to describe policy.
notes:
- This module requires pyFG library.
"""
EXAMPLES = """
- name: Allow external DNS call
fortios_ipv4_policy:
host: 192.168.0.254
username: admin
password: password
id: 42
src_addr: internal_network
dst_addr: all
service: dns
nat: True
state: present
policy_action: accept
logtraffic: disable
- name: Public Web
fortios_ipv4_policy:
host: 192.168.0.254
username: admin
password: password
id: 42
src_addr: all
dst_addr: webservers
services:
- http
- https
state: present
policy_action: accept
"""
RETURN = """
firewall_address_config:
description: full firewall adresses config string
returned: always
type: string
change_string:
description: The commands executed by the module
returned: only if config changed
type: string
msg_error_list:
description: "List of errors returned by CLI (use -vvv for better readability)."
returned: only when error
type: string
"""
from ansible.module_utils.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.fortios import backup, AnsibleFortios
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def main():
    """Create or delete a FortiOS IPv4 firewall policy identified by its id."""
    argument_spec = dict(
        comment = dict(type='str'),
        id = dict(type='int', required=True),
        src_intf = dict(default='any'),
        dst_intf = dict(default='any'),
        state = dict(choices=['present', 'absent'], default='present'),
        src_addr = dict(type='list'),
        dst_addr = dict(type='list'),
        src_addr_negate = dict(type='bool', default=False),
        dst_addr_negate = dict(type='bool', default=False),
        policy_action = dict(choices=['accept','deny'], aliases=['action']),
        service = dict(aliases=['services'], type='list'),
        service_negate = dict(type='bool', default=False),
        schedule = dict(type='str', default='always'),
        nat = dict(type='bool', default=False),
        fixedport = dict(type='bool', default=False),
        poolname = dict(type='str'),
        av_profile = dict(type='str'),
        webfilter_profile = dict(type='str'),
        ips_sensor = dict(type='str'),
        application_list = dict(type='str'),
        logtraffic = dict(choices=['disable','all','utm'], default='utm'),
        logtraffic_start = dict(type='bool', default=False),
    )

    #merge global required_if & argument_spec from module_utils/fortios.py
    argument_spec.update(fortios_argument_spec)

    # src/dst/action/service are mandatory only when creating a policy.
    ipv4_policy_required_if = [
        ['state', 'present', ['src_addr', 'dst_addr', 'policy_action', 'service']],
    ]

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=fortios_required_if + ipv4_policy_required_if ,
    )

    #init forti object
    fortigate = AnsibleFortios(module)

    #Security policies root path
    config_path = 'firewall policy'

    #test params
    #NAT related: poolname/fixedport are only meaningful with NAT enabled.
    if not module.params['nat']:
        if module.params['poolname']:
            module.fail_json(msg='Poolname param requires NAT to be true.')
        if module.params['fixedport']:
            module.fail_json(msg='Fixedport param requires NAT to be true.')

    #log options: session-start logging only makes sense when logging all.
    if module.params['logtraffic_start']:
        if not module.params['logtraffic'] == 'all':
            module.fail_json(msg='Logtraffic_start param requires logtraffic to be set to "all".')

    #id must be str(int) for pyFG to work
    policy_id = str(module.params['id'])

    #load config
    fortigate.load_config(config_path)

    #Absent State: drop the policy block from the candidate config.
    if module.params['state'] == 'absent':
        fortigate.candidate_config[config_path].del_block(policy_id)

    #Present state: build the policy block parameter by parameter.
    elif module.params['state'] == 'present':
        new_policy = fortigate.get_empty_configuration_block(policy_id, 'edit')

        #src / dest / service / interfaces
        # Address and service lists are rendered as space-separated
        # double-quoted names, as the FortiOS CLI expects.
        new_policy.set_param('srcintf', '"%s"' % (module.params['src_intf']))
        new_policy.set_param('dstintf', '"%s"' % (module.params['dst_intf']))
        new_policy.set_param('srcaddr', " ".join('"' + item + '"' for item in module.params['src_addr']))
        new_policy.set_param('dstaddr', " ".join('"' + item + '"' for item in module.params['dst_addr']))
        new_policy.set_param('service', " ".join('"' + item + '"' for item in module.params['service']))

        # negate src / dest / service
        if module.params['src_addr_negate']:
            new_policy.set_param('srcaddr-negate', 'enable')
        if module.params['dst_addr_negate']:
            new_policy.set_param('dstaddr-negate', 'enable')
        if module.params['service_negate']:
            new_policy.set_param('service-negate', 'enable')

        # action
        new_policy.set_param('action', '%s' % (module.params['policy_action']))

        #logging
        new_policy.set_param('logtraffic', '%s' % (module.params['logtraffic']))
        # logtraffic-start is only emitted when logging all traffic
        # (validated above: logtraffic_start implies logtraffic == 'all').
        if module.params['logtraffic'] == 'all':
            if module.params['logtraffic_start']:
                new_policy.set_param('logtraffic-start', 'enable')
            else:
                new_policy.set_param('logtraffic-start', 'disable')

        # Schedule
        new_policy.set_param('schedule', '%s' % (module.params['schedule']))

        #NAT
        if module.params['nat']:
            new_policy.set_param('nat', 'enable')
            if module.params['fixedport']:
                new_policy.set_param('fixedport', 'enable')
            if module.params['poolname'] is not None:
                new_policy.set_param('ippool', 'enable')
                new_policy.set_param('poolname', '"%s"' % (module.params['poolname']))

        #security profiles:
        if module.params['av_profile'] is not None:
            new_policy.set_param('av-profile', '"%s"' % (module.params['av_profile']))
        if module.params['webfilter_profile'] is not None:
            new_policy.set_param('webfilter-profile', '"%s"' % (module.params['webfilter_profile']))
        if module.params['ips_sensor'] is not None:
            new_policy.set_param('ips-sensor', '"%s"' % (module.params['ips_sensor']))
        if module.params['application_list'] is not None:
            new_policy.set_param('application-list', '"%s"' % (module.params['application_list']))

        # comment
        if module.params['comment'] is not None:
            new_policy.set_param('comment', '"%s"' % (module.params['comment']))

        #add the new policy to the device
        fortigate.add_block(policy_id, new_policy)

    #Apply changes
    fortigate.apply_changes()
| gpl-3.0 |
sv-dev1/odoo | addons/project_issue_sheet/__init__.py | 442 | 1105 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue_sheet
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
matbu/ansible-modules-extras | packaging/os/pkgin.py | 11 | 11808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
#
# Written by Shaun Zinck
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS, NetBSD, et al.
description:
- "The standard package manager for SmartOS, but also usable on NetBSD
or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
version_added: "1.0"
author:
- "Larry Gilbert (L2G)"
- "Shaun Zinck (@szinck)"
- "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
well. New to Ansible 1.9: check-mode support."
options:
name:
description:
- Name of package to install/remove;
- multiple names may be given, separated by commas
required: false
default: null
state:
description:
- Intended state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
update_cache:
description:
- Update repository database. Can be run with other steps or on it's own.
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
upgrade:
description:
- Upgrade main packages to their newer versions
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
full_upgrade:
description:
- Upgrade all packages to their newer versions
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
clean:
description:
- Clean packages cache
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
force:
description:
- Force package reinstall
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.1"
'''
EXAMPLES = '''
# install package foo
- pkgin:
name: foo
state: present
# Update database and install "foo" package
- pkgin:
name: foo
update_cache: yes
# remove package foo
- pkgin:
name: foo
state: absent
# remove packages foo and bar
- pkgin:
name: foo,bar
state: absent
# Update repositories as a separate step
- pkgin:
update_cache: yes
# Upgrade main packages (equivalent to C(pkgin upgrade))
- pkgin:
upgrade: yes
# Upgrade all packages (equivalent to C(pkgin full-upgrade))
- pkgin:
full_upgrade: yes
# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
- pkgin:
full_upgrade: yes
force: yes
# clean packages cache (equivalent to C(pkgin clean))
- pkgin:
clean: yes
'''
import re
def query_package(module, name):
    """Search for the package by name.

    Possible return values:
    * "present"  - installed, no upgrade needed
    * "outdated" - installed, but can be upgraded
    * False      - not installed or not found
    """
    # test whether '-p' (parsable) flag is supported.
    rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)

    if rc == 0:
        pflag = '-p'
        splitchar = ';'
    else:
        # Older pkgin: fall back to the human-readable, space-separated output.
        pflag = ''
        splitchar = ' '

    # Use "pkgin search" to find the package. The regular expression will
    # only match on the complete name.
    # NOTE(review): `name` is interpolated into the regex unescaped; a name
    # containing regex metacharacters would be misinterpreted -- confirm
    # pkgsrc package names cannot contain such characters.
    rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))

    # rc will not be 0 unless the search was a success
    if rc == 0:
        # Search results may contain more than one line (e.g., 'emacs'), so iterate
        # through each line to see if we have a match.
        packages = out.split('\n')

        for package in packages:
            # Break up line at spaces. The first part will be the package with its
            # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
            # of the package:
            #   ''  - not installed
            #   '<' - installed but out of date
            #   '=' - installed and up to date
            #   '>' - installed but newer than the repository version
            pkgname_with_version, raw_state = package.split(splitchar)[0:2]

            # Search for package, stripping version
            # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
            pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)

            # Do not proceed unless we have a match
            if not pkg_search_obj:
                continue

            # Grab matched string
            pkgname_without_version = pkg_search_obj.group(1)

            if name != pkgname_without_version:
                continue

            # The package was found; now return its state
            if raw_state == '<':
                return 'outdated'
            elif raw_state == '=' or raw_state == '>':
                return 'present'
            else:
                return False

            # no fall-through

    # No packages were matched, so return False
    return False
def format_action_message(module, action, count):
    """Build the human-readable result message for a package action.

    :param module: AnsibleModule instance (used only to detect check mode)
    :param action: past-tense verb, e.g. "installed" or "removed"
    :param count: number of packages acted upon
    :return: e.g. "removed 1 package" or "would have installed 3 packages"
    """
    # Renamed the substitution dict: the original shadowed the builtin vars().
    subst = {"actioned": action, "count": count}

    if module.check_mode:
        message = "would have %(actioned)s %(count)d package" % subst
    else:
        message = "%(actioned)s %(count)d package" % subst

    # Naive pluralization is sufficient: the noun is always "package".
    if count == 1:
        return message
    else:
        return message + "s"
def format_pkgin_command(module, command, package=None):
    """Build a complete pkgin command line.

    Not all commands take a package argument, so cover this up by passing
    an empty string. Some commands (e.g. 'update') will ignore extra
    arguments, however this behaviour cannot be relied on for others.

    :param module: AnsibleModule instance (check_mode and 'force' param)
    :param command: pkgin subcommand, e.g. "install", "remove", "update"
    :param package: optional package name appended to the command
    :return: full command string ready for module.run_command
    """
    if package is None:
        package = ""

    if module.params["force"]:
        force = "-F"
    else:
        force = ""

    # Renamed the substitution dict: the original shadowed the builtin vars().
    subst = {"pkgin": PKGIN_PATH,
             "command": command,
             "package": package,
             "force": force}

    if module.check_mode:
        # Dry run: ask pkgin what it would do without touching the system.
        return "%(pkgin)s -n %(command)s %(package)s" % subst
    else:
        return "%(pkgin)s -y %(force)s %(command)s %(package)s" % subst
def remove_packages(module, packages):
    """Remove each named package, reporting the package that failed if any."""
    removed = 0

    for package in packages:
        # Nothing to do if the package is not installed.
        if not query_package(module, package):
            continue

        rc, out, err = module.run_command(
            format_pkgin_command(module, "remove", package))

        # In check mode pkgin does not act, so the package is still present.
        if not module.check_mode and query_package(module, package):
            module.fail_json(msg="failed to remove %s: %s" % (package, out))

        removed += 1

    if removed > 0:
        module.exit_json(changed=True,
                         msg=format_action_message(module, "removed", removed))

    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, packages):
    """Install each named package, reporting the package that failed if any."""
    installed = 0

    for package in packages:
        # Skip packages that are already present.
        if query_package(module, package):
            continue

        rc, out, err = module.run_command(
            format_pkgin_command(module, "install", package))

        # In check mode pkgin does not act, so the package stays absent.
        if not module.check_mode and not query_package(module, package):
            module.fail_json(msg="failed to install %s: %s" % (package, out))

        installed += 1

    if installed > 0:
        module.exit_json(changed=True,
                         msg=format_action_message(module, "installed", installed))

    module.exit_json(changed=False, msg="package(s) already present")
def update_package_db(module):
    """Refresh the pkgin repository database.

    :param module: AnsibleModule instance
    :return: (changed, message) tuple; fails the module if pkgin errors
    """
    rc, out, err = module.run_command(
        format_pkgin_command(module, "update"))

    if rc == 0:
        if re.search(r'database for.*is up-to-date\n$', out):
            # Fixed message typo (was "datebase is up-to-date").
            return False, "database is up-to-date"
        else:
            return True, "updated repository database"
    else:
        module.fail_json(msg="could not update package db")
def do_upgrade_packages(module, full=False):
    # Run "pkgin upgrade" (main packages only) or, when full is True,
    # "pkgin full-upgrade" (all packages).
    if full:
        cmd = "full-upgrade"
    else:
        cmd = "upgrade"

    rc, out, err = module.run_command(
        format_pkgin_command(module, cmd))

    if rc == 0:
        if re.search('^nothing to do.\n$', out):
            module.exit_json(changed=False, msg="nothing left to upgrade")
        # NOTE(review): when rc == 0 and packages *were* upgraded, this
        # function deliberately falls through without exiting; main() is
        # expected to report the change afterwards -- confirm callers
        # rely on this before restructuring.
    else:
        module.fail_json(msg="could not %s packages" % cmd)
def upgrade_packages(module):
    """Upgrade main packages (equivalent to "pkgin upgrade")."""
    do_upgrade_packages(module, full=False)


def full_upgrade_packages(module):
    """Upgrade all packages (equivalent to "pkgin full-upgrade")."""
    do_upgrade_packages(module, full=True)
def clean_cache(module):
    """Remove downloaded package files from pkgin's cache."""
    rc, out, err = module.run_command(
        format_pkgin_command(module, "clean"))

    if rc != 0:
        module.fail_json(msg="could not clean package cache")

    # pkgin gives no indication whether anything was actually removed,
    # so report a change unconditionally.
    module.exit_json(changed=True, msg="cleaned caches")
def main():
    """Entry point: dispatch update/upgrade/clean actions, then install/remove."""
    # The update/upgrade/clean flags make "name" optional; at least one
    # action must be requested (enforced via required_one_of).
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"]),
            name = dict(aliases=["pkg"], type='list'),
            update_cache = dict(default='no', type='bool'),
            upgrade = dict(default='no', type='bool'),
            full_upgrade = dict(default='no', type='bool'),
            clean = dict(default='no', type='bool'),
            force = dict(default='no', type='bool')),
        required_one_of = [['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
        supports_check_mode = True)

    global PKGIN_PATH
    PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])

    # Force the C locale so that parsing pkgin output is not locale-dependent.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    p = module.params

    # Each standalone action exits immediately unless package names were
    # also given, in which case processing continues below.
    if p["update_cache"]:
        c, msg = update_package_db(module)
        if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
            module.exit_json(changed=c, msg=msg)

    if p["upgrade"]:
        upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded packages')

    if p["full_upgrade"]:
        full_upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded all packages')

    if p["clean"]:
        clean_cache(module)
        if not p['name']:
            module.exit_json(changed=True, msg='cleaned caches')

    pkgs = p["name"]

    if p["state"] == "present":
        install_packages(module, pkgs)

    elif p["state"] == "absent":
        remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
havard024/prego | venv/lib/python2.7/site-packages/unidecode/x09f.py | 252 | 4509 | data = (
'Cu ', # 0x00
'Qu ', # 0x01
'Chao ', # 0x02
'Wa ', # 0x03
'Zhu ', # 0x04
'Zhi ', # 0x05
'Mang ', # 0x06
'Ao ', # 0x07
'Bie ', # 0x08
'Tuo ', # 0x09
'Bi ', # 0x0a
'Yuan ', # 0x0b
'Chao ', # 0x0c
'Tuo ', # 0x0d
'Ding ', # 0x0e
'Mi ', # 0x0f
'Nai ', # 0x10
'Ding ', # 0x11
'Zi ', # 0x12
'Gu ', # 0x13
'Gu ', # 0x14
'Dong ', # 0x15
'Fen ', # 0x16
'Tao ', # 0x17
'Yuan ', # 0x18
'Pi ', # 0x19
'Chang ', # 0x1a
'Gao ', # 0x1b
'Qi ', # 0x1c
'Yuan ', # 0x1d
'Tang ', # 0x1e
'Teng ', # 0x1f
'Shu ', # 0x20
'Shu ', # 0x21
'Fen ', # 0x22
'Fei ', # 0x23
'Wen ', # 0x24
'Ba ', # 0x25
'Diao ', # 0x26
'Tuo ', # 0x27
'Tong ', # 0x28
'Qu ', # 0x29
'Sheng ', # 0x2a
'Shi ', # 0x2b
'You ', # 0x2c
'Shi ', # 0x2d
'Ting ', # 0x2e
'Wu ', # 0x2f
'Nian ', # 0x30
'Jing ', # 0x31
'Hun ', # 0x32
'Ju ', # 0x33
'Yan ', # 0x34
'Tu ', # 0x35
'Ti ', # 0x36
'Xi ', # 0x37
'Xian ', # 0x38
'Yan ', # 0x39
'Lei ', # 0x3a
'Bi ', # 0x3b
'Yao ', # 0x3c
'Qiu ', # 0x3d
'Han ', # 0x3e
'Wu ', # 0x3f
'Wu ', # 0x40
'Hou ', # 0x41
'Xi ', # 0x42
'Ge ', # 0x43
'Zha ', # 0x44
'Xiu ', # 0x45
'Weng ', # 0x46
'Zha ', # 0x47
'Nong ', # 0x48
'Nang ', # 0x49
'Qi ', # 0x4a
'Zhai ', # 0x4b
'Ji ', # 0x4c
'Zi ', # 0x4d
'Ji ', # 0x4e
'Ji ', # 0x4f
'Qi ', # 0x50
'Ji ', # 0x51
'Chi ', # 0x52
'Chen ', # 0x53
'Chen ', # 0x54
'He ', # 0x55
'Ya ', # 0x56
'Ken ', # 0x57
'Xie ', # 0x58
'Pao ', # 0x59
'Cuo ', # 0x5a
'Shi ', # 0x5b
'Zi ', # 0x5c
'Chi ', # 0x5d
'Nian ', # 0x5e
'Ju ', # 0x5f
'Tiao ', # 0x60
'Ling ', # 0x61
'Ling ', # 0x62
'Chu ', # 0x63
'Quan ', # 0x64
'Xie ', # 0x65
'Ken ', # 0x66
'Nie ', # 0x67
'Jiu ', # 0x68
'Yao ', # 0x69
'Chuo ', # 0x6a
'Kun ', # 0x6b
'Yu ', # 0x6c
'Chu ', # 0x6d
'Yi ', # 0x6e
'Ni ', # 0x6f
'Cuo ', # 0x70
'Zou ', # 0x71
'Qu ', # 0x72
'Nen ', # 0x73
'Xian ', # 0x74
'Ou ', # 0x75
'E ', # 0x76
'Wo ', # 0x77
'Yi ', # 0x78
'Chuo ', # 0x79
'Zou ', # 0x7a
'Dian ', # 0x7b
'Chu ', # 0x7c
'Jin ', # 0x7d
'Ya ', # 0x7e
'Chi ', # 0x7f
'Chen ', # 0x80
'He ', # 0x81
'Ken ', # 0x82
'Ju ', # 0x83
'Ling ', # 0x84
'Pao ', # 0x85
'Tiao ', # 0x86
'Zi ', # 0x87
'Ken ', # 0x88
'Yu ', # 0x89
'Chuo ', # 0x8a
'Qu ', # 0x8b
'Wo ', # 0x8c
'Long ', # 0x8d
'Pang ', # 0x8e
'Gong ', # 0x8f
'Pang ', # 0x90
'Yan ', # 0x91
'Long ', # 0x92
'Long ', # 0x93
'Gong ', # 0x94
'Kan ', # 0x95
'Ta ', # 0x96
'Ling ', # 0x97
'Ta ', # 0x98
'Long ', # 0x99
'Gong ', # 0x9a
'Kan ', # 0x9b
'Gui ', # 0x9c
'Qiu ', # 0x9d
'Bie ', # 0x9e
'Gui ', # 0x9f
'Yue ', # 0xa0
'Chui ', # 0xa1
'He ', # 0xa2
'Jue ', # 0xa3
'Xie ', # 0xa4
'Yu ', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| mit |
broadinstitute/PyGithub | github/CommitStats.py | 74 | 3093 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class CommitStats(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents per-commit statistics (additions, deletions and
    their total) as embedded in commit payloads of the GitHub v3 API.
    """

    # All three counters share identical plumbing, so attribute
    # initialization and population below are table-driven.
    _STAT_KEYS = ("additions", "deletions", "total")

    @property
    def additions(self):
        """
        :type: integer
        """
        return self._additions.value

    @property
    def deletions(self):
        """
        :type: integer
        """
        return self._deletions.value

    @property
    def total(self):
        """
        :type: integer
        """
        return self._total.value

    def _initAttributes(self):
        # Every counter starts as NotSet; _useAttributes fills in whatever
        # the API response actually carried.
        for key in self._STAT_KEYS:
            setattr(self, "_" + key, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        for key in self._STAT_KEYS:
            if key in attributes:  # pragma no branch
                setattr(self, "_" + key,
                        self._makeIntAttribute(attributes[key]))
| gpl-3.0 |
sharma1nitish/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/png_unittest.py | 124 | 5663 | # Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for png.py."""
import unittest2 as unittest
from png import PNGChecker
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
class MockSCMDetector(object):
    """Canned SCM detector: reports a fixed SCM name and property value."""

    def __init__(self, scm, prop=None):
        # Remember the SCM name ('git'/'svn') and the value every propget
        # call should answer with.
        self._scm, self._prop = scm, prop

    def display_name(self):
        """Return the configured SCM name."""
        return self._scm

    def propget(self, pname, path):
        """Return the canned property value; pname and path are ignored."""
        return self._prop
class PNGCheckerTest(unittest.TestCase):
    """Tests PNGChecker class."""
    def test_init(self):
        """Test __init__() method."""
        def mock_handle_style_error(self):
            pass
        # The checker should simply record the file path and error handler
        # it was constructed with.
        checker = PNGChecker("test/config", mock_handle_style_error, MockSCMDetector('git'), MockSystemHost())
        self.assertEqual(checker._file_path, "test/config")
        self.assertEqual(checker._handle_style_error, mock_handle_style_error)
    def test_check(self):
        """Exercise check() across SVN/Git setups and auto-props configs."""
        errors = []
        def mock_handle_style_error(line_number, category, confidence, message):
            # Collect reported style errors so each scenario can assert on
            # how many (and which) were produced.
            error = (line_number, category, confidence, message)
            errors.append(error)
        # Scenario: SVN checkout without the svn:mime-type property set ->
        # exactly one error telling the user to set it.
        file_path = ''
        fs = MockFileSystem()
        scm = MockSCMDetector('svn')
        checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0],
                         (0, 'image/png', 5, 'Set the svn:mime-type property (svn propset svn:mime-type image/png ).'))
        # Scenario: Git checkout with auto-props enabled and a *.png
        # mime-type rule -> no errors.
        files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
        fs = MockFileSystem(files)
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 0)
        # Scenario: the auto-props line is commented out -> one error.
        files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes'}
        fs = MockFileSystem(files)
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 1)
        # Scenarios: an active 'enable-auto-props = yes' mixed with a
        # commented-out copy (either order) -> the active line wins.
        files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n#enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
        fs = MockFileSystem(files)
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 0)
        files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes\nenable-auto-props = yes\n*.png = svn:mime-type=image/png'}
        fs = MockFileSystem(files)
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 0)
        # Scenario: auto-props explicitly disabled -> one error.
        files = {'/Users/mock/.subversion/config': 'enable-auto-props = no'}
        fs = MockFileSystem(files)
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 1)
        # Scenario: a plain .png file on the same config -> one error
        # (presumably still the auto-props issue; checksum not required).
        file_path = "foo.png"
        fs.write_binary_file(file_path, "Dummy binary data")
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 1)
        # Scenario: '-expected.png' baselines additionally require an
        # embedded checksum -> a second error is reported.
        file_path = "foo-expected.png"
        fs.write_binary_file(file_path, "Dummy binary data")
        scm = MockSCMDetector('git')
        errors = []
        checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
        checker.check()
        self.assertEqual(len(errors), 2)
        self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
| bsd-3-clause |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py | 104 | 6037 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import lazy_adam_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """Reference NumPy implementation of a single Adam update step.

  Returns the updated (param, m, v) triple for gradient g_t at step t,
  using the bias-corrected learning rate from the Adam paper.
  """
  # Exponential moving averages of the gradient and its square.
  m_t = m * beta1 + g_t * (1 - beta1)
  v_t = v * beta2 + (g_t ** 2) * (1 - beta2)
  # Fold the bias-correction terms into an effective step size.
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t
class AdamOptimizerTest(test.TestCase):
  """Tests LazyAdamOptimizer's sparse updates against the NumPy reference."""
  def testSparse(self):
    # Run three LazyAdam steps on IndexedSlices gradients and check that
    # variables track the dense reference (adam_update_numpy) per dtype.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([2]))
        opt = lazy_adam_optimizer.LazyAdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
  def testSparseDevicePlacement(self):
    # Minimizing a gathered sum produces sparse-update ops; this checks
    # they can all be placed on a GPU (when one is available).
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with self.test_session(force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
        optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
        minimize_op = optimizer.minimize(gathered_sum)
        variables.global_variables_initializer().run()
        minimize_op.run()
  def testSparseRepeatedIndices(self):
    # A gradient with the same index repeated must behave like the
    # equivalent single aggregated gradient for that index.
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
        repeated_update = repeated_update_opt.apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
        aggregated_update = aggregated_update_opt.apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())
if __name__ == "__main__":
test.main()
| mit |
sajeeshcs/nested_projects_keystone | keystone/common/validation/validators.py | 24 | 2735 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Internal implementation of request body validating middleware."""
import jsonschema
from keystone import exception
from keystone.i18n import _
class SchemaValidator(object):
    """Resource reference validator class.

    Wraps a jsonschema Draft 4 validator (with format checking enabled)
    and converts validation failures into keystone's
    SchemaValidationError with a WSME-style message.
    """

    validator = None
    validator_org = jsonschema.Draft4Validator

    def __init__(self, schema):
        """Build a validator for *schema* with format checking enabled."""
        # NOTE(lbragstad): If at some point in the future we want to extend
        # our validators to include something specific we need to check for,
        # we can do it here. Nova's V3 API validators extend the validator to
        # include `self._validate_minimum` and `self._validate_maximum`. This
        # would be handy if we needed to check for something the jsonschema
        # didn't by default. See the Nova V3 validator for details on how this
        # is done.
        validators = {}
        validator_cls = jsonschema.validators.extend(self.validator_org,
                                                     validators)
        fc = jsonschema.FormatChecker()
        self.validator = validator_cls(schema, format_checker=fc)

    def validate(self, *args, **kwargs):
        """Validate the given arguments against the schema.

        :raises keystone.exception.SchemaValidationError: if validation
            fails; the detail names the offending field when known.
        """
        try:
            self.validator.validate(*args, **kwargs)
        except jsonschema.ValidationError as ex:
            # NOTE: For whole OpenStack message consistency, this error
            # message has been written in a format consistent with WSME.
            # ex.path is a deque of path segments; it is non-empty exactly
            # when the failing property could be identified.
            if ex.path:
                # NOTE(lbragstad): Here we could think about using iter_errors
                # as a method of providing invalid parameters back to the
                # user.
                # TODO(lbragstad): If the value of a field is confidential or
                # too long, then we should build the masking in here so that
                # we don't expose sensitive user information in the event it
                # fails validation.
                detail = _("Invalid input for field '%(path)s'. The value is "
                           "'%(value)s'.") % {'path': ex.path.pop(),
                                              'value': ex.instance}
            else:
                detail = ex.message
            raise exception.SchemaValidationError(detail=detail)
| apache-2.0 |
yohanko88/gem5-DC | src/python/m5/SimObject.py | 13 | 57534 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-20013 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Nathan Binkert
# Andreas Hansson
import sys
from types import FunctionType, MethodType, ModuleType
import m5
from m5.util import *
# Have to import params up top since Param is referenced on initial
# load (when SimObject class references Param to create a class
# variable, the 'name' param)...
from m5.params import *
# There are a few things we need that aren't in params.__all__ since
# normal users don't need them
from m5.params import ParamDesc, VectorParamDesc, \
isNullPointer, SimObjectVector, Port
from m5.proxy import *
from m5.proxy import isproxy
#####################################################################
#
# M5 Python Configuration Utility
#
# The basic idea is to write simple Python programs that build Python
# objects corresponding to M5 SimObjects for the desired simulation
# configuration. For now, the Python emits a .ini file that can be
# parsed by M5. In the future, some tighter integration between M5
# and the Python interpreter may allow bypassing the .ini file.
#
# Each SimObject class in M5 is represented by a Python class with the
# same name. The Python inheritance tree mirrors the M5 C++ tree
# (e.g., SimpleCPU derives from BaseCPU in both cases, and all
# SimObjects inherit from a single SimObject base class). To specify
# an instance of an M5 SimObject in a configuration, the user simply
# instantiates the corresponding Python object. The parameters for
# that SimObject are given by assigning to attributes of the Python
# object, either using keyword assignment in the constructor or in
# separate assignment statements. For example:
#
# cache = BaseCache(size='64KB')
# cache.hit_latency = 3
# cache.assoc = 8
#
# The magic lies in the mapping of the Python attributes for SimObject
# classes to the actual SimObject parameter specifications. This
# allows parameter validity checking in the Python code. Continuing
# the example above, the statements "cache.blurfl=3" or
# "cache.assoc='hello'" would both result in runtime errors in Python,
# since the BaseCache object has no 'blurfl' parameter and the 'assoc'
# parameter requires an integer, respectively. This magic is done
# primarily by overriding the special __setattr__ method that controls
# assignment to object attributes.
#
# Once a set of Python objects have been instantiated in a hierarchy,
# calling 'instantiate(obj)' (where obj is the root of the hierarchy)
# will generate a .ini file.
#
#####################################################################
# list of all SimObject classes, keyed by class name; populated by
# MetaSimObject.__new__ for every class that declares a 'type'
allClasses = {}
# dict to look up instantiated SimObjects based on their config path
instanceDict = {}
# Did any of the SimObjects lack a header file?
noCxxHeader = False
def public_value(key, value):
    """Return True when a class-dict entry should bypass param filtering.

    Underscore-prefixed names and ordinary Python machinery (functions,
    methods, modules, classmethods and classes) are passed straight
    through to the real class dict rather than being treated as
    parameter/port assignments.
    """
    passthrough_types = (FunctionType, MethodType, ModuleType,
                         classmethod, type)
    if key.startswith('_'):
        return True
    return isinstance(value, passthrough_types)
def createCxxConfigDirectoryEntryFile(code, name, simobj, is_header):
    """Emit the C++ cxx-config glue for one SimObject type.

    Writes either the header (is_header=True) or the .cc implementation
    for ${name}CxxConfigParams: a CxxConfigParams subclass with a nested
    CxxConfigDirectoryEntry describing the SimObject's params and ports.
    'code' is the code formatter the generated text is written into.
    """
    entry_class = 'CxxConfigDirectoryEntry_%s' % name
    param_class = '%sCxxConfigParams' % name
    code('#include "params/%s.hh"' % name)
    # The .cc file needs the headers of every SimObject-valued parameter;
    # non-SimObject param types declare their own prerequisites.
    if not is_header:
        for param in simobj._params.values():
            if isSimObjectClass(param.ptype):
                code('#include "%s"' % param.ptype._value_dict['cxx_header'])
                code('#include "params/%s.hh"' % param.ptype.__name__)
            else:
                param.ptype.cxx_ini_predecls(code)
    # Header mode opens the params class declaration (with the nested
    # DirectoryEntry); .cc mode pulls in implementation headers and opens
    # the DirectoryEntry constructor definition.  member_prefix and
    # end_of_decl make each later emission valid as both a declaration
    # (header) and an out-of-line definition (.cc).
    if is_header:
        member_prefix = ''
        end_of_decl = ';'
        code('#include "sim/cxx_config.hh"')
        code()
        code('class ${param_class} : public CxxConfigParams,'
            ' public ${name}Params')
        code('{')
        code('  private:')
        code.indent()
        code('class DirectoryEntry : public CxxConfigDirectoryEntry')
        code('{')
        code('  public:')
        code.indent()
        code('DirectoryEntry();');
        code()
        code('CxxConfigParams *makeParamsObject() const')
        code('{ return new ${param_class}; }')
        code.dedent()
        code('};')
        code()
        code.dedent()
        code('  public:')
        code.indent()
    else:
        member_prefix = '%s::' % param_class
        end_of_decl = ''
        code('#include "%s"' % simobj._value_dict['cxx_header'])
        code('#include "base/str.hh"')
        code('#include "cxx_config/${name}.hh"')
        if simobj._ports.values() != []:
            code('#include "mem/mem_object.hh"')
            code('#include "mem/port.hh"')
        code()
        code('${member_prefix}DirectoryEntry::DirectoryEntry()');
        code('{')
    def cxx_bool(b):
        # Render a Python bool as a C++ 'true'/'false' literal.
        return 'true' if b else 'false'
    # DirectoryEntry constructor body: register a descriptor for every
    # parameter and port of this SimObject type.
    code.indent()
    for param in simobj._params.values():
        is_vector = isinstance(param, m5.params.VectorParamDesc)
        is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
        code('parameters["%s"] = new ParamDesc("%s", %s, %s);' %
            (param.name, param.name, cxx_bool(is_vector),
            cxx_bool(is_simobj)));
    for port in simobj._ports.values():
        is_vector = isinstance(port, m5.params.VectorPort)
        is_master = port.role == 'MASTER'
        code('ports["%s"] = new PortDesc("%s", %s, %s);' %
            (port.name, port.name, cxx_bool(is_vector),
            cxx_bool(is_master)))
    code.dedent()
    code('}')
    code()
    # setSimObject: bind a scalar SimObject-valued parameter by name,
    # dynamic_cast-checking the supplied object's type.
    code('bool ${member_prefix}setSimObject(const std::string &name,')
    code('    SimObject *simObject)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if is_simobj and not is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                code('this->${{param.name}} = '
                    'dynamic_cast<${{param.ptype.cxx_type}}>(simObject);')
                code('if (simObject && !this->${{param.name}})')
                code('    ret = false;')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setSimObjectVector: same as above for vector-of-SimObject params.
    code('bool ${member_prefix}setSimObjectVector('
        'const std::string &name,')
    code('    const std::vector<SimObject *> &simObjects)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if is_simobj and is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                code('this->${{param.name}}.clear();')
                code('for (auto i = simObjects.begin(); '
                    'ret && i != simObjects.end(); i ++)')
                code('{')
                code.indent()
                code('${{param.ptype.cxx_type}} object = '
                    'dynamic_cast<${{param.ptype.cxx_type}}>(*i);')
                code('if (*i && !object)')
                code('    ret = false;')
                code('else')
                code('    this->${{param.name}}.push_back(object);')
                code.dedent()
                code('}')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setName: record the instance name and clear the Python-object link.
    code('void ${member_prefix}setName(const std::string &name_)'
        '${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('this->name = name_;')
        code('this->pyobj = NULL;')
        code.dedent()
        code('}')
    if is_header:
        code('const std::string &${member_prefix}getName()')
        code('{ return this->name; }')
    code()
    # setParam: parse a scalar non-SimObject parameter from its INI string
    # via the param type's own cxx_ini_parse hook.
    code('bool ${member_prefix}setParam(const std::string &name,')
    code('    const std::string &value, const Flags flags)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if not is_simobj and not is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                param.ptype.cxx_ini_parse(code,
                    'value', 'this->%s' % param.name, 'ret =')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setParamVector: element-wise parse for vector non-SimObject params.
    code('bool ${member_prefix}setParamVector('
        'const std::string &name,')
    code('    const std::vector<std::string> &values,')
    code('    const Flags flags)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if not is_simobj and is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                code('${{param.name}}.clear();')
                code('for (auto i = values.begin(); '
                    'ret && i != values.end(); i ++)')
                code('{')
                code.indent()
                code('${{param.ptype.cxx_type}} elem;')
                param.ptype.cxx_ini_parse(code,
                    '*i', 'elem', 'ret =')
                code('if (ret)')
                code('    this->${{param.name}}.push_back(elem);')
                code.dedent()
                code('}')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setPortConnectionCount: record how many peers each port connects to.
    code('bool ${member_prefix}setPortConnectionCount('
        'const std::string &name,')
    code('    unsigned int count)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false)')
        code('    ;')
        for port in simobj._ports.values():
            code('else if (name == "${{port.name}}")')
            code('    this->port_${{port.name}}_connection_count = count;')
        code('else')
        code('    ret = false;')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # simObjectCreate: abstract SimObjects cannot be instantiated, so they
    # return NULL; concrete ones delegate to the params' create().
    code('SimObject *${member_prefix}simObjectCreate()${end_of_decl}')
    if not is_header:
        code('{')
        if hasattr(simobj, 'abstract') and simobj.abstract:
            code('    return NULL;')
        else:
            code('    return this->create();')
        code('}')
    if is_header:
        code()
        code('static CxxConfigDirectoryEntry'
            ' *${member_prefix}makeDirectoryEntry()')
        code('{ return new DirectoryEntry; }')
    # Close the class declaration opened in header mode.
    if is_header:
        code.dedent()
        code('};')
# The metaclass for SimObject. This class controls how new classes
# that derive from SimObject are instantiated, and provides inherited
# class behavior (just like a class controls how instances of that
# class are instantiated, and provides inherited instance behavior).
class MetaSimObject(type):
# Attributes that can be set only at initialization time
init_keywords = { 'abstract' : bool,
'cxx_class' : str,
'cxx_type' : str,
'cxx_header' : str,
'type' : str,
'cxx_bases' : list }
# Attributes that can be set any time
keywords = { 'check' : FunctionType }
# __new__ is called before __init__, and is where the statements
# in the body of the class definition get loaded into the class's
# __dict__. We intercept this to filter out parameter & port assignments
# and only allow "private" attributes to be passed to the base
# __new__ (starting with underscore).
    def __new__(mcls, name, bases, dict):
        """Create a new SimObject subclass, splitting its class dict.

        Entries accepted by public_value (underscore names and plain
        Python machinery) stay in the real class dict; everything else is
        treated as a param/port assignment and collected into the class's
        _value_dict for filtering in __init__.
        """
        # NOTE: the 'dict' parameter shadows the builtin of the same name;
        # kept as-is since the metaclass protocol supplies it positionally.
        assert name not in allClasses, "SimObject %s already present" % name
        # Copy "private" attributes, functions, and classes to the
        # official dict. Everything else goes in _init_dict to be
        # filtered in __init__.
        cls_dict = {}
        value_dict = {}
        for key,val in dict.items():
            if public_value(key, val):
                cls_dict[key] = val
            else:
                # must be a param/port setting
                value_dict[key] = val
        # Default the keyword settings every SimObject class is expected
        # to carry, when the class body did not provide them.
        if 'abstract' not in value_dict:
            value_dict['abstract'] = False
        if 'cxx_bases' not in value_dict:
            value_dict['cxx_bases'] = []
        cls_dict['_value_dict'] = value_dict
        cls = super(MetaSimObject, mcls).__new__(mcls, name, bases, cls_dict)
        # Only classes that declare a 'type' are real, named SimObjects
        # and get registered in the global class registry.
        if 'type' in value_dict:
            allClasses[name] = cls
        return cls
# subclass initialization
def __init__(cls, name, bases, dict):
# calls type.__init__()... I think that's a no-op, but leave
# it here just in case it's not.
super(MetaSimObject, cls).__init__(name, bases, dict)
# initialize required attributes
# class-only attributes
cls._params = multidict() # param descriptions
cls._ports = multidict() # port descriptions
# class or instance attributes
cls._values = multidict() # param values
cls._hr_values = multidict() # human readable param values
cls._children = multidict() # SimObject children
cls._port_refs = multidict() # port ref objects
cls._instantiated = False # really instantiated, cloned, or subclassed
# We don't support multiple inheritance of sim objects. If you want
# to, you must fix multidict to deal with it properly. Non sim-objects
# are ok, though
bTotal = 0
for c in bases:
if isinstance(c, MetaSimObject):
bTotal += 1
if bTotal > 1:
raise TypeError, "SimObjects do not support multiple inheritance"
base = bases[0]
# Set up general inheritance via multidicts. A subclass will
# inherit all its settings from the base class. The only time
# the following is not true is when we define the SimObject
# class itself (in which case the multidicts have no parent).
if isinstance(base, MetaSimObject):
cls._base = base
cls._params.parent = base._params
cls._ports.parent = base._ports
cls._values.parent = base._values
cls._hr_values.parent = base._hr_values
cls._children.parent = base._children
cls._port_refs.parent = base._port_refs
# mark base as having been subclassed
base._instantiated = True
else:
cls._base = None
# default keyword values
if 'type' in cls._value_dict:
if 'cxx_class' not in cls._value_dict:
cls._value_dict['cxx_class'] = cls._value_dict['type']
cls._value_dict['cxx_type'] = '%s *' % cls._value_dict['cxx_class']
if 'cxx_header' not in cls._value_dict:
global noCxxHeader
noCxxHeader = True
warn("No header file specified for SimObject: %s", name)
# Export methods are automatically inherited via C++, so we
# don't want the method declarations to get inherited on the
# python side (and thus end up getting repeated in the wrapped
# versions of derived classes). The code below basicallly
# suppresses inheritance by substituting in the base (null)
# versions of these methods unless a different version is
# explicitly supplied.
for method_name in ('export_methods', 'export_method_cxx_predecls',
'export_method_swig_predecls'):
if method_name not in cls.__dict__:
base_method = getattr(MetaSimObject, method_name)
m = MethodType(base_method, cls, MetaSimObject)
setattr(cls, method_name, m)
# Now process the _value_dict items. They could be defining
# new (or overriding existing) parameters or ports, setting
# class keywords (e.g., 'abstract'), or setting parameter
# values or port bindings. The first 3 can only be set when
# the class is defined, so we handle them here. The others
# can be set later too, so just emulate that by calling
# setattr().
for key,val in cls._value_dict.items():
# param descriptions
if isinstance(val, ParamDesc):
cls._new_param(key, val)
# port objects
elif isinstance(val, Port):
cls._new_port(key, val)
# init-time-only keywords
elif cls.init_keywords.has_key(key):
cls._set_keyword(key, val, cls.init_keywords[key])
# default: use normal path (ends up in __setattr__)
else:
setattr(cls, key, val)
def _set_keyword(cls, keyword, val, kwtype):
if not isinstance(val, kwtype):
raise TypeError, 'keyword %s has bad type %s (expecting %s)' % \
(keyword, type(val), kwtype)
if isinstance(val, FunctionType):
val = classmethod(val)
type.__setattr__(cls, keyword, val)
def _new_param(cls, name, pdesc):
# each param desc should be uniquely assigned to one variable
assert(not hasattr(pdesc, 'name'))
pdesc.name = name
cls._params[name] = pdesc
if hasattr(pdesc, 'default'):
cls._set_param(name, pdesc.default, pdesc)
def _set_param(cls, name, value, param):
assert(param.name == name)
try:
hr_value = value
value = param.convert(value)
except Exception, e:
msg = "%s\nError setting param %s.%s to %s\n" % \
(e, cls.__name__, name, value)
e.args = (msg, )
raise
cls._values[name] = value
# if param value is a SimObject, make it a child too, so that
# it gets cloned properly when the class is instantiated
if isSimObjectOrVector(value) and not value.has_parent():
cls._add_cls_child(name, value)
# update human-readable values of the param if it has a literal
# value and is not an object or proxy.
if not (isSimObjectOrVector(value) or\
isinstance(value, m5.proxy.BaseProxy)):
cls._hr_values[name] = hr_value
def _add_cls_child(cls, name, child):
# It's a little funky to have a class as a parent, but these
# objects should never be instantiated (only cloned, which
# clears the parent pointer), and this makes it clear that the
# object is not an orphan and can provide better error
# messages.
child.set_parent(cls, name)
cls._children[name] = child
def _new_port(cls, name, port):
# each port should be uniquely assigned to one variable
assert(not hasattr(port, 'name'))
port.name = name
cls._ports[name] = port
# same as _get_port_ref, effectively, but for classes
def _cls_get_port_ref(cls, attr):
# Return reference that can be assigned to another port
# via __setattr__. There is only ever one reference
# object per port, but we create them lazily here.
ref = cls._port_refs.get(attr)
if not ref:
ref = cls._ports[attr].makeRef(cls)
cls._port_refs[attr] = ref
return ref
# Set attribute (called on foo.attr = value when foo is an
# instance of class cls).
def __setattr__(cls, attr, value):
# normal processing for private attributes
if public_value(attr, value):
type.__setattr__(cls, attr, value)
return
if cls.keywords.has_key(attr):
cls._set_keyword(attr, value, cls.keywords[attr])
return
if cls._ports.has_key(attr):
cls._cls_get_port_ref(attr).connect(value)
return
if isSimObjectOrSequence(value) and cls._instantiated:
raise RuntimeError, \
"cannot set SimObject parameter '%s' after\n" \
" class %s has been instantiated or subclassed" \
% (attr, cls.__name__)
# check for param
param = cls._params.get(attr)
if param:
cls._set_param(attr, value, param)
return
if isSimObjectOrSequence(value):
# If RHS is a SimObject, it's an implicit child assignment.
cls._add_cls_child(attr, coerceSimObjectOrVector(value))
return
# no valid assignment... raise exception
raise AttributeError, \
"Class %s has no parameter \'%s\'" % (cls.__name__, attr)
def __getattr__(cls, attr):
if attr == 'cxx_class_path':
return cls.cxx_class.split('::')
if attr == 'cxx_class_name':
return cls.cxx_class_path[-1]
if attr == 'cxx_namespaces':
return cls.cxx_class_path[:-1]
if cls._values.has_key(attr):
return cls._values[attr]
if cls._children.has_key(attr):
return cls._children[attr]
raise AttributeError, \
"object '%s' has no attribute '%s'" % (cls.__name__, attr)
def __str__(cls):
return cls.__name__
# See ParamValue.cxx_predecls for description.
def cxx_predecls(cls, code):
code('#include "params/$cls.hh"')
# See ParamValue.swig_predecls for description.
def swig_predecls(cls, code):
code('%import "python/m5/internal/param_$cls.i"')
# Hook for exporting additional C++ methods to Python via SWIG.
# Default is none, override using @classmethod in class definition.
def export_methods(cls, code):
pass
# Generate the code needed as a prerequisite for the C++ methods
# exported via export_methods() to be compiled in the _wrap.cc
# file. Typically generates one or more #include statements. If
# any methods are exported, typically at least the C++ header
# declaring the relevant SimObject class must be included.
def export_method_cxx_predecls(cls, code):
pass
# Generate the code needed as a prerequisite for the C++ methods
# exported via export_methods() to be processed by SWIG.
# Typically generates one or more %include or %import statements.
# If any methods are exported, typically at least the C++ header
# declaring the relevant SimObject class must be included.
def export_method_swig_predecls(cls, code):
pass
# Generate the declaration for this object for wrapping with SWIG.
# Generates code that goes into a SWIG .i file. Called from
# src/SConscript.
def swig_decl(cls, code):
class_path = cls.cxx_class.split('::')
classname = class_path[-1]
namespaces = class_path[:-1]
# The 'local' attribute restricts us to the params declared in
# the object itself, not including inherited params (which
# will also be inherited from the base class's param struct
# here). Sort the params based on their key
params = map(lambda (k, v): v, sorted(cls._params.local.items()))
ports = cls._ports.local
code('%module(package="m5.internal") param_$cls')
code()
code('%{')
code('#include "sim/sim_object.hh"')
code('#include "params/$cls.hh"')
for param in params:
param.cxx_predecls(code)
code('#include "${{cls.cxx_header}}"')
cls.export_method_cxx_predecls(code)
code('''\
/**
* This is a workaround for bug in swig. Prior to gcc 4.6.1 the STL
* headers like vector, string, etc. used to automatically pull in
* the cstddef header but starting with gcc 4.6.1 they no longer do.
* This leads to swig generated a file that does not compile so we
* explicitly include cstddef. Additionally, including version 2.0.4,
* swig uses ptrdiff_t without the std:: namespace prefix which is
* required with gcc 4.6.1. We explicitly provide access to it.
*/
#include <cstddef>
using std::ptrdiff_t;
''')
code('%}')
code()
for param in params:
param.swig_predecls(code)
cls.export_method_swig_predecls(code)
code()
if cls._base:
code('%import "python/m5/internal/param_${{cls._base}}.i"')
code()
for ns in namespaces:
code('namespace $ns {')
if namespaces:
code('// avoid name conflicts')
sep_string = '_COLONS_'
flat_name = sep_string.join(class_path)
code('%rename($flat_name) $classname;')
code()
code('// stop swig from creating/wrapping default ctor/dtor')
code('%nodefault $classname;')
code('class $classname')
if cls._base:
bases = [ cls._base.cxx_class ] + cls.cxx_bases
else:
bases = cls.cxx_bases
base_first = True
for base in bases:
if base_first:
code(' : public ${{base}}')
base_first = False
else:
code(' , public ${{base}}')
code('{')
code(' public:')
cls.export_methods(code)
code('};')
for ns in reversed(namespaces):
code('} // namespace $ns')
code()
code('%include "params/$cls.hh"')
# Generate the C++ declaration (.hh file) for this SimObject's
# param struct. Called from src/SConscript.
def cxx_param_decl(cls, code):
# The 'local' attribute restricts us to the params declared in
# the object itself, not including inherited params (which
# will also be inherited from the base class's param struct
# here). Sort the params based on their key
params = map(lambda (k, v): v, sorted(cls._params.local.items()))
ports = cls._ports.local
try:
ptypes = [p.ptype for p in params]
except:
print cls, p, p.ptype_str
print params
raise
class_path = cls._value_dict['cxx_class'].split('::')
code('''\
#ifndef __PARAMS__${cls}__
#define __PARAMS__${cls}__
''')
# A forward class declaration is sufficient since we are just
# declaring a pointer.
for ns in class_path[:-1]:
code('namespace $ns {')
code('class $0;', class_path[-1])
for ns in reversed(class_path[:-1]):
code('} // namespace $ns')
code()
# The base SimObject has a couple of params that get
# automatically set from Python without being declared through
# the normal Param mechanism; we slip them in here (needed
# predecls now, actual declarations below)
if cls == SimObject:
code('''
#ifndef PY_VERSION
struct PyObject;
#endif
#include <string>
''')
for param in params:
param.cxx_predecls(code)
for port in ports.itervalues():
port.cxx_predecls(code)
code()
if cls._base:
code('#include "params/${{cls._base.type}}.hh"')
code()
for ptype in ptypes:
if issubclass(ptype, Enum):
code('#include "enums/${{ptype.__name__}}.hh"')
code()
# now generate the actual param struct
code("struct ${cls}Params")
if cls._base:
code(" : public ${{cls._base.type}}Params")
code("{")
if not hasattr(cls, 'abstract') or not cls.abstract:
if 'type' in cls.__dict__:
code(" ${{cls.cxx_type}} create();")
code.indent()
if cls == SimObject:
code('''
SimObjectParams() {}
virtual ~SimObjectParams() {}
std::string name;
PyObject *pyobj;
''')
for param in params:
param.cxx_decl(code)
for port in ports.itervalues():
port.cxx_decl(code)
code.dedent()
code('};')
code()
code('#endif // __PARAMS__${cls}__')
return code
# Generate the C++ declaration/definition files for this SimObject's
# param struct to allow C++ initialisation
def cxx_config_param_file(cls, code, is_header):
createCxxConfigDirectoryEntryFile(code, cls.__name__, cls, is_header)
return code
# This *temporary* definition is required to support calls from the
# SimObject class definition to the MetaSimObject methods (in
# particular _set_param, which gets called for parameters with default
# values defined on the SimObject class itself). It will get
# overridden by the permanent definition (which requires that
# SimObject be defined) lower in this file.
def isSimObjectOrVector(value):
return False
# This class holds information about each simobject parameter
# that should be displayed on the command line for use in the
# configuration system.
class ParamInfo(object):
def __init__(self, type, desc, type_str, example, default_val, access_str):
self.type = type
self.desc = desc
self.type_str = type_str
self.example_str = example
self.default_val = default_val
# The string representation used to access this param through python.
# The method to access this parameter presented on the command line may
# be different, so this needs to be stored for later use.
self.access_str = access_str
self.created = True
# Make it so we can only set attributes at initialization time
# and effectively make this a const object.
def __setattr__(self, name, value):
if not "created" in self.__dict__:
self.__dict__[name] = value
# The SimObject class is the root of the special hierarchy. Most of
# the code in this class deals with the configuration hierarchy itself
# (parent/child node relationships).
class SimObject(object):
# Specify metaclass. Any class inheriting from SimObject will
# get this metaclass.
__metaclass__ = MetaSimObject
type = 'SimObject'
abstract = True
cxx_header = "sim/sim_object.hh"
cxx_bases = [ "Drainable", "Serializable" ]
eventq_index = Param.UInt32(Parent.eventq_index, "Event Queue Index")
@classmethod
def export_method_swig_predecls(cls, code):
code('''
%include <std_string.i>
%import "python/swig/drain.i"
%import "python/swig/serialize.i"
''')
@classmethod
def export_methods(cls, code):
code('''
void init();
void loadState(CheckpointIn &cp);
void initState();
void memInvalidate();
void memWriteback();
void regStats();
void resetStats();
void regProbePoints();
void regProbeListeners();
void startup();
''')
# Returns a dict of all the option strings that can be
# generated as command line options for this simobject instance
# by tracing all reachable params in the top level instance and
# any children it contains.
def enumerateParams(self, flags_dict = {},
cmd_line_str = "", access_str = ""):
if hasattr(self, "_paramEnumed"):
print "Cycle detected enumerating params"
else:
self._paramEnumed = True
# Scan the children first to pick up all the objects in this SimObj
for keys in self._children:
child = self._children[keys]
next_cmdline_str = cmd_line_str + keys
next_access_str = access_str + keys
if not isSimObjectVector(child):
next_cmdline_str = next_cmdline_str + "."
next_access_str = next_access_str + "."
flags_dict = child.enumerateParams(flags_dict,
next_cmdline_str,
next_access_str)
# Go through the simple params in the simobject in this level
# of the simobject hierarchy and save information about the
# parameter to be used for generating and processing command line
# options to the simulator to set these parameters.
for keys,values in self._params.items():
if values.isCmdLineSettable():
type_str = ''
ex_str = values.example_str()
ptype = None
if isinstance(values, VectorParamDesc):
type_str = 'Vector_%s' % values.ptype_str
ptype = values
else:
type_str = '%s' % values.ptype_str
ptype = values.ptype
if keys in self._hr_values\
and keys in self._values\
and not isinstance(self._values[keys], m5.proxy.BaseProxy):
cmd_str = cmd_line_str + keys
acc_str = access_str + keys
flags_dict[cmd_str] = ParamInfo(ptype,
self._params[keys].desc, type_str, ex_str,
values.pretty_print(self._hr_values[keys]),
acc_str)
elif not keys in self._hr_values\
and not keys in self._values:
# Empty param
cmd_str = cmd_line_str + keys
acc_str = access_str + keys
flags_dict[cmd_str] = ParamInfo(ptype,
self._params[keys].desc,
type_str, ex_str, '', acc_str)
return flags_dict
# Initialize new instance. For objects with SimObject-valued
# children, we need to recursively clone the classes represented
# by those param values as well in a consistent "deep copy"-style
# fashion. That is, we want to make sure that each instance is
# cloned only once, and that if there are multiple references to
# the same original object, we end up with the corresponding
# cloned references all pointing to the same cloned instance.
def __init__(self, **kwargs):
ancestor = kwargs.get('_ancestor')
memo_dict = kwargs.get('_memo')
if memo_dict is None:
# prepare to memoize any recursively instantiated objects
memo_dict = {}
elif ancestor:
# memoize me now to avoid problems with recursive calls
memo_dict[ancestor] = self
if not ancestor:
ancestor = self.__class__
ancestor._instantiated = True
# initialize required attributes
self._parent = None
self._name = None
self._ccObject = None # pointer to C++ object
self._ccParams = None
self._instantiated = False # really "cloned"
# Clone children specified at class level. No need for a
# multidict here since we will be cloning everything.
# Do children before parameter values so that children that
# are also param values get cloned properly.
self._children = {}
for key,val in ancestor._children.iteritems():
self.add_child(key, val(_memo=memo_dict))
# Inherit parameter values from class using multidict so
# individual value settings can be overridden but we still
# inherit late changes to non-overridden class values.
self._values = multidict(ancestor._values)
self._hr_values = multidict(ancestor._hr_values)
# clone SimObject-valued parameters
for key,val in ancestor._values.iteritems():
val = tryAsSimObjectOrVector(val)
if val is not None:
self._values[key] = val(_memo=memo_dict)
# clone port references. no need to use a multidict here
# since we will be creating new references for all ports.
self._port_refs = {}
for key,val in ancestor._port_refs.iteritems():
self._port_refs[key] = val.clone(self, memo_dict)
# apply attribute assignments from keyword args, if any
for key,val in kwargs.iteritems():
setattr(self, key, val)
# "Clone" the current instance by creating another instance of
# this instance's class, but that inherits its parameter values
# and port mappings from the current instance. If we're in a
# "deep copy" recursive clone, check the _memo dict to see if
# we've already cloned this instance.
def __call__(self, **kwargs):
memo_dict = kwargs.get('_memo')
if memo_dict is None:
# no memo_dict: must be top-level clone operation.
# this is only allowed at the root of a hierarchy
if self._parent:
raise RuntimeError, "attempt to clone object %s " \
"not at the root of a tree (parent = %s)" \
% (self, self._parent)
# create a new dict and use that.
memo_dict = {}
kwargs['_memo'] = memo_dict
elif memo_dict.has_key(self):
# clone already done & memoized
return memo_dict[self]
return self.__class__(_ancestor = self, **kwargs)
def _get_port_ref(self, attr):
# Return reference that can be assigned to another port
# via __setattr__. There is only ever one reference
# object per port, but we create them lazily here.
ref = self._port_refs.get(attr)
if ref == None:
ref = self._ports[attr].makeRef(self)
self._port_refs[attr] = ref
return ref
def __getattr__(self, attr):
if self._ports.has_key(attr):
return self._get_port_ref(attr)
if self._values.has_key(attr):
return self._values[attr]
if self._children.has_key(attr):
return self._children[attr]
# If the attribute exists on the C++ object, transparently
# forward the reference there. This is typically used for
# SWIG-wrapped methods such as init(), regStats(),
# resetStats(), startup(), drain(), and
# resume().
if self._ccObject and hasattr(self._ccObject, attr):
return getattr(self._ccObject, attr)
err_string = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, attr)
if not self._ccObject:
err_string += "\n (C++ object is not yet constructed," \
" so wrapped C++ methods are unavailable.)"
raise AttributeError, err_string
# Set attribute (called on foo.attr = value when foo is an
# instance of class cls).
def __setattr__(self, attr, value):
# normal processing for private attributes
if attr.startswith('_'):
object.__setattr__(self, attr, value)
return
if self._ports.has_key(attr):
# set up port connection
self._get_port_ref(attr).connect(value)
return
param = self._params.get(attr)
if param:
try:
hr_value = value
value = param.convert(value)
except Exception, e:
msg = "%s\nError setting param %s.%s to %s\n" % \
(e, self.__class__.__name__, attr, value)
e.args = (msg, )
raise
self._values[attr] = value
# implicitly parent unparented objects assigned as params
if isSimObjectOrVector(value) and not value.has_parent():
self.add_child(attr, value)
# set the human-readable value dict if this is a param
# with a literal value and is not being set as an object
# or proxy.
if not (isSimObjectOrVector(value) or\
isinstance(value, m5.proxy.BaseProxy)):
self._hr_values[attr] = hr_value
return
# if RHS is a SimObject, it's an implicit child assignment
if isSimObjectOrSequence(value):
self.add_child(attr, value)
return
# no valid assignment... raise exception
raise AttributeError, "Class %s has no parameter %s" \
% (self.__class__.__name__, attr)
# this hack allows tacking a '[0]' onto parameters that may or may
# not be vectors, and always getting the first element (e.g. cpus)
def __getitem__(self, key):
if key == 0:
return self
raise IndexError, "Non-zero index '%s' to SimObject" % key
# this hack allows us to iterate over a SimObject that may
# not be a vector, so we can call a loop over it and get just one
# element.
def __len__(self):
return 1
# Also implemented by SimObjectVector
def clear_parent(self, old_parent):
assert self._parent is old_parent
self._parent = None
# Also implemented by SimObjectVector
def set_parent(self, parent, name):
self._parent = parent
self._name = name
# Return parent object of this SimObject, not implemented by SimObjectVector
# because the elements in a SimObjectVector may not share the same parent
def get_parent(self):
return self._parent
# Also implemented by SimObjectVector
def get_name(self):
return self._name
# Also implemented by SimObjectVector
def has_parent(self):
return self._parent is not None
# clear out child with given name. This code is not likely to be exercised.
# See comment in add_child.
def clear_child(self, name):
child = self._children[name]
child.clear_parent(self)
del self._children[name]
# Add a new child to this object.
def add_child(self, name, child):
child = coerceSimObjectOrVector(child)
if child.has_parent():
warn("add_child('%s'): child '%s' already has parent", name,
child.get_name())
if self._children.has_key(name):
# This code path had an undiscovered bug that would make it fail
# at runtime. It had been here for a long time and was only
# exposed by a buggy script. Changes here will probably not be
# exercised without specialized testing.
self.clear_child(name)
child.set_parent(self, name)
self._children[name] = child
# Take SimObject-valued parameters that haven't been explicitly
# assigned as children and make them children of the object that
# they were assigned to as a parameter value. This guarantees
# that when we instantiate all the parameter objects we're still
# inside the configuration hierarchy.
def adoptOrphanParams(self):
for key,val in self._values.iteritems():
if not isSimObjectVector(val) and isSimObjectSequence(val):
# need to convert raw SimObject sequences to
# SimObjectVector class so we can call has_parent()
val = SimObjectVector(val)
self._values[key] = val
if isSimObjectOrVector(val) and not val.has_parent():
warn("%s adopting orphan SimObject param '%s'", self, key)
self.add_child(key, val)
def path(self):
if not self._parent:
return '<orphan %s>' % self.__class__
elif isinstance(self._parent, MetaSimObject):
return str(self.__class__)
ppath = self._parent.path()
if ppath == 'root':
return self._name
return ppath + "." + self._name
def __str__(self):
return self.path()
def config_value(self):
return self.path()
def ini_str(self):
return self.path()
def find_any(self, ptype):
if isinstance(self, ptype):
return self, True
found_obj = None
for child in self._children.itervalues():
visited = False
if hasattr(child, '_visited'):
visited = getattr(child, '_visited')
if isinstance(child, ptype) and not visited:
if found_obj != None and child != found_obj:
raise AttributeError, \
'parent.any matched more than one: %s %s' % \
(found_obj.path, child.path)
found_obj = child
# search param space
for pname,pdesc in self._params.iteritems():
if issubclass(pdesc.ptype, ptype):
match_obj = self._values[pname]
if found_obj != None and found_obj != match_obj:
raise AttributeError, \
'parent.any matched more than one: %s and %s' % (found_obj.path, match_obj.path)
found_obj = match_obj
return found_obj, found_obj != None
def find_all(self, ptype):
all = {}
# search children
for child in self._children.itervalues():
# a child could be a list, so ensure we visit each item
if isinstance(child, list):
children = child
else:
children = [child]
for child in children:
if isinstance(child, ptype) and not isproxy(child) and \
not isNullPointer(child):
all[child] = True
if isSimObject(child):
# also add results from the child itself
child_all, done = child.find_all(ptype)
all.update(dict(zip(child_all, [done] * len(child_all))))
# search param space
for pname,pdesc in self._params.iteritems():
if issubclass(pdesc.ptype, ptype):
match_obj = self._values[pname]
if not isproxy(match_obj) and not isNullPointer(match_obj):
all[match_obj] = True
# Also make sure to sort the keys based on the objects' path to
# ensure that the order is the same on all hosts
return sorted(all.keys(), key = lambda o: o.path()), True
def unproxy(self, base):
return self
def unproxyParams(self):
for param in self._params.iterkeys():
value = self._values.get(param)
if value != None and isproxy(value):
try:
value = value.unproxy(self)
except:
print "Error in unproxying param '%s' of %s" % \
(param, self.path())
raise
setattr(self, param, value)
# Unproxy ports in sorted order so that 'append' operations on
# vector ports are done in a deterministic fashion.
port_names = self._ports.keys()
port_names.sort()
for port_name in port_names:
port = self._port_refs.get(port_name)
if port != None:
port.unproxy(self)
def print_ini(self, ini_file):
print >>ini_file, '[' + self.path() + ']' # .ini section header
instanceDict[self.path()] = self
if hasattr(self, 'type'):
print >>ini_file, 'type=%s' % self.type
if len(self._children.keys()):
print >>ini_file, 'children=%s' % \
' '.join(self._children[n].get_name() \
for n in sorted(self._children.keys()))
for param in sorted(self._params.keys()):
value = self._values.get(param)
if value != None:
print >>ini_file, '%s=%s' % (param,
self._values[param].ini_str())
for port_name in sorted(self._ports.keys()):
port = self._port_refs.get(port_name, None)
if port != None:
print >>ini_file, '%s=%s' % (port_name, port.ini_str())
print >>ini_file # blank line between objects
# generate a tree of dictionaries expressing all the parameters in the
# instantiated system for use by scripts that want to do power, thermal
# visualization, and other similar tasks
def get_config_as_dict(self):
d = attrdict()
if hasattr(self, 'type'):
d.type = self.type
if hasattr(self, 'cxx_class'):
d.cxx_class = self.cxx_class
# Add the name and path of this object to be able to link to
# the stats
d.name = self.get_name()
d.path = self.path()
for param in sorted(self._params.keys()):
value = self._values.get(param)
if value != None:
d[param] = value.config_value()
for n in sorted(self._children.keys()):
child = self._children[n]
# Use the name of the attribute (and not get_name()) as
# the key in the JSON dictionary to capture the hierarchy
# in the Python code that assembled this system
d[n] = child.get_config_as_dict()
for port_name in sorted(self._ports.keys()):
port = self._port_refs.get(port_name, None)
if port != None:
# Represent each port with a dictionary containing the
# prominent attributes
d[port_name] = port.get_config_as_dict()
return d
def getCCParams(self):
if self._ccParams:
return self._ccParams
cc_params_struct = getattr(m5.internal.params, '%sParams' % self.type)
cc_params = cc_params_struct()
cc_params.pyobj = self
cc_params.name = str(self)
param_names = self._params.keys()
param_names.sort()
for param in param_names:
value = self._values.get(param)
if value is None:
fatal("%s.%s without default or user set value",
self.path(), param)
value = value.getValue()
if isinstance(self._params[param], VectorParamDesc):
assert isinstance(value, list)
vec = getattr(cc_params, param)
assert not len(vec)
for v in value:
vec.append(v)
else:
setattr(cc_params, param, value)
port_names = self._ports.keys()
port_names.sort()
for port_name in port_names:
port = self._port_refs.get(port_name, None)
if port != None:
port_count = len(port)
else:
port_count = 0
setattr(cc_params, 'port_' + port_name + '_connection_count',
port_count)
self._ccParams = cc_params
return self._ccParams
# Get C++ object corresponding to this object, calling C++ if
# necessary to construct it. Does *not* recursively create
# children.
def getCCObject(self):
if not self._ccObject:
# Make sure this object is in the configuration hierarchy
if not self._parent and not isRoot(self):
raise RuntimeError, "Attempt to instantiate orphan node"
# Cycles in the configuration hierarchy are not supported. This
# will catch the resulting recursion and stop.
self._ccObject = -1
if not self.abstract:
params = self.getCCParams()
self._ccObject = params.create()
elif self._ccObject == -1:
raise RuntimeError, "%s: Cycle found in configuration hierarchy." \
% self.path()
return self._ccObject
def descendants(self):
yield self
# The order of the dict is implementation dependent, so sort
# it based on the key (name) to ensure the order is the same
# on all hosts
for (name, child) in sorted(self._children.iteritems()):
for obj in child.descendants():
yield obj
# Call C++ to create C++ object corresponding to this object
def createCCObject(self):
self.getCCParams()
self.getCCObject() # force creation
def getValue(self):
return self.getCCObject()
# Create C++ port connections corresponding to the connections in
# _port_refs
def connectPorts(self):
# Sort the ports based on their attribute name to ensure the
# order is the same on all hosts
for (attr, portRef) in sorted(self._port_refs.iteritems()):
portRef.ccConnect()
# Function to provide to C++ so it can look up instances based on paths
def resolveSimObject(name):
obj = instanceDict[name]
return obj.getCCObject()
def isSimObject(value):
return isinstance(value, SimObject)
def isSimObjectClass(value):
return issubclass(value, SimObject)
def isSimObjectVector(value):
return isinstance(value, SimObjectVector)
def isSimObjectSequence(value):
if not isinstance(value, (list, tuple)) or len(value) == 0:
return False
for val in value:
if not isNullPointer(val) and not isSimObject(val):
return False
return True
def isSimObjectOrSequence(value):
return isSimObject(value) or isSimObjectSequence(value)
def isRoot(obj):
from m5.objects import Root
return obj and obj is Root.getInstance()
def isSimObjectOrVector(value):
return isSimObject(value) or isSimObjectVector(value)
def tryAsSimObjectOrVector(value):
if isSimObjectOrVector(value):
return value
if isSimObjectSequence(value):
return SimObjectVector(value)
return None
def coerceSimObjectOrVector(value):
    """Coerce *value* like tryAsSimObjectOrVector, but raise TypeError on
    failure instead of returning None."""
    value = tryAsSimObjectOrVector(value)
    if value is None:
        raise TypeError, "SimObject or SimObjectVector expected"
    return value
# Snapshot the class/instance registries at import time so clear() can
# restore them to this pristine state later.
baseClasses = allClasses.copy()
baseInstances = instanceDict.copy()
def clear():
    """Reset the global SimObject registries to their initial (import-time)
    state, discarding any classes or instances created since."""
    global allClasses, instanceDict, noCxxHeader
    allClasses = baseClasses.copy()
    instanceDict = baseInstances.copy()
    noCxxHeader = False
# __all__ defines the list of symbols that get exported when
# 'from config import *' is invoked. Try to keep this reasonably
# short to avoid polluting other namespaces.
# Only SimObject is public API; the is*/coerce* helpers stay internal.
__all__ = [ 'SimObject' ]
| bsd-3-clause |
coreynicholson/youtube-dl | youtube_dl/extractor/ted.py | 8 | 11869 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
try_get,
)
class TEDIE(InfoExtractor):
    """Extractor for ted.com talks, playlists and 'watch' pages.

    Embedded-player URLs are redirected to the corresponding desktop
    page; talks hosted on an external service (e.g. YouTube) are
    delegated to that service's extractor.
    """
    IE_NAME = 'ted'
    _VALID_URL = r'''(?x)
        (?P<proto>https?://)
        (?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/
        (
            (?P<type_playlist>playlists(?:/\d+)?) # We have a playlist
            |
            ((?P<type_talk>talks)) # We have a simple talk
            |
            (?P<type_watch>watch)/[^/]+/[^/]+
        )
        (/lang/(.*?))? # The url may contain the language
        /(?P<name>[\w-]+) # Here goes the name and then ".html"
        .*)$
        '''
    _TESTS = [{
        'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
        'md5': '0de43ac406aa3e4ea74b66c9c7789b13',
        'info_dict': {
            'id': '102',
            'ext': 'mp4',
            'title': 'The illusion of consciousness',
            'description': ('Philosopher Dan Dennett makes a compelling '
                            'argument that not only don\'t we understand our own '
                            'consciousness, but that half the time our brains are '
                            'actively fooling us.'),
            'uploader': 'Dan Dennett',
            'width': 853,
            'duration': 1308,
        }
    }, {
        'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms',
        'md5': 'b899ac15e345fb39534d913f7606082b',
        'info_dict': {
            'id': 'tSVI8ta_P4w',
            'ext': 'mp4',
            'title': 'Vishal Sikka: The beauty and power of algorithms',
            'thumbnail': r're:^https?://.+\.jpg',
            'description': 'md5:6261fdfe3e02f4f579cbbfc00aff73f4',
            'upload_date': '20140122',
            'uploader_id': 'TEDInstitute',
            'uploader': 'TED Institute',
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
        'md5': '71b3ab2f4233012dce09d515c9c39ce2',
        'info_dict': {
            'id': '1972',
            'ext': 'mp4',
            'title': 'Be passionate. Be courageous. Be your best.',
            'uploader': 'Gabby Giffords and Mark Kelly',
            'description': 'md5:5174aed4d0f16021b704120360f72b92',
            'duration': 1128,
        },
    }, {
        'url': 'http://www.ted.com/playlists/who_are_the_hackers',
        'info_dict': {
            'id': '10',
            'title': 'Who are the hackers?',
        },
        'playlist_mincount': 6,
    }, {
        # contains a youtube video
        'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
        'add_ie': ['Youtube'],
        'info_dict': {
            'id': '_ZG8HBuDjgc',
            'ext': 'webm',
            'title': 'Douglas Adams: Parrots the Universe and Everything',
            'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
            'uploader': 'University of California Television (UCTV)',
            'uploader_id': 'UCtelevision',
            'upload_date': '20080522',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # YouTube video
        'url': 'http://www.ted.com/talks/jeffrey_kluger_the_sibling_bond',
        'add_ie': ['Youtube'],
        'info_dict': {
            'id': 'aFBIPO-P7LM',
            'ext': 'mp4',
            'title': 'The hidden power of siblings: Jeff Kluger at TEDxAsheville',
            'description': 'md5:3d7a4f50d95ca5dd67104e2a20f43fe1',
            'uploader': 'TEDx Talks',
            'uploader_id': 'TEDxTalks',
            'upload_date': '20111216',
        },
        'params': {
            'skip_download': True,
        },
    }]
    # Pixel dimensions for TED's fixed-quality native download labels.
    _NATIVE_FORMATS = {
        'low': {'width': 320, 'height': 180},
        'medium': {'width': 512, 'height': 288},
        'high': {'width': 854, 'height': 480},
    }
    def _extract_info(self, webpage):
        """Parse and return the JSON blob embedded in a TED page's init call."""
        info_json = self._search_regex(
            r'(?s)q\(\s*"\w+.init"\s*,\s*({.+})\)\s*</script>',
            webpage, 'info json')
        return json.loads(info_json)
    def _real_extract(self, url):
        """Dispatch to the talk/watch/playlist handler based on the URL type."""
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        if m.group('type').startswith('embed'):
            # Redirect embedded-player URLs to the equivalent desktop page.
            desktop_url = m.group('proto') + 'www' + m.group('urlmain')
            return self.url_result(desktop_url, 'TED')
        name = m.group('name')
        if m.group('type_talk'):
            return self._talk_info(url, name)
        elif m.group('type_watch'):
            return self._watch_info(url, name)
        else:
            return self._playlist_videos_info(url, name)
    def _playlist_videos_info(self, url, name):
        '''Returns the videos of the playlist'''
        webpage = self._download_webpage(url, name,
                                         'Downloading playlist webpage')
        info = self._extract_info(webpage)
        # Newer pages nest the data under __INITIAL_DATA__; fall back to the
        # older top-level layout.
        playlist_info = try_get(
            info, lambda x: x['__INITIAL_DATA__']['playlist'],
            dict) or info['playlist']
        playlist_entries = [
            self.url_result('http://www.ted.com/talks/' + talk['slug'], self.ie_key())
            for talk in try_get(
                info, lambda x: x['__INITIAL_DATA__']['talks'],
                dict) or info['talks']
        ]
        return self.playlist_result(
            playlist_entries,
            playlist_id=compat_str(playlist_info['id']),
            playlist_title=playlist_info['title'])
    def _talk_info(self, url, video_name):
        """Extract formats, subtitles and metadata for a single talk page."""
        webpage = self._download_webpage(url, video_name)
        info = self._extract_info(webpage)
        talk_info = try_get(
            info, lambda x: x['__INITIAL_DATA__']['talks'][0],
            dict) or info['talks'][0]
        title = talk_info['title'].strip()
        external = talk_info.get('external')
        if external:
            # Talk is hosted elsewhere (e.g. YouTube): hand off to that IE.
            service = external['service']
            self.to_screen('Found video from %s' % service)
            ext_url = None
            if service.lower() == 'youtube':
                ext_url = external.get('code')
            return {
                '_type': 'url',
                'url': ext_url or external['uri'],
            }
        native_downloads = try_get(
            talk_info, lambda x: x['downloads']['nativeDownloads'],
            dict) or talk_info['nativeDownloads']
        formats = [{
            'url': format_url,
            'format_id': format_id,
            'format': format_id,
        } for (format_id, format_url) in native_downloads.items() if format_url is not None]
        if formats:
            # Attach known width/height to the fixed-quality native formats.
            for f in formats:
                finfo = self._NATIVE_FORMATS.get(f['format_id'])
                if finfo:
                    f.update(finfo)
        player_talk = talk_info['player_talks'][0]
        resources_ = player_talk.get('resources') or talk_info.get('resources')
        http_url = None
        for format_id, resources in resources_.items():
            if format_id == 'h264':
                for resource in resources:
                    h264_url = resource.get('file')
                    if not h264_url:
                        continue
                    bitrate = int_or_none(resource.get('bitrate'))
                    formats.append({
                        'url': h264_url,
                        'format_id': '%s-%sk' % (format_id, bitrate),
                        'tbr': bitrate,
                    })
                    # Remember a bitrate-templated URL to synthesize HTTP
                    # variants from the HLS formats below.
                    if re.search(r'\d+k', h264_url):
                        http_url = h264_url
            elif format_id == 'rtmp':
                streamer = talk_info.get('streamer')
                if not streamer:
                    continue
                for resource in resources:
                    formats.append({
                        'format_id': '%s-%s' % (format_id, resource.get('name')),
                        'url': streamer,
                        'play_path': resource['file'],
                        'ext': 'flv',
                        'width': int_or_none(resource.get('width')),
                        'height': int_or_none(resource.get('height')),
                        'tbr': int_or_none(resource.get('bitrate')),
                    })
            elif format_id == 'hls':
                formats.extend(self._extract_m3u8_formats(
                    resources.get('stream'), video_name, 'mp4', m3u8_id=format_id, fatal=False))
        m3u8_formats = list(filter(
            lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none',
            formats))
        if http_url:
            # Derive progressive-HTTP formats by substituting each HLS
            # variant's bitrate into the templated h264 URL.
            for m3u8_format in m3u8_formats:
                bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None)
                if not bitrate:
                    continue
                f = m3u8_format.copy()
                f.update({
                    'url': re.sub(r'\d+k', bitrate, http_url),
                    'format_id': m3u8_format['format_id'].replace('hls', 'http'),
                    'protocol': 'http',
                })
                formats.append(f)
        audio_download = talk_info.get('audioDownload')
        if audio_download:
            formats.append({
                'url': audio_download,
                'format_id': 'audio',
                'vcodec': 'none',
            })
        self._sort_formats(formats)
        video_id = compat_str(talk_info['id'])
        return {
            'id': video_id,
            'title': title,
            'uploader': player_talk.get('speaker') or talk_info.get('speaker'),
            'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'),
            'description': self._og_search_description(webpage),
            'subtitles': self._get_subtitles(video_id, talk_info),
            'formats': formats,
            'duration': talk_info.get('duration'),
        }
    def _get_subtitles(self, video_id, talk_info):
        """Build the subtitles dict ('ted' and 'srt' URLs per language)."""
        languages = [lang['languageCode'] for lang in talk_info.get('languages', [])]
        if languages:
            sub_lang_list = {}
            for l in languages:
                sub_lang_list[l] = [
                    {
                        'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, l, ext),
                        'ext': ext,
                    }
                    for ext in ['ted', 'srt']
                ]
            return sub_lang_list
        else:
            return {}
    def _watch_info(self, url, name):
        """Extract a 'watch' page; falls back to the embedded iframe URL."""
        webpage = self._download_webpage(url, name)
        config_json = self._html_search_regex(
            r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>',
            webpage, 'config', default=None)
        if not config_json:
            # No jwplayer config: delegate to whatever the embed points at.
            embed_url = self._search_regex(
                r"<iframe[^>]+class='pages-video-embed__video__object'[^>]+src='([^']+)'", webpage, 'embed url')
            return self.url_result(self._proto_relative_url(embed_url))
        config = json.loads(config_json)['config']
        video_url = config['video']['url']
        thumbnail = config.get('image', {}).get('url')
        title = self._html_search_regex(
            r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title')
        description = self._html_search_regex(
            [
                r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>',
                r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>',
            ],
            webpage, 'description', fatal=False)
        return {
            'id': name,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
        }
| unlicense |
bramalingam/bioformats | components/xsd-fu/xslt/xsltbasic.py | 2 | 20326 | #!/usr/bin/env python
# encoding: utf-8
"""
...
"""
#
# Copyright (C) 2009 - 2016 Open Microscopy Environment. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import unittest
from copy import deepcopy
# We're using lxml's ElementTree implementation for XML manipulation due to
# its XSLT integration.
from lxml.etree import Element
# Handle Python 2.5 built-in ElementTree
# try:
# from xml.etree.ElementTree import XML, Element, SubElement, ElementTree
# from xml.etree..ElementTree import dump
# except ImportError:
# from elementtree.ElementTree import XML, Element, SubElement
# from elementtree.ElementTree import ElementTree, dump
class XsltBasic(unittest.TestCase):
    """Shared helper assertions for XSLT transformation tests.

    Provides XPath construction, element/attribute comparison, graph
    comparison and reference-resolution utilities used by the stylesheet
    unit tests. Elements are lxml Elements (tags of the form '{NS}Name').
    """
    # Create the XPath for the element in the scope of root or local.
    # and add attribute if supplied.
    def createXPath(self, scope, NS, elementName, attribute=None):
        if(scope == 'local'):
            scope = './'
        if(scope == 'root'):
            scope = './/'
        if(scope == 'all'):
            return '%s[@%s]' % (elementName, attribute)
        if(attribute is not None):
            return '%s{%s}%s[@%s]' % (scope, NS, elementName, attribute)
        return '%s{%s}%s' % (scope, NS, elementName)
    # return the name of the element without NameSpace e.g.
    # {NameSpace}elementName.
    def localName(self, elementTag):
        return elementTag[elementTag.find("}") + 1:]
    # if var is not none then remove trailing spaces and if '' then return
    # None.
    def stripStr(self, var):
        if(var is not None):
            if(var.strip() != ''):
                return var.strip()
        return None
    # Get all elements from rootElement in elementList in namespace NS.
    def getAllElements(self, rootElement, NS, elementList):
        returnList = []
        for elementName in elementList:
            elementXPath = self.createXPath('root', NS, elementName)
            foundElements = rootElement.findall(elementXPath)
            returnList.extend(foundElements)
        return returnList
    # Check that the elements in the exclusionList are not in the element.
    def checkElementsExcluded(self, element, exclusionList):
        children = element.getchildren()
        for child in children:
            self.assertFalse(child in exclusionList)
    # Check that the attributes in element with Namespace NS are not in
    # exclusionList.
    def checkAttributesExcluded(self, root, NS, element, exclusionList):
        for attribute in exclusionList:
            xpath = self.createXPath('all', NS, element, attribute)
            self.assertTrue(len(root.findall(xpath)) == 0)
    # Check that all the elements in oldRoot with namespace oldNS have been
    # mapped to newRoot with namespace newNS.
    # Rename those elements in renameMap.
    def checkElementsMapped(self, oldRoot, oldNS, newRoot, newNS, renameMap):
        for mappedElement in renameMap:
            oldXPath = self.createXPath('root', oldNS, mappedElement)
            newXPath = self.createXPath(
                'root', newNS, renameMap[mappedElement])
            oldElements = oldRoot.findall(oldXPath)
            newElements = newRoot.findall(newXPath)
            self.assertEqual(len(oldElements), len(newElements))
            self.assertTrue(len(newRoot.findall(oldXPath)) == 0)
    # Check that all the elements in oldRoot with namespace oldNS have been
    # mapped (i.e. no longer appear under their old name in oldRoot).
    # Rename those elements in renameMap.
    def checkElementsMappedNoCount(self, oldRoot, oldNS, newRoot, newNS,
                                   renameMap):
        for mappedElement in renameMap:
            oldXPath = self.createXPath('root', oldNS, mappedElement)
            oldElements = oldRoot.findall(oldXPath)
            self.assertTrue(len(oldElements) == 0)
    # Compare Elements in oldElement with the NameSpace oldElementNS to the
    # attributes with the same name in newElement.
    # Don't compare those elements in the exceptionList.
    # Rename those attributes in the renameMap.
    def compareElementsWithAttributes(self, oldElement, oldElementNS,
                                      newElement, exceptionList=None,
                                      renameMap=None):
        for oldChildElement in oldElement.getchildren():
            elementName = self.localName(oldChildElement.tag)
            if(exceptionList is not None):
                if(elementName in exceptionList):
                    continue
            mappedName = elementName
            if(renameMap is not None):
                if(elementName in renameMap):
                    mappedName = renameMap[mappedName]
            newValue = newElement.get(mappedName)
            self.assertFalse(newValue is None)
            self.assertEquals(newValue, self.stripStr(oldChildElement.text))
    # Compare Elements in left with the attributes in right if they are in
    # comparison map.
    def compareElementsWithAttributesFromMap(self, left, right,
                                             comparisonMap):
        for leftChild in left.getchildren():
            leftChildName = self.localName(leftChild.tag)
            if(leftChildName not in comparisonMap):
                continue
            mappedName = comparisonMap[leftChildName]
            newValue = right.get(mappedName)
            self.assertFalse(newValue is None)
            self.assertEquals(newValue, self.stripStr(leftChild.text))
    # Check that the element contains the elements in containsList
    def containsElements(self, element, NS, containsList):
        containsMap = {}
        for name in containsList:
            containsMap[name] = False
        for child in element.getchildren():
            elementName = self.localName(child.tag)
            if(elementName in containsMap):
                containsMap[elementName] = True
        for key in containsMap:
            self.assertEquals(containsMap[key], True)
    # Check that the element contains the elements in containsMap with the
    # values in the map
    def containsElementsWithValues(self, element, NS, containsMap):
        equalsMap = {}
        for key in containsMap:
            equalsMap[key] = False
        for child in element.getchildren():
            elementName = self.localName(child.tag)
            if(elementName in containsMap):
                if(containsMap[elementName] == self.stripStr(child.text)):
                    equalsMap[elementName] = True
        for key in equalsMap:
            self.assertEquals(equalsMap[key], True)
    # Check that the element contains the attributes in containsList
    def containsAttributes(self, element, containsList):
        containsMap = {}
        for name in containsList:
            containsMap[name] = False
        for attribute in element.attrib.keys():
            if(attribute in containsMap):
                containsMap[attribute] = True
        for key in containsMap:
            self.assertEquals(containsMap[key], True)
    # Check that the element contains the attributes in containsMap and the
    # values in the map match the values in the element.
    def containsAttributesWithValues(self, element, containsMap):
        equalsMap = {}
        for key in containsMap:
            equalsMap[key] = False
        for attribute in element.attrib.keys():
            if(attribute in containsMap):
                if(containsMap[attribute] == element.get(attribute)):
                    equalsMap[attribute] = True
        for key in equalsMap:
            self.assertEquals(equalsMap[key], True)
    # Get elements in list as a map from element [name:value], removing
    # namespace.
    def getElementsAsMap(self, element):
        childMap = {}
        for child in element.getchildren():
            # Was self.localname (lowercase n), which raised AttributeError.
            childMap[self.localName(child.tag)] = child.text
        return childMap
    # Get attributes in list as a map from element [name:value].
    def getElementsAsMap2(self, element):
        attributeMap = {}
        for attribute in element.attrib.keys():
            attributeMap[attribute] = element.get(attribute)
        return attributeMap
    # Compare elements from oldElement in oldElement NameSpace to the
    # newElement in newElement NameSpace.
    # Don't compare those elements in the exceptionList list.
    # Rename those elements in the renameMap.
    def compareElements(self, oldElement, oldElementNS, newElement,
                        newElementNS, exceptionList=None, renameMap=None,
                        inclusionList=None):
        inclusionMap = {}
        if (inclusionList is not None):
            for elem in inclusionList:
                inclusionMap[elem] = False
        for oldChildElement in oldElement.getchildren():
            elementName = self.localName(oldChildElement.tag)
            if (exceptionList is not None):
                if (elementName in exceptionList):
                    continue
            mappedName = elementName
            if renameMap is not None:
                if(elementName in renameMap):
                    mappedName = renameMap[elementName]
            if(elementName in inclusionMap):
                inclusionMap[elementName] = True
            newChildXPath = self.createXPath(
                'local', newElementNS, mappedName)
            newChildElement = newElement.find(newChildXPath)
            # Assert the mapped child exists; was assertFalse(x, None),
            # which tested element truthiness with a None failure message.
            self.assertFalse(newChildElement is None)
            self.assertEquals(self.stripStr(newChildElement.text),
                              self.stripStr(oldChildElement.text))
        for key in inclusionMap:
            self.assertEquals(inclusionMap[key], True)
    # Compare attributes from oldElement to new element
    # Don't compare those elements in the exceptionList.
    # Rename those elements in the renameMap.
    def compareAttributes(self, oldElement, newElement, exceptionList=None,
                          renameMap=None):
        for key in oldElement.attrib.keys():
            if exceptionList is not None:
                if(key in exceptionList):
                    continue
            mappedKey = key
            if renameMap is not None:
                if(key in renameMap):
                    mappedKey = renameMap[key]
            newValue = newElement.get(mappedKey)
            oldValue = oldElement.get(key)
            if(oldValue != newValue):
                # Diagnostic dump before the assertion fires below.
                print('FAILURE in xsltbasic.compareAttributes')
                print('EXCEPTIONLIST %s' % exceptionList)
                print('oldElement.tag %s' % oldElement.tag)
                print('newElement.tag %s' % newElement.tag)
                print('key %s' % key)
                print('old %s' % oldValue)
                print('new %s' % newValue)
                print('END FAILURE')
            self.assertEquals(newValue, oldValue)
    # Get all the child elements from the element, in namespace.
    # Exclude thoses child elements in the exclusions list.
    def getChildElements(self, element, elementNS, exceptionList):
        childList = []
        for child in element.getchildren():
            name = self.localName(child.tag)
            if(name not in exceptionList):
                childList.append(name)
        return childList
    # Return true if the attributes in the elements left and right match and
    # the number if children match.
    # NOTE(review): iterating an lxml Element yields child elements, not
    # attribute names, so the renamedAttributes lookups below appear to be
    # keyed on Elements -- confirm the intended input type with the callers.
    def elementsEqual(self, left, right, renamedAttributes):
        if self.localName(left.tag) != self.localName(right.tag):
            return False
        if(len(left) != len(right)):
            return False
        if(len(left.getchildren()) != len(right.getchildren())):
            return False
        for leftAttribute in left:
            if(renamedAttributes[leftAttribute] not in right):
                return False
            if(left[leftAttribute] != right[renamedAttributes[leftAttribute]]):
                return False
        return True
    # Select the element in rightList who's attributes match the element left.
    def getElementFromList(self, left, rightList, renamedAttributes):
        for right in rightList:
            if self.elementsEqual(left, right, renamedAttributes):
                return right
        return None
    # Compare graph's are same, the attributes and elements maybe renamed
    # using the renameAttributes and renameElements map, this method assumes
    # that the graphs are in the same element order.
    def compareGraphs(self, left, right, ignoreAttributes=None,
                      renameAttributes=None, renameElements=None):
        leftChildren = left.getchildren()
        rightChildren = right.getchildren()
        self.assertEqual(len(leftChildren), len(rightChildren))
        if len(leftChildren) == 0:
            return
        for i in range(len(leftChildren)):
            self.compareAttributes(leftChildren[i], rightChildren[i],
                                   ignoreAttributes, renameAttributes)
            if renameElements is None:
                self.assertEqual(self.localName(leftChildren[i].tag),
                                 self.localName(rightChildren[i].tag))
            else:
                leftChildTag = self.localName(leftChildren[i].tag)
                if(leftChildTag in renameElements):
                    leftChildTag = renameElements[leftChildTag]
                self.assertEqual(leftChildTag,
                                 self.localName(rightChildren[i].tag))
            self.assertEqual(self.stripStr(leftChildren[i].text),
                             self.stripStr(rightChildren[i].text))
            self.compareGraphs(
                leftChildren[i], rightChildren[i], ignoreAttributes,
                renameAttributes, renameElements)
    # Compare graph's are same, the attributes and elements maybe renamed
    # using the renameAttributes and renameElements map; element order is
    # not significant (children are matched by attribute equality).
    def compareGraphsWithoutOrder(self, left, right, renameAttributes=None,
                                  renameElements=None, ignoreAttributes=None):
        leftChildren = left.getchildren()
        rightChildren = right.getchildren()
        self.assertEqual(len(leftChildren), len(rightChildren))
        if len(leftChildren) == 0:
            return
        for i in range(len(leftChildren)):
            rightChild = self.getElementFromList(
                leftChildren[i], rightChildren, renameAttributes)
            self.assertTrue(rightChild is not None)
            if renameElements is None:
                self.assertEqual(self.localName(leftChildren[i].tag),
                                 self.localName(rightChild.tag))
            else:
                self.assertEqual(
                    renameElements[self.localName(leftChildren[i].tag)],
                    self.localName(rightChild.tag))
            self.assertEqual(self.stripStr(leftChildren[i].text),
                             self.stripStr(rightChild.text))
            # Recurse with keyword arguments: this method's signature orders
            # its optional parameters differently from compareGraphs, and the
            # previous positional call scrambled the three maps.
            self.compareGraphsWithoutOrder(
                leftChildren[i], rightChild,
                renameAttributes=renameAttributes,
                renameElements=renameElements,
                ignoreAttributes=ignoreAttributes)
    # get the name of a reference, by removing the Ref suffix.
    def elementRefName(self, name):
        return name[:len(name)-3]
    # return true if the element is a reference, has Ref suffix.
    def isRef(self, element):
        return (element.tag[len(element.tag)-3:] == 'Ref')
    # return the element in the root tree with name element name and id
    # if it does not exist it will return None
    def findElementByID(self, root, NS, elementName, id):
        elements = self.getAllElements(root, NS, [elementName])
        for element in elements:
            if element.get('ID') == id:
                return element
        return None
    # create a new element based on the element param, this will copy the
    # element tag, and attribs but not children. To copy children use
    # deepcopy.
    def shallowcopy(self, element):
        newElement = Element(element.tag)
        newElement.text = element.text
        for key in element.keys():
            newElement.set(key, element.get(key))
        return newElement
    # Replace the references in element with the full element from root, this
    # method only works on the children of the element, to replace all
    # references in element use replaceRefsWithElementsRecurse.
    # If RefList is not empty it will only replace the References in RefList.
    # The elements in RefList should only be the name of the element, ROI not
    # ROIRef.
    def replaceRefsWithElements(self, root, NS, element, RefList=None):
        if RefList is None:
            RefList = []
        newElement = self.shallowcopy(element)
        children = element.getchildren()
        if len(children) == 0:
            # Return the copy even when there is nothing to replace, for
            # consistency with replaceRefsWithElementsRecurse (previously
            # returned None here).
            return newElement
        for child in children:
            elementName = self.elementRefName(self.localName(child.tag))
            if (self.isRef(child) and elementName in RefList):
                elementFromRef = self.findElementByID(
                    root, NS, elementName, child.get('ID'))
                newElement.append(deepcopy(elementFromRef))
            else:
                newElement.append(deepcopy(child))
        return newElement
    # Replace the references in element with the full element from root, this
    # method works to replace all references in element.
    # If RefList is not empty it will only replace the References in RefList.
    # The elements in RefList should only be the name of the element, ROI not
    # ROIRef.
    def replaceRefsWithElementsRecurse(self, root, NS, element, RefList=None):
        if RefList is None:
            RefList = []
        newElement = self.shallowcopy(element)
        children = element.getchildren()
        if len(children) == 0:
            return newElement
        for child in children:
            elementName = self.elementRefName(self.localName(child.tag))
            if(self.isRef(child) and elementName in RefList):
                elementFromRef = self.findElementByID(
                    root, NS, elementName, child.get('ID'))
                newElement.append(deepcopy(elementFromRef))
            else:
                newElement.append(self.replaceRefsWithElementsRecurse(
                    root, NS, child, RefList))
        return newElement
    # Move the child elements from removeElement to removeElements parent
    # element and remove it from the element.
    def moveElementsFromChildToParent(self, element, NS, removeElement):
        returnElement = deepcopy(element)
        xpath = self.createXPath('root', NS, removeElement)
        elementsToRemove = returnElement.findall(xpath)
        for elementToRemove in elementsToRemove:
            elementsParent = elementToRemove.getparent()
            elementsParent.remove(elementToRemove)
            for child in elementToRemove.getchildren():
                elementsParent.append(child)
        return returnElement
| gpl-2.0 |
kralf/morsel | python/lib/morsel/nodes/ode/solids/mesh.py | 1 | 1092 | from morsel.panda import *
from morsel.nodes.node import Node
from morsel.nodes.ode.object import Object
from morsel.nodes.facade import Mesh as _Mesh
from morsel.nodes.ode.solid import Solid
#-------------------------------------------------------------------------------
class Mesh(Solid):
  """ODE solid that represents its object's shape with a triangle mesh."""
  def __init__(self, **kargs):
    super(Mesh, self).__init__(**kargs)
#-------------------------------------------------------------------------------
  def getMesh(self):
    # Lazily copy the owning object's mesh model the first time it is asked
    # for; subsequent calls return the cached copy.
    if not self._mesh and self.object:
      self._mesh = _Mesh(parent = self)
      self._mesh.copyFrom(self.object.mesh.model, flatten = True)
    return self._mesh
  mesh = property(getMesh)
#-------------------------------------------------------------------------------
  def fit(self, node):
    # Build an ODE trimesh geometry in the node's collision space from a
    # flattened copy of the node's mesh placed at this solid's global pose.
    Solid.fit(self, node)
    mesh = _Mesh(position = self.globalPosition, orientation =
      self.globalOrientation)
    mesh.copyFrom(node.mesh, flatten = True)
    data = panda.OdeTriMeshData(mesh)
    # The temporary mesh is only needed to build the trimesh data.
    mesh.detachNode()
    self.geometry = panda.OdeTriMeshGeom(node.world.space, data)
| gpl-2.0 |
alexhilton/miscellaneous | python/pygrep.py | 1 | 3334 | #!/usr/bin/env python
"""A Python version of grep utility.
Search one or more named input files against one or more given patterns.
Print the line containing the match, if there are any.
"""
from optparse import OptionParser;
import re;
import fileinput;
import os.path;
# ANSI terminal color escape codes used to highlight output:
# file names in green, line numbers in blue, matches in red.
FILENAME = '\033[92m';
LINENO = '\033[94m';
MATCH = '\033[91m';
ENDC = '\033[0m';
class MultiMatcher(object):
    """A set of compiled regular expressions searchable as one unit.

    Accepts one or more regular expressions; search() returns the first
    successful match of any of them against a line, or None.
    """
    def __init__(self, multipattern, ignore_case):
        flags = re.IGNORECASE if ignore_case else 0
        self.multipattern = [re.compile(p, flags) for p in multipattern]
    def search(self, line):
        """Return the first pattern's match against line, or None."""
        for compiled in self.multipattern:
            match = compiled.search(line)
            if match is not None:
                return match
def build_options():
    """Construct and return the command-line option parser for pygrep."""
    parser = OptionParser(usage = "usage: %prog [options] -e PATTERN files",
                          version = "%prog 1.0")
    parser.add_option("-i", "--ignore-case", dest = "ignore_case",
            action = "store_true", default = False,
            help = "ignore case of letters when matching")
    parser.add_option("-r", "--recursive", dest = "recursive",
            action = "store_true", default = False,
            help = "search for files in directory recursively")
    parser.add_option("-n", "--negative", dest = "negative",
            action = "store_true", default = False,
            help = "show the lines that does not match the pattern")
    parser.add_option("-e", "--regexpr", dest = "regexpr", action = "append",
            help = "specify pattern expression on which to match")
    return parser
def do_matching(filename, matcher):
    """Scan filename line by line with matcher and print colorized hits.

    Reads the module-global 'options' (set in main): with options.negative
    the non-matching lines are printed instead, without highlighting.
    """
    for line in fileinput.input(filename):
        line = line.rstrip();
        match = matcher.search(line);
        if options.negative:
            if match is None:
                print "%s%s:%s%d %s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), ENDC, line);
        else:
            if match is not None:
                # Split the line around the match so only it is colored red.
                first_part = line[:match.start()];
                the_match = line[match.start():match.end()];
                second_part = line[match.end():];
                print "%s%s:%s%d %s%s%s%s%s%s" % (FILENAME, fileinput.filename(), LINENO, fileinput.filelineno(), \
                        ENDC, first_part, MATCH, the_match, ENDC, second_part);
def main():
    """Parse the command line and run the search over all named inputs.

    Exits via parser.error() when no pattern or no input is given; stores
    the parsed options in the module-global 'options' for do_matching.
    """
    global options;
    parser = build_options();
    options, args = parser.parse_args();
    if not options.regexpr:
        parser.error("You must specify at least one PATTERN");
    if not args:
        parser.error("You must specify at least one input file or directory");
    matcher = MultiMatcher(options.regexpr, options.ignore_case);
    for filename in args:
        if not os.path.exists(filename):
            print "No such file or directory: ", filename;
            continue;
        if options.recursive and os.path.isdir(filename):
            for root, dirs, files in os.walk(filename):
                [do_matching(os.path.join(root, entry), matcher) for entry in files];
        elif os.path.isfile(filename):
            do_matching(filename, matcher);
# Script entry point.
if __name__ == "__main__":
    main();
| apache-2.0 |
tejal29/pants | src/python/pants/base/exceptions.py | 1 | 1226 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class TaskError(Exception):
  """Indicates a task has failed."""

  def __init__(self, *args, **kwargs):
    """:param int exit_code: an optional exit code (1, by default)"""
    exit_code = kwargs.pop('exit_code', 1)
    super(TaskError, self).__init__(*args, **kwargs)
    self._exit_code = exit_code

  @property
  def exit_code(self):
    """The exit code the failing task should terminate with."""
    return self._exit_code
class TargetDefinitionException(Exception):
  """Indicates an invalid target definition."""

  def __init__(self, target, msg):
    """
    :param target: the target in question
    :param string msg: a description of the target misconfiguration
    """
    # Name this class in super(), not Exception: super(Exception, self)
    # resolved to BaseException and skipped Exception's own __init__.
    super(TargetDefinitionException, self).__init__(
      'Invalid target %s: %s' % (target, msg))
# Base class for errors in a pants installation's configuration.
class BuildConfigurationError(Exception):
  """Indicates an error in a pants installation's configuration."""
# Specialization raised when a plugin backend cannot be registered.
class BackendConfigurationError(BuildConfigurationError):
  """Indicates a plugin backend with a missing or malformed register module."""
| apache-2.0 |
jmarcelino/pycom-micropython | tests/wipy/pin.py | 65 | 4862 | """
This test needs a set of pins which can be set as inputs and have no external
pull up or pull down connected.
GP12 and GP17 must be connected together
"""
from machine import Pin
import os
mch = os.uname().machine
if 'LaunchPad' in mch:
pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
elif 'WiPy' in mch:
pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
else:
raise Exception('Board not supported!')
# test initial value
p = Pin('GP12', Pin.IN)
Pin('GP17', Pin.OUT, value=1)
print(p() == 1)
Pin('GP17', Pin.OUT, value=0)
print(p() == 0)
def test_noinit():
    """Reading a pin constructed without an explicit mode must not fail."""
    for p in pin_map:
        pin = Pin(p)
        pin.value()
def test_pin_read(pull):
    # enable the pull resistor on all pins, then read the value
    for p in pin_map:
        pin = Pin(p, mode=Pin.IN, pull=pull)
    for p in pin_map:
        # NOTE(review): 'pin' here is the last pin configured by the loop
        # above, so its value is printed once per pin_map entry rather than
        # each pin's own value -- confirm this matches the expected output.
        print(pin())
def test_pin_af():
    """Cycle every pin through each alternate function it supports."""
    for name in pin_map:
        for af in Pin(name).alt_list():
            if af[1] > max_af_idx:
                continue
            # Exercise both the push-pull and open-drain AF modes.
            Pin(name, mode=Pin.ALT, alt=af[1])
            Pin(name, mode=Pin.ALT_OPEN_DRAIN, alt=af[1])
# test un-initialized pins
test_noinit()
# test with pull-up and pull-down
test_pin_read(Pin.PULL_UP)
test_pin_read(Pin.PULL_DOWN)

# test all constructor combinations
# (each call simply must not raise; no output is checked here)
pin = Pin(pin_map[0])
pin = Pin(pin_map[0], mode=Pin.IN)
pin = Pin(pin_map[0], mode=Pin.OUT)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OPEN_DRAIN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=None)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.MED_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, drive=pin.LOW_POWER)
# positional form of the same arguments
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_DOWN)
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP)
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP)
test_pin_af() # try the entire af range on all pins
# test pin init and printing
pin = Pin(pin_map[0])
pin.init(mode=Pin.IN)
print(pin)
pin.init(Pin.IN, Pin.PULL_DOWN)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
print(pin)

# test value in OUT mode
pin = Pin(pin_map[0], mode=Pin.OUT)
pin.value(0)
pin.toggle() # test toggle
print(pin())
pin.toggle() # test toggle again
print(pin())
# test different value settings
# (calling the pin, pin.value(x), and pin(x) are all equivalent APIs)
pin(1)
print(pin.value())
pin(0)
print(pin.value())
pin.value(1)
print(pin())
pin.value(0)
print(pin())
# test all getters and setters
# (each property method acts as a getter with no args, setter with one)
pin = Pin(pin_map[0], mode=Pin.OUT)
# mode
print(pin.mode() == Pin.OUT)
pin.mode(Pin.IN)
print(pin.mode() == Pin.IN)
# pull
pin.pull(None)
print(pin.pull() == None)
pin.pull(Pin.PULL_DOWN)
print(pin.pull() == Pin.PULL_DOWN)
# drive
pin.drive(Pin.MED_POWER)
print(pin.drive() == Pin.MED_POWER)
pin.drive(Pin.HIGH_POWER)
print(pin.drive() == Pin.HIGH_POWER)
# id
print(pin.id() == pin_map[0])
# all the next ones MUST raise
# (each prints 'Exception' so the expected-output file can verify it raised)
try:
    pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.IN) # incorrect drive value
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], mode=Pin.LOW_POWER, pull=Pin.PULL_UP) # incorrect mode value
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.HIGH_POWER) # incorrect pull value
except Exception:
    print('Exception')

try:
    pin = Pin('A0', Pin.OUT, Pin.PULL_DOWN) # incorrect pin id
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], Pin.IN, Pin.PULL_UP, alt=0) # af specified in GPIO mode
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_UP, alt=7) # af specified in GPIO mode
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP, alt=0) # incorrect af
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=-1) # incorrect af
except Exception:
    print('Exception')

try:
    pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=16) # incorrect af
except Exception:
    print('Exception')

try:
    pin.mode(Pin.PULL_UP) # incorrect pin mode
except Exception:
    print('Exception')

try:
    pin.pull(Pin.OUT) # incorrect pull
except Exception:
    print('Exception')

try:
    pin.drive(Pin.IN) # incorrect drive strength
except Exception:
    print('Exception')

try:
    pin.id('ABC') # id cannot be set
except Exception:
    print('Exception')
| mit |
etkirsch/legends-of-erukar | erukar/content/inventory/weapons/standard/Focus.py | 1 | 1029 | import numpy as np
from erukar.system.engine.inventory import ArcaneWeapon
class Focus(ArcaneWeapon):
Probability = 1
BaseName = "Focus"
EssentialPart = "devotion"
AttackRange = 3
RangePenalty = 3
BaseWeight = 1.0
# Damage
DamageRange = [2, 5]
DamageType = 'force'
DamageModifier = "sense"
DamageScalar = 2.4
ScalingRequirement = 6
EnergyCost = 5
# Distribution
Distribution = np.random.gamma
DistributionProperties = (2, 0.3)
BaseStatInfluences = {
'sense': {'requirement': 8, 'scaling_factor': 3.5, 'cutoff': 200},
'acuity': {'requirement': 0, 'scaling_factor': 1.2, 'cutoff': 100},
}
def failing_requirements(self, wielder):
if wielder.arcane_energy < self.EnergyCost:
return ['Not enough Arcane Energy to use {} -- need {}, have {}'.format(self.alias(), self.EnergyCost, wielder.arcane_energy)]
def on_calculate_attack(self, cmd):
cmd.args['player_lifeform'].arcane_energy -= self.EnergyCost
| agpl-3.0 |
leighpauls/k2cro4 | native_client/build/directory_storage.py | 3 | 2418 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implement directory storage on top of file only storage.
Given a storage object capable of storing and retrieving files,
embellish with methods for storing and retrieving directories (using tar).
"""
import os
import sys
import tempfile
import file_tools
import subprocess
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CYGTAR_PATH = os.path.join(SCRIPT_DIR, 'cygtar.py')
class DirectoryStorageAdapter(object):
    """Adapter that implements directory storage on top of file storage.

    Tars directories as needed to keep operations on the data-store atomic:
    a directory is always transferred as a single .tgz object.
    """

    def __init__(self, storage):
        """Init for this class.

        Args:
          storage: File storage object supporting GetFile and PutFile.
        """
        self._storage = storage

    def PutDirectory(self, path, key):
        """Write a directory to storage.

        Args:
          path: Path of the directory to write.
          key: Key to store under.
        Returns:
          Whatever the underlying storage's PutFile returns.
        """
        handle, tmp_tgz = tempfile.mkstemp(prefix='dirstore', suffix='.tmp.tgz')
        try:
            # mkstemp opens the file; close our handle so cygtar can write it.
            os.close(handle)
            # Calling cygtar thru subprocess as its cwd handling is not
            # currently usable.
            subprocess.check_call([sys.executable, CYGTAR_PATH,
                                   '-c', '-z', '-f', os.path.abspath(tmp_tgz), '.'],
                                  cwd=os.path.abspath(path))
            return self._storage.PutFile(tmp_tgz, key)
        finally:
            # Always remove the temp archive, even if tarring/upload failed.
            os.remove(tmp_tgz)

    def GetDirectory(self, key, path):
        """Read a directory from storage.

        Clobbers anything at the destination currently.

        Args:
          key: Key to fetch from.
          path: Path of the directory to write.
        Returns:
          The URL the archive came from, or None if the key was absent.
        """
        file_tools.RemoveDirectoryIfPresent(path)
        os.mkdir(path)
        handle, tmp_tgz = tempfile.mkstemp(prefix='dirstore', suffix='.tmp.tgz')
        try:
            os.close(handle)
            url = self._storage.GetFile(key, tmp_tgz)
            if url is None:
                return None
            # Calling cygtar thru subprocess as its cwd handling is not
            # currently usable.
            subprocess.check_call([sys.executable, CYGTAR_PATH,
                                   '-x', '-z', '-f', os.path.abspath(tmp_tgz)],
                                  cwd=os.path.abspath(path))
            return url
        finally:
            os.remove(tmp_tgz)
| bsd-3-clause |
agx/git-buildpackage | tests/component/deb/__init__.py | 1 | 1200 | # vim: set fileencoding=utf-8 :
#
# (C) 2012 Intel Corporation <markus.lehtonen@linux.intel.com>
# (C) 2013 Guido Günther <agx@sigxcpu.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
import os
from tests.component import ComponentTestGitRepository
# Relative path of the git submodule holding binary test data, plus the
# derived absolute path and the URL the data can be downloaded from.
DEB_TEST_SUBMODULE = os.path.join('tests', 'component', 'deb', 'data')
DEB_TEST_DATA_DIR = os.path.abspath(DEB_TEST_SUBMODULE)
DEB_TEST_DOWNLOAD_URL = 'https://git.sigxcpu.org/cgit/gbp/deb-testdata/plain/'


def setup():
    """Test Module setup"""
    # Module-level test hook: ensure the test-data submodule is checked out
    # before any test in this package runs.
    ComponentTestGitRepository.check_testdata(DEB_TEST_SUBMODULE)
| gpl-2.0 |
hradec/cortex | test/IECoreGL/BufferTest.py | 12 | 2568 | ##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreGL
IECoreGL.init( False )
class BufferTest( unittest.TestCase ) :
    """Checks conversion of vector data to IECoreGL buffer objects and
    that the default CachedConverter reuses buffers for identical data."""

    def test( self ) :
        d = IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 0, 100 ) ] )
        b = IECoreGL.CachedConverter.defaultCachedConverter().convert( d )
        # assertIsInstance/assertTrue/assertFalse replace failUnless/failIf,
        # which were deprecated in Python 2.7 and removed in Python 3.12.
        self.assertIsInstance( b, IECoreGL.Buffer )
        # 100 V3f elements, 3 floats each, 4 bytes per float.
        self.assertEqual( b.size(), 100 * 3 * 4 )
        # Converting identical data again must hit the cache.
        b2 = IECoreGL.CachedConverter.defaultCachedConverter().convert( d )
        self.assertTrue( b2.isSame( b ) )
        d2 = IECore.V3fVectorData( [ IECore.V3f( x * 2 ) for x in range( 0, 50 ) ] )
        b3 = IECoreGL.CachedConverter.defaultCachedConverter().convert( d2 )
        # Bug fix: the original re-asserted isinstance on 'b'; the freshly
        # converted 'b3' is what this branch is meant to check.
        self.assertIsInstance( b3, IECoreGL.Buffer )
        # Different data must yield a distinct buffer, not the cached one.
        self.assertFalse( b3.isSame( b ) )

if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
nke001/attention-lvcsr | libs/Theano/theano/gof/tests/test_graph.py | 2 | 14643 | from __future__ import print_function
import pickle
import unittest
import numpy
from itertools import count
from theano import (
sparse,
shared, tensor)
from theano.gof.graph import (
Apply,
as_string, clone, general_toposort, inputs, io_toposort,
is_same_graph, Variable)
from theano.gof.op import Op
from theano.gof.type import Type
from theano.sandbox.cuda.var import (
CudaNdarrayVariable, CudaNdarrayConstant, CudaNdarraySharedVariable)
def as_variable(x):
    # Identity helper: asserts x is already a graph Variable, then returns it.
    assert isinstance(x, Variable)
    return x
class MyType(Type):
    """Minimal Type carrying a single integer payload, used by these tests."""

    def __init__(self, thingy):
        self.thingy = thingy

    def __eq__(self, other):
        # Equal only to another MyType with the same payload.
        return isinstance(other, MyType) and self.thingy == other.thingy

    def __str__(self):
        return 'R%s' % str(self.thingy)

    def __repr__(self):
        # Same rendering as __str__, e.g. 'R3' for thingy == 3.
        return 'R%s' % str(self.thingy)
def MyVariable(thingy):
    # Convenience factory: a Variable of MyType(thingy) with no owner/index.
    return Variable(MyType(thingy), None, None)
class MyOp(Op):
    """Test-only Op whose output type's ``thingy`` is the sum of the inputs'
    ``thingy`` values."""

    __props__ = ()

    def make_node(self, *inputs):
        """Build an Apply node; every input must be a MyType Variable."""
        inputs = list(map(as_variable, inputs))
        # 'inp' (was 'input') avoids shadowing the builtin input().
        for inp in inputs:
            if not isinstance(inp.type, MyType):
                print(inp, inp.type, type(inp), type(inp.type))
                raise Exception("Error 1")
        # Generator expression: no need to materialise a list just to sum it.
        outputs = [MyVariable(sum(inp.type.thingy for inp in inputs))]
        return Apply(self, inputs, outputs)

# Replace the class with a singleton instance; the tests call it directly.
MyOp = MyOp()
##########
# inputs #
##########
class TestInputs:
    # Checks that graph.inputs() returns the leaf Variables of a graph.

    def test_inputs(self):
        r1, r2 = MyVariable(1), MyVariable(2)
        node = MyOp.make_node(r1, r2)
        assert inputs(node.outputs) == [r1, r2]

    def test_inputs_deep(self):
        # Leaves are collected through intermediate Apply nodes too.
        r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)
        node = MyOp.make_node(r1, r2)
        node2 = MyOp.make_node(node.outputs[0], r5)
        i = inputs(node2.outputs)
        assert i == [r1, r2, r5], i
#############
# as_string #
#############
class X:
    # Mixin giving the string-rendering tests a compact graph formatter:
    # leaves print as their type ('R1'), nodes as 'Op(arg, arg)'.

    def leaf_formatter(self, leaf):
        return str(leaf.type)

    def node_formatter(self, node, argstrings):
        return "%s(%s)" % (node.op, ", ".join(argstrings))

    def str(self, inputs, outputs):
        # NOTE: deliberately shadows the builtin name 'str' as a method name.
        return as_string(inputs, outputs,
                         leaf_formatter=self.leaf_formatter,
                         node_formatter=self.node_formatter)
class TestStr(X):
    # Tests for graph.as_string() rendering.

    def test_as_string(self):
        r1, r2 = MyVariable(1), MyVariable(2)
        node = MyOp.make_node(r1, r2)
        s = self.str([r1, r2], node.outputs)
        assert s == ["MyOp(R1, R2)"]

    def test_as_string_deep(self):
        r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)
        node = MyOp.make_node(r1, r2)
        node2 = MyOp.make_node(node.outputs[0], r5)
        s = self.str([r1, r2, r5], node2.outputs)
        assert s == ["MyOp(MyOp(R1, R2), R5)"]

    def test_multiple_references(self):
        # A variable used twice is rendered once and back-referenced as '*1'.
        r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)
        node = MyOp.make_node(r1, r2)
        node2 = MyOp.make_node(node.outputs[0], node.outputs[0])
        assert self.str([r1, r2, r5], node2.outputs) == ["MyOp(*1 -> MyOp(R1, R2), *1)"]

    def test_cutoff(self):
        # Rendering stops at the given inputs instead of recursing past them.
        r1, r2 = MyVariable(1), MyVariable(2)
        node = MyOp.make_node(r1, r2)
        node2 = MyOp.make_node(node.outputs[0], node.outputs[0])
        assert self.str(node.outputs, node2.outputs) == ["MyOp(R3, R3)"]
        assert self.str(node2.inputs, node2.outputs) == ["MyOp(R3, R3)"]
#########
# clone #
#########
class TestClone(X):
    # Tests for graph.clone(): structure is copied, inputs are shared.

    def test_accurate(self):
        r1, r2 = MyVariable(1), MyVariable(2)
        node = MyOp.make_node(r1, r2)
        _, new = clone([r1, r2], node.outputs, False)
        assert self.str([r1, r2], new) == ["MyOp(R1, R2)"]

    def test_copy(self):
        r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)
        node = MyOp.make_node(r1, r2)
        node2 = MyOp.make_node(node.outputs[0], r5)
        _, new = clone([r1, r2, r5], node2.outputs, False)
        assert node2.outputs[0].type == new[0].type and node2.outputs[0] is not new[0]  # the new output is like the old one but not the same object
        assert node2 is not new[0].owner  # the new output has a new owner
        assert new[0].owner.inputs[1] is r5  # the inputs are not copied
        assert new[0].owner.inputs[0].type == node.outputs[0].type and new[0].owner.inputs[0] is not node.outputs[0]  # check that we copied deeper too

    def test_not_destructive(self):
        # Checks that manipulating a cloned graph leaves the original unchanged.
        r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)
        node = MyOp.make_node(MyOp.make_node(r1, r2).outputs[0], r5)
        _, new = clone([r1, r2, r5], node.outputs, False)
        new_node = new[0].owner
        new_node.inputs = MyVariable(7), MyVariable(8)
        assert self.str(inputs(new_node.outputs), new_node.outputs) == ["MyOp(R7, R8)"]
        assert self.str(inputs(node.outputs), node.outputs) == ["MyOp(MyOp(R1, R2), R5)"]
############
# toposort #
############
def prenode(obj):
    """Return the graph predecessors of *obj*, or ``None`` if it has none.

    For an Apply node the predecessors are its inputs; for a Variable with
    an owner, the single-element list holding that owner.
    """
    if isinstance(obj, Apply):
        return obj.inputs
    if isinstance(obj, Variable) and obj.owner:
        return [obj.owner]
class TestToposort:
    """Tests for general_toposort / io_toposort ordering guarantees.

    The local result variable is named ``order`` rather than the original
    ``all``, which shadowed the builtin ``all()``.
    """

    def test_0(self):
        """Test a simple graph"""
        r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)
        o = MyOp.make_node(r1, r2)
        o2 = MyOp.make_node(o.outputs[0], r5)
        order = general_toposort(o2.outputs, prenode)
        assert order == [r5, r2, r1, o, o.outputs[0], o2, o2.outputs[0]]
        order = io_toposort([r5], o2.outputs)
        assert order == [o, o2]

    def test_1(self):
        """Test a graph with double dependencies"""
        r1, r5 = MyVariable(1), MyVariable(5)
        o = MyOp.make_node(r1, r1)
        o2 = MyOp.make_node(o.outputs[0], r5)
        order = general_toposort(o2.outputs, prenode)
        assert order == [r5, r1, o, o.outputs[0], o2, o2.outputs[0]]

    def test_2(self):
        """Test a graph where the inputs have owners"""
        r1, r5 = MyVariable(1), MyVariable(5)
        o = MyOp.make_node(r1, r1)
        r2b = o.outputs[0]
        o2 = MyOp.make_node(r2b, r2b)
        order = io_toposort([r2b], o2.outputs)
        assert order == [o2]
        o2 = MyOp.make_node(r2b, r5)
        order = io_toposort([r2b], o2.outputs)
        assert order == [o2]

    def test_3(self):
        """Test a graph which is not connected"""
        r1, r2, r3, r4 = MyVariable(1), MyVariable(2), MyVariable(3), MyVariable(4)
        o0 = MyOp.make_node(r1, r2)
        o1 = MyOp.make_node(r3, r4)
        order = io_toposort([r1, r2, r3, r4], o0.outputs + o1.outputs)
        assert order == [o1, o0]

    def test_4(self):
        """Test inputs and outputs mixed together in a chain graph"""
        r1, r2 = MyVariable(1), MyVariable(2)
        o0 = MyOp.make_node(r1, r2)
        o1 = MyOp.make_node(o0.outputs[0], r1)
        order = io_toposort([r1, o0.outputs[0]], [o0.outputs[0], o1.outputs[0]])
        assert order == [o1]

    def test_5(self):
        """Test when outputs have clients"""
        r1, r2, r4 = MyVariable(1), MyVariable(2), MyVariable(4)
        o0 = MyOp.make_node(r1, r2)
        # Creates an extra client of o0's output; the node itself is unused.
        MyOp.make_node(o0.outputs[0], r4)
        order = io_toposort([], o0.outputs)
        assert order == [o0]
#################
# is_same_graph #
#################
class TestIsSameGraph(unittest.TestCase):
    # Tests for graph.is_same_graph, with and without 'givens' substitutions.

    def check(self, expected, debug=True):
        """
        Core function to perform comparison.

        :param expected: A list of tuples (v1, v2, ((g1, o1), ..., (gN, oN)))
        with:
        - `v1` and `v2` two Variables (the graphs to be compared)
        - `gj` a `givens` dictionary to give as input to `is_same_graph`
        - `oj` the expected output of `is_same_graph(v1, v2, givens=gj)`

        :param debug: If True, then we make sure we are testing both
        implementations of `is_same_graph`.

        This function also tries to call `is_same_graph` by inverting `v1` and
        `v2`, and ensures the output remains the same.
        """
        for v1, v2, go in expected:
            for gj, oj in go:
                r1 = is_same_graph(v1, v2, givens=gj, debug=debug)
                assert r1 == oj
                # The comparison must be symmetric in v1/v2.
                r2 = is_same_graph(v2, v1, givens=gj, debug=debug)
                assert r2 == oj

    def test_single_var(self):
        """
        Test `is_same_graph` with some trivial graphs (one Variable).
        """
        x, y, z = tensor.vectors('x', 'y', 'z')
        self.check([
            (x, x, (({}, True), )),
            (x, y, (({}, False), ({y: x}, True), )),
            (x, tensor.neg(x), (({}, False), )),
            (x, tensor.neg(y), (({}, False), )),
        ])

    def test_full_graph(self):
        """
        Test `is_same_graph` with more complex graphs.
        """
        x, y, z = tensor.vectors('x', 'y', 'z')
        t = x * y
        self.check([
            (x * 2, x * 2, (({}, True), )),
            (x * 2, y * 2, (({}, False), ({y: x}, True), )),
            (x * 2, y * 2, (({}, False), ({x: y}, True), )),
            (x * 2, y * 3, (({}, False), ({y: x}, False), )),
            (t * 2, z * 2, (({}, False), ({t: z}, True), )),
            (t * 2, z * 2, (({}, False), ({z: t}, True), )),
            (x * (y * z), (x * y) * z, (({}, False), )),
        ])

    def test_merge_only(self):
        """
        Test `is_same_graph` when `equal_computations` cannot be used.
        """
        x, y, z = tensor.vectors('x', 'y', 'z')
        t = x * y
        self.check([
            (x, t, (({}, False), ({t: x}, True))),
            (t * 2, x * 2, (({}, False), ({t: x}, True), )),
            (x * x, x * y, (({}, False), ({y: x}, True), )),
            (x * x, x * y, (({}, False), ({y: x}, True), )),
            (x * x + z, x * y + t, (({}, False),
                                    ({y: x}, False),
                                    ({y: x, t: z}, True))),
        ],
            debug=False)
################
# eval #
################
class TestEval(unittest.TestCase):
    # Tests for Variable.eval(): evaluation, intermediate substitution, and
    # the per-variable compiled-function cache.

    def setUp(self):
        self.x, self.y = tensor.scalars('x', 'y')
        self.z = self.x + self.y
        self.w = 2 * self.z

    def test_eval(self):
        self.assertEqual(self.w.eval({self.x: 1., self.y: 2.}), 6.)
        # Supplying an intermediate variable short-circuits its subgraph.
        self.assertEqual(self.w.eval({self.z: 3}), 6.)
        self.assertTrue(hasattr(self.w, "_fn_cache"),
                        "variable must have cache after eval")
        # The compiled-function cache must not survive pickling.
        self.assertFalse(hasattr(pickle.loads(pickle.dumps(self.w)), '_fn_cache'),
                         "temporary functions must not be serialized")
################
# autoname #
################
class TestAutoName:
    # Checks that every Variable subclass gets a unique, sequential
    # 'auto_<n>' name from the shared Variable.__count__ counter.

    def test_auto_name(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        r1, r2 = MyVariable(1), MyVariable(2)
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)

    def test_constant(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        r1 = tensor.constant(1.5)
        r2 = tensor.constant(1.5)
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)

    def test_tensorvariable(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        r1 = tensor.TensorType(dtype='int32', broadcastable=())('myvar')
        r2 = tensor.TensorVariable(tensor.TensorType(dtype='int32',
                                                     broadcastable=()))
        r3 = shared(numpy.random.randn(3, 4))
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)
        assert r3.auto_name == "auto_" + str(autoname_id + 2)

    def test_sparsevariable(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        r1 = sparse.csc_matrix(name='x', dtype='float32')
        r2 = sparse.dense_from_sparse(r1)
        r3 = sparse.csc_from_dense(r2)
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)
        assert r3.auto_name == "auto_" + str(autoname_id + 2)

    def test_cudandarrayvariable(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        mytype = tensor.TensorType(dtype='int32', broadcastable=())
        r1 = CudaNdarrayVariable(type='int32')
        r2 = CudaNdarrayVariable(type='int32')
        r3 = CudaNdarrayConstant(type=mytype,
                                 data=1)
        r4 = CudaNdarraySharedVariable(name='x', type=mytype,
                                       value=1, strict=False)
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)
        assert r3.auto_name == "auto_" + str(autoname_id + 2)
        assert r4.auto_name == "auto_" + str(autoname_id + 3)

    def test_randomvariable(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        mytype = tensor.TensorType(dtype='int32', broadcastable=())
        r1 = tensor.shared_randomstreams.RandomStateSharedVariable(name='x',
                                                                   type=mytype,
                                                                   value=1,
                                                                   strict=False)
        r2 = tensor.shared_randomstreams.RandomStateSharedVariable(name='x',
                                                                   type=mytype,
                                                                   value=1,
                                                                   strict=False)
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)

    def test_clone(self):
        # Get counter value
        autoname_id = next(Variable.__count__)
        Variable.__count__ = count(autoname_id)
        r1 = MyVariable(1)
        r2 = r1.clone()
        # A clone is a new Variable, so it consumes a fresh counter value.
        assert r1.auto_name == "auto_" + str(autoname_id)
        assert r2.auto_name == "auto_" + str(autoname_id + 1)
| mit |
fishroot/qdeep | lib/qdeep/objects/script/__init__.py | 1 | 6352 | # -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = 'patrick.michl@gmail.com'
__license__ = 'GPLv3'
import nemoa
import qdeep.objects.common
from PySide import QtGui, QtCore
class Editor(qdeep.objects.common.Editor):
    """Script editor window: a plain-text editor with Python syntax
    highlighting and a toolbar button to run the script through nemoa."""

    objType = 'script'

    def createCentralWidget(self):
        # Build the text area, hook up modification tracking, and install
        # the syntax highlighter.
        self.textArea = QtGui.QTextEdit()
        self.textArea.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAsNeeded)
        self.textArea.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAsNeeded)
        self.textArea.document().contentsChanged.connect(
            self.documentWasModified)
        # Fixed-pitch font, as is conventional for code.
        font = QtGui.QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        font.setPointSize(10)
        self.textArea.setFont(font)
        self.textArea.setAcceptDrops(True)
        self.highlighter = Highlighter(self.textArea.document())
        self.setCentralWidget(self.textArea)

    def createActions(self):
        self.actRunScript = QtGui.QAction(
            qdeep.common.getIcon('actions', 'debug-run.png'),
            "Run Script", self,
            shortcut = "F5",
            statusTip = "Run python script",
            triggered = self.runScript)

    def createToolBars(self):
        self.scriptToolBar = self.addToolBar("Script")
        self.scriptToolBar.addAction(self.actRunScript)

    def getModified(self):
        # The Qt document tracks its own dirty flag; delegate to it.
        return self.textArea.document().isModified()

    def setModified(self, value = True):
        self.textArea.document().setModified(value)

    def loadFile(self, fileName):
        """Load fileName into the editor; returns False (with a warning
        dialog) if the file cannot be opened for reading."""
        file = QtCore.QFile(fileName)
        if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
            QtGui.QMessageBox.warning(self, "MDI",
                "Cannot read file %s:\n%s." % (
                fileName, file.errorString()))
            return False
        instr = QtCore.QTextStream(file)
        self.textArea.setPlainText(instr.readAll())
        # setPlainText replaces the document, so re-connect the signal.
        self.textArea.document().contentsChanged.connect(
            self.documentWasModified)
        return True

    def saveFile(self, fileName):
        """Write the editor contents to fileName; returns False (with a
        warning dialog) if the file cannot be opened for writing."""
        file = QtCore.QFile(fileName)
        if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
            QtGui.QMessageBox.warning(self, "MDI",
                "Cannot write file %s:\n%s." % (fileName,
                file.errorString()))
            return False
        outstr = QtCore.QTextStream(file)
        # Show a busy cursor while writing (can be slow on network drives).
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        outstr << self.textArea.toPlainText()
        QtGui.QApplication.restoreOverrideCursor()
        self.setModified(False)
        self.updateWindowTitle()
        return True

    def runScript(self):
        # Execute the (saved) script by name through the nemoa runtime.
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        nemoa.run(self.getName())
        QtGui.QApplication.restoreOverrideCursor()
class Highlighter(QtGui.QSyntaxHighlighter):
    """Regex-based syntax highlighter.

    NOTE(review): the keyword list and the ``/* ... */`` multi-line comment
    handling are C/C++-flavoured even though the editor targets Python
    scripts -- presumably inherited from a Qt example; confirm before
    extending.
    """

    def __init__(self, parent=None):
        super(Highlighter, self).__init__(parent)

        # Language keywords: dark blue, bold.
        keywordFormat = QtGui.QTextCharFormat()
        keywordFormat.setForeground(QtCore.Qt.darkBlue)
        keywordFormat.setFontWeight(QtGui.QFont.Bold)
        keywordPatterns = ["\\bchar\\b", "\\bclass\\b", "\\bconst\\b",
            "\\bdouble\\b", "\\benum\\b", "\\bexplicit\\b", "\\bfriend\\b",
            "\\binline\\b", "\\bint\\b", "\\blong\\b", "\\bnamespace\\b",
            "\\boperator\\b", "\\bprivate\\b", "\\bprotected\\b",
            "\\bpublic\\b", "\\bshort\\b", "\\bsignals\\b", "\\bsigned\\b",
            "\\bslots\\b", "\\bstatic\\b", "\\bstruct\\b",
            "\\btemplate\\b", "\\btypedef\\b", "\\btypename\\b",
            "\\bunion\\b", "\\bunsigned\\b", "\\bvirtual\\b", "\\bvoid\\b",
            "\\bvolatile\\b", "\\bimport\\b", "\\bdef\\b",
            "\\bTrue\\b", "\\bFalse\\b", "\\breturn\\b"]

        self.highlightingRules = [(QtCore.QRegExp(pattern), keywordFormat)
                for pattern in keywordPatterns]

        # Qt-style class names (leading 'Q'): bold magenta.
        classFormat = QtGui.QTextCharFormat()
        classFormat.setFontWeight(QtGui.QFont.Bold)
        classFormat.setForeground(QtCore.Qt.darkMagenta)
        self.highlightingRules.append((QtCore.QRegExp("\\bQ[A-Za-z]+\\b"),
                classFormat))

        # '//' single-line comments: red.
        singleLineCommentFormat = QtGui.QTextCharFormat()
        singleLineCommentFormat.setForeground(QtCore.Qt.red)
        self.highlightingRules.append((QtCore.QRegExp("//[^\n]*"),
                singleLineCommentFormat))

        # '/* ... */' comments are handled statefully in highlightBlock().
        self.multiLineCommentFormat = QtGui.QTextCharFormat()
        self.multiLineCommentFormat.setForeground(QtCore.Qt.red)

        # String literals (double- and single-quoted): dark green.
        quotationFormat = QtGui.QTextCharFormat()
        quotationFormat.setForeground(QtCore.Qt.darkGreen)
        self.highlightingRules.append((QtCore.QRegExp("\".*\""),
                quotationFormat))
        self.highlightingRules.append((QtCore.QRegExp("'.*'"),
                quotationFormat))

        # Function/method names (identifier followed by '('): italic blue.
        functionFormat = QtGui.QTextCharFormat()
        functionFormat.setFontItalic(True)
        functionFormat.setForeground(QtCore.Qt.blue)
        self.highlightingRules.append((QtCore.QRegExp("\\b[A-Za-z0-9_]+(?=\\()"),
                functionFormat))

        self.commentStartExpression = QtCore.QRegExp("/\\*")
        self.commentEndExpression = QtCore.QRegExp("\\*/")

    def highlightBlock(self, text):
        # Apply every stateless rule to this block of text.
        for pattern, format in self.highlightingRules:
            expression = QtCore.QRegExp(pattern)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)

        # Stateful multi-line comment handling: block state 1 means "still
        # inside a /* ... */ comment" so the next block continues it.
        self.setCurrentBlockState(0)

        startIndex = 0
        if self.previousBlockState() != 1:
            startIndex = self.commentStartExpression.indexIn(text)

        while startIndex >= 0:
            endIndex = self.commentEndExpression.indexIn(text, startIndex)

            if endIndex == -1:
                self.setCurrentBlockState(1)
                commentLength = len(text) - startIndex
            else:
                commentLength = endIndex - startIndex + self.commentEndExpression.matchedLength()

            self.setFormat(startIndex, commentLength,
                    self.multiLineCommentFormat)
            startIndex = self.commentStartExpression.indexIn(text,
                    startIndex + commentLength)
| gpl-3.0 |
ak2703/edx-platform | common/djangoapps/dark_lang/migrations/0001_initial.py | 114 | 4801 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South migration: creates the dark_lang_darklangconfig
    # table.  The 'models' dict below is South's frozen ORM snapshot and
    # must not be edited by hand.

    def forwards(self, orm):
        # Adding model 'DarkLangConfig'
        db.create_table('dark_lang_darklangconfig', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
            ('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('released_languages', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('dark_lang', ['DarkLangConfig'])

    def backwards(self, orm):
        # Deleting model 'DarkLangConfig'
        db.delete_table('dark_lang_darklangconfig')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dark_lang.darklangconfig': {
            'Meta': {'object_name': 'DarkLangConfig'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'released_languages': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        }
    }

    complete_apps = ['dark_lang']
| agpl-3.0 |
bobeirasa/virtualenvs | pyzabbixhue/lib/python2.7/site-packages/pip/commands/bundle.py | 80 | 2156 | import textwrap
from pip.locations import build_prefix, src_prefix
from pip.util import display_path, backup_dir
from pip.log import logger
from pip.exceptions import InstallationError
from pip.commands.install import InstallCommand
class BundleCommand(InstallCommand):
    """Create pybundles (archives containing multiple packages).

    Subclasses the regular install command but redirects its build/src
    directories and flips the ``bundle`` flag so the downloaded
    requirement set is archived instead of installed.
    """
    name = 'bundle'
    usage = """
      %prog [options] <bundle name>.pybundle <package>..."""
    summary = 'Create pybundles.'
    # Signals InstallCommand machinery to produce a bundle archive.
    bundle = True

    def __init__(self, *args, **kw):
        super(BundleCommand, self).__init__(*args, **kw)
        # bundle uses different default source and build dirs
        build_opt = self.parser.get_option("--build")
        build_opt.default = backup_dir(build_prefix, '-bundle')
        src_opt = self.parser.get_option("--src")
        src_opt.default = backup_dir(src_prefix, '-bundle')
        # Push the new defaults into the parser so they apply even when the
        # user does not pass --build/--src explicitly.
        self.parser.set_defaults(**{
            src_opt.dest: src_opt.default,
            build_opt.dest: build_opt.default,
        })

    def run(self, options, args):
        """Emit the deprecation banner, then delegate to InstallCommand.run.

        The first positional argument is consumed as the output bundle
        filename; the remainder are treated as requirements to fetch.
        """
        deprecation = textwrap.dedent("""
            ###############################################
            ##                                           ##
            ##  Due to lack of interest and maintenance, ##
            ##  'pip bundle' and support for installing  ##
            ##  from *.pybundle files is now deprecated, ##
            ##  and will be removed in pip v1.5.         ##
            ##                                           ##
            ###############################################
            """)
        logger.notify(deprecation)
        if not args:
            raise InstallationError('You must give a bundle filename')
        # We have to get everything when creating a bundle:
        options.ignore_installed = True
        logger.notify('Putting temporary build files in %s and source/develop files in %s'
                      % (display_path(options.build_dir), display_path(options.src_dir)))
        # First positional arg is the target .pybundle path; the base class
        # reads self.bundle_filename when bundle=True.
        self.bundle_filename = args.pop(0)
        requirement_set = super(BundleCommand, self).run(options, args)
        return requirement_set
| mit |
sileht/deb-openstack-nova | nova/tests/integrated/test_servers.py | 5 | 16693 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import unittest
import nova.virt.fake
from nova.log import logging
from nova.tests.integrated import integrated_helpers
from nova.tests.integrated.api import client
LOG = logging.getLogger(__name__)
class ServersTest(integrated_helpers._IntegratedTestBase):
    """Integration tests for the servers API against the fake virt driver.

    All tests talk to a real API service wired to fake compute; helper
    methods poll the API because state transitions are asynchronous.
    """

    def _wait_for_state_change(self, server, from_status):
        """Poll (up to ~5s) until the server's status leaves *from_status*."""
        for i in xrange(0, 50):
            server = self.api.get_server(server['id'])
            if server['status'] != from_status:
                break
            time.sleep(.1)
        return server

    def _restart_compute_service(self, periodic_interval=None):
        """restart compute service. NOTE: fake driver forgets all instances."""
        self.compute.kill()
        if periodic_interval:
            self.compute = self.start_service(
                'compute', periodic_interval=periodic_interval)
        else:
            self.compute = self.start_service('compute')

    def test_get_servers(self):
        """Simple check that listing servers works."""
        servers = self.api.get_servers()
        for server in servers:
            LOG.debug("server: %s" % server)

    def test_create_server_with_error(self):
        """Create a server which will enter error state."""
        self.flags(stub_network=True)

        # Make the fake driver's spawn fail so the build errors out.
        def throw_error(*_):
            raise Exception()

        self.stubs.Set(nova.virt.fake.FakeConnection, 'spawn', throw_error)

        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        self.assertEqual('ERROR', found_server['status'])
        self._delete_server(created_server_id)

    def test_create_and_delete_server(self):
        """Creates and deletes a server."""
        self.flags(stub_network=True)

        # Create server
        # Build the server data gradually, checking errors along the way
        server = {}
        good_server = self._build_minimal_create_server_request()

        post = {'server': server}

        # Without an imageRef, this throws 500.
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        # With an invalid imageRef, this throws 500.
        server['imageRef'] = self.get_invalid_image()
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        # Add a valid imageRef
        server['imageRef'] = good_server.get('imageRef')

        # Without flavorRef, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        server['flavorRef'] = good_server.get('flavorRef')

        # Without a name, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server, post)

        # Set a valid server name
        server['name'] = good_server['name']

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [server['id'] for server in servers]
        self.assertTrue(created_server_id in server_ids)

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        # It should be available...
        # TODO(justinsb): Mock doesn't yet do this...
        self.assertEqual('ACTIVE', found_server['status'])
        servers = self.api.get_servers(detail=True)
        for server in servers:
            self.assertTrue("image" in server)
            self.assertTrue("flavor" in server)

        self._delete_server(created_server_id)

    def test_deferred_delete(self):
        """Creates, deletes and waits for server to be reclaimed."""
        self.flags(stub_network=True, reclaim_instance_interval=1)

        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval=0.3)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Cannot restore unless instance is deleted
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server_action, created_server_id,
                          {'restore': {}})

        # Cannot forceDelete unless instance is deleted
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server_action, created_server_id,
                          {'forceDelete': {}})

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('DELETED', found_server['status'])

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)

    def test_deferred_delete_restore(self):
        """Creates, deletes and restores a server."""
        self.flags(stub_network=True, reclaim_instance_interval=1)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('DELETED', found_server['status'])

        # Restore server
        self.api.post_server_action(created_server_id, {'restore': {}})

        # Wait for server to become active again
        found_server = self._wait_for_state_change(found_server, 'DELETED')
        self.assertEqual('ACTIVE', found_server['status'])

    def test_deferred_delete_force(self):
        """Creates, deletes and force deletes a server."""
        self.flags(stub_network=True, reclaim_instance_interval=1)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('DELETED', found_server['status'])

        # Force delete server
        self.api.post_server_action(created_server_id, {'forceDelete': {}})

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)

    def _wait_for_deletion(self, server_id):
        """Poll until GET on *server_id* 404s, then assert it is gone."""
        # Wait (briefly) for deletion
        for _retries in range(50):
            try:
                found_server = self.api.get_server(server_id)
            except client.OpenStackApiNotFoundException:
                found_server = None
                LOG.debug("Got 404, proceeding")
                break

            LOG.debug("Found_server=%s" % found_server)

            # TODO(justinsb): Mock doesn't yet do accurate state changes
            #if found_server['status'] != 'deleting':
            #    break
            time.sleep(.1)

        # Should be gone
        self.assertFalse(found_server)

    def _delete_server(self, server_id):
        """Delete *server_id* and block until it disappears from the API."""
        # Delete the server
        self.api.delete_server(server_id)
        self._wait_for_deletion(server_id)

    def test_create_server_with_metadata(self):
        """Creates a server with metadata."""
        self.flags(stub_network=True)

        # Build the server data gradually, checking errors along the way
        server = self._build_minimal_create_server_request()

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server['metadata'] = metadata

        post = {'server': server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers details list
        servers = self.api.get_servers(detail=True)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Details do include metadata
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers summary list
        servers = self.api.get_servers(detail=False)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Summary should not include metadata
        self.assertFalse(found_server.get('metadata'))

        # Cleanup
        self._delete_server(created_server_id)

    def test_create_and_rebuild_server(self):
        """Rebuild a server with metadata."""
        self.flags(stub_network=True)

        # create a server with initially has no metadata
        server = self._build_minimal_create_server_request()
        server_post = {'server': server}

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server_post['server']['metadata'] = metadata

        created_server = self.api.post_server(server_post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        created_server = self._wait_for_state_change(created_server, 'BUILD')

        # rebuild the server with metadata and other server attributes
        post = {}
        post['rebuild'] = {
            "imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "name": "blah",
            "accessIPv4": "172.19.0.2",
            "accessIPv6": "fe80::2",
            "metadata": {'some': 'thing'},
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild']['imageRef'],
                         found_server.get('image')['id'])
        self.assertEqual('172.19.0.2', found_server['accessIPv4'])
        self.assertEqual('fe80::2', found_server['accessIPv6'])

        # rebuild the server with empty metadata and nothing else
        post = {}
        post['rebuild'] = {
            "imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "metadata": {},
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild']['imageRef'],
                         found_server.get('image')['id'])
        self.assertEqual('172.19.0.2', found_server['accessIPv4'])
        self.assertEqual('fe80::2', found_server['accessIPv6'])

        # Cleanup
        self._delete_server(created_server_id)

    def test_rename_server(self):
        """Test building and renaming a server."""
        self.flags(stub_network=True)

        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        server_id = created_server['id']
        self.assertTrue(server_id)

        # Rename the server to 'new-name'
        self.api.put_server(server_id, {'server': {'name': 'new-name'}})

        # Check the name of the server
        created_server = self.api.get_server(server_id)
        self.assertEqual(created_server['name'], 'new-name')

        # Cleanup
        self._delete_server(server_id)

    def test_create_multiple_servers(self):
        """Creates multiple servers and checks for reservation_id"""

        # Create 2 servers, setting 'return_reservation_id, which should
        # return a reservation_id
        server = self._build_minimal_create_server_request()
        server['min_count'] = 2
        server['return_reservation_id'] = True
        post = {'server': server}
        response = self.api.post_server(post)
        self.assertIn('reservation_id', response)
        reservation_id = response['reservation_id']
        self.assertNotIn(reservation_id, ['', None])

        # Create 1 more server, which should not return a reservation_id
        server = self._build_minimal_create_server_request()
        post = {'server': server}
        created_server = self.api.post_server(post)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # lookup servers created by the first request.
        servers = self.api.get_servers(detail=True,
                search_opts={'reservation_id': reservation_id})
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        # The server from the 2nd request should not be there.
        self.assertEqual(found_server, None)
        # Should have found 2 servers.
        self.assertEqual(len(server_map), 2)

        # Cleanup
        self._delete_server(created_server_id)
        for server_id in server_map.iterkeys():
            self._delete_server(server_id)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
defionscode/ansible | lib/ansible/modules/cloud/azure/azure_rm_storageaccount_facts.py | 17 | 5827 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_storageaccount_facts
version_added: "2.1"
short_description: Get storage account facts.
description:
- Get facts for one storage account or all storage accounts within a resource group.
options:
name:
description:
- Only show results for a specific account.
resource_group:
description:
- Limit results to a resource group. Required when filtering by name.
aliases:
- resource_group_name
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one account
azure_rm_storageaccount_facts:
resource_group: Testing
name: clh0002
- name: Get facts for all accounts in a resource group
azure_rm_storageaccount_facts:
resource_group: Testing
- name: Get facts for all accounts by tags
azure_rm_storageaccount_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_storageaccounts:
description: List of storage account dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001",
"location": "eastus2",
"name": "testaccount001",
"properties": {
"accountType": "Standard_LRS",
"creationTime": "2016-03-28T02:46:58.290113Z",
"primaryEndpoints": {
"blob": "https://testaccount001.blob.core.windows.net/",
"file": "https://testaccount001.file.core.windows.net/",
"queue": "https://testaccount001.queue.core.windows.net/",
"table": "https://testaccount001.table.core.windows.net/"
},
"primaryLocation": "eastus2",
"provisioningState": "Succeeded",
"statusOfPrimary": "Available"
},
"tags": {},
"type": "Microsoft.Storage/storageAccounts"
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'StorageAccount'
class AzureRMStorageAccountFacts(AzureRMModuleBase):
    """Gather facts about Azure storage accounts.

    Scope is chosen from the supplied parameters:
      - ``name`` (with ``resource_group``): a single account,
      - ``resource_group`` alone: all accounts in that group,
      - neither: every account in the subscription.
    Matching accounts (optionally filtered by ``tags``) are returned under
    ``ansible_facts.azure_storageaccounts``.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str', aliases=['resource_group_name']),
            tags=dict(type='list'),
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_storageaccounts=[])
        )

        # Populated from module parameters by exec_module().
        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMStorageAccountFacts, self).__init__(self.module_arg_spec,
                                                         supports_tags=False,
                                                         facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point invoked by the base class with resolved parameters."""

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_facts']['azure_storageaccounts'] = self.get_account()
        elif self.resource_group:
            self.results['ansible_facts']['azure_storageaccounts'] = self.list_resource_group()
        else:
            self.results['ansible_facts']['azure_storageaccounts'] = self.list_all()

        return self.results

    def get_account(self):
        """Return a single-element list of facts for the named account, or []."""
        self.log('Get properties for account {0}'.format(self.name))
        account = None
        result = []
        try:
            account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
        except CloudError:
            # A missing account is not an error for a facts module; return [].
            pass

        if account and self.has_tags(account.tags, self.tags):
            result = [self.serialize_obj(account, AZURE_OBJECT_CLASS)]
        return result

    def list_resource_group(self):
        """Return facts for all accounts in self.resource_group."""
        self.log('List items')
        try:
            response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group)
        except Exception as exc:
            self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
        return results

    def list_all(self):
        """Return facts for every storage account in the subscription."""
        self.log('List all items')
        try:
            # Bug fix: this branch only runs when no resource_group was given,
            # so it must use the subscription-wide list(); the previous call to
            # list_by_resource_group(None) could not list all accounts.
            response = self.storage_client.storage_accounts.list()
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
        return results
def main():
    # Instantiating the facts class runs the module: the AzureRMModuleBase
    # constructor parses parameters and calls exec_module().
    AzureRMStorageAccountFacts()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ramanajee/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/user.py | 128 | 6864 | # Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass
import logging
import os
import platform
import re
import shlex
import subprocess
import sys
import webbrowser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.platforminfo import PlatformInfo
_log = logging.getLogger(__name__)
try:
import readline
except ImportError:
if sys.platform != "win32":
# There is no readline module for win32, not much to do except cry.
_log.warn("Unable to import readline.")
class User(object):
    """Interactive console helpers: prompting, list selection, editing,
    paging and opening URLs.

    NOTE(review): this class predates Python 3 (print statements,
    ``except Error, e`` syntax, ``raw_input``) and must run under Python 2.
    """

    DEFAULT_NO = 'n'
    DEFAULT_YES = 'y'

    def __init__(self, platforminfo=None):
        # We cannot get the PlatformInfo object from a SystemHost because
        # User is part of SystemHost itself.
        self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())

    # FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
    @classmethod
    def prompt(cls, message, repeat=1, raw_input=raw_input):
        """Ask *message* up to *repeat* times; return the first non-empty reply
        (or the last reply, which may be empty, when attempts run out)."""
        response = None
        while (repeat and not response):
            repeat -= 1
            response = raw_input(message)
        return response

    @classmethod
    def prompt_password(cls, message, repeat=1):
        """Like prompt(), but reads without echoing via getpass."""
        return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass)

    @classmethod
    def prompt_with_multiple_lists(cls, list_title, subtitles, lists, can_choose_multiple=False, raw_input=raw_input):
        """Print several captioned lists under one running numbering and let
        the user pick from the combined list."""
        item_index = 0
        cumulated_list = []
        print list_title
        for i in range(len(subtitles)):
            print "\n" + subtitles[i]
            for item in lists[i]:
                item_index += 1
                print "%2d. %s" % (item_index, item)
            cumulated_list += lists[i]
        return cls._wait_on_list_response(cumulated_list, can_choose_multiple, raw_input)

    @classmethod
    def _wait_on_list_response(cls, list_items, can_choose_multiple, raw_input):
        # Loop until the user enters a parsable selection; bad input retries.
        while True:
            if can_choose_multiple:
                response = cls.prompt("Enter one or more numbers (comma-separated) or ranges (e.g. 3-7), or \"all\": ", raw_input=raw_input)
                if not response.strip() or response == "all":
                    return list_items

                try:
                    indices = []
                    for value in re.split("\s*,\s*", response):
                        parts = value.split('-')
                        if len(parts) == 2:
                            # A range like "3-7": 1-based, inclusive of both ends.
                            indices += range(int(parts[0]) - 1, int(parts[1]))
                        else:
                            indices.append(int(value) - 1)
                except ValueError, err:
                    continue

                return [list_items[i] for i in indices]
            else:
                try:
                    result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1
                except ValueError, err:
                    continue

                return list_items[result]

    @classmethod
    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
        """Print one numbered list and return the chosen item(s)."""
        print list_title
        i = 0
        for item in list_items:
            i += 1
            print "%2d. %s" % (i, item)
        return cls._wait_on_list_response(list_items, can_choose_multiple, raw_input)

    def edit(self, files):
        """Open *files* in $EDITOR (default vi) and wait for it to exit."""
        editor = os.environ.get("EDITOR") or "vi"
        args = shlex.split(editor)
        # Note: Not thread safe: http://bugs.python.org/issue2320
        subprocess.call(args + files)

    def _warn_if_application_is_xcode(self, edit_application):
        # Xcode does not block until the file is closed, so warn the user.
        if "Xcode" in edit_application:
            print "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\"."

    def edit_changelog(self, files):
        """Edit ChangeLog files, preferring $CHANGE_LOG_EDIT_APPLICATION on Mac;
        fall back to the plain edit() path otherwise."""
        edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION")
        if edit_application and self._platforminfo.is_mac():
            # On Mac we support editing ChangeLogs using an application.
            args = shlex.split(edit_application)
            print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable."
            print "Please quit the editor application when done editing."
            self._warn_if_application_is_xcode(edit_application)
            subprocess.call(["open", "-W", "-n", "-a"] + args + files)
            return
        self.edit(files)

    def page(self, message):
        """Pipe *message* through $PAGER (default less); ignore pipe errors."""
        pager = os.environ.get("PAGER") or "less"
        try:
            # Note: Not thread safe: http://bugs.python.org/issue2320
            child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
            child_process.communicate(input=message)
        except IOError, e:
            pass

    def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input):
        """Ask a yes/no question; an empty answer selects *default*."""
        if not message:
            message = "Continue?"
        choice = {'y': 'Y/n', 'n': 'y/N'}[default]
        response = raw_input("%s [%s]: " % (message, choice))
        if not response:
            response = default
        return response.lower() == 'y'

    def can_open_url(self):
        """Return True if a web browser is available to open URLs."""
        try:
            webbrowser.get()
            return True
        except webbrowser.Error, e:
            return False

    def open_url(self, url):
        """Open *url* in the default browser; warn (but still try) otherwise."""
        if not self.can_open_url():
            _log.warn("Failed to open %s" % url)
        webbrowser.open(url)
| bsd-3-clause |
tedder/ansible-modules-core | cloud/digital_ocean/digital_ocean_sshkey.py | 31 | 5129 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean_sshkey:
state: present
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
client_id: XXX
api_key: XXX
'''
import os
import time
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
class TimeoutError(DoError):
    """DoError raised when an operation exceeds its deadline.

    Carries the id of the resource that timed out so callers (see main())
    can include it in the failure result.
    """

    def __init__(self, msg, id):
        super(TimeoutError, self).__init__(msg)
        # Remember the offending resource id for error reporting.
        self.id = id
class JsonfyMixIn(object):
    """Mix-in exposing an object's attributes as a JSON-ready dict."""

    def to_json(self):
        """Return this instance's attribute dictionary."""
        return vars(self)
class SSH(JsonfyMixIn):
    # Shared DoManager API client; must be populated via setup() before any
    # API-touching method is used.
    manager = None

    def __init__(self, ssh_key_json):
        # Adopt the API's JSON fields directly as instance attributes
        # (e.g. self.id, self.name).
        self.__dict__.update(ssh_key_json)
    # Re-applying a JSON payload refreshes the attributes in place.
    update_attr = __init__

    def destroy(self):
        """Delete this key via the DigitalOcean API; always returns True."""
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        """Initialise the shared API client used by all class methods."""
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        """Return the account key named *name*, or False if absent/empty name."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        """Return all of the account's SSH keys wrapped as SSH instances."""
        json = cls.manager.all_ssh_keys()
        return map(cls, json)

    @classmethod
    def add(cls, name, key_pub):
        """Register public key *key_pub* under *name*; return the wrapper."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Ensure the requested SSH-key state and exit via module.exit_json.

    Reads credentials from module params or the DO_CLIENT_ID / DO_API_KEY
    environment variables, then creates or deletes the named key.  All
    paths end in module.exit_json()/fail_json(), which raise SystemExit.
    """
    def getkeyordie(k):
        # Fetch a required parameter or abort the module run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        # str(e) works on Python 2 and 3; the old e.message attribute was
        # removed in Python 3 and would raise AttributeError here.
        module.fail_json(msg='Unable to load %s' % str(e))

    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    # Note: the original used `state in ('present')`, which is substring
    # matching against the *string* 'present'; explicit equality is correct.
    if state == 'present':
        key = SSH.find(name)
        if key:
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state == 'absent':
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)
def main():
    """Module entry point: build the AnsibleModule and dispatch to core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )
    if not HAS_DOPY:
        module.fail_json(msg='dopy required for this module')

    try:
        core(module)
    except TimeoutError as e:
        # Surface the id of the resource that timed out alongside the message.
        module.fail_json(msg=str(e), id=e.id)
    # NOTE(review): (DoError, Exception) is equivalent to catching Exception;
    # the DoError entry is redundant but kept for documentation value.
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
assefay/inasafe | realtime/rt_exceptions.py | 3 | 2599 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Realtime Exception Classes.**
Custom exception classes for the IS application.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tim@linfiniti.com'
__version__ = '0.5.0'
__revision__ = '$Format:%H$'
__date__ = '31/07/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
class FileNotFoundError(Exception):
    """Raised when an expected file cannot be located."""
class EventIdError(Exception):
    """Raised for a null or otherwise incorrect event id."""
class EventUndefinedError(Exception):
    """Raised when working with an event that has not been defined."""
class NetworkError(Exception):
    """Raised when fetching a remote resource fails."""
class EventValidationError(Exception):
    """Raised when an event is deemed invalid.

    Typically this means no matching event could be located on the server
    or in the local filesystem cache.
    """
class InvalidInputZipError(Exception):
    """Raised when the input (inp) zip archive is invalid."""
class InvalidOutputZipError(Exception):
    """Raised when the output (out) zip archive is invalid."""
class ExtractionError(Exception):
"""An exception for when something went wrong extracting the event and mi
datasets"""
pass
class ContourCreationError(Exception):
"""An exception for when creating contours from shakemaps goes wrong"""
pass
class GridXmlParseError(Exception):
"""An exception for when something went wrong parsing the grid.xml """
pass
class GridXmlFileNotFoundError(Exception):
"""An exception for when an grid.xml could not be found"""
pass
class InvalidLayerError(Exception):
"""Raised when a gis layer is invalid"""
pass
class ShapefileCreationError(Exception):
"""Raised if an error occurs creating the cities file"""
pass
class CityMemoryLayerCreationError(Exception):
"""Raised if an error occurs creating the cities memory layer"""
pass
class MapComposerError(Exception):
"""Raised if a problem occurs rendering a map"""
pass
class CopyError(Exception):
"""Raised if a problem occurs copying a file"""
pass
| gpl-3.0 |
Just-D/chromium-1 | tools/cr/cr/base/arch.py | 113 | 1544 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the basic architectures supported by cr."""
import cr
# Default configuration: forward the selected architecture ({CR_ARCH}) into
# the environment-setup variable consumed by the build scripts.
DEFAULT = cr.Config.From(
    CR_ENVSETUP_ARCH='{CR_ARCH}',
)
class Arch(cr.Plugin, cr.Plugin.Type):
  """Base class for implementing cr architecture targets.

  Subclasses set ACTIVE to the config applied when the architecture is
  selected; selection is driven by the CR_ARCH variable.
  """

  # The config/environment variable this plugin type selects on.
  SELECTOR = 'CR_ARCH'

  @classmethod
  def AddArguments(cls, parser):
    # Registers --architecture; the set of choices is the registered Arch
    # plugins, and an explicit flag value overrides the CR_ARCH selector.
    parser.add_argument(
        '--architecture', dest=cls.SELECTOR,
        choices=cls.Choices(),
        default=None,
        help='Sets the target architecture to use. Overrides ' + cls.SELECTOR
    )
class IA32Arch(Arch):
  """32-bit x86 target architecture."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='ia32',
  )


class Mips32Arch(Arch):
  """32-bit little-endian MIPS target architecture (Android only)."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='mipsel',
  )

  @property
  def enabled(self):
    # Only selectable when building for the Android platform.
    return cr.AndroidPlatform.GetInstance().is_active


class X64Arch(Arch):
  """64-bit x86 target architecture; the preferred default."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='x64',
  )

  @property
  def priority(self):
    # Bump priority so x64 wins over ia32 as the default choice.
    return super(X64Arch, self).priority + 1


class Arm32Arch(Arch):
  """32-bit ARM target architecture (Android only)."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm',
  )

  @property
  def priority(self):
    # Highest priority: preferred default when Android is active.
    return super(Arm32Arch, self).priority + 2

  @property
  def enabled(self):
    # Only selectable when building for the Android platform.
    return cr.AndroidPlatform.GetInstance().is_active


class Arm64Arch(Arch):
  """64-bit ARM target architecture (Android only)."""

  ACTIVE = cr.Config.From(
      CR_ENVSETUP_ARCH='arm64',
  )

  @property
  def enabled(self):
    # Only selectable when building for the Android platform.
    return cr.AndroidPlatform.GetInstance().is_active
| bsd-3-clause |
AEgisTG/jsbsim | tests/TestModelLoading.py | 3 | 7762 | # TestModelLoading.py
#
# A regression test that checks if the model inclusion with the attribute
# 'file=' is working.
#
# Copyright (c) 2014 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import os, sys, unittest
import xml.etree.ElementTree as et
from JSBSim_utils import Table, CreateFDM, ExecuteUntil, SandBox, append_xml, CopyAircraftDef
class TestModelLoading(unittest.TestCase):
    """Regression test: moving an aircraft <section> between inline XML and an
    external 'file=' include must not change simulation results."""

    def setUp(self):
        # Each test runs in a throw-away sandbox directory.
        self.sandbox = SandBox()

    def tearDown(self):
        self.sandbox.erase()

    def BuildReference(self, script_name):
        """Runs `script_name` unmodified and records the reference CSV output.

        Also copies the aircraft's input data into the sandbox so later steps
        can rewrite the aircraft definition without touching the originals.
        """
        # Run the script
        self.script = self.sandbox.path_to_jsbsim_file(os.path.join('scripts',
                                                                    script_name))
        self.sandbox.delete_csv_files()
        fdm = CreateFDM(self.sandbox)
        fdm.set_output_directive(self.sandbox.path_to_jsbsim_file('tests',
                                                                  'output.xml'))
        fdm.load_script(self.script)
        # Fixed seed so the reference and modified runs are bit-comparable.
        fdm.set_property_value('simulation/randomseed', 0.0)
        fdm.run_ic()
        ExecuteUntil(fdm, 50.0)

        self.ref = Table()
        self.ref.ReadCSV(self.sandbox("output.csv"))

        # Since the script will work with modified versions of the aircraft XML
        # definition file, we need to make a copy of the directory that contains
        # all the input data of that aircraft
        tree, self.aircraft_name, self.path_to_jsbsim_aircrafts = CopyAircraftDef(self.script, self.sandbox)
        self.aircraft_path = self.sandbox('aircraft', self.aircraft_name)

    def ProcessAndCompare(self, section):
        """Exercises both directions (inline -> file and file -> inline) for
        every tag named `section` in the aircraft definition."""
        # Here we determine if the original aircraft definition <section> is
        # inline or read from an external file.
        tree = et.parse(os.path.join(self.path_to_jsbsim_aircrafts,
                                     self.aircraft_name + '.xml'))
        root = tree.getroot()

        # Iterate over all the tags named <section>
        for section_element in root.findall(section):
            if 'file' in section_element.keys():
                self.InsertAndCompare(section_element, tree)
            else:
                self.DetachAndCompare(section_element, tree)

    def DetachAndCompare(self, section_element, tree):
        """Moves an inline <section> into its own XML file and re-runs."""
        # Extract <section> from the original aircraft definition file and copy
        # it in a separate XML file 'section.xml'
        section_tree = et.ElementTree(element=section_element)
        if 'name' in section_element.keys():
            section = section_element.attrib['name']
        else:
            section = section_element.tag
        section_tree.write(os.path.join(self.aircraft_path, section+'.xml'),
                           xml_declaration=True)

        # Now, we need to clean up the aircraft definition file from all
        # references to <section>. We just need a single <section> tag that
        # points to the file 'section.xml'
        for element in list(section_element):
            section_element.remove(element)
        section_element.attrib = {'file': section+'.xml'}
        tree.write(os.path.join(self.aircraft_path, self.aircraft_name+'.xml'),
                   xml_declaration=True)

        self.Compare(section)

    def InsertAndCompare(self, section_element, tree):
        """Inlines an external <section file='...'> back into the aircraft
        definition and re-runs."""
        file_name = append_xml(section_element.attrib['file'])
        section_file = os.path.join(self.path_to_jsbsim_aircrafts, file_name)

        # If <section> is actually <system>, we need to iterate over all the
        # directories in which the file is allowed to be stored until the file
        # is located.
        if not os.path.exists(section_file) and section_element.tag == 'system':
            section_file = os.path.join(self.path_to_jsbsim_aircrafts, "systems", file_name)
            if not os.path.exists(section_file):
                section_file = self.sandbox.elude(self.sandbox.path_to_jsbsim_file("systems", file_name))

        # The original <section> tag is dropped and replaced by the content of
        # the file.
        section_root = et.parse(section_file).getroot()
        del section_element.attrib['file']
        section_element.attrib.update(section_root.attrib)
        section_element.extend(section_root)

        tree.write(os.path.join(self.aircraft_path, self.aircraft_name+'.xml'))

        self.Compare(section_element.tag+" file:"+section_file)

    def Compare(self, section):
        """Re-runs the script with the rewritten aircraft definition and
        asserts the CSV output matches the reference exactly (precision 0.0)."""
        # Rerun the script with the modified aircraft definition
        self.sandbox.delete_csv_files()
        fdm = CreateFDM(self.sandbox)
        # We need to tell JSBSim that the aircraft definition is located in the
        # directory build/.../aircraft
        fdm.set_aircraft_path('aircraft')
        fdm.set_output_directive(self.sandbox.path_to_jsbsim_file('tests', 'output.xml'))
        fdm.load_script(self.script)
        fdm.set_property_value('simulation/randomseed', 0.0)
        fdm.run_ic()
        ExecuteUntil(fdm, 50.0)

        mod = Table()
        mod.ReadCSV(self.sandbox('output.csv'))

        # Whether the data is read from the aircraft definition file or from an
        # external file, the results shall be exactly identical. Hence the
        # precision set to 0.0.
        diff = self.ref.compare(mod, 0.0)
        self.assertTrue(diff.empty(),
                        msg='\nTesting section "'+section+'"\n'+repr(diff))

    def test_model_loading(self):
        self.longMessage = True

        self.BuildReference('c1724.xml')
        output_ref = Table()
        output_ref.ReadCSV(self.sandbox('JSBout172B.csv'))

        self.ProcessAndCompare('aerodynamics')
        self.ProcessAndCompare('autopilot')
        self.ProcessAndCompare('flight_control')
        self.ProcessAndCompare('ground_reactions')
        self.ProcessAndCompare('mass_balance')
        self.ProcessAndCompare('metrics')
        self.ProcessAndCompare('propulsion')
        self.ProcessAndCompare('system')

        # The <output> section needs special handling. In addition to the check
        # conducted by ProcessAndCompare with a directive file, we need to
        # verify that the <output> tag has been correctly executed by JSBSim.
        # In the case of the script c1724.xml, this means that the data output
        # in JSBout172B.csv is the same between the reference 'output_ref' and
        # the result 'mod' below where the <output> tag was moved in a separate
        # file.
        self.ProcessAndCompare('output')
        mod = Table()
        mod.ReadCSV(self.sandbox('JSBout172B.csv'))
        diff = output_ref.compare(mod, 0.0)
        self.assertTrue(diff.empty(),
                        msg='\nTesting section "output"\n'+repr(diff))

        self.BuildReference('weather-balloon.xml')
        self.ProcessAndCompare('buoyant_forces')

        self.BuildReference('Concorde_runway_test.xml')
        self.ProcessAndCompare('external_reactions')
# Run the suite immediately when the module is executed (this file predates a
# pytest-style runner) and turn any failure into a non-zero exit status so the
# build system notices.
suite = unittest.TestLoader().loadTestsFromTestCase(TestModelLoading)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
if test_result.failures or test_result.errors:
    sys.exit(-1)  # 'make test' will report the test failed.
| lgpl-2.1 |
fabianfreyer/ansible | lib/ansible/plugins/lookup/redis_kv.py | 69 | 2504 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(LookupBase):
    """Look up values via a Redis GET.

    Each term is a "<url>,<key>" pair.  An empty URL defaults to
    redis://localhost:6379.  Returns one value per term; a missing key or a
    failed connection yields an empty string (best-effort semantics).
    """

    def run(self, terms, variables, **kwargs):
        if not HAVE_REDIS:
            raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")

        if not isinstance(terms, list):
            terms = [terms]

        ret = []
        for term in terms:
            (url, key) = term.split(',')
            if url == "":
                url = 'redis://localhost:6379'

            # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
            # Redis' from_url() doesn't work here.
            p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
            try:
                m = re.search(p, url)
                host = m.group('host')
                # Bug fix: a URL without an explicit port (e.g. redis://myhost)
                # matched with an empty 'port' group, and int('') raised an
                # uncaught ValueError instead of a clean lookup error.  Fall
                # back to the default Redis port in that case.
                port_text = m.group('port')
                port = int(port_text) if port_text else 6379
            except (AttributeError, ValueError):
                raise AnsibleError("Bad URI in redis lookup")

            try:
                conn = redis.Redis(host=host, port=port)
                res = conn.get(key)
                if res is None:
                    res = ""
                ret.append(res)
            # Bug fix: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.  Catch Exception instead.
            except Exception:
                ret.append("")  # connection failed or key not found
        return ret
| gpl-3.0 |
renhaoqi/gem5-stable | util/checkpoint_aggregator.py | 64 | 6198 | # Copyright (c) 2009 The Regents of The University of Michigan
# Copyright (c) 2011 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
# Nilay Vaish
from ConfigParser import ConfigParser
import gzip
import sys, re, os
class myCP(ConfigParser):
    """A ConfigParser that preserves option-name case.

    The stock ConfigParser lower-cases option names via optionxform();
    gem5 checkpoint keys are case-sensitive, so use the identity instead.
    """

    def __init__(self):
        ConfigParser.__init__(self)

    def optionxform(self, optionstr):
        # Identity transform: keep option names exactly as read/written.
        return optionstr
def aggregate(output_dir, cpts, no_compress, memory_size):
    """Merges several single-process gem5 checkpoints into one.

    Concatenates the physical-memory images of the checkpoints in `cpts`
    (re-basing each checkpoint's physical addresses by the running page
    pointer), renames per-cpu config sections so they do not collide, and
    writes the combined m5.cpt / pmem pair into `output_dir`.  The memory
    image is padded with zero pages up to `memory_size` bytes.
    """
    merged_config = None
    # Running count of 4K pages already emitted; used to re-base paddr fields.
    page_ptr = 0

    output_path = output_dir
    if not os.path.isdir(output_path):
        os.system("mkdir -p " + output_path)

    agg_mem_file = open(output_path + "/system.physmem.store0.pmem", "wb+")
    agg_config_file = open(output_path + "/m5.cpt", "wb+")

    if not no_compress:
        merged_mem = gzip.GzipFile(fileobj= agg_mem_file, mode="wb")

    max_curtick = 0
    # Width for zero-padded cpu indices (cpu0 -> cpu00, ... for many cpts).
    num_digits = len(str(len(cpts)-1))

    for (i, arg) in enumerate(cpts):
        print arg
        merged_config = myCP()
        config = myCP()
        config.readfp(open(cpts[i] + "/m5.cpt"))

        for sec in config.sections():
            if re.compile("cpu").search(sec):
                # Rename each per-cpu section with the checkpoint index so
                # sections from different checkpoints do not collide.
                newsec = re.sub("cpu", "cpu" + str(i).zfill(num_digits), sec)
                merged_config.add_section(newsec)

                items = config.items(sec)
                for item in items:
                    if item[0] == "paddr":
                        # Re-base the physical address by the pages already
                        # consumed by earlier checkpoints (4K pages).
                        merged_config.set(newsec, item[0], int(item[1]) + (page_ptr << 12))
                        continue
                    merged_config.set(newsec, item[0], item[1])

                if re.compile("workload.FdMap256$").search(sec):
                    # Give each merged process a distinct M5 pid.
                    merged_config.set(newsec, "M5_pid", i)

            elif sec == "system":
                # Rebuilt from scratch after the loop; skip per-checkpoint copy.
                pass
            elif sec == "Globals":
                # Keep the latest tick across all checkpoints.
                tick = config.getint(sec, "curTick")
                if tick > max_curtick:
                    max_curtick = tick
            else:
                # Non-cpu sections are taken from the last checkpoint only.
                if i == len(cpts)-1:
                    merged_config.add_section(sec)
                    for item in config.items(sec):
                        merged_config.set(sec, item[0], item[1])

        if i != len(cpts)-1:
            # Flush the renamed cpu sections of this checkpoint; the final
            # iteration is written after the system/Globals sections are added.
            merged_config.write(agg_config_file)

        ### memory stuff
        pages = int(config.get("system", "pagePtr"))
        page_ptr = page_ptr + pages
        print "pages to be read: ", pages

        f = open(cpts[i] + "/system.physmem.store0.pmem", "rb")
        gf = gzip.GzipFile(fileobj=f, mode="rb")

        # Copy the checkpoint's memory image one 4K page at a time.
        x = 0
        while x < pages:
            bytesRead = gf.read(1 << 12)
            if not no_compress:
                merged_mem.write(bytesRead)
            else:
                agg_mem_file.write(bytesRead)
            x += 1

        gf.close()
        f.close()

    merged_config.add_section("system")
    merged_config.set("system", "pagePtr", page_ptr)
    merged_config.set("system", "nextPID", len(cpts))

    # Pad the memory image with zero pages up to the requested size.
    file_size = page_ptr * 4 * 1024
    dummy_data = "".zfill(4096)
    while file_size < memory_size:
        if not no_compress:
            merged_mem.write(dummy_data)
        else:
            agg_mem_file.write(dummy_data)
        file_size += 4 * 1024
        page_ptr += 1

    print "WARNING: "
    print "Make sure the simulation using this checkpoint has at least ",
    print page_ptr, "x 4K of memory"
    merged_config.set("system.physmem.store0", "range_size", page_ptr * 4 * 1024)

    merged_config.add_section("Globals")
    merged_config.set("Globals", "curTick", max_curtick)

    merged_config.write(agg_config_file)
    if not no_compress:
        merged_mem.close()
        agg_mem_file.close()
    else:
        agg_mem_file.close()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser("usage: %prog [options] <directory names which "\
"hold the checkpoints to be combined>")
parser.add_argument("-o", "--output-dir", action="store",
help="Output directory")
parser.add_argument("-c", "--no-compress", action="store_true")
parser.add_argument("--cpts", nargs='+')
parser.add_argument("--memory-size", action="store", type=int)
# Assume x86 ISA. Any other ISAs would need extra stuff in this script
# to appropriately parse their page tables and understand page sizes.
options = parser.parse_args()
print options.cpts, len(options.cpts)
if len(options.cpts) <= 1:
parser.error("You must specify atleast two checkpoint files that "\
"need to be combined.")
aggregate(options.output_dir, options.cpts, options.no_compress,
options.memory_size)
| bsd-3-clause |
google-research/language | language/conpono/reconstruct/model_builder.py | 1 | 9078 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the paragraph reconstruction model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from bert import modeling
import tensorflow.compat.v1 as tf
from tensorflow.contrib import seq2seq as contrib_seq2seq
class FixedSizeInferenceHelper(contrib_seq2seq.InferenceHelper):
  """Feeds in the output of the decoder at each step for fixed size.

  The decode length is bounded externally via `maximum_iterations` in
  `dynamic_decode`, so this helper never signals completion itself.
  """

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """next_inputs_fn for TrainingHelper.

    Returns:
      (finished, next_inputs, next_state): `finished` is all-False since
      termination is handled by `maximum_iterations`; the sampled ids are fed
      back as the next decoder inputs.
    """
    # Bug fix: `finished` was referenced but never defined, raising a
    # NameError at graph-construction time.  Emit an all-False batch-shaped
    # bool tensor instead (decoding stops via maximum_iterations).
    finished = tf.tile([False], [self.batch_size])
    return (finished, sample_ids, state)
def create_model(model,
                 labels,
                 decoder_inputs,
                 batch_size,
                 model_type="decode",
                 sep_positions=None):
  """Creates a classification model.

  Builds one of three heads on top of BERT for paragraph-order prediction:
  an LSTM decoder ("decode"), a classifier over [SEP] token vectors
  ("pooled"), or an attention head over position embeddings ("attn").

  Args:
    model: the BERT model from modeling.py
    labels: ground truth paragraph order
    decoder_inputs: the input to the decoder if used
    batch_size: the batch size
    model_type: one of decode, pooled, attn
    sep_positions: (optional) for "pooled" indecies of SEP tokens

  Returns:
    tuple of (loss, per_example_loss, logits, probabilities) for model
  """
  output_layer = model.get_pooled_output()
  hidden_size = output_layer.shape[-1].value
  # Dynamic (graph-time) batch size; `batch_size` is the static value used
  # only for loss normalization.
  tpu_batch_size = tf.shape(output_layer)[0]
  num_labels = 5  # GOOGLE-INTERNAL TODO(daniter) this shouldn't be hardcoded

  with tf.variable_scope("paragraph_reconstruct"):
    if model_type == "decode":
      lstm_cell = tf.nn.rnn_cell.LSTMCell(
          num_units=hidden_size, use_peepholes=True, state_is_tuple=True)

      # Greedy sampling: feed back the argmax label id as the next input.
      def sample_fn(x):
        return tf.to_float(tf.reshape(tf.argmax(x, axis=-1), (-1, 1)))

      helper = FixedSizeInferenceHelper(
          sample_fn=sample_fn,
          sample_shape=[1],
          sample_dtype=tf.float32,
          start_inputs=decoder_inputs[:, 0],
          end_fn=None)

      # Decoder: initial state is the pooled BERT output (as both c and h).
      project_layer = tf.layers.Dense(
          num_labels, use_bias=False, name="output_projection")
      my_decoder = contrib_seq2seq.BasicDecoder(
          lstm_cell,
          helper,
          tf.nn.rnn_cell.LSTMStateTuple(output_layer, output_layer),
          output_layer=project_layer)

      # Dynamic decoding, fixed to 5 steps (one per paragraph position).
      outputs, _, _ = contrib_seq2seq.dynamic_decode(
          my_decoder,
          swap_memory=True,
          scope="paragraph_reconstruct",
          maximum_iterations=5)
      logits = outputs.rnn_output

      cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      per_example_loss = cross_ent
      loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
      probabilities = tf.nn.softmax(logits, axis=-1)

    # GOOGLE-INTERAL: TODO(daniter) currently neither of these actually train
    elif model_type == "pooled":
      token_embeddings = model.get_sequence_output()
      # sep positions come out batch by batch so we need to add the batch index
      # we do that explicitly here since we don't know the batch size in the
      # record decoder
      batch_idx = tf.range(tpu_batch_size)
      batch_idx = tf.reshape(batch_idx, [tpu_batch_size, 1])
      batch_idx = tf.tile(batch_idx, [1, 5])  # double check
      batch_idx = tf.reshape(batch_idx, [tpu_batch_size, 5, 1])
      # batch_idx = tf.Print(batch_idx, [batch_idx],
      #                      message="batch_idx", summarize=999999)
      # Pair each SEP position with its batch index for gather_nd.
      sep_positions = tf.concat([batch_idx, sep_positions], axis=2)
      # sep_positions = tf.Print(sep_positions, [sep_positions],
      #                          message="sep_positions", summarize=999999)
      sep_vecs = tf.gather_nd(token_embeddings, sep_positions)
      sep_vecs = tf.reshape(sep_vecs, [tpu_batch_size, 5, hidden_size])
      # sep_vecs = tf.Print(sep_vecs, [sep_vecs], message="sep_vecs",
      #                     summarize=999999)
      logits = tf.layers.dense(
          inputs=sep_vecs, units=num_labels, name="output_projection")
      # logits = tf.Print(logits, [logits], message="logits", summarize=999999)
      cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      per_example_loss = cross_ent
      loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
      probabilities = tf.nn.softmax(logits, axis=-1)

    elif model_type == "attn":
      # change size to match sequence embedding size
      input_consts = tf.constant([0, 1, 2, 3, 4])
      position_encoding = tf.broadcast_to(input_consts, [tpu_batch_size, 5])
      # position_encoding = tf.to_float(
      #     tf.reshape(position_encoding, (-1, 5, 1)))
      token_type_table = tf.get_variable(
          name="attention_embedding",
          shape=[5, 512],  # don't hardcode
          initializer=tf.truncated_normal_initializer(stddev=0.02))
      # This vocab will be small so we always do one-hot here, since it is
      # always faster for a small vocabulary.
      flat_token_type_ids = tf.reshape(position_encoding, [-1])
      one_hot_ids = tf.one_hot(flat_token_type_ids, depth=5)
      token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
      token_type_embeddings = tf.reshape(token_type_embeddings,
                                         [tpu_batch_size, 5, 512])
      token_embeddings = model.get_sequence_output()
      # Attend from the 5 learned position embeddings over the sequence.
      attn = modeling.attention_layer(token_type_embeddings, token_embeddings)
      attn = tf.reshape(attn, (-1, 5, 512))  # head size
      logits = tf.layers.dense(
          inputs=attn, units=num_labels, name="output_projection")
      cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      per_example_loss = cross_ent
      loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
      probabilities = tf.nn.softmax(logits, axis=-1)

  return (loss, per_example_loss, logits, probabilities)
def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the specific positions over a minibatch.

  Flattens `sequence_tensor` from [batch, seq, width] to
  [batch * seq, width], converts the per-example `positions` into indices
  into that flat tensor, and gathers the corresponding rows.
  """
  shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
  batch, length, depth = shape[0], shape[1], shape[2]

  # Offset of each example's first row inside the flattened tensor.
  row_offsets = tf.reshape(
      tf.range(0, batch, dtype=tf.int32) * length, [-1, 1])
  flat_positions = tf.reshape(positions + row_offsets, [-1])
  flattened = tf.reshape(sequence_tensor, [batch * length, depth])
  return tf.gather(flattened, flat_positions)
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
                         label_ids, label_weights):
  """Get loss and log probs for the masked LM.

  Args:
    bert_config: BertConfig with hidden_size, hidden_act, vocab_size, etc.
    input_tensor: [batch, seq, hidden] sequence output from BERT.
    output_weights: word embedding table, tied with the output layer.
    positions: [batch, num_predictions] masked token positions.
    label_ids: [batch, num_predictions] true token ids at those positions.
    label_weights: 1.0 for real predictions, 0.0 for padding entries.

  Returns:
    tuple of (loss, per_example_loss, log_probs).
  """
  input_tensor = gather_indexes(input_tensor, positions)

  with tf.variable_scope("cls/predictions"):
    # We apply one more non-linear transformation before the output layer.
    # This matrix is not used after pre-training.
    with tf.variable_scope("transform"):
      input_tensor = tf.layers.dense(
          input_tensor,
          units=bert_config.hidden_size,
          activation=modeling.get_activation(bert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              bert_config.initializer_range))
      input_tensor = modeling.layer_norm(input_tensor)

    # The output weights are the same as the input embeddings, but there is
    # an output-only bias for each token.
    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

    # The `positions` tensor might be zero-padded (if the sequence is too
    # short to have the maximum number of predictions). The `label_weights`
    # tensor has a value of 1.0 for every real prediction and 0.0 for the
    # padding predictions.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    # Small epsilon guards against division by zero when all weights are 0.
    denominator = tf.reduce_sum(label_weights) + 1e-5
    loss = numerator / denominator

  return (loss, per_example_loss, log_probs)
| apache-2.0 |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/werkzeug/exceptions.py | 148 | 18577 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
    """
    Baseclass for all HTTP exceptions. This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """

    # Numeric HTTP status code; concrete subclasses override this.
    code = None
    # Default human-readable description; subclasses override this.
    description = None

    def __init__(self, description=None, response=None):
        Exception.__init__(self)
        if description is not None:
            self.description = description
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """This method returns a new subclass of the exception provided that
        also is a subclass of `BadRequest`.
        """
        class newcls(cls, exception):
            def __init__(self, arg=None, *args, **kwargs):
                cls.__init__(self, *args, **kwargs)
                exception.__init__(self, arg)
        # Report the new class as belonging to the caller's module.
        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name."""
        # NOTE(review): HTTP_STATUS_CODES (and `escape` below) appear to be
        # imported elsewhere in this module -- confirm against the full file.
        return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')

    def get_description(self, environ=None):
        """Get the description."""
        return u'<p>%s</p>' % escape(self.description)

    def get_body(self, environ=None):
        """Get the HTML body."""
        return text_type((
            u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            u'<title>%(code)s %(name)s</title>\n'
            u'<h1>%(name)s</h1>\n'
            u'%(description)s\n'
        ) % {
            'code': self.code,
            'name': escape(self.name),
            'description': self.get_description(environ)
        })

    def get_headers(self, environ=None):
        """Get a list of headers."""
        return [('Content-Type', 'text/html')]

    def get_response(self, environ=None):
        """Get a response object.  If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request.  This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        return '%d: %s' % (self.code, self.name)

    def __repr__(self):
        return '<%s \'%s\'>' % (self.__class__.__name__, self)
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    code = 400
    description = (
        'The browser (or proxy) sent a request that this server could '
        'not understand.'
    )


class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client.  Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server.  Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """
    # Subclass of BadRequest (not HTTPException) so generic 400 handling
    # still applies; inherits code 400 and its description.


class SecurityError(BadRequest):
    """Raised if something triggers a security error.  This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """
class Unauthorized(HTTPException):
    """*401* `Unauthorized`

    Raise if the user is not authorized.  Also used if you want to use HTTP
    basic auth.
    """
    code = 401
    description = (
        'The server could not verify that you are authorized to access '
        'the URL requested.  You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.'
    )


class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    code = 403
    description = (
        'You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.'
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """
    code = 404
    description = (
        'The requested URL was not found on the server. '
        'If you entered the URL manually please check your spelling and '
        'try again.'
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle.  For
    example `POST` if the resource is view only.  Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """
    code = 405
    description = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ):
        # Base headers first; advertise the permitted methods via `Allow`
        # when a list was supplied, as required by RFC 7231 for 405.
        headers = HTTPException.get_headers(self, environ)
        if not self.valid_methods:
            return headers
        allow_value = ', '.join(self.valid_methods)
        headers.append(('Allow', allow_value))
        return headers
# 406: no representation satisfies the client's Accept headers.
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`
    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406
    description = (
        'The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.'
    )
# 408: the client did not finish sending the request in time.
class RequestTimeout(HTTPException):
    """*408* `Request Timeout`
    Raise to signalize a timeout.
    """
    code = 408
    description = (
        'The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.'
    )
# 409: the request conflicts with current server-side state.
class Conflict(HTTPException):
    """*409* `Conflict`
    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.
    .. versionadded:: 0.7
    """
    code = 409
    description = (
        'A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.'
    )
# 410: the resource existed once and is permanently gone.
class Gone(HTTPException):
    """*410* `Gone`
    Raise if a resource existed previously and went away without new location.
    """
    code = 410
    description = (
        'The requested URL is no longer available on this server and there '
        'is no forwarding address. If you followed a link from a foreign '
        'page, please contact the author of this page.'
    )
# 411: a Content-Length header is mandatory for this request.
class LengthRequired(HTTPException):
    """*411* `Length Required`
    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """
    code = 411
    description = (
        'A request with this method requires a valid <code>Content-'
        'Length</code> header.'
    )
# 412: a conditional-request precondition evaluated false.
class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`
    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """
    code = 412
    description = (
        'The precondition on the request for the URL failed positive '
        'evaluation.'
    )
# 413: the request body exceeds the configured size limit.
class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`
    The status code one should return if the data submitted exceeded a given
    limit.
    """
    code = 413
    description = (
        'The data value transmitted exceeds the capacity limit.'
    )
# 414: like 413 but for an over-long request URL.
class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`
    Like *413* but for too long URLs.
    """
    code = 414
    description = (
        'The length of the requested URL exceeds the capacity limit '
        'for this server. The request cannot be processed.'
    )
# 415: the request body's media type is not supported.
class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`
    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """
    code = 415
    description = (
        'The server does not support the media type transmitted in '
        'the request.'
    )
# 416: the requested byte range lies outside the resource.
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`
    The client asked for a part of the file that lies beyond the end
    of the file.
    .. versionadded:: 0.7
    """
    code = 416
    description = (
        'The server cannot provide the requested range.'
    )
# 417: the Expect request header cannot be met.
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`
    The server cannot meet the requirements of the Expect request-header.
    .. versionadded:: 0.7
    """
    code = 417
    description = (
        'The server could not meet the requirements of the Expect header'
    )
# 418: the RFC 2324 April-fools status code.
class ImATeapot(HTTPException):
    """*418* `I'm a teapot`
    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.
    .. versionadded:: 0.7
    """
    code = 418
    description = (
        'This server is a teapot, not a coffee machine'
    )
# 422: syntactically valid request with semantically invalid content.
class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`
    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """
    code = 422
    description = (
        'The request was well-formed but was unable to be followed '
        'due to semantic errors.'
    )
# 428: the request must be conditional (lost-update protection).
class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`
    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """
    code = 428
    description = (
        'This request is required to be conditional; try using "If-Match" '
        'or "If-Unmodified-Since".'
    )
# 429: the client exceeded its rate limit.
class TooManyRequests(HTTPException):
    """*429* `Too Many Requests`
    The server is limiting the rate at which this user receives responses, and
    this request exceeds that rate. (The server may use any convenient method
    to identify users and their request rates). The server may include a
    "Retry-After" header to indicate how long the user should wait before
    retrying.
    """
    code = 429
    description = (
        'This user has exceeded an allotted request count. Try again later.'
    )
# 431: one header, or the whole header section, is too large.
class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`
    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """
    code = 431
    description = (
        'One or more header fields exceeds the maximum size.'
    )
# 500: generic server-side failure; good catch-all for dispatcher errors.
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`
    Raise if an internal server error occurred. This is a good fallback if an
    unknown error occurred in the dispatcher.
    """
    code = 500
    description = (
        'The server encountered an internal error and was unable to '
        'complete your request. Either the server is overloaded or there '
        'is an error in the application.'
    )
# 501: the requested action is not implemented.
# NOTE(review): this class name shadows the ``NotImplemented`` builtin
# inside this module.
class NotImplemented(HTTPException):
    """*501* `Not Implemented`
    Raise if the application does not support the action requested by the
    browser.
    """
    code = 501
    description = (
        'The server does not support the action requested by the '
        'browser.'
    )
# 502: an upstream server returned an invalid response to this proxy.
class BadGateway(HTTPException):
    """*502* `Bad Gateway`
    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """
    code = 502
    description = (
        'The proxy server received an invalid response from an upstream '
        'server.'
    )
# 503: the service is temporarily unavailable.
class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`
    Status code you should return if a service is temporarily unavailable.
    """
    code = 503
    description = (
        'The server is temporarily unable to service your request due to '
        'maintenance downtime or capacity problems. Please try again '
        'later.'
    )
# 504: an upstream connection timed out.
class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`
    Status code you should return if a connection to an upstream server
    times out.
    """
    code = 504
    description = (
        'The connection to an upstream server timed out.'
    )
# 505: the request used an unsupported HTTP protocol version.
class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`
    The server does not support the HTTP protocol version used in the request.
    """
    code = 505
    description = (
        'The server does not support the HTTP protocol version used in the '
        'request.'
    )
#: status code -> exception class map, populated from this module's
#: globals by _find_exceptions() below.
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
    # Register every HTTPException subclass defined in this module that
    # has a numeric code: export it via __all__ and index it by code.
    for name, obj in iteritems(globals()):
        try:
            is_http_exception = issubclass(obj, HTTPException)
        except TypeError:
            # issubclass() raises TypeError for non-class globals; skip them.
            is_http_exception = False
        if not is_http_exception or obj.code is None:
            continue
        __all__.append(obj.__name__)
        old_obj = default_exceptions.get(obj.code, None)
        if old_obj is not None and issubclass(obj, old_obj):
            # Keep the most general class per code: a subclass never
            # replaces the class already registered for the same code.
            continue
        default_exceptions[obj.code] = obj
_find_exceptions()
del _find_exceptions
class Aborter(object):
    """
    Raises HTTP exceptions on demand.  Built from a mapping of status
    code -> exception class (``default_exceptions`` unless another
    mapping is passed, optionally extended with ``extra``).  Calling the
    instance with an integer looks the code up and raises the matching
    exception; calling it with a WSGI application raises it wrapped in a
    proxy exception.  Remaining positional and keyword arguments are
    forwarded to the exception constructor.
    """
    def __init__(self, mapping=None, extra=None):
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)
        if extra is not None:
            self.mapping.update(extra)
    def __call__(self, code, *args, **kwargs):
        # A non-integer code with no extra arguments is treated as a
        # response object and raised through a proxy HTTPException.
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError('no exception for %r' % code)
        raise self.mapping[code](*args, **kwargs)
# Module-level convenience instance: ``abort(404)`` etc.
abort = Aborter()
#: an exception that is used internally to signal both a key error and a
#: bad request.  Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| mit |
quantosauros/cppyProject | cppy/cybosPlus/cpRqRp/StockOrderCash.py | 1 | 1827 | # coding=utf-8
'''
Created on 2016. 8. 14.
@author: Jay
'''
from cppy.adaptor import CpRqRpClass
import win32com.client
@CpRqRpClass('CpTrade.CpTd0311')
class StockOrderCash(object):
    '''
    Requests and receives cash-order (buy/sell) data for exchange-listed
    stocks, KOSDAQ stocks and ELWs via the CpTrade.CpTd0311 COM service.
    '''
    def __init__(self):
        # COM helper used for trade initialization and account lookup.
        self.instCpTdUtil = win32com.client.Dispatch("CpTrade.CpTdUtil")
    class InputType(enumerate):
        # Field indices passed to SetInputValue().
        SellOrBuy = 0 # order type code (1: sell, 2: buy)
        AccountNumber = 1 # account number
        StockCode = 3 # stock code
        OrderNumber = 4 # order quantity
        OrderPrice = 5 # order unit price
    class OutputType(enumerate):
        # Field indices passed to GetHeaderValue().
        AccountNumber = 1 # account number
        StockCode = 3 # stock code
        OrderNumber = 4 # order quantity
        OrderPrice = 5 # order unit price
    def setInputValue(self, inputTypes, inputValues):
        # Store the request field indices and the values to send for them.
        self.inputTypes = inputTypes
        self.inputValues = inputValues
    def setOutputValue(self, outputTypes):
        # Store the response field indices to read back in response().
        self.outputTypes = outputTypes
    def request(self, com_obj):
        # Initialize the trade session, push all configured input fields,
        # then send the request.  The account number is always overwritten
        # with the first account reported by CpTdUtil.
        self.instCpTdUtil.TradeInit()
        for i in range(len(self.inputTypes)) :
            com_obj.SetInputValue(self.inputTypes[i], self.inputValues[i])
        # account number
        accountNumber = self.instCpTdUtil.AccountNumber[0]
        com_obj.SetInputValue(1, accountNumber)
        com_obj.Request()
    def response(self, com_obj):
        # Print every requested response field, separated by '; '.
        result = ""
        for j in range(0, len(self.outputTypes)) :
            value = com_obj.GetHeaderValue(self.outputTypes[j])
            result += str(value) + "; "
        print (result)
| mit |
jakew02/sp3-linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3 # IBS event (not referenced by the classes below)
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
	"""Build the event object matching the raw-buffer layout.

	The raw-buffer size is the only discriminator available here:
	144 bytes -> plain PEBS record, 176 bytes -> PEBS record with the
	load-latency words appended, anything else -> generic perf event.
	"""
	size = len(raw_buf)
	if size == 144:
		factory = PebsEvent
	elif size == 176:
		factory = PebsNHM
	else:
		factory = PerfEvent
	return factory(name, comm, dso, symbol, raw_buf)
class PerfEvent(object):
	"""Base class for one perf event sample (Python 2 code: print statement)."""
	event_num = 0	# class-wide count of samples constructed so far
	def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
		self.name = name
		self.comm = comm
		self.dso = dso
		self.symbol = symbol
		self.raw_buf = raw_buf
		self.ev_type = ev_type
		PerfEvent.event_num += 1
	def show(self):
		# One-line human-readable dump of the sample's identity fields.
		print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
	"""PEBS sample: the first 80 raw bytes carry EFLAGS, IP and registers."""
	pebs_num = 0	# class-wide count of PEBS samples constructed so far
	def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
		tmp_buf=raw_buf[0:80]
		# Ten unsigned 64-bit words (native byte order): EFLAGS, linear
		# IP, then the general-purpose registers.
		flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
		self.flags = flags
		self.ip = ip
		self.ax = ax
		self.bx = bx
		self.cx = cx
		self.dx = dx
		self.si = si
		self.di = di
		self.bp = bp
		self.sp = sp
		PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
		PebsEvent.pebs_num += 1
		del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
	"""PEBS sample with the Nehalem/Westmere load-latency words appended."""
	pebs_nhm_num = 0	# class-wide count of load-latency samples constructed
	def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
		# The four extra 64-bit words (status, DLA, DSE, latency) sit
		# right after the 144-byte PEBS payload.
		tmp_buf=raw_buf[144:176]
		status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
		self.status = status
		self.dla = dla
		self.dse = dse
		self.lat = lat
		PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
		PebsNHM.pebs_nhm_num += 1
		del tmp_buf
| gpl-2.0 |
CubicERP/geraldo | site/newsite/site-geraldo/gae_wiki/models.py | 9 | 1607 | from appengine_django.models import BaseModel
from google.appengine.ext import db
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.core.cache import cache
from django.template.loader import render_to_string
from utils.custom_models import CustomModel
import app_settings
class Wiki(CustomModel):
    """Datastore model for a single wiki page."""
    title = db.StringProperty()
    slug = db.StringProperty()  # URL identifier; derived from title on first put()
    pub_date = db.DateTimeProperty(auto_now_add=True)
    description = db.TextProperty()
    text = db.TextProperty()
    tags = db.ListProperty(db.Category)
    format = db.CategoryProperty()
    published = db.BooleanProperty(default=False)
    author = db.UserProperty()
    template = db.StringProperty()
    show_in_rss = db.BooleanProperty(default=False)
    cacheable = db.BooleanProperty(default=True)
    sequence = db.IntegerProperty()
    old_id = db.IntegerProperty()  # presumably an id from a previous system -- TODO confirm
    disable_comments = db.BooleanProperty(default=False)
    def __unicode__(self):
        return self.title
    def get_absolute_url(self):
        """Public URL of this page, resolved through the 'wiki' URL pattern."""
        return reverse('wiki', args=[self.slug])
    def put(self):
        """Save the entity, slugifying the title on first save and
        invalidating any cached rendering of this page."""
        if not self.slug:
            self.slug = unicode(slugify(self.title))
        # Delete from cache
        cache.delete(Wiki.get_cache_key(self.slug))
        return super(Wiki, self).put()
    @classmethod
    def get_cache_key(cls, slug):
        """Cache key under which this page's data is stored."""
        return app_settings.WIKI_CACHE_KEY_PREFIX + slug
    def description_or_text(self):
        """Prefer the short description; fall back to the full text."""
        return self.description or self.text
    def render(self):
        """Render this wiki page through its template."""
        return render_to_string('gae_wiki/render_wiki.html', {'wiki': self})
| lgpl-3.0 |
mancoast/CPythonPyc_test | fail/323_test_pty.py | 2 | 11063 | from test.support import verbose, run_unittest, import_module, reap_children
#Skip these tests if either fcntl or termios is not available
fcntl = import_module('fcntl')
import_module('termios')
import errno
import pty
import os
import sys
import select
import signal
import socket
import unittest
# Fixed payloads written through the pty in the tests below.
TEST_STRING_1 = b"I wish to buy a fish license.\n"
TEST_STRING_2 = b"For my pet fish, Eric.\n"
# debug() is a no-op unless the test suite runs in verbose mode.
if verbose:
    def debug(msg):
        print(msg)
else:
    def debug(msg):
        pass
def normalize_output(data):
    """Collapse platform newline translations back to plain b'\\n'.

    Some operating systems do conversions on newline: OSF/1 (Tru64)
    apparently turns \\n into \\r\\r\\n and IRIX turns it into \\r\\n.
    Rather than setting termios precisely per platform, rewrite those
    sequences back to \\n so results can be compared against one form.
    Only data *ending* with a translated newline is rewritten; other
    differences (extra whitespace, trailing garbage, ...) are left
    intact so they still show up as mismatches.
    """
    for translated in (b'\r\r\n', b'\r\n'):
        if data.endswith(translated):
            return data.replace(translated, b'\n')
    return data
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
# XXX(nnorwitz): these tests leak fds when there is an error.
class PtyTest(unittest.TestCase):
    """Marginal tests of pty.master_open/slave_open/fork, guarded by an
    alarm so that tty calls cannot hang the whole test run."""
    def setUp(self):
        # isatty() and close() can hang on some platforms. Set an alarm
        # before running the test to make sure we don't hang forever.
        self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
        signal.alarm(10)
    def tearDown(self):
        # remove alarm, restore old alarm handler
        signal.alarm(0)
        signal.signal(signal.SIGALRM, self.old_alarm)
    def handle_sig(self, sig, frame):
        # SIGALRM handler: reaching here means a pty call blocked too long.
        self.fail("isatty hung")
    def test_basic(self):
        # Open a master/slave pair and check data written to the slave
        # comes back out of the master (modulo newline translation).
        try:
            debug("Calling master_open()")
            master_fd, slave_name = pty.master_open()
            debug("Got master_fd '%d', slave_name '%s'" %
                  (master_fd, slave_name))
            debug("Calling slave_open(%r)" % (slave_name,))
            slave_fd = pty.slave_open(slave_name)
            debug("Got slave_fd '%d'" % slave_fd)
        except OSError:
            # " An optional feature could not be imported " ... ?
            raise unittest.SkipTest("Pseudo-terminals (seemingly) not functional.")
        self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')
        # Solaris requires reading the fd before anything is returned.
        # My guess is that since we open and close the slave fd
        # in master_open(), we need to read the EOF.
        # Ensure the fd is non-blocking in case there's nothing to read.
        orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
        try:
            s1 = os.read(master_fd, 1024)
            self.assertEqual(b'', s1)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
        # Restore the original flags.
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags)
        debug("Writing to slave_fd")
        os.write(slave_fd, TEST_STRING_1)
        s1 = os.read(master_fd, 1024)
        self.assertEqual(b'I wish to buy a fish license.\n',
                         normalize_output(s1))
        debug("Writing chunked output")
        os.write(slave_fd, TEST_STRING_2[:5])
        os.write(slave_fd, TEST_STRING_2[5:])
        s2 = os.read(master_fd, 1024)
        self.assertEqual(b'For my pet fish, Eric.\n', normalize_output(s2))
        os.close(slave_fd)
        os.close(master_fd)
    def test_fork(self):
        # Fork a child on a new pty; the child reports its state through
        # its exit status (see the res checks below).
        debug("calling pty.fork()")
        pid, master_fd = pty.fork()
        if pid == pty.CHILD:
            # stdout should be connected to a tty.
            if not os.isatty(1):
                debug("Child's fd 1 is not a tty?!")
                os._exit(3)
            # After pty.fork(), the child should already be a session leader.
            # (on those systems that have that concept.)
            debug("In child, calling os.setsid()")
            try:
                os.setsid()
            except OSError:
                # Good, we already were session leader
                debug("Good: OSError was raised.")
                pass
            except AttributeError:
                # Have pty, but not setsid()?
                debug("No setsid() available?")
                pass
            except:
                # We don't want this error to propagate, escaping the call to
                # os._exit() and causing very peculiar behavior in the calling
                # regrtest.py !
                # Note: could add traceback printing here.
                debug("An unexpected error was raised.")
                os._exit(1)
            else:
                debug("os.setsid() succeeded! (bad!)")
                os._exit(2)
            os._exit(4)
        else:
            debug("Waiting for child (%d) to finish." % pid)
            # In verbose mode, we have to consume the debug output from the
            # child or the child will block, causing this test to hang in the
            # parent's waitpid() call. The child blocks after a
            # platform-dependent amount of data is written to its fd. On
            # Linux 2.6, it's 4000 bytes and the child won't block, but on OS
            # X even the small writes in the child above will block it. Also
            # on Linux, the read() will throw an OSError (input/output error)
            # when it tries to read past the end of the buffer but the child's
            # already exited, so catch and discard those exceptions. It's not
            # worth checking for EIO.
            while True:
                try:
                    data = os.read(master_fd, 80)
                except OSError:
                    break
                if not data:
                    break
                sys.stdout.write(str(data.replace(b'\r\n', b'\n'),
                                     encoding='ascii'))
            ##line = os.read(master_fd, 80)
            ##lines = line.replace('\r\n', '\n').split('\n')
            ##if False and lines != ['In child, calling os.setsid()',
            ##                 'Good: OSError was raised.', '']:
            ##    raise TestFailed("Unexpected output from child: %r" % line)
            (pid, status) = os.waitpid(pid, 0)
            res = status >> 8
            debug("Child (%d) exited with status %d (%d)." % (pid, res, status))
            if res == 1:
                self.fail("Child raised an unexpected exception in os.setsid()")
            elif res == 2:
                self.fail("pty.fork() failed to make child a session leader.")
            elif res == 3:
                self.fail("Child spawned by pty.fork() did not have a tty as stdout")
            elif res != 4:
                self.fail("pty.fork() failed for unknown reasons.")
            ##debug("Reading from master_fd now that the child has exited")
            ##try:
            ##    s1 = os.read(master_fd, 1024)
            ##except os.error:
            ##    pass
            ##else:
            ##    raise TestFailed("Read from master_fd did not raise exception")
        os.close(master_fd)
        # pty.fork() passed.
# pty.fork() passed.
class SmallPtyTests(unittest.TestCase):
    """These tests don't spawn children or hang."""
    def setUp(self):
        # Save the pty module globals we monkeypatch so tearDown can
        # restore them even when a test fails.
        self.orig_stdin_fileno = pty.STDIN_FILENO
        self.orig_stdout_fileno = pty.STDOUT_FILENO
        self.orig_pty_select = pty.select
        self.fds = []  # A list of file descriptors to close.
        self.select_rfds_lengths = []
        self.select_rfds_results = []
    def tearDown(self):
        pty.STDIN_FILENO = self.orig_stdin_fileno
        pty.STDOUT_FILENO = self.orig_stdout_fileno
        pty.select = self.orig_pty_select
        for fd in self.fds:
            try:
                os.close(fd)
            except:
                # Best effort only: some fds were already closed by the test.
                pass
    def _pipe(self):
        # Create a pipe and register both ends for cleanup.
        pipe_fds = os.pipe()
        self.fds.extend(pipe_fds)
        return pipe_fds
    def _mock_select(self, rfds, wfds, xfds):
        # Scripted replacement for select(): verify how many fds were
        # polled and return the pre-queued readable set.
        # This will raise IndexError when no more expected calls exist.
        self.assertEqual(self.select_rfds_lengths.pop(0), len(rfds))
        return self.select_rfds_results.pop(0), [], []
    def test__copy_to_each(self):
        """Test the normal data case on both master_fd and stdin."""
        read_from_stdout_fd, mock_stdout_fd = self._pipe()
        pty.STDOUT_FILENO = mock_stdout_fd
        mock_stdin_fd, write_to_stdin_fd = self._pipe()
        pty.STDIN_FILENO = mock_stdin_fd
        socketpair = socket.socketpair()
        masters = [s.fileno() for s in socketpair]
        self.fds.extend(masters)
        # Feed data. Smaller than PIPEBUF. These writes will not block.
        os.write(masters[1], b'from master')
        os.write(write_to_stdin_fd, b'from stdin')
        # Expect two select calls, the last one will cause IndexError
        pty.select = self._mock_select
        self.select_rfds_lengths.append(2)
        self.select_rfds_results.append([mock_stdin_fd, masters[0]])
        self.select_rfds_lengths.append(2)
        with self.assertRaises(IndexError):
            pty._copy(masters[0])
        # Test that the right data went to the right places.
        rfds = select.select([read_from_stdout_fd, masters[1]], [], [], 0)[0]
        self.assertEqual([read_from_stdout_fd, masters[1]], rfds)
        self.assertEqual(os.read(read_from_stdout_fd, 20), b'from master')
        self.assertEqual(os.read(masters[1], 20), b'from stdin')
    def test__copy_eof_on_all(self):
        """Test the empty read EOF case on both master_fd and stdin."""
        read_from_stdout_fd, mock_stdout_fd = self._pipe()
        pty.STDOUT_FILENO = mock_stdout_fd
        mock_stdin_fd, write_to_stdin_fd = self._pipe()
        pty.STDIN_FILENO = mock_stdin_fd
        socketpair = socket.socketpair()
        masters = [s.fileno() for s in socketpair]
        self.fds.extend(masters)
        # Close the write sides so both reads below hit EOF immediately.
        os.close(masters[1])
        socketpair[1].close()
        os.close(write_to_stdin_fd)
        # Expect two select calls, the last one will cause IndexError
        pty.select = self._mock_select
        self.select_rfds_lengths.append(2)
        self.select_rfds_results.append([mock_stdin_fd, masters[0]])
        # We expect that both fds were removed from the fds list as they
        # both encountered an EOF before the second select call.
        self.select_rfds_lengths.append(0)
        with self.assertRaises(IndexError):
            pty._copy(masters[0])
def test_main(verbose=None):
    """Run both test cases, always reaping leftover child processes."""
    try:
        run_unittest(SmallPtyTests, PtyTest)
    finally:
        # Children forked by PtyTest must not outlive the test run.
        reap_children()
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/kernel/zmq/eventloops.py | 4 | 8652 | # encoding: utf-8
"""Event loop integration for the ZeroMQ-based kernels.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
# System library imports
import zmq
# Local imports
from IPython.config.application import Application
from IPython.utils import io
#------------------------------------------------------------------------------
# Eventloops for integrating the Kernel into different GUIs
#------------------------------------------------------------------------------
def _on_os_x_10_9():
    """Return True when running on Mac OS X 10.9 (Mavericks) or later."""
    import platform
    from distutils.version import LooseVersion as V
    # Short-circuits on non-darwin platforms before touching mac_ver().
    return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
def _notify_stream_qt(kernel, stream):
    """Hook a zmq stream into the Qt event loop via a QSocketNotifier."""
    from IPython.external.qt_for_kernel import QtCore
    # On OS X 10.9+ wrap kernel iterations in an App Nap suppression
    # context (if enabled); elsewhere use a no-op context manager.
    if _on_os_x_10_9() and kernel._darwin_app_nap:
        from IPython.external.appnope import nope_scope as context
    else:
        from IPython.core.interactiveshell import NoOpContext as context
    def process_stream_events():
        # Drain every pending zmq message before returning to Qt.
        while stream.getsockopt(zmq.EVENTS) & zmq.POLLIN:
            with context():
                kernel.do_one_iteration()
    # Qt watches the stream's underlying file descriptor for readability.
    fd = stream.getsockopt(zmq.FD)
    notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app)
    notifier.activated.connect(process_stream_events)
def loop_qt4(kernel):
    """Start a kernel with PyQt4 event loop integration."""
    from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4
    kernel.app = get_app_qt4([" "])
    # The kernel must keep running even after the user closes all windows.
    kernel.app.setQuitOnLastWindowClosed(False)
    for s in kernel.shell_streams:
        _notify_stream_qt(kernel, s)
    start_event_loop_qt4(kernel.app)
def loop_qt5(kernel):
    """Start a kernel with PyQt5 event loop integration"""
    # Selecting the binding via QT_API lets the qt4 path drive PyQt5 too.
    os.environ['QT_API'] = 'pyqt5'
    return loop_qt4(kernel)
def loop_wx(kernel):
    """Start a kernel with wx event loop support."""
    import wx
    from IPython.lib.guisupport import start_event_loop_wx
    if _on_os_x_10_9() and kernel._darwin_app_nap:
        # we don't hook up App Nap contexts for Wx,
        # just disable it outright.
        from IPython.external.appnope import nope
        nope()
    doi = kernel.do_one_iteration
    # Wx uses milliseconds
    poll_interval = int(1000*kernel._poll_interval)
    # We have to put the wx.Timer in a wx.Frame for it to fire properly.
    # We make the Frame hidden when we create it in the main app below.
    class TimerFrame(wx.Frame):
        def __init__(self, func):
            wx.Frame.__init__(self, None, -1)
            self.timer = wx.Timer(self)
            # Units for the timer are in milliseconds
            self.timer.Start(poll_interval)
            self.Bind(wx.EVT_TIMER, self.on_timer)
            self.func = func
        def on_timer(self, event):
            # Periodic callback: run one kernel iteration.
            self.func()
    # We need a custom wx.App to create our Frame subclass that has the
    # wx.Timer to drive the ZMQ event loop.
    class IPWxApp(wx.App):
        def OnInit(self):
            self.frame = TimerFrame(doi)
            self.frame.Show(False)
            return True
    # The redirect=False here makes sure that wx doesn't replace
    # sys.stdout/stderr with its own classes.
    kernel.app = IPWxApp(redirect=False)
    # The import of wx on Linux sets the handler for signal.SIGINT
    # to 0. This is a bug in wx or gtk. We fix by just setting it
    # back to the Python default.
    import signal
    if not callable(signal.getsignal(signal.SIGINT)):
        signal.signal(signal.SIGINT, signal.default_int_handler)
    start_event_loop_wx(kernel.app)
def loop_tk(kernel):
    """Start a kernel with the Tk event loop."""
    try:
        from tkinter import Tk # Py 3
    except ImportError:
        from Tkinter import Tk # Py 2
    doi = kernel.do_one_iteration
    # Tk uses milliseconds
    poll_interval = int(1000*kernel._poll_interval)
    # For Tkinter, we create a Tk object and call its withdraw method.
    class Timer(object):
        def __init__(self, func):
            self.app = Tk()
            # Hide the root window; only the event loop is needed.
            self.app.withdraw()
            self.func = func
        def on_timer(self):
            # Run one kernel iteration, then re-arm the timer.
            self.func()
            self.app.after(poll_interval, self.on_timer)
        def start(self):
            self.on_timer()  # Call it once to get things going.
            self.app.mainloop()
    kernel.timer = Timer(doi)
    kernel.timer.start()
def loop_gtk(kernel):
    """Start the kernel, coordinating with the GTK event loop"""
    from .gui.gtkembed import GTKEmbed
    # GTKEmbed owns all GTK-side wiring; this is just a thin entry point.
    gtk_kernel = GTKEmbed(kernel)
    gtk_kernel.start()
def loop_cocoa(kernel):
    """Start the kernel, coordinating with the Cocoa CFRunLoop event loop
    via the matplotlib MacOSX backend.
    """
    import matplotlib
    if matplotlib.__version__ < '1.1.0':
        kernel.log.warn(
            "MacOSX backend in matplotlib %s doesn't have a Timer, "
            "falling back on Tk for CFRunLoop integration.  Note that "
            "even this won't work if Tk is linked against X11 instead of "
            "Cocoa (e.g. EPD).  To use the MacOSX backend in the kernel, "
            "you must use matplotlib >= 1.1.0, or a native libtk."
        )
        return loop_tk(kernel)
    from matplotlib.backends.backend_macosx import TimerMac, show
    # scale interval for sec->ms
    poll_interval = int(1000*kernel._poll_interval)
    real_excepthook = sys.excepthook
    def handle_int(etype, value, tb):
        """don't let KeyboardInterrupts look like crashes"""
        if etype is KeyboardInterrupt:
            io.raw_print("KeyboardInterrupt caught in CFRunLoop")
        else:
            real_excepthook(etype, value, tb)
    # add doi() as a Timer to the CFRunLoop
    def doi():
        # restore excepthook during IPython code
        sys.excepthook = real_excepthook
        kernel.do_one_iteration()
        # and back:
        sys.excepthook = handle_int
    t = TimerMac(poll_interval)
    t.add_callback(doi)
    t.start()
    # but still need a Poller for when there are no active windows,
    # during which time mainloop() returns immediately
    poller = zmq.Poller()
    if kernel.control_stream:
        poller.register(kernel.control_stream.socket, zmq.POLLIN)
    for stream in kernel.shell_streams:
        poller.register(stream.socket, zmq.POLLIN)
    while True:
        try:
            # double nested try/except, to properly catch KeyboardInterrupt
            # due to pyzmq Issue #130
            try:
                # don't let interrupts during mainloop invoke crash_handler:
                sys.excepthook = handle_int
                show.mainloop()
                sys.excepthook = real_excepthook
                # use poller if mainloop returned (no windows)
                # scale by extra factor of 10, since it's a real poll
                poller.poll(10*poll_interval)
                kernel.do_one_iteration()
            except:
                raise
        except KeyboardInterrupt:
            # Ctrl-C shouldn't crash the kernel
            io.raw_print("KeyboardInterrupt caught in kernel")
        finally:
            # ensure excepthook is restored
            sys.excepthook = real_excepthook
# mapping of keys to loop functions
loop_map = {
    'qt' : loop_qt4,
    'qt4': loop_qt4,
    'qt5': loop_qt5,
    'inline': None,     # inline/nbagg backends need no GUI event loop
    'nbagg': None,
    'osx': loop_cocoa,
    'wx' : loop_wx,
    'tk' : loop_tk,
    'gtk': loop_gtk,
    None : None,
}
def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
        raise ValueError(e)
    if kernel is None:
        # Fall back to the kernel of the running IPython Application, if any.
        if Application.initialized():
            kernel = getattr(Application.instance(), 'kernel', None)
        if kernel is None:
            raise RuntimeError("You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
    loop = loop_map[gui]
    # Refuse to switch an already-integrated kernel to a different loop.
    if loop and kernel.eventloop is not None and kernel.eventloop is not loop:
        raise RuntimeError("Cannot activate multiple GUI eventloops")
    kernel.eventloop = loop
| bsd-3-clause |
ACS-Community/ACS | LGPL/CommonSoftware/acspycommon/src/Acspy/Util/NamingService.py | 3 | 4456 | # @(#) $Id: NamingService.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $
#
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities, Inc. Washington DC, USA, 2001
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__revision__ = "$Id: NamingService.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
'''
TODO:
- update documentation to be consistent with the rest of Acspy package
'''
from omniORB import CORBA
from Acspy.Util.ACSCorba import nameService
import CosNaming
from traceback import print_exc
#--------------------------------------------------------------------------
class NamingServiceUtil(object):
    """
    This class merely provides simplified access to the CORBA Naming Service.
    It provides the following functions:
        __init__  - attach to the naming service
        addObject - bind an object into the naming service
        getObject - resolve an object from the naming service
        delObject - unbind an object from the naming service
    In practice the NamingService is much more capable (e.g. hierarchical), but
    less convenient to use: only the first (root) naming level is used here,
    always with kind="" (except delObject, which takes an explicit kind).
    """
    #--------------------------------------------------------------------------
    def __init__(self, nameServiceArgs):
        """
        Attach to the Naming Service.

        Parameters: nameServiceArgs is currently unused (kept for backward
        compatibility); the root context comes from ACSCorba.nameService().

        Raises: an exception if the reference cannot be obtained (probably
        the naming service is not running).
        """
        # Root naming context; every bind/resolve below happens at this level.
        self.__nsroot = nameService()
        return
    #--------------------------------------------------------------------------
    def addObject(self, name, object):
        """
        Add object to the Naming Service with name=name and kind="". Note that
        hierarchies are not allowed for, i.e. the objects are placed in the
        first level. object must be a CORBA object reference, not an arbitrary
        Python object (for a servant, pass servant._this()).

        Raises: RuntimeError if the object can be neither bound nor rebound.
        """
        # TODO - check types
        context = self.__nsroot
        # Following the bible, do not use kind.
        name = [CosNaming.NameComponent(name, "")]
        try:
            context.bind(name, object)
        except CosNaming.NamingContext.AlreadyBound:
            # If name already exists, silently rebind it
            try:
                context.rebind(name, object)
            except CORBA.Exception as e:
                print_exc()
                # Bugfix: the original raised a plain string, which is a
                # TypeError on any modern Python; raise a real exception.
                raise RuntimeError("addObject - could not rebind object. Use ._this?")
    #--------------------------------------------------------------------------
    def getObject(self, name):
        """
        Return the object bound under "name" (kind ""), or None if it cannot
        be resolved. In general you will have to narrow the object reference
        after you obtain it. Only the first level is searched, not hierarchies.
        TODO - Search for any kind?
        """
        context = self.__nsroot
        name = [CosNaming.NameComponent(name, "")]
        try:
            object = context.resolve(name)
        except Exception:
            object = None  # Probably: does not exist in the naming service
        return object
    #--------------------------------------------------------------------------
    def delObject(self, name, kind):
        """
        Unbind the (name, kind) entry from the Naming Service. Returns None;
        on failure the error is printed and None is returned as well.
        """
        context = self.__nsroot
        name = [CosNaming.NameComponent(name, kind)]
        try:
            object = context.unbind(name)
        except Exception as e:
            object = None  # Probably: does not exist in the naming service
            print(e)
        return object
| lgpl-2.1 |
karllessard/tensorflow | tensorflow/python/data/experimental/ops/writers.py | 16 | 4486 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for tf.data writers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.TFRecordWriter")
class TFRecordWriter(object):
  """Writes a dataset of scalar strings to a TFRecord file.

  Dataset elements must be scalar strings. To serialize arbitrary dataset
  elements as strings first, use the `tf.io.serialize_tensor` function:

  ```python
  dataset = tf.data.Dataset.range(3)
  dataset = dataset.map(tf.io.serialize_tensor)
  writer = tf.data.experimental.TFRecordWriter("/path/to/file.tfrecord")
  writer.write(dataset)
  ```

  Read the elements back with `TFRecordDataset`:

  ```python
  dataset = tf.data.TFRecordDataset("/path/to/file.tfrecord")
  dataset = dataset.map(lambda x: tf.io.parse_tensor(x, tf.int64))
  ```

  To shard a `dataset` across multiple TFRecord files:

  ```python
  dataset = ...  # dataset to be written

  def reduce_func(key, dataset):
    filename = tf.strings.join([PATH_PREFIX, tf.strings.as_string(key)])
    writer = tf.data.experimental.TFRecordWriter(filename)
    writer.write(dataset.map(lambda _, x: x))
    return tf.data.Dataset.from_tensors(filename)

  dataset = dataset.enumerate()
  dataset = dataset.apply(tf.data.experimental.group_by_window(
    lambda i, _: i % NUM_SHARDS, reduce_func, tf.int64.max
  ))
  ```
  """

  def __init__(self, filename, compression_type=None):
    """Initializes a `TFRecordWriter`.

    Args:
      filename: a string path indicating where to write the TFRecord data.
      compression_type: (Optional.) a string indicating what type of compression
        to use when writing the file. See `tf.io.TFRecordCompressionType` for
        what types of compression are available. Defaults to `None`.
    """
    self._filename = ops.convert_to_tensor(
        filename, dtypes.string, name="filename")
    self._compression_type = convert.optional_param_to_tensor(
        "compression_type",
        compression_type,
        argument_default="",
        argument_dtype=dtypes.string)

  def write(self, dataset):
    """Writes the content of `dataset` to the file set in the constructor.

    If the file exists, it will be overwritten.

    Args:
      dataset: a `tf.data.Dataset` whose elements are to be written to a file

    Returns:
      In graph mode, an operation which performs the write when executed.
      In eager mode, the write is performed immediately and there is no
      return value.

    Raises:
      TypeError: if `dataset` is not a `tf.data.Dataset`, or if the elements
        it produces are not scalar strings.
    """
    # Validate eagerly so callers see a clear Python error rather than a
    # kernel-level failure at run time.
    if not isinstance(dataset, dataset_ops.DatasetV2):
      raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
    scalar_string = tensor_spec.TensorSpec([], dtypes.string)
    if not dataset_ops.get_structure(dataset).is_compatible_with(scalar_string):
      raise TypeError(
          "`dataset` must produce scalar `DT_STRING` tensors whereas it "
          "produces shape {0} and types {1}".format(
              dataset_ops.get_legacy_output_shapes(dataset),
              dataset_ops.get_legacy_output_types(dataset)))
    return gen_experimental_dataset_ops.dataset_to_tf_record(
        dataset._variant_tensor, self._filename, self._compression_type)  # pylint: disable=protected-access
| apache-2.0 |
programadorjc/django | tests/expressions/models.py | 261 | 1925 | """
Tests for F() query expression syntax.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Employee(models.Model):
    """An employee with an optional salary; target model for F() expression tests."""
    firstname = models.CharField(max_length=50)
    lastname = models.CharField(max_length=50)
    # Nullable on purpose so expressions over NULL integers can be tested.
    salary = models.IntegerField(blank=True, null=True)

    def __str__(self):
        return '%s %s' % (self.firstname, self.lastname)
@python_2_unicode_compatible
class Company(models.Model):
    """A company with two FK relations to Employee.

    ``ceo`` is required (CASCADE on delete); ``point_of_contact`` is nullable
    and cleared via SET_NULL when the referenced employee is deleted.
    """
    name = models.CharField(max_length=100)
    num_employees = models.PositiveIntegerField()
    num_chairs = models.PositiveIntegerField()
    ceo = models.ForeignKey(
        Employee,
        models.CASCADE,
        related_name='company_ceo_set')
    point_of_contact = models.ForeignKey(
        Employee,
        models.SET_NULL,
        related_name='company_point_of_contact_set',
        null=True)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Number(models.Model):
    """One integer and one nullable float, with explicit DB column names."""
    integer = models.BigIntegerField(db_column='the_integer')
    # NOTE: field name shadows the builtin 'float'; kept for the test schema.
    float = models.FloatField(null=True, db_column='the_float')

    def __str__(self):
        return '%i, %.3f' % (self.integer, self.float)
class Experiment(models.Model):
    """An experiment with date/datetime/duration fields for temporal expression tests."""
    name = models.CharField(max_length=24)
    assigned = models.DateField()
    completed = models.DateField()
    estimated_time = models.DurationField()
    start = models.DateTimeField()
    end = models.DateTimeField()

    class Meta:
        # Deterministic default ordering for queryset assertions.
        ordering = ('name',)

    def duration(self):
        """Return the actual elapsed time (``end - start``) as a timedelta."""
        return self.end - self.start
@python_2_unicode_compatible
class Time(models.Model):
    """Wraps a single nullable TimeField."""
    time = models.TimeField(null=True)

    def __str__(self):
        return "%s" % self.time
@python_2_unicode_compatible
class UUID(models.Model):
    """Wraps a single nullable UUIDField."""
    uuid = models.UUIDField(null=True)

    def __str__(self):
        return "%s" % self.uuid
| bsd-3-clause |
rcarrillocruz/ansible | lib/ansible/modules/windows/win_dsc.py | 22 | 3414 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Trond Hindenes <trond@hindenes.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_dsc
version_added: "2.4"
short_description: Invokes a PowerShell DSC configuration
description:
- Invokes a PowerShell DSC Configuration. Requires PowerShell version 5 (February release or newer).
- Most of the parameters for this module are dynamic and will vary depending on the DSC Resource.
options:
resource_name:
description:
- The DSC Resource to use. Must be accessible to PowerShell using any of the default paths.
required: true
module_version:
description:
- Can be used to configure the exact version of the dsc resource to be invoked.
- Useful if the target node has multiple versions installed of the module containing the DSC resource.
- If not specified, the module will follow standard Powershell convention and use the highest version available.
default: latest
author: Trond Hindenes
'''
EXAMPLES = r'''
# Playbook example
- name: Extract zip file
win_dsc:
resource_name: archive
ensure: Present
path: "C:\\Temp\\zipfile.zip"
destination: "C:\\Temp\\Temp2"
- name: Invoke DSC with check mode
win_dsc:
resource_name: windowsfeature
name: telnet-client
'''
RETURN = r'''
resource_name:
description: The name of the invoked resource
returned: always
type: string
sample: windowsfeature
module_version:
description: The version of the dsc resource/module used.
returned: success
type: string
sample: "1.0.1"
attributes:
description: The attributes/parameters passed in to the DSC resource as key/value pairs
returned: always
type: complex
sample:
contains:
Key:
description: Attribute key
Value:
description: Attribute value
dsc_attributes:
description: The attributes/parameters as returned from the DSC engine in dict format
returned: always
type: complex
contains:
Key:
description: Attribute key
Value:
description: Attribute value
reboot_required:
description: flag returned from the DSC engine indicating whether or not the machine requires a reboot for the invoked changes to take effect
returned: always
type: boolean
sample: True
message:
description: any error message from invoking the DSC resource
returned: error
type: string
sample: Multiple DSC modules found with resource name xyz
'''
| gpl-3.0 |
MakeHer/edx-platform | lms/djangoapps/verify_student/tests/test_fake_software_secure.py | 35 | 2724 | """
Tests for the fake software secure response.
"""
from django.test import TestCase
from mock import patch
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
class SoftwareSecureFakeViewTest(UrlResetMixin, TestCase):
    """
    Base class to test the fake software secure view.

    Creates a logged-in user with one photo-verification attempt; subclasses
    choose whether the ENABLE_SOFTWARE_SECURE_FAKE feature flag is active
    while the verify_student URLs are (re)loaded.
    """
    def setUp(self, **kwargs):
        # Whether the feature flag should be on while URLs are resolved.
        enable_software_secure_fake = kwargs.get('enable_software_secure_fake', False)
        with patch.dict('django.conf.settings.FEATURES', {'ENABLE_SOFTWARE_SECURE_FAKE': enable_software_secure_fake}):
            # NOTE(review): UrlResetMixin presumably reloads 'verify_student.urls'
            # inside this patched context so flag-dependent routes register — confirm.
            super(SoftwareSecureFakeViewTest, self).setUp('verify_student.urls')

        self.user = UserFactory.create(username="test", password="test")
        self.attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
        self.client.login(username="test", password="test")
class SoftwareSecureFakeViewDisabledTest(SoftwareSecureFakeViewTest):
    """
    Test the fake software secure response when feature flag
    'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
    """
    def setUp(self):
        super(SoftwareSecureFakeViewDisabledTest, self).setUp(enable_software_secure_fake=False)

    def test_get_method_without_enable_feature_flag(self):
        """
        Test that the user gets 404 response if the feature flag
        'ENABLE_SOFTWARE_SECURE_FAKE' is not enabled.
        """
        response = self.client.get(
            '/verify_student/software-secure-fake-response'
        )
        # The fake-response URL is not registered when the flag is off.
        self.assertEqual(response.status_code, 404)
class SoftwareSecureFakeViewEnabledTest(SoftwareSecureFakeViewTest):
    """
    Test the fake software secure response when feature flag
    'ENABLE_SOFTWARE_SECURE_FAKE' is enabled.
    """
    def setUp(self):
        super(SoftwareSecureFakeViewEnabledTest, self).setUp(enable_software_secure_fake=True)

    def test_get_method_without_logged_in_user(self):
        """
        Test that the user gets 302 response if that user is not logged in.
        """
        self.client.logout()
        response = self.client.get(
            '/verify_student/software-secure-fake-response'
        )
        # Anonymous users are redirected rather than served the fake response.
        self.assertEqual(response.status_code, 302)

    def test_get_method(self):
        """
        Test that GET method of fake software secure view uses the most recent
        attempt for the logged-in user.
        """
        response = self.client.get(
            '/verify_student/software-secure-fake-response'
        )
        self.assertEqual(response.status_code, 200)
        # The body should reference the attempt created in setUp.
        self.assertIn('EdX-ID', response.content)
        self.assertIn('results_callback', response.content)
| agpl-3.0 |
jieter/f-engrave | application/settings.py | 1 | 9320 | import os
def cast_boolean(value):
    """Coerce a config value to bool.

    Real booleans pass through unchanged; strings longer than one character
    are compared against the literal 'True'; a single character is treated
    as an integer flag ('0'/'1').
    """
    if isinstance(value, bool):
        return value
    if len(value) > 1:
        return value == 'True'
    return bool(int(value))
def cast_string(value):
    """Coerce a config value to a clean string.

    Strips surrounding whitespace, turns literal '\\n' escape sequences
    into real newlines, and removes one pair of surrounding double quotes
    (re-stripping the unquoted content).
    """
    text = str(value).strip()
    text = text.replace('\\n', '\n')
    if text.startswith('"') and text.endswith('"'):
        return text[1:-1].strip()
    return text
# Map from a type name (as reported by Settings.type) to the coercion
# function used when a value arrives as a string from a config file.
CAST_TYPES = {
    'str': cast_string,
    'bool': cast_boolean,
    'int': int,
    'float': float
}

# Old names to maintain backwards compatibility while reading
# config files. Only supported while loading values
# from config files.
OLD_SETTING_NAMES = {
    'gpost': 'gcode_postamble',
    'gpre': 'gcode_preamble',
    'bmp_long': 'bmp_longcurve',
    'bmp_optto': 'bmp_opttolerance',
    'bmp_turnp': 'bmp_turnpol',
    'bmp_turds': 'bmp_turdsize',
    'bmp_alpha': 'bmp_alphamax',
    'v_drv_crner': 'v_drv_corner',
    'v_stp_crner': 'v_step_corner',
    'FEED': 'feedrate',
    'PLUNGE': 'plunge_rate',
    'WSPACE': 'word_space',
    'CSPACE': 'char_space',
    'LSPACE': 'line_space',
    'TANGLE': 'text_angle',
    'TCODE': 'text_code',
    'H_CALC': 'height_calculation',
    'XSCALE': 'xscale',
    'YSCALE': 'yscale',
    'STHICK': 'line_thickness',
    'TRADIUS': 'text_radius',
    'ZSAFE': 'zsafe',
    'ZCUT': 'zcut',
}

# Settings are persisted as comment-like lines inside a g-code file,
# formatted "(fengrave_set <name> <value> )".
CONFIG_FILENAME = 'config.ngc'
CONFIG_MARKER = '(fengrave_set '
CONFIG_TEMPLATE = CONFIG_MARKER + '%20s %s )'

# Setting name that requires special decoding (space-separated char codes).
TEXT_CODE = 'text_code'

CUT_TYPE_ENGRAVE = 'engrave'
CUT_TYPE_VCARVE = 'v-carve'

HOME_DIR = os.path.expanduser("~")
NGC_FILE = (HOME_DIR + "/None")
# IMAGE_FILE = (HOME_DIR + "/None")
IMAGE_FILE = (HOME_DIR + "/Desktop/None")  # TEST
class Settings(object):
    """
    Container for all F-Engrave application settings.

    Holds a mutable copy of the defaults, coerces incoming (string) values
    to the stored types, and can round-trip itself through the
    '(fengrave_set name value)' comment lines of a g-code config file.
    """
    _defaults = {
        'HOME_DIR': HOME_DIR,
        'NGC_FILE': NGC_FILE,
        'IMAGE_FILE': IMAGE_FILE,
        'config_filename': CONFIG_FILENAME,

        'batch': False,
        'show_axis': True,
        'show_box': True,
        'show_thick': True,
        'flip': False,
        'mirror': False,

        # text plotted on a circle with radius
        'text_radius': 0.0,
        'outer': True,  # outside circle
        'upper': True,  # on top of circle
        'fontdex': False,
        'useIMGsize': False,

        # flip normals (V-carve side)
        'v_flop': False,
        # ball carve (ball nose cutter)
        'b_carve': False,
        # TODO is "BALL" shape valid, or is this covered by b_carve?
        # options: 'VBIT', 'FLAT', 'BALL'
        'bit_shape': 'VBIT',
        # plot during v-carve calculation [GUI]
        'v_pplot': False,
        'inlay': False,
        'no_comments': True,

        # arc fitting, options 'none', 'center', 'radius'
        'arc_fit': 'none',
        'ext_char': False,
        # disable variables in gcode [GCODE]
        'var_dis': True,
        # cleanup cut directions
        'clean_P': True,
        'clean_X': True,
        'clean_Y': False,
        # V-Bit cut directions
        'v_clean_P': False,
        'v_clean_X': True,
        'v_clean_Y': False,

        'yscale': 50.8,
        'xscale': 100.0,
        'line_space': 1.2,
        'char_space': 25,
        'word_space': 100,
        'text_angle': 0.0,

        # safe height [GCODE]
        'zsafe': 5.0,
        # engraving depth [GCODE]
        'zcut': -0.1,
        # derived value
        'max_cut': 0.0,

        'line_thickness': 0.25,
        'border_thickness': 0.5,

        # options: 'Default',
        # 'Top-Left', 'Top-Center', 'Top-Right',
        # 'Mid-Left', 'Mid-Center', 'Mid-Right',
        # 'Bot-Left', 'Bot-Center', 'Bot-Right'
        'origin': 'Default',
        # options: 'Left', 'Right', 'Center'
        'justify': 'Left',
        # options: 'in', 'mm'
        'units': 'mm',
        # options: 'in/min', 'mm/min'
        'feed_units': 'mm/min',
        # horizontal feedrate [GCODE]
        'feedrate': 60.0,
        # feedrate for plunging into stock [GCODE]
        'plunge_rate': 10.0,

        # which bounding boxes are used to calculate line height
        # options: 'max_all', 'max_use'
        'height_calculation': 'max_use',
        # Add a box/circle around plot
        'plotbox': False,
        # Gap between box and engraving
        'boxgap': 6.35,
        # font location and name
        'fontdir': 'fonts',
        'fontfile': 'normal.cxf',

        # options: 'engrave', 'v-carve'
        'cut_type': CUT_TYPE_ENGRAVE,
        # 'cut_type': CUT_TYPE_VCARVE,
        # options: 'text', 'image'
        'input_type': 'text',
        # 'input_type': 'image',

        # v-cutter parameters
        # options: 'scorch', 'voronoi'
        'v_strategy': 'scorch',
        'v_bit_angle': 60,
        'v_bit_dia': 3.0,
        'v_depth_lim': 0.0,
        'v_drv_corner': 135,
        'v_step_corner': 200,
        'v_step_len': 0.254,
        # v-carve loop accuracy
        'v_acc': 0.00254,

        'allowance': 0.0,
        # options: 'chr', 'all'
        'v_check_all': 'all',
        'v_rough_stk': 0.0,
        'v_max_cut': 0.0,

        # options: 'black', 'white', 'right', 'left', 'minority', 'majority', or 'random'
        'bmp_turnpol': 'minority',
        'bmp_turdsize': 2,
        'bmp_alphamax': 1.0,
        'bmp_opttolerance': 0.2,
        'bmp_longcurve': True,

        'xorigin': 0.0,
        'yorigin': 0.0,
        'segarc': 5.0,
        'accuracy': 0.001,

        # diameter of the cleanup bit
        'clean_dia': 3.0,
        # clean-up step-over as percentage of the clean-up bit diameter
        'clean_step': 50,
        # Width of the clean-up search area (obsolete before or since v1.65)
        'clean_w': 50.8,
        'clean_v': 1.27,
        'clean_name': '_clean',

        # G-Code Default Preamble
        #
        # G17 : sets XY plane
        # G64 P0.003 : G64 P- (motion blending tolerance set to 0.003 (units))
        # G64 without P option keeps the best speed possible, no matter how
        # far away from the programmed point you end up.
        # M3 S3000 : Spindle start at 3000
        # M7 : Turn mist coolant on
        'gcode_preamble': 'G17 G64 P0.003 M3 S3000 M7',

        # G-Code Default Postamble
        #
        # M5 : Stop Spindle
        # M9 : Turn all coolant off
        # M2 : End Program
        'gcode_postamble': 'M5 M9 M2',

        'default_text': 'OOF-Engrave',
        'text_code': '',
    }

    def __init__(self, filename=None, autoload=False):
        """Create a settings store initialised with the defaults.

        filename: explicit config file to load (takes precedence).
        autoload: when True and no filename is given, try config.ngc in the
        current directory, then the home directory, then ~/.fengraverc.
        """
        self._settings = self._defaults.copy()
        self._text_code = u''

        if filename is not None:
            self.from_configfile(filename)
        elif autoload:
            files_to_try = (
                CONFIG_FILENAME,
                os.path.expanduser('~') + os.path.sep + CONFIG_FILENAME,
                os.path.expanduser('~') + os.path.sep + '.fengraverc'
            )
            available = [c for c in files_to_try if os.path.isfile(c)]
            if len(available) > 0:
                self.from_configfile(available[0])

    def __iter__(self):
        # Bugfix: __iter__ must return an iterator. The original returned
        # self._settings.items() directly, which raises
        # "TypeError: iter() returned non-iterator" when iterating.
        return iter(self._settings.items())

    def type(self, name):
        """Return the stored type name of a setting ('str', 'bool', ...).

        NOTE: the slice relies on CPython 2's "<type 'int'>" repr format.
        """
        return str(type(self._settings[name]))[7:-2]

    def set(self, name, value):
        """Set a setting, coercing ``value`` to the stored type.

        The special 'text_code' setting is instead decoded from a string of
        space-separated character codes.
        """
        if name == TEXT_CODE:
            self._set_text_code(value)
        else:
            cast = CAST_TYPES[self.type(name)]
            self._settings[name] = cast(value)

    def get(self, name):
        """Return the current value of a setting (KeyError if unknown)."""
        return self._settings[name]

    # only for use in C-API calls
    def get_dict(self):
        return self._settings

    def reset(self, name=None):
        """Reset one setting (or, with no argument, all settings) to defaults."""
        if name is None:
            self._settings = self._defaults.copy()
        else:
            self.set(name, self._defaults[name])

    def has_setting(self, name):
        return name in self._settings

    def get_fontfile(self):
        """Return the full path of the currently selected font file."""
        return self.get('fontdir') + os.path.sep + self.get('fontfile')

    def from_configfile(self, filename):
        """Load settings from the '(fengrave_set ...)' lines of ``filename``.

        Unknown names are first mapped through OLD_SETTING_NAMES for
        backward compatibility; names still unknown are reported and skipped.
        """
        with open(filename, 'r') as config:
            for line in config.readlines():
                if not line.startswith(CONFIG_MARKER):
                    continue

                line = line[len(CONFIG_MARKER):].strip()
                name = line.split(' ')[0].strip()
                setting = line[len(name):-1].strip()
                if not self.has_setting(name) and name in OLD_SETTING_NAMES:
                    name = OLD_SETTING_NAMES[name]

                try:
                    self.set(name, setting)
                except KeyError:
                    # Single-argument print() prints identically on
                    # Python 2 and 3 (the original py2 print statement
                    # with a trailing tuple did not).
                    print('Setting not found: %s' % name)  # TODO

    def to_gcode(self):
        """Serialise all settings as '(fengrave_set name value)' g-code lines."""
        gcode = [CONFIG_TEMPLATE % (key, str(value).replace('\n', '\\n'))
                 for key, value in self._settings.items()]
        return gcode

    def get_text_code(self):
        return self._text_code

    def _set_text_code(self, line):
        """Decode a string of space-separated character codes into text."""
        text_code = u''
        code_list = line.split()

        for char in code_list:
            try:
                text_code += "%c" % unichr(int(char))
            # Narrowed from a bare except; Exception still covers both the
            # py2 ValueError path and py3 (where unichr raises NameError).
            except Exception:
                text_code += "%c" % chr(int(char))

        self._text_code = text_code

    def __str__(self):
        return 'Settings:\n' + ('\n'.join([', '.join(map(str, l)) for l in self._settings.items()]))
| gpl-3.0 |
iamaziz/simpleai | simpleai/machine_learning/reinforcement_learning.py | 5 | 6345 | # -*- coding: utf-8 -*-
from collections import defaultdict, Counter
import math
import random
from simpleai.search.utils import argmax
import pickle
try:
import matplotlib.pyplot as plt
import numpy
except:
plt = None # lint:ok
numpy = None # lint:ok
def make_at_least_n_times(optimistic_reward, min_n):
    """Build an exploration policy that forces trying each action min_n times.

    Actions taken fewer than ``min_n`` times are scored with
    ``optimistic_reward`` instead of their learned utility, so argmax keeps
    selecting under-explored actions until each has been tried enough.
    """
    def at_least_n_times_exploration(actions, utilities, temperature, action_counter):
        scores = {}
        for action in actions:
            if action_counter[action] < min_n:
                scores[action] = optimistic_reward
            else:
                scores[action] = utilities[action]
        return argmax(actions, lambda action: scores[action])
    return at_least_n_times_exploration
def boltzmann_exploration(actions, utilities, temperature, action_counter):
    '''Return an action sampled with probability proportional to
    exp(normalized_utility / temperature) (softmax/Boltzmann exploration).

    actions: sequence of candidate actions.
    utilities: mapping action -> estimated utility.
    temperature: softmax temperature, clamped to >= 0.01; higher is more
        uniform, lower is more greedy.
    action_counter: unused; kept for exploration-function interface parity.
    '''
    values = [utilities[x] for x in actions]
    temperature = max(temperature, 0.01)

    _max = max(values)
    _min = min(values)
    if _max == _min:
        # All utilities equal: uniform random choice.
        return random.choice(actions)

    # Normalizing utilities to [0, 1] keeps exp() from overflowing.
    weights = [math.exp(((u - _min) / (_max - _min)) / temperature) for u in values]
    total = sum(weights)  # hoisted: was recomputed for every element
    probs = [w / total for w in weights]

    # Sample the categorical distribution by walking the CDF.
    r = random.random()
    accumulated = 0.0
    for action, prob in zip(actions, probs):
        accumulated += prob
        if r < accumulated:
            return action
    # Bugfix: float rounding can leave the CDF fractionally short of 1.0;
    # the original index-walking loop could then read past the end of the
    # probability list (IndexError). Fall back to the last action.
    return actions[-1]
def make_exponential_temperature(initial_temperature, alpha):
    '''Return a decay schedule: n -> initial_temperature / exp(n * alpha).

    When exp(n * alpha) overflows the float range, a floor of 0.01 is
    returned instead.
    '''
    def temperature_at(n):
        try:
            return initial_temperature / math.exp(n * alpha)
        except OverflowError:
            return 0.01
    return temperature_at
class PerformanceCounter(object):
    """Instruments a set of learners to record per-episode statistics
    (accumulated reward, number of known states, temperature) so they can
    be plotted with show_statistics()."""

    def __init__(self, learners, names=None):
        self.learners = learners
        for i, learner in enumerate(learners):
            # Wrap each learner's set_reward so samples are captured on the fly.
            self.update_set_reward(learner)
            learner.accumulated_rewards = []
            learner.known_states = []
            learner.temperatures = []
            if names is None:
                learner.name = 'Learner %d' % i
            else:
                learner.name = names[i]

    def update_set_reward(self, learner):
        # Monkey-patches learner.set_reward with a closure that, on terminal
        # rewards, appends one sample per series before delegating to the
        # original method (preserved as learner.old_set_reward).
        def set_reward(reward, terminal=False):
            if terminal:
                # accumulated_rewards is a running (cumulative) sum.
                if len(learner.accumulated_rewards) > 0:
                    learner.accumulated_rewards.append(learner.accumulated_rewards[-1] + reward)
                else:
                    learner.accumulated_rewards.append(reward)
                learner.known_states.append(len(learner.Q))
                learner.temperatures.append(learner.temperature_function(learner.trials))

            learner.old_set_reward(reward, terminal)

        learner.old_set_reward = learner.set_reward
        learner.set_reward = set_reward

    def _make_plot(self, ax, data_name):
        # One line per learner; requires matplotlib/numpy to have imported.
        for learner in self.learners:
            data = numpy.array(getattr(learner, data_name))
            ax.plot(numpy.arange(len(data)), data, label=learner.name)

        nice_name = data_name.replace('_', ' ').capitalize()
        ax.set_title(nice_name)
        ax.legend()

    def show_statistics(self):
        """Show the three recorded series in one stacked matplotlib figure."""
        f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
        self._make_plot(ax1, 'accumulated_rewards')
        self._make_plot(ax2, 'known_states')
        self._make_plot(ax3, 'temperatures')
        plt.show()
class RLProblem(object):
    """Abstract specification of a reinforcement-learning problem."""

    def actions(self, state):
        '''Returns the actions available to perform from `state`.
        The returned value is an iterable over actions.

        Subclasses must override this method.
        '''
        raise NotImplementedError()

    def update_state(self, percept, agent):
        '''Translate a raw percept into the state handed to a given agent.

        Override this method if you need to clean perception for an agent;
        the default is a pass-through.
        '''
        return percept
def inverse(n):
    """Return 1/n, mapping 0 to 1 (used as a decaying rate schedule)."""
    return 1 if n == 0 else 1.0 / n
def state_default():
    """Factory for per-state action-utility tables (missing actions -> 0)."""
    return defaultdict(int)
class QLearner(object):
    """Base class for tabular temporal-difference agents.

    Q maps state -> {action: utility}, with missing entries defaulting to 0.
    Subclasses supply update_rule() (e.g. Q-learning vs. SARSA targets).
    """

    def __init__(self, problem, temperature_function=inverse,
                 discount_factor=1,
                 exploration_function=boltzmann_exploration,
                 learning_rate=inverse):
        # problem: RLProblem supplying actions() and update_state().
        # temperature_function: trials -> exploration temperature.
        # learning_rate: visit count -> step size.
        self.Q = defaultdict(state_default)
        self.problem = problem
        self.discount_factor = discount_factor
        self.temperature_function = temperature_function
        self.exploration_function = exploration_function
        self.learning_rate = learning_rate
        # Last observed transition, filled in by program()/set_reward().
        self.last_state = None
        self.last_action = None
        self.last_reward = None
        # Per-state counter of how often each action has been taken.
        self.counter = defaultdict(Counter)
        self.trials = 0

    def set_reward(self, reward, terminal=False):
        """Record the reward for the last action; on terminal steps the trial
        counter is bumped and Q(last_state, last_action) is pinned to the
        terminal reward."""
        self.last_reward = reward
        if terminal:
            self.trials += 1
            self.Q[self.last_state][self.last_action] = reward

    def program(self, percept):
        """One agent step: choose the next action and apply the TD update for
        the previous (state, action, reward) transition."""
        s = self.last_state
        a = self.last_action

        state = self.problem.update_state(percept, self)
        actions = self.problem.actions(state)
        if len(actions) > 0:
            current_action = self.exploration_function(actions, self.Q[state],
                                                       self.temperature_function(self.trials),
                                                       self.counter[state])
        else:
            current_action = None

        # Only update once we have a complete previous transition and a
        # freshly chosen action.
        if s is not None and current_action:
            self.counter[s][a] += 1
            self.update_rule(s, a, self.last_reward, state, current_action)

        self.last_state = state
        self.last_action = current_action
        return current_action

    def update_rule(self, s, a, r, cs, ca):
        # Implemented by subclasses; the TD target differs per algorithm.
        raise NotImplementedError

    def dump(self, path):
        """Pickle this learner to ``path``.

        NOTE(review): the temperature function is first replaced by
        ``inverse`` — presumably so unpicklable closures don't break
        pickling; custom schedules are therefore lost on save. Confirm.
        """
        self.temperature_function = inverse
        with open(path, 'wb') as f:
            pickle.dump(self, f)

    @classmethod
    def load(self, path):
        """Unpickle a learner previously saved with dump()."""
        with open(path, 'rb') as f:
            return pickle.load(f)
class TDQLearner(QLearner):
    """Off-policy Q-learning agent."""

    def update_rule(self, s, a, r, cs, ca):
        """Q(s,a) += alpha * (r + gamma * max_a' Q(cs, a') - Q(s,a))."""
        alpha = self.learning_rate(self.counter[s][a])
        best_next = max(self.Q[cs].values())
        self.Q[s][a] += alpha * (r + self.discount_factor * best_next - self.Q[s][a])
class SARSALearner(QLearner):
    """On-policy SARSA agent."""

    def update_rule(self, s, a, r, cs, ca):
        """Q(s,a) += alpha * (r + gamma * Q(cs, ca) - Q(s,a))."""
        alpha = self.learning_rate(self.counter[s][a])
        target = r + self.discount_factor * self.Q[cs][ca]
        self.Q[s][a] += alpha * (target - self.Q[s][a])
| mit |
wd5/jangr | _django/utils/log.py | 152 | 3494 | import logging
import sys
from django.core import mail
# Make sure a NullHandler is available
# This was added in Python 2.7/3.2
try:
    from logging import NullHandler
except ImportError:
    # Minimal stand-in for older Pythons: a handler that drops every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Make sure that dictConfig is available
# This was added in Python 2.7/3.2
try:
    from logging.config import dictConfig
except ImportError:
    # Django bundles its own copy of dictConfig for older Pythons.
    from django.utils.dictconfig import dictConfig

if sys.version_info < (2, 5):
    class LoggerCompat(object):
        # Proxy around a Logger that strips the 'extra' kwarg, which
        # Python 2.4's logging methods do not accept.
        def __init__(self, logger):
            self._logger = logger

        def __getattr__(self, name):
            val = getattr(self._logger, name)
            if callable(val):
                def _wrapper(*args, **kwargs):
                    # Python 2.4 logging module doesn't support 'extra' parameter to
                    # methods of Logger
                    kwargs.pop('extra', None)
                    return val(*args, **kwargs)
                return _wrapper
            else:
                return val

    def getLogger(name=None):
        return LoggerCompat(logging.getLogger(name=name))
else:
    getLogger = logging.getLogger

# Ensure the creation of the Django logger
# with a null handler. This ensures we don't get any
# 'No handlers could be found for logger "django"' messages
logger = getLogger('django')
if not logger.handlers:
    logger.addHandler(NullHandler())
class AdminEmailHandler(logging.Handler):
    """An exception log handler that e-mails log entries to site admins.

    If the request is passed as the first argument to the log record,
    request data will be provided in the report. (Bugfix: this docstring
    was previously a stray string statement after __init__, so the class
    had no __doc__.)
    """
    def __init__(self, include_html=False):
        logging.Handler.__init__(self)
        # When True, also attach the debug-style HTML traceback page.
        self.include_html = include_html

    def emit(self, record):
        # Imports are deferred so merely importing this module does not pull
        # in Django settings/views.
        import traceback
        from django.conf import settings
        from django.views.debug import ExceptionReporter

        try:
            if sys.version_info < (2, 5):
                # A nasty workaround required because Python 2.4's logging
                # module doesn't support passing in extra context.
                # For this handler, the only extra data we need is the
                # request, and that's in the top stack frame.
                request = record.exc_info[2].tb_frame.f_locals['request']
            else:
                request = record.request
            subject = '%s (%s IP): %s' % (
                record.levelname,
                (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'),
                record.msg
            )
            request_repr = repr(request)
        except Exception:
            # Narrowed from a bare except: anything can go wrong while poking
            # at the record/request, so fall back to a minimal subject.
            subject = '%s: %s' % (
                record.levelname,
                record.msg
            )
            request = None
            request_repr = "Request repr() unavailable"

        if record.exc_info:
            exc_info = record.exc_info
            stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
        else:
            exc_info = (None, record.msg, None)
            stack_trace = 'No stack trace available'

        message = "%s\n\n%s" % (stack_trace, request_repr)
        reporter = ExceptionReporter(request, is_email=True, *exc_info)
        html_message = self.include_html and reporter.get_traceback_html() or None
        mail.mail_admins(subject, message, fail_silently=True,
                         html_message=html_message)
| bsd-3-clause |
CumulusNetworks/ansible-modules-extras | database/postgresql/postgresql_lang.py | 116 | 9789 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation (YAML) rendered by `ansible-doc postgresql_lang`.
# Typo fixes: "remote a language" -> "remove", "casade" -> "cascade",
# "Be carefull" -> "Be careful", "delete object that depend" -> "objects".
DOCUMENTATION = '''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database.
description:
   - Adds, removes or changes procedural languages with a PostgreSQL database.
   - This module allows you to add a language, remove a language or change the trust
     relationship with a PostgreSQL database. The module can be used on the machine
     where executed or on a remote host.
   - When removing a language from a database, it is possible that dependencies prevent
     the database from being removed. In that case, you can specify cascade to
     automatically drop objects that depend on the language (such as functions in the
     language). In case the language can't be deleted because it is required by the
     database system, you can specify fail_on_drop=no to ignore the error.
   - Be careful when marking a language as trusted since this could be a potential
     security breach. Untrusted languages allow only users with the PostgreSQL superuser
     privilege to use this language to create new functions.
version_added: "1.7"
options:
   lang:
     description:
       - name of the procedural language to add, remove or change
     required: true
     default: null
   trust:
     description:
       - make this language trusted for the selected db
     required: false
     default: no
     choices: [ "yes", "no" ]
   db:
     description:
       - name of database where the language will be added, removed or changed
     required: false
     default: null
   force_trust:
     description:
       - marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
       - use with care!
     required: false
     default: no
     choices: [ "yes", "no" ]
   fail_on_drop:
     description:
       - if C(yes), fail when removing a language. Otherwise just log and continue
       - in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade).
     required: false
     default: 'yes'
     choices: [ "yes", "no" ]
   cascade:
     description:
       - when dropping a language, also delete objects that depend on this language.
       - only used when C(state=absent).
     required: false
     default: no
     choices: [ "yes", "no" ]
   port:
     description:
       - Database port to connect to.
     required: false
     default: 5432
   login_user:
     description:
       - User used to authenticate with PostgreSQL
     required: false
     default: postgres
   login_password:
     description:
       - Password used to authenticate with PostgreSQL (must match C(login_user))
     required: false
     default: null
   login_host:
     description:
       - Host running PostgreSQL where you want to execute the actions.
     required: false
     default: localhost
   state:
     description:
       - The state of the language for the selected database
     required: false
     default: present
     choices: [ "present", "absent" ]
notes:
   - The default authentication assumes that you are either logging in as or
     sudo'ing to the postgres account on the host.
   - This module uses psycopg2, a Python PostgreSQL database adapter. You must
     ensure that psycopg2 is installed on the host before using this module. If
     the remote host is the PostgreSQL server (which is the default case), then
     PostgreSQL must also be installed on the remote host. For Ubuntu-based
     systems, install the postgresql, libpq-dev, and python-psycopg2 packages
     on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Jens Depuydt (@jensdepuydt)"
'''
EXAMPLES = '''
# Add language pltclu to database testdb if it doesn't exist:
- postgresql_lang db=testdb lang=pltclu state=present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted:
# Marks the language as trusted if it exists but isn't trusted yet
# force_trust makes sure that the language will be marked as trusted
- postgresql_lang db=testdb lang=pltclu state=present trust=yes force_trust=yes
# Remove language pltclu from database testdb:
- postgresql_lang: db=testdb lang=pltclu state=absent
# Remove language pltclu from database testdb and remove all dependencies:
- postgresql_lang: db=testdb lang=pltclu state=absent cascade=yes
# Remove language c from database testdb but ignore errors if something prevents the removal:
- postgresql_lang: db=testdb lang=pltclu state=absent fail_on_drop=no
'''
# Import guard: the module must still load (and report cleanly) when the
# psycopg2 driver is missing on the target host.
try:
    import psycopg2
except ImportError:
    postgresqldb_found = False
else:
    postgresqldb_found = True
def lang_exists(cursor, lang):
    """Return True if language *lang* is installed in the connected db.

    Uses a psycopg2 parameterized query (as lang_altertrust already does)
    instead of interpolating the user-supplied name into the SQL string,
    which was an injection vector.
    """
    query = "SELECT lanname FROM pg_language WHERE lanname = %s"
    cursor.execute(query, (lang,))
    return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
    """Return the lanpltrusted flag for language *lang*.

    Callers must ensure the language exists first (see lang_exists);
    otherwise fetchone() returns None and the subscript raises TypeError.
    The query is parameterized rather than string-interpolated to avoid
    SQL injection via the module's ``lang`` parameter.
    """
    query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %s"
    cursor.execute(query, (lang,))
    return cursor.fetchone()[0]
def lang_altertrust(cursor, lang, trust):
    """Set the lanpltrusted flag for language *lang* to *trust*."""
    cursor.execute(
        "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s",
        (trust, lang),
    )
    return True
def lang_add(cursor, lang, trust):
    """Install language *lang* in the connected database.

    A TRUSTED language may be used by non-superusers. The language name
    is an SQL identifier, so it cannot be passed as a query parameter;
    it is quoted and interpolated as in the original implementation.
    """
    template = 'CREATE TRUSTED LANGUAGE "%s"' if trust else 'CREATE LANGUAGE "%s"'
    cursor.execute(template % lang)
    return True
def lang_drop(cursor, lang, cascade):
    """Drop language *lang*; return True on success, False on failure.

    The DROP runs inside a savepoint so that a failure (e.g. dependent
    objects present without ``cascade``) leaves the caller's transaction
    usable instead of aborting it.
    """
    cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
    try:
        if cascade:
            cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang)
        else:
            cursor.execute("DROP LANGUAGE \"%s\"" % lang)
    except Exception:
        # Was a bare ``except:`` before, which also swallowed SystemExit
        # and KeyboardInterrupt; database errors all derive from Exception.
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
        cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
        return False
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
    return True
def main():
    """Ansible module entry point: ensure a PostgreSQL language's state.

    Reads module parameters, connects via psycopg2, then adds/drops or
    re-trusts the requested procedural language. Honors check mode by
    rolling back instead of committing.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default="postgres"),
            login_password=dict(default=""),
            login_host=dict(default=""),
            db=dict(required=True),
            port=dict(default='5432'),
            lang=dict(required=True),
            state=dict(default="present", choices=["absent", "present"]),
            trust=dict(type='bool', default='no'),
            force_trust=dict(type='bool', default='no'),
            cascade=dict(type='bool', default='no'),
            fail_on_drop=dict(type='bool', default='yes'),
        ),
        supports_check_mode = True
    )
    db = module.params["db"]
    port = module.params["port"]
    lang = module.params["lang"]
    state = module.params["state"]
    trust = module.params["trust"]
    force_trust = module.params["force_trust"]
    cascade = module.params["cascade"]
    fail_on_drop = module.params["fail_on_drop"]
    # Fail cleanly if the optional psycopg2 import at module load failed.
    if not postgresqldb_found:
        module.fail_json(msg="the python psycopg2 module is required")
    # Map module parameter names onto psycopg2.connect() keyword names;
    # empty-string values are dropped so psycopg2 falls back to defaults.
    params_map = {
        "login_host":"host",
        "login_user":"user",
        "login_password":"password",
        "port":"port",
        "db":"database"
    }
    # NOTE(review): ``iteritems`` and the ``except Exception, e`` syntax
    # below are Python 2 only; this module will not run under Python 3.
    kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
              if k in params_map and v != "" )
    try:
        db_connection = psycopg2.connect(**kw)
        cursor = db_connection.cursor()
    except Exception, e:
        module.fail_json(msg="unable to connect to database: %s" % e)
    changed = False
    lang_dropped = False
    # ``kw`` is reused here as the exit_json payload.
    kw = dict(db=db,lang=lang,trust=trust)
    if state == "present":
        if lang_exists(cursor, lang):
            # Language already present: only the trust flag may need to flip.
            lang_trusted = lang_istrusted(cursor, lang)
            if (lang_trusted and not trust) or (not lang_trusted and trust):
                if module.check_mode:
                    changed = True
                else:
                    changed = lang_altertrust(cursor, lang, trust)
        else:
            if module.check_mode:
                changed = True
            else:
                changed = lang_add(cursor, lang, trust)
                # force_trust overrides whatever trust CREATE LANGUAGE set.
                if force_trust:
                    changed = lang_altertrust(cursor, lang, trust)
    else:
        # state == "absent": drop the language if it is installed.
        if lang_exists(cursor, lang):
            if module.check_mode:
                changed = True
                kw['lang_dropped'] = True
            else:
                changed = lang_drop(cursor, lang, cascade)
                if fail_on_drop and not changed:
                    msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore"
                    module.fail_json(msg=msg)
                kw['lang_dropped'] = changed
    # Commit for real runs; roll back everything in check mode.
    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()
    kw['changed'] = changed
    module.exit_json(**kw)
# import module snippets
# Historical Ansible convention: the wildcard import that supplies
# AnsibleModule is placed after the module body, just before invocation.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
ic-labs/django-icekit | icekit/api/images/serializers.py | 1 | 2783 | from django.apps import apps
from rest_framework import serializers
from rest_framework.settings import api_settings
from drf_queryfields import QueryFieldsMixin
from icekit.api.base_serializers import WritableSerializerHelperMixin, \
WritableRelatedFieldSettings
# Resolve the models through the app registry at import time; string
# lookups avoid importing the plugin apps directly (circular imports).
Image = apps.get_model('icekit_plugins_image.Image')
MediaCategory = apps.get_model('icekit.MediaCategory')
class MediaCategorySerializer(serializers.ModelSerializer):
    """Serializer for ``MediaCategory``, used as nested data on images."""
    # Redefine `name` field here to avoid `unique=True` constraint that will
    # be unavoidably applied by DRF validators if we leave the field to be
    # autogenerated based on the model.
    name = serializers.CharField(
        max_length=255,
        read_only=False,
        required=False,
    )
    class Meta:
        model = MediaCategory
        fields = ['id', 'name']
        extra_kwargs = {
            # Make the auto PK writable/optional so existing categories can
            # be referenced by ID when written as nested data.
            'id': {
                'read_only': False,
                'required': False,
            },
        }
class ImageSerializer(
    WritableSerializerHelperMixin,
    QueryFieldsMixin,
    serializers.HyperlinkedModelSerializer
):
    """
    A serializer for an ICEkit Image.

    ``QueryFieldsMixin`` lets API clients restrict the returned fields via
    query parameters; ``WritableSerializerHelperMixin`` provides writable
    nested-relation support configured by ``writable_related_fields``.
    """
    # Nested categories; writability is configured below rather than here.
    categories = MediaCategorySerializer(
        many=True,
    )
    class Meta:
        model = Image
        fields = [
            api_settings.URL_FIELD_NAME,
            'id',
            'image',
            'width',
            'height',
            'title',
            'alt_text',
            'caption',
            'credit',
            'source',
            'external_ref',
            'categories',
            'license',
            'notes',
            'date_created',
            'date_modified',
            'is_ok_for_web',
            'is_cropping_allowed',
        ]
        extra_kwargs = {
            'url': {
                'lookup_field': 'pk',
                'view_name': 'api:image-api-detail',
            },
        }
    # Categories may be matched by ID or name, and unknown names create
    # new MediaCategory rows on write.
    writable_related_fields = {
        'categories': WritableRelatedFieldSettings(
            lookup_field=['id', 'name'], can_create=True),
    }
# TODO It is probably not a good idea to allow API user to set auto-gen ID
# field, but this is the only way I have found (so far) to allow ID to be
# passed through API to relate existing images.
class RelatedImageSerializer(ImageSerializer):
    """
    A serializer for an ICEkit Image relationships that exposes the ID primary
    key field to permit referring to existing images by ID, instead of needing
    to upload an actual image file every time.
    """
    class Meta(ImageSerializer.Meta):
        # NOTE(review): this replaces (not merges) the parent Meta's
        # extra_kwargs, so the parent's 'url' kwargs are dropped here.
        extra_kwargs = {
            'id': {
                'read_only': False,
                'required': False,
            },
            'image': {
                'required': False,
            }
        }
| mit |
tayfun/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
    """Oracle GIS introspection: maps SDO_GEOMETRY columns to GeometryField."""
    # Associating any OBJECTVAR instances with GeometryField. Of course,
    # this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyways.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
    def get_geometry_type(self, table_name, geo_col):
        """Return ``(field_type, field_params)`` for a geometry column by
        consulting Oracle's USER_SDO_GEOM_METADATA view."""
        cursor = self.connection.cursor()
        try:
            # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
            try:
                cursor.execute(
                    'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
                    'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
                    (table_name.upper(), geo_col.upper())
                )
                row = cursor.fetchone()
            except Exception as msg:
                new_msg = (
                    'Could not find entry in USER_SDO_GEOM_METADATA '
                    'corresponding to "%s"."%s"\n'
                    'Error message: %s.') % (table_name, geo_col, msg)
                six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
            # TODO: Research way to find a more specific geometry field type for
            # the column's contents.
            field_type = 'GeometryField'
            # Getting the field parameters.
            # NOTE(review): if the query succeeds but matches no row,
            # ``row`` is None and the unpack below raises a bare TypeError
            # instead of the friendlier message above -- confirm intended.
            field_params = {}
            dim, srid = row
            # Only record non-default SRID (4326/WGS84 is Django's default).
            if srid != 4326:
                field_params['srid'] = srid
            # Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
            dim = len(dim)
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()
        return field_type, field_params
| bsd-3-clause |
pianomania/scikit-learn | sklearn/externals/joblib/_memory_helpers.py | 303 | 3605 | try:
# Available in Python 3
from tokenize import open as open_py_source
except ImportError:
# Copied from python3 tokenize
from codecs import lookup, BOM_UTF8
import re
from io import TextIOWrapper, open
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def _detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that
should be used to decode a Python source file. It requires one
argment, readline, in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are
present, but disagree, a SyntaxError will be raised. If the encoding
cookie is an invalid charset, raise a SyntaxError. Note that if a
utf-8 bom is found, 'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be
returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open_py_source(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = open(filename, 'rb')
encoding, lines = _detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text | bsd-3-clause |
rmcauley/rainwave | rainwave/playlist_objects/artist.py | 1 | 5684 | from libs import db
from libs import config
from rainwave.playlist_objects.metadata import (
AssociatedMetadata,
MetadataUpdateError,
make_searchable_string,
)
class Artist(AssociatedMetadata):
    """Artist metadata, linked to songs through the r4_song_artist table.

    The class-level SQL templates below are consumed by AssociatedMetadata's
    generic load/associate machinery; only behavior that differs from the
    base class (ordering, cooldowns) is overridden here.
    """
    select_by_name_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE lower(artist_name) = lower(%s)"
    select_by_id_query = "SELECT artist_id AS id, artist_name AS name FROM r4_artists WHERE artist_id = %s"
    select_by_song_id_query = 'SELECT r4_artists.artist_id AS id, r4_artists.artist_name AS name, r4_song_artist.artist_is_tag AS is_tag, artist_order AS "order" FROM r4_song_artist JOIN r4_artists USING (artist_id) WHERE song_id = %s ORDER BY artist_order'
    disassociate_song_id_query = (
        "DELETE FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
    )
    associate_song_id_query = "INSERT INTO r4_song_artist (song_id, artist_id, artist_is_tag, artist_order) VALUES (%s, %s, %s, %s)"
    has_song_id_query = "SELECT COUNT(song_id) FROM r4_song_artist WHERE song_id = %s AND artist_id = %s"
    check_self_size_query = "SELECT COUNT(song_id) FROM r4_song_artist JOIN r4_songs USING (song_id) WHERE artist_id = %s AND song_verified = TRUE"
    delete_self_query = "DELETE FROM r4_artists WHERE artist_id = %s"
    # needs to be specialized because of artist_order
    def associate_song_id(self, song_id, is_tag=None, order=None):
        """Attach this artist to *song_id*, assigning an artist_order slot.

        If no order is given (and none is cached on self.data), the next
        slot after the song's current maximum artist_order is used.
        """
        if not order and not self.data.get("order"):
            order = db.c.fetch_var(
                "SELECT MAX(artist_order) FROM r4_song_artist WHERE song_id = %s",
                (song_id,),
            )
            # No artists yet: MAX() returns NULL/None, so start at 0.
            if not order:
                order = -1
            order += 1
        elif not order:
            order = self.data["order"]
        self.data["order"] = order
        # NOTE(review): "== None" works but "is None" is the idiom.
        if is_tag == None:
            is_tag = self.is_tag
        else:
            self.is_tag = is_tag
        # Insert only if the association does not already exist.
        if db.c.fetch_var(self.has_song_id_query, (song_id, self.id)) > 0:
            pass
        else:
            if not db.c.update(
                self.associate_song_id_query, (song_id, self.id, is_tag, order)
            ):
                raise MetadataUpdateError(
                    "Cannot associate song ID %s with %s ID %s"
                    % (song_id, self.__class__.__name__, self.id)
                )
    def _insert_into_db(self):
        """Create the artist row, including its searchable-name column."""
        self.id = db.c.get_next_id("r4_artists", "artist_id")
        return db.c.update(
            "INSERT INTO r4_artists (artist_id, artist_name, artist_name_searchable) VALUES (%s, %s, %s)",
            (self.id, self.data["name"], make_searchable_string(self.data["name"])),
        )
    def _update_db(self):
        """Update the artist's name and regenerate its searchable form."""
        return db.c.update(
            "UPDATE r4_artists SET artist_name = %s, artist_name_searchable = %s WHERE artist_id = %s",
            (self.data["name"], make_searchable_string(self.data["name"]), self.id),
        )
    def _start_cooldown_db(self, sid, cool_time):
        # Artists don't have cooldowns on Rainwave.
        pass
    def _start_election_block_db(self, sid, num_elections):
        # Artists don't block elections either (OR DO THEY) (they don't)
        pass
    def load_all_songs(self, sid, user_id=1):
        """Populate self.data["all_songs"] with every verified song by this
        artist, grouped first by station ID and then by album ID.

        One flat query (with the requesting user's ratings/faves joined in)
        is post-processed in Python to match the API's usual shape.
        """
        all_songs = db.c.fetch_all(
            "SELECT r4_song_artist.song_id AS id, "
            "r4_songs.song_origin_sid AS sid, "
            "song_title AS title, "
            "CAST(ROUND(CAST(song_rating AS NUMERIC), 1) AS REAL) AS rating, "
            "song_exists AS requestable, "
            "song_length AS length, "
            "song_cool AS cool, "
            "song_cool_end AS cool_end, "
            "song_url as url, song_link_text as link_text, "
            "COALESCE(song_rating_user, 0) AS rating_user, "
            "COALESCE(song_fave, FALSE) AS fave, "
            "album_name, r4_albums.album_id "
            "FROM r4_song_artist "
            "JOIN r4_songs USING (song_id) "
            "JOIN r4_albums USING (album_id) "
            "LEFT JOIN r4_album_sid ON (r4_albums.album_id = r4_album_sid.album_id AND r4_album_sid.sid = %s) "
            "LEFT JOIN r4_song_sid ON (r4_songs.song_id = r4_song_sid.song_id AND r4_song_sid.sid = %s) "
            "LEFT JOIN r4_song_ratings ON (r4_song_artist.song_id = r4_song_ratings.song_id AND r4_song_ratings.user_id = %s) "
            "WHERE r4_song_artist.artist_id = %s AND r4_songs.song_verified = TRUE "
            "ORDER BY song_exists DESC, album_name, song_title",
            (sid, sid, user_id, self.id),
        )
        # And of course, now we have to burn extra CPU cycles to make sure the right album name is used and that we present the data
        # in the same format seen everywhere else on the API. Still, much faster then loading individual song objects.
        self.data["all_songs"] = {}
        for configured_sids in config.station_ids:
            self.data["all_songs"][configured_sids] = {}
        # Anonymous users (user_id <= 1) can never request songs.
        requestable = True if user_id > 1 else False
        for song in all_songs:
            if not song["sid"] in config.station_ids:
                continue
            song["requestable"] = requestable and song["requestable"]
            if not song["album_id"] in self.data["all_songs"][song["sid"]]:
                self.data["all_songs"][song["sid"]][song["album_id"]] = []
            self.data["all_songs"][song["sid"]][song["album_id"]].append(song)
            # Reshape the flat album columns into the API's "albums" list;
            # pops mutate the dict already appended above (same object).
            song["albums"] = [
                {
                    "name": song.pop("album_name"),
                    "id": song.pop("album_id"),
                }
            ]
    def to_dict(self, user=None):
        """Serialize to a dict, adding the artist_order on top of the base."""
        d = super(Artist, self).to_dict(user)
        d["order"] = self.data["order"]
        return d
| gpl-2.0 |
roryk/bipy | bipy/toolbox/cutadapt_tool.py | 1 | 5658 | """This module provides an interface to cutadapt with a set of commonly
used adapters for trimming
"""
from bipy.utils import flatten_options, append_stem, flatten, which
import subprocess
import os
from bcbio.utils import safe_makedir, file_exists
import sh
import yaml
import bcbio.provenance.do as do
# adapter sequences for various commonly used systems
ADAPTERS = {}
ADAPTERS["illumina"] = [
["ACACTCTTTCCCTACACGACGCTCTTCCGATCT", "-a", "ill_pe_adapter1"],
["TGTGAGAAAGGGATGTGCTGCGAGAAGGCTAG", "-a", "ill_pe_adapter1_rc"],
["GATCGGAAGAGCGGTTCAGCAGGAATGCCGAG", "-a", "ill_pe_adapter2"],
["TCTAGCCTTCTCGCCAAGTCGTCCTTACGGCTC", "-a", "ill_pe_adapter2_rc"]]
ADAPTERS["nextera"] = [
["AATGATACGGCGACCACCGAGATCTACACGCCTCCCTCGCGCCATCAG", "-a",
"nex_pe_adapter1"],
["CTGATGGCGCGAGGGAGGCGTGTAGATCTCGGTGGTCGCCGTATCATT", "-a",
"nex_pe_adapter1_rc"],
["CAAGCAGAAGACGGCATACGAGATCGGTCTGCCTTGCCAGCCCGCTCAG",
"-a", "nex_pe_adapter2_nobc"],
["CTGAGCGGGCTGGCAAGGCAGACCGATCTCGTATGCCGTCTTCTGCTTG",
"-a", "nex_pe_adapter2_nobc_rc"],
["CTGATGGCGCGAGGGAGGCGTGTAGATCTCGGTGGTCGCCGTATCATTCTGTCTCTTATACACATCT",
"-a", "nex_transposon_pe_adapter1_rc"],
["AGATGTGTATAAGAGACAGAATGATACGGCGACCACCGAGATCTACACGCCTCCCTCGCGCCATCAG",
"-a", "nex_transposon_pe_adapter1"],
["AGATGTGTATAAGAGACAGCAAGCAGAAGACGGCATACGAGATCGGTCTGCCTTGCCAGCCCGCTCAG",
"-a", "nex_tranposon_pe_adapter2"]]
ADAPTERS["polya"] = [
["AAAAAAAAAAAAAAAAAAAAAAAAAAA", "-a", "polyA tail"],
["TTTTTTTTTTTTTTTTTTTTTTTTTTT", "-a", "polyT tail"]]
ADAPTERS["iontorrent"] = [
["CCACTACGCCTCCGCTTTCCTCTCTATGGGCAGTCGGTGAT", "-a",
"ion_5_prime_adapter"],
["CTGAGTCGGAGACACGCAGGGATGAGATGG", "-a", "3_prime_adapter"],
["ATCACCGACTGCCCATAGAGAGGAAAGCGGAGGCGTAGTGG", "-a",
"5_prime_adapter_rc"],
["CCATCTCATCCCTGCGTGTCTCCGACTCAG", "-a", "3_prime_adapter_rc"]]
TRUSEQ_BARCODES = {"ATCACG": 1, "AGTCAA": 13, "ACTGAT": 25, "CGGAAT": 37,
"CGATGT": 2, "AGTTCC": 14, "ATGAGC": 26, "CTAGCT": 38,
"TTAGGC": 3, "ATGTCA": 15, "ATTCCT": 27, "CTATAC": 39,
"TGACCA": 4, "CCGTCC": 16, "CAAAAG": 28, "CTCAGA": 40,
"ACAGTG": 5, "GTAGAG": 17, "CAACTA": 29, "GACGAC": 41,
"GCCAAT": 6, "GTCCGC": 18, "CACCGG": 30, "TAATCG": 42,
"CAGATC": 7, "GTGAAA": 19, "CACGAT": 31, "TACAGC": 43,
"ACTTGA": 8, "GTGGCC": 20, "CACTCA": 32, "TATAAT": 44,
"GATCAG": 9, "GTTTCG": 21, "CAGGCG": 33, "TCATTC": 45,
"TAGCTT": 10, "CGTACG": 22, "CATGGC": 34, "TCCCGA": 46,
"GGCTAC": 11, "GAGTGG": 23, "CATTTT": 35, "TCGAAG": 47,
"CTTGTA": 12, "GGTAGC": 24, "CCAACA": 36, "TCGGCA": 48}
# Barcodes numbered 1-12 form the TruSeq RNA-seq subset of the full kit.
VALID_TRUSEQ_RNASEQ = {k: v for (k, v) in TRUSEQ_BARCODES.items() if v < 13}
# NOTE(review): TRUSEQ_PREFIX is not used by truseq_barcode_lookup, which
# hard-codes its own prefix with a leading "A" -- confirm which is canonical.
TRUSEQ_PREFIX = "GATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
def truseq_barcode_lookup(barcode, small=False):
    """
    Return the full TruSeq adapter sequence for *barcode* by splicing it
    between the fixed adapter prefix and suffix. Raises NotImplementedError
    for small-RNA lookups and ValueError for unknown barcodes.
    """
    if small:
        raise NotImplementedError("Small RNA barcodes not implemented. Need "
                                  "to check to make sure the prefix and "
                                  "suffix sequences are the same as the "
                                  "RNA-seq barcodes.")
    if barcode not in VALID_TRUSEQ_RNASEQ:
        raise ValueError("Barcode not found in TruSeq barcodes. Might need "
                         "to implement v1 and v2 versions.")
    pieces = ("AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC",
              barcode,
              "ATCTCGTATGCCGTCTTCTGCTTG")
    return "".join(pieces)
def _get_adapter(adapter):
return [adapter[1], adapter[0]]
def _get_platform_adapters(platform):
    # Unknown platforms yield an empty list rather than raising.
    return [_get_adapter(entry) for entry in ADAPTERS.get(platform, [])]
def _parse(config):
    """Build the cutadapt argument list from a stage config dict.

    Collects explicit adapters, then platform presets, and falls back to
    the illumina + polyA presets if nothing was configured; any extra
    "options" entries are appended verbatim.
    """
    # handle the adapters, defaulting to illumina and a poly-a trimmer
    # if none are provided
    adapters = []
    adapters += flatten(map(_get_adapter,
                            config.get("adapters", [])))
    # add built in platform if available
    # NOTE(review): this assumes "platform" is a list of names; a bare
    # string such as "illumina" would be iterated character by character.
    platform = config.get("platform", None)
    if platform:
        adapters += flatten(map(_get_platform_adapters,
                                [p for p in platform if p in ADAPTERS]))
    # default to illumina and poly A
    if not adapters:
        adapters += flatten(map(_get_platform_adapters,
                                [p for p in ["illumina", "polya"]]))
    arguments = []
    arguments += adapters
    # grab everything else
    arguments += config.get("options", [])
    # Everything must be stringified before being handed to the subprocess.
    return map(str, list(flatten(arguments)))
def run(in_file, stage_config, config):
    """Run cutadapt on *in_file* and return the trimmed output path.

    Output goes to <results>/cutadapt/<stem>_trimmed.<ext> when a results
    directory is configured, otherwise next to the input. Acts as a no-op
    (returning the existing path) if the output file already exists.
    """
    arguments = [stage_config["program"]]
    arguments += _parse(stage_config)
    results_dir = config["dir"].get("results", None)
    if results_dir:
        out_dir = os.path.join(results_dir, "cutadapt")
        safe_makedir(out_dir)
        out_file = os.path.join(out_dir,
                                os.path.basename(append_stem(in_file,
                                                             "trimmed")))
    else:
        out_file = append_stem(in_file, "trimmed")
    # Idempotency: skip work if a previous run already produced the output.
    if file_exists(out_file):
        return out_file
    arguments.extend(["--output", out_file, in_file])
    do.run(arguments, "Running cutadapt on %s." % (in_file),
           None)
    return out_file
def _common_prefix(first, second):
for i, (x, y) in enumerate(zip(first, second)):
if x != y:
break
return first[:i]
| mit |
kadel/kedge | vendor/github.com/openshift/origin/cmd/cluster-capacity/go/src/github.com/kubernetes-incubator/cluster-capacity/vendor/k8s.io/kubernetes/examples/selenium/selenium-test.py | 497 | 1089 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def check_browser(browser):
    """Smoke-test one browser node through the Selenium hub.

    *browser* names a DesiredCapabilities attribute (e.g. "FIREFOX",
    "CHROME"). Loads google.com via the remote hub and asserts the page
    content arrived.
    """
    driver = webdriver.Remote(
        command_executor='http://selenium-hub:4444/wd/hub',
        desired_capabilities=getattr(DesiredCapabilities, browser)
    )
    driver.get("http://google.com")
    # NOTE(review): if this assert fails, driver.close() is skipped and the
    # session is leaked on the hub.
    assert "google" in driver.page_source
    driver.close()
    print("Browser %s checks out!" % browser)
# Exercise both browser images expected to be attached to the grid.
check_browser("FIREFOX")
check_browser("CHROME")
ledinhphuong/electron-builder | packages/dmg-builder/vendor/ds_store/buddy.py | 17 | 14412 | # -*- coding: utf-8 -*-
import os
import bisect
import struct
import binascii
# Python 2/3 compatibility shims: a uniform iterkeys() helper and a
# ``unicode`` name that exists on both interpreters (str on Python 3).
try:
    {}.iterkeys
    iterkeys = lambda x: x.iterkeys()
except AttributeError:
    iterkeys = lambda x: x.keys()
try:
    unicode
except NameError:
    unicode = str
class BuddyError(Exception):
    """Raised for malformed buddy-allocator files or out-of-bounds access."""
    pass
class Block(object):
    """A fixed-size, seekable, in-memory view of one allocated block.

    The block's bytes are read eagerly from the allocator on construction;
    all mutations happen on the in-memory buffer and are written back via
    the allocator on flush()/close(). Usable as a context manager.
    """

    def __init__(self, allocator, offset, size):
        self._allocator = allocator
        self._offset = offset
        self._size = size
        self._value = bytearray(allocator.read(offset, size))
        self._pos = 0
        self._dirty = False

    def __len__(self):
        return self._size

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """Write pending changes back through the allocator, if any."""
        if self._dirty:
            self.flush()

    def flush(self):
        if self._dirty:
            self._dirty = False
            self._allocator.write(self._offset, self._value)

    def invalidate(self):
        """Discard pending changes without writing them back."""
        self._dirty = False

    def zero_fill(self):
        """Zero the block from the current position to its end."""
        # Fixed: the local was previously named ``len``, shadowing the
        # builtin for the rest of the method body.
        remaining = self._size - self._pos
        zeroes = b'\0' * remaining
        self._value[self._pos:self._size] = zeroes
        self._dirty = True

    def tell(self):
        return self._pos

    def seek(self, pos, whence=os.SEEK_SET):
        """Move the read/write position; ValueError if out of range.

        NOTE(review): SEEK_END here computes ``size - pos`` (a positive
        distance back from the end) rather than the usual ``size + pos``
        with a negative offset -- existing callers rely on this.
        """
        if whence == os.SEEK_CUR:
            pos += self._pos
        elif whence == os.SEEK_END:
            pos = self._size - pos
        if pos < 0 or pos > self._size:
            raise ValueError('Seek out of range in Block instance')
        self._pos = pos

    def read(self, size_or_format):
        """Read raw bytes (int argument) or unpack a struct format string."""
        if isinstance(size_or_format, (str, unicode, bytes)):
            size = struct.calcsize(size_or_format)
            fmt = size_or_format
        else:
            size = size_or_format
            fmt = None

        if self._size - self._pos < size:
            raise BuddyError('Unable to read %lu bytes in block' % size)

        data = self._value[self._pos:self._pos + size]
        self._pos += size

        if fmt is not None:
            if isinstance(data, bytearray):
                return struct.unpack_from(fmt, bytes(data))
            else:
                return struct.unpack(fmt, data)
        else:
            return data

    def write(self, data_or_format, *args):
        """Overwrite bytes at the current position.

        With extra args, the first argument is a struct format string and
        the args are packed; otherwise it is raw data.
        """
        if len(args):
            data = struct.pack(data_or_format, *args)
        else:
            data = data_or_format

        if self._pos + len(data) > self._size:
            raise ValueError('Attempt to write past end of Block')

        self._value[self._pos:self._pos + len(data)] = data
        self._pos += len(data)

        self._dirty = True

    def insert(self, data_or_format, *args):
        """Insert bytes at the current position, truncating the block's
        tail so the total size stays constant."""
        if len(args):
            data = struct.pack(data_or_format, *args)
        else:
            data = data_or_format

        del self._value[-len(data):]
        self._value[self._pos:self._pos] = data
        self._pos += len(data)

        self._dirty = True

    def delete(self, size):
        """Delete *size* bytes at the current position, zero-padding the
        tail so the total size stays constant."""
        if self._pos + size > self._size:
            raise ValueError('Attempt to delete past end of Block')
        del self._value[self._pos:self._pos + size]
        self._value += b'\0' * size
        self._dirty = True

    def __str__(self):
        return binascii.b2a_hex(self._value)
class Allocator(object):
    """Buddy allocator over a .DS_Store-style "Bud1" file.

    Manages a file split into power-of-two sized blocks. Blocks are
    addressed by small integer block numbers; each entry in ``_offsets``
    packs the byte offset (upper bits) with the block's width exponent
    (low 5 bits). ``_free[w]`` lists free offsets of width ``2**w``, and
    ``_toc`` maps names to block numbers. Note: all file reads/writes go
    through read()/write(), which apply a fixed 4-byte skew.
    """
    def __init__(self, the_file):
        self._file = the_file
        self._dirty = False

        self._file.seek(0)

        # Read the header
        magic1, magic2, offset, size, offset2, self._unknown1 \
          = self.read(-4, '>I4sIII16s')

        if magic2 != b'Bud1' or magic1 != 1:
            raise BuddyError('Not a buddy file')

        if offset != offset2:
            raise BuddyError('Root addresses differ')

        self._root = Block(self, offset, size)

        # Read the block offsets
        count, self._unknown2 = self._root.read('>II')
        self._offsets = []
        # Offsets are stored padded to a multiple of 256 entries.
        c = (count + 255) & ~255
        while c:
            self._offsets += self._root.read('>256I')
            c -= 256
        self._offsets = self._offsets[:count]

        # Read the TOC
        self._toc = {}
        count = self._root.read('>I')[0]
        for n in range(count):
            nlen = self._root.read('B')[0]
            name = bytes(self._root.read(nlen))
            value = self._root.read('>I')[0]
            self._toc[name] = value

        # Read the free lists
        self._free = []
        for n in range(32):
            # NOTE(review): ``count`` here is a 1-tuple (no [0]); the
            # '%uI' % count formatting below works only because a
            # one-element tuple satisfies the %u conversion -- confirm.
            count = self._root.read('>I')
            self._free.append(list(self._root.read('>%uI' % count)))

    @classmethod
    def open(cls, file_or_name, mode='r+'):
        """Open (or with 'w' in *mode*, create) a buddy file and return an
        Allocator over it. *file_or_name* may be a path or a binary file."""
        if isinstance(file_or_name, (str, unicode)):
            if not 'b' in mode:
                mode = mode[:1] + 'b' + mode[1:]
            f = open(file_or_name, mode)
        else:
            f = file_or_name

        if 'w' in mode:
            # Create an empty file in this case
            f.truncate()

            # An empty root block needs 1264 bytes:
            #
            #     0  4       offset count
            #     4  4       unknown
            #     8  4       root block offset (2048)
            #    12  255 * 4 padding (offsets are in multiples of 256)
            #  1032  4       toc count (0)
            #  1036  228     free list
            #  total 1264

            # The free list will contain the following:
            #
            #     0  5 * 4   no blocks of width less than 5
            #    20  6 * 8   1 block each of widths 5 to 10
            #    68  4       no blocks of width 11 (allocated for the root)
            #    72  19 * 8  1 block each of widths 12 to 30
            #   224  4       no blocks of width 31
            #  total 228
            #
            # (The reason for this layout is that we allocate 2**5 bytes for
            #  the header, which splits the initial 2GB region into every size
            #  below 2**31, including *two* blocks of size 2**5, one of which
            #  we take.  The root block itself then needs a block of size
            #  2**11.  Conveniently, each of these initial blocks will be
            #  located at offset 2**n where n is its width.)

            # Write the header
            header = struct.pack(b'>I4sIII16s',
                                 1, b'Bud1',
                                 2048, 1264, 2048,
                                 b'\x00\x00\x10\x0c'
                                 b'\x00\x00\x00\x87'
                                 b'\x00\x00\x20\x0b'
                                 b'\x00\x00\x00\x00')
            f.write(header)
            f.write(b'\0' * 2016)

            # Write the root block
            free_list = [struct.pack(b'>5I', 0, 0, 0, 0, 0)]
            for n in range(5, 11):
                free_list.append(struct.pack(b'>II', 1, 2**n))
            free_list.append(struct.pack(b'>I', 0))
            for n in range(12, 31):
                free_list.append(struct.pack(b'>II', 1, 2**n))
            free_list.append(struct.pack(b'>I', 0))

            root = b''.join([struct.pack(b'>III', 1, 0, 2048 | 5),
                             struct.pack(b'>I', 0) * 255,
                             struct.pack(b'>I', 0)] + free_list)
            f.write(root)

        return Allocator(f)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        self.flush()
        self._file.close()

    def flush(self):
        """Persist the root block (offsets, TOC, free lists) and the file
        header if anything changed since the last flush."""
        if self._dirty:
            size = self._root_block_size()
            self.allocate(size, 0)
            with self.get_block(0) as rblk:
                self._write_root_block_into(rblk)

            addr = self._offsets[0]
            offset = addr & ~0x1f
            size = 1 << (addr & 0x1f)

            self._file.seek(0, os.SEEK_SET)
            self._file.write(struct.pack(b'>I4sIII16s',
                                         1, b'Bud1',
                                         offset, size, offset,
                                         self._unknown1))

            self._dirty = False

        self._file.flush()

    def read(self, offset, size_or_format):
        """Read data at `offset', or raise an exception.  `size_or_format'
           may either be a byte count, in which case we return raw data,
           or a format string for `struct.unpack', in which case we
           work out the size and unpack the data before returning it."""
        # N.B. There is a fixed offset of four bytes(!)
        self._file.seek(offset + 4, os.SEEK_SET)

        if isinstance(size_or_format, (str, unicode)):
            size = struct.calcsize(size_or_format)
            fmt = size_or_format
        else:
            size = size_or_format
            fmt = None

        ret = self._file.read(size)
        if len(ret) < size:
            ret += b'\0' * (size - len(ret))

        if fmt is not None:
            if isinstance(ret, bytearray):
                ret = struct.unpack_from(fmt, bytes(ret))
            else:
                ret = struct.unpack(fmt, ret)

        return ret

    def write(self, offset, data_or_format, *args):
        """Write data at `offset', or raise an exception.  `data_or_format'
           may either be the data to write, or a format string for `struct.pack',
           in which case we pack the additional arguments and write the
           resulting data."""
        # N.B. There is a fixed offset of four bytes(!)
        self._file.seek(offset + 4, os.SEEK_SET)

        if len(args):
            data = struct.pack(data_or_format, *args)
        else:
            data = data_or_format

        self._file.write(data)

    def get_block(self, block):
        """Return a Block for block number *block*, or None if out of range."""
        try:
            addr = self._offsets[block]
        except IndexError:
            return None

        offset = addr & ~0x1f
        size = 1 << (addr & 0x1f)

        return Block(self, offset, size)

    def _root_block_size(self):
        """Return the number of bytes required by the root block."""
        # Offsets
        size = 8
        size += 4 * ((len(self._offsets) + 255) & ~255)

        # TOC
        size += 4
        size += sum([5 + len(s) for s in self._toc])

        # Free list
        size += sum([4 + 4 * len(fl) for fl in self._free])

        return size

    def _write_root_block_into(self, block):
        """Serialize offsets, TOC (sorted by name), and free lists."""
        # Offsets
        block.write('>II', len(self._offsets), self._unknown2)
        block.write('>%uI' % len(self._offsets), *self._offsets)
        extra = len(self._offsets) & 255
        if extra:
            block.write(b'\0\0\0\0' * (256 - extra))

        # TOC
        keys = list(self._toc.keys())
        keys.sort()

        block.write('>I', len(keys))
        for k in keys:
            block.write('B', len(k))
            block.write(k)
            block.write('>I', self._toc[k])

        # Free list
        for w, f in enumerate(self._free):
            block.write('>I', len(f))
            if len(f):
                block.write('>%uI' % len(f), *f)

    def _buddy(self, offset, width):
        """Return (free_list, buddy_offset, index_or_None) for the buddy of
        the block at *offset* with width exponent *width*."""
        f = self._free[width]
        b = offset ^ (1 << width)

        try:
            ndx = f.index(b)
        except ValueError:
            ndx = None

        return (f, b, ndx)

    def _release(self, offset, width):
        """Return a block to the free lists, coalescing with free buddies."""
        # Coalesce
        while True:
            f,b,ndx = self._buddy(offset, width)

            if ndx is None:
                break

            offset &= b
            width += 1
            del f[ndx]

        # Add to the list
        bisect.insort(f, offset)

        # Mark as dirty
        self._dirty = True

    def _alloc(self, width):
        """Take a free block of width exponent *width*, splitting larger
        free blocks as needed (the buddy-allocator split step)."""
        w = width
        while not self._free[w]:
            w += 1
        while w > width:
            offset = self._free[w].pop(0)
            w -= 1
            self._free[w] = [offset, offset ^ (1 << w)]
        self._dirty = True
        return self._free[width].pop(0)

    def allocate(self, bytes, block=None):
        """Allocate or reallocate a block such that it has space for at least
           `bytes' bytes."""
        # NOTE(review): the parameter name ``bytes`` shadows the builtin
        # within this method.
        if block is None:
            # Find the first unused block
            try:
                block = self._offsets.index(0)
            except ValueError:
                block = len(self._offsets)
                self._offsets.append(0)

        # Compute block width
        width = max(bytes.bit_length(), 5)

        addr = self._offsets[block]
        offset = addr & ~0x1f

        if addr:
            blkwidth = addr & 0x1f
            if blkwidth == width:
                return block
            self._release(offset, width)
            self._offsets[block] = 0

        offset = self._alloc(width)
        self._offsets[block] = offset | width
        return block

    def release(self, block):
        """Free block number *block* and clear its offsets entry."""
        addr = self._offsets[block]

        if addr:
            width = addr & 0x1f
            offset = addr & ~0x1f
            self._release(offset, width)

        # NOTE(review): ``block == len(self._offsets)`` can never be true
        # here (the indexing above would already have raised IndexError);
        # this looks like it was meant to trim a trailing entry
        # (len(self._offsets) - 1) -- confirm before relying on it.
        if block == len(self._offsets):
            del self._offsets[block]
        else:
            self._offsets[block] = 0

    # Dict-like protocol over the name -> block-number table of contents.
    def __len__(self):
        return len(self._toc)

    def __getitem__(self, key):
        if not isinstance(key, (str, unicode)):
            raise TypeError('Keys must be of string type')
        if not isinstance(key, bytes):
            key = key.encode('latin_1')
        return self._toc[key]

    def __setitem__(self, key, value):
        if not isinstance(key, (str, unicode)):
            raise TypeError('Keys must be of string type')
        if not isinstance(key, bytes):
            key = key.encode('latin_1')
        self._toc[key] = value
        self._dirty = True

    def __delitem__(self, key):
        if not isinstance(key, (str, unicode)):
            raise TypeError('Keys must be of string type')
        if not isinstance(key, bytes):
            key = key.encode('latin_1')
        del self._toc[key]
        self._dirty = True

    def iterkeys(self):
        return iterkeys(self._toc)

    def keys(self):
        return iterkeys(self._toc)

    def __iter__(self):
        return iterkeys(self._toc)

    def __contains__(self, key):
        return key in self._toc
rbaindourov/v8-inspector | Source/chrome/tools/telemetry/telemetry/page/shared_page_state_unittest.py | 5 | 3340 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tempfile
import unittest
from telemetry.core import wpr_modes
from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import shared_page_state
from telemetry.unittest_util import options_for_unittests
from telemetry.user_story import user_story_set
def SetUpPageRunnerArguments(options):
    """Register story_runner's command-line args on *options* and apply
    their defaults, mirroring what the real page-runner entry point does
    so tests see a fully populated options object."""
    parser = options.CreateParser()
    story_runner.AddCommandLineArgs(parser)
    options.MergeDefaultValues(parser.get_default_values())
    story_runner.ProcessCommandLineArgs(parser, options)
class DummyTest(page_test.PageTest):
    """Minimal PageTest that measures nothing; used only to drive
    SharedPageState in these unit tests."""

    def ValidateAndMeasurePage(self, *_):
        pass
class FakeNetworkController(object):
    """Test double for the network controller: simply records the replay
    arguments it is configured with so assertions can inspect them."""

    def __init__(self):
        self.archive_path = None
        self.wpr_mode = None

    def SetReplayArgs(self, archive_path, wpr_mode, _netsim, _extra_wpr_args,
                      _make_javascript_deterministic=False):
        """Remember the archive path and WPR mode; ignore everything else."""
        self.wpr_mode = wpr_mode
        self.archive_path = archive_path
class SharedPageStateTests(unittest.TestCase):
    """Tests for SharedPageState's WPR setup and browser-option plumbing."""

    def setUp(self):
        # Build a realistic options object the way the page runner would.
        self.options = options_for_unittests.GetCopy()
        SetUpPageRunnerArguments(self.options)
        self.options.output_formats = ['none']
        self.options.suppress_gtest_report = True

    # pylint: disable=W0212
    def TestUseLiveSitesFlag(self, expected_wpr_mode):
        # Helper, not auto-discovered by unittest (name does not start
        # with lowercase "test"): drives _PrepareWpr with a fake network
        # controller and checks the WPR mode/archive it was handed.
        with tempfile.NamedTemporaryFile() as f:
            run_state = shared_page_state.SharedPageState(
                DummyTest(), self.options, page_set.PageSet())
            fake_network_controller = FakeNetworkController()
            run_state._PrepareWpr(fake_network_controller, f.name, None)
            self.assertEquals(fake_network_controller.wpr_mode,
                              expected_wpr_mode)
            self.assertEquals(fake_network_controller.archive_path, f.name)

    def testUseLiveSitesFlagSet(self):
        # --use-live-sites turns Web Page Replay off entirely.
        self.options.use_live_sites = True
        self.TestUseLiveSitesFlag(expected_wpr_mode=wpr_modes.WPR_OFF)

    def testUseLiveSitesFlagUnset(self):
        # Default is replaying from the archive.
        self.TestUseLiveSitesFlag(expected_wpr_mode=wpr_modes.WPR_REPLAY)

    def testConstructorCallsSetOptions(self):
        test = DummyTest()
        shared_page_state.SharedPageState(test, self.options, page_set.PageSet())
        self.assertEqual(test.options, self.options)

    def assertUserAgentSetCorrectly(
            self, shared_page_state_class, expected_user_agent):
        # Builds a one-story set whose page requests the given shared-state
        # class, instantiates that state, and checks which browser user
        # agent it selected on the options object.
        us = page.Page(
            'http://www.google.com',
            shared_page_state_class=shared_page_state_class)
        test = DummyTest()
        uss = user_story_set.UserStorySet()
        uss.AddUserStory(us)
        us.shared_state_class(test, self.options, uss)
        browser_options = self.options.browser_options
        actual_user_agent = browser_options.browser_user_agent_type
        self.assertEqual(expected_user_agent, actual_user_agent)

    def testPageStatesUserAgentType(self):
        self.assertUserAgentSetCorrectly(
            shared_page_state.SharedMobilePageState, 'mobile')
        self.assertUserAgentSetCorrectly(
            shared_page_state.SharedDesktopPageState, 'desktop')
        self.assertUserAgentSetCorrectly(
            shared_page_state.SharedTabletPageState, 'tablet')
        self.assertUserAgentSetCorrectly(
            shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
| bsd-3-clause |
valentin-krasontovitsch/ansible | test/units/modules/network/f5/test_bigip_firewall_address_list.py | 21 | 4606 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_address_list import ApiParameters
from library.modules.bigip_firewall_address_list import ModuleParameters
from library.modules.bigip_firewall_address_list import ModuleManager
from library.modules.bigip_firewall_address_list import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_firewall_address_list import ApiParameters
from ansible.modules.network.f5.bigip_firewall_address_list import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_address_list import ModuleManager
from ansible.modules.network.f5.bigip_firewall_address_list import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load the named fixture from the fixtures directory.

    The file contents are parsed as JSON when possible and returned raw
    otherwise; results are memoized in the module-level fixture_data cache.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as fh:
        contents = fh.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw text.
        pass
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Parameter-adapter tests: playbook-style module args vs. the
    device (REST API) representation."""

    def test_module_parameters(self):
        # Input shaped the way a playbook would supply it.
        args = dict(
            name='foo',
            description='this is a description',
            addresses=['1.1.1.1', '2.2.2.2'],
            address_ranges=['3.3.3.3-4.4.4.4', '5.5.5.5-6.6.6.6'],
            address_lists=['/Common/foo', 'foo']
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.description == 'this is a description'
        assert len(p.addresses) == 2
        assert len(p.address_ranges) == 2
        assert len(p.address_lists) == 2

    def test_api_parameters(self):
        # Fixture mirrors what the BIG-IP REST API returns for an
        # existing security address list.
        args = load_fixture('load_security_address_list_1.json')
        p = ApiParameters(params=args)
        assert len(p.addresses) == 2
        assert len(p.address_ranges) == 2
        assert len(p.address_lists) == 1
        assert len(p.fqdns) == 1
        assert len(p.geo_locations) == 5
        assert sorted(p.addresses) == ['1.1.1.1', '2700:bc00:1f10:101::6']
        assert sorted(p.address_ranges) == ['2.2.2.2-3.3.3.3', '5.5.5.5-6.6.6.6']
        assert p.address_lists[0] == '/Common/foo'
class TestManager(unittest.TestCase):
    """Drives ModuleManager.exec_module end-to-end with the device I/O
    methods mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        set_module_args(dict(
            name='foo',
            description='this is a description',
            addresses=['1.1.1.1', '2.2.2.2'],
            address_ranges=['3.3.3.3-4.4.4.4', '5.5.5.5-6.6.6.6'],
            address_lists=['/Common/foo', 'foo'],
            geo_locations=[
                dict(country='US', region='Los Angeles'),
                dict(country='China'),
                dict(country='EU')
            ],
            fqdns=['google.com', 'mit.edu'],
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen:
        # the list does not exist yet and device creation "succeeds".
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert 'addresses' in results
        assert 'address_lists' in results
        assert 'address_ranges' in results
        assert len(results['addresses']) == 2
        assert len(results['address_ranges']) == 2
        assert len(results['address_lists']) == 2
        assert results['description'] == 'this is a description'
| gpl-3.0 |
dgjnpr/py-junos-eznc | lib/jnpr/junos/factory/view.py | 1 | 8729 | import warnings
from contextlib import contextmanager
from copy import deepcopy
from lxml import etree
from jnpr.junos.factory.viewfields import ViewFields
class View(object):
    """
    View is the base-class that makes extracting values from XML
    data appear as objects with attributes.

    Field definitions live in FIELDS (name -> {'xpath': ..., optional
    'astype', 'group', 'table'}); GROUPS optionally maps a group name to
    an xpath whose subtree fields may be extracted from.
    """
    ITEM_NAME_XPATH = 'name'
    FIELDS = {}
    GROUPS = None

    # -------------------------------------------------------------------------
    # CONSTRUCTOR
    # -------------------------------------------------------------------------

    def __init__(self, table, view_xml):
        """
        :table:
          instance of the RunstatTable
        :view_xml:
          this should be an lxml etree Element object.  This
          constructor also accepts a list with a single item/XML
        """
        # if as_xml is passed as a list, make sure it only has
        # a single item, common response from an xpath search
        if isinstance(view_xml, list):
            if 1 == len(view_xml):
                view_xml = view_xml[0]
            else:
                raise ValueError("constructor only accepts a single item")

        # now ensure that the thing provided is an lxml etree Element
        if not isinstance(view_xml, etree._Element):
            # FIX: corrected the "accecpts" typo in the error message.
            raise ValueError("constructor only accepts lxml.etree._Element")

        self._table = table
        self.ITEM_NAME_XPATH = table.ITEM_NAME_XPATH
        self._init_xml(view_xml)

    def _init_xml(self, given_xml):
        """Bind the backing XML element and resolve GROUPS subtrees."""
        self._xml = given_xml
        if self.GROUPS is not None:
            self._groups = {}
            for xg_name, xg_xpath in self.GROUPS.items():
                xg_xml = self._xml.xpath(xg_xpath)
                # @@@ this is technically an error; need to trap it
                if not len(xg_xml):
                    continue
                self._groups[xg_name] = xg_xml[0]

    # -------------------------------------------------------------------------
    # PROPERTIES
    # -------------------------------------------------------------------------

    @property
    def T(self):
        """ return the Table instance for the View """
        return self._table

    @property
    def D(self):
        """ return the Device instance for this View """
        return self.T.D

    @property
    def name(self):
        """ return the name of view item """
        if self.ITEM_NAME_XPATH is None:
            return self._table.D.hostname
        if isinstance(self.ITEM_NAME_XPATH, str):
            # simple key
            return self._xml.findtext(self.ITEM_NAME_XPATH).strip()
        else:
            # composite key: tuple of the first match for each xpath
            return tuple([self.xml.xpath(i)[0].text.strip()
                          for i in self.ITEM_NAME_XPATH])

    # ALIAS key <=> name
    key = name

    @property
    def xml(self):
        """ returns the XML associated to the item """
        return self._xml

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------

    def keys(self):
        """ list of view keys, i.e. field names """
        return self.FIELDS.keys()

    def values(self):
        """ list of view values """
        return [getattr(self, field) for field in self.keys()]

    def items(self):
        """ list of tuple(key,value) """
        return zip(self.keys(), self.values())

    def _updater_instance(self, more):
        """ called from updater(); updates only this instance's FIELDS/GROUPS """
        if hasattr(more, 'fields'):
            self.FIELDS = deepcopy(self.__class__.FIELDS)
            self.FIELDS.update(more.fields.end)
        if hasattr(more, 'groups'):
            self.GROUPS = deepcopy(self.__class__.GROUPS)
            self.GROUPS.update(more.groups)

    def _updater_class(self, more):
        """ called from updater(); updates the class-wide FIELDS/GROUPS """
        if hasattr(more, 'fields'):
            self.FIELDS.update(more.fields.end)
        if hasattr(more, 'groups'):
            self.GROUPS.update(more.groups)

    @contextmanager
    def updater(self, fields=True, groups=False, all=True, **kvargs):
        """
        provide the ability for subclassing objects to extend the
        definitions of the fields.  this is implemented as a
        context manager with the form called from the subclass
        constructor:

            with self.extend() as more:
                more.fields = <dict>
                more.groups = <dict>   # optional
        """
        # ---------------------------------------------------------------------
        # create a new object class so we can attach stuff to it arbitrarily.
        # then pass that object to the caller, yo!
        # ---------------------------------------------------------------------
        more = type('RunstatViewMore', (object,), {})()
        if fields is True:
            # FIX: was RunstatMakerViewFields(), a name that does not exist
            # in this module (NameError at runtime); the helper imported at
            # the top of the file is ViewFields.
            more.fields = ViewFields()

        # ---------------------------------------------------------------------
        # callback through context manager
        # ---------------------------------------------------------------------
        yield more

        updater = self._updater_class if all is True else \
            self._updater_instance
        updater(more)

    def asview(self, view_cls):
        """ create a new View object for this item """
        return view_cls(self._table, self._xml)

    def refresh(self):
        """
        ~~~ EXPERIMENTAL ~~~
        refresh the data from the Junos device.  this only works if the table
        provides an "args_key", does not update the original table, just this
        specific view/item
        """
        warnings.warn("Experimental method: refresh")
        if self._table.can_refresh is not True:
            raise RuntimeError("table does not support this feature")

        # create a new table instance that gets only the specific named
        # value of this view
        tbl_xml = self._table._rpc_get(self.name)
        new_xml = tbl_xml.xpath(self._table.ITEM_XPATH)[0]
        self._init_xml(new_xml)
        return self

    # -------------------------------------------------------------------------
    # OVERLOADS
    # -------------------------------------------------------------------------

    def __repr__(self):
        """ returns the name of the View with the associate item name """
        return "%s:%s" % (self.__class__.__name__, self.name)

    def __getattr__(self, name):
        """
        returns a view item value, called as :obj.name:
        """
        item = self.FIELDS.get(name)
        if item is None:
            raise ValueError("Unknown field: '%s'" % name)

        if 'table' in item:
            # if this is a sub-table, then return that now
            return item['table'](self.D, self._xml)

        # otherwise, not a sub-table, and handle the field
        astype = item.get('astype', str)
        if 'group' in item:
            found = self._groups[item['group']].xpath(item['xpath'])
        else:
            found = self._xml.xpath(item['xpath'])

        len_found = len(found)

        if astype is bool:
            # handle the boolean flag case separately
            return bool(len_found)

        if not len_found:
            # even for the case of numbers, do not set the value.  we
            # want to detect "does not exist" vs. defaulting to 0
            # -- 2013-nov-19, JLS.
            return None

        try:
            # exception handler catches malformed xpath expressions
            # -- 2013-nov-19, JLS.
            # supports multiple xpath values, i.e. a list of things that
            # have the same xpath expression (common in configs)
            # -- 2013-dec-06, JLS
            # uses the element tag if the text is empty
            def _munch(x):
                as_str = x if isinstance(x, str) else x.text
                if as_str is not None:
                    as_str = as_str.strip()
                if not as_str:
                    as_str = x.tag  # use 'not' to test for empty
                return astype(as_str)
            if 1 == len_found:
                return _munch(found[0])
            return [_munch(this) for this in found]
        except Exception:
            # FIX: was a bare "except:", which would also intercept
            # KeyboardInterrupt/SystemExit and mask them as RuntimeError.
            # (An unreachable duplicate "raise RuntimeError" after this
            # try/except was removed: every path above returns or raises.)
            raise RuntimeError("Unable to handle field:'%s'" % name)

    def __getitem__(self, name):
        """
        allow the caller to extract field values using :obj['name']:
        the same way they would do :obj.name:
        """
        return getattr(self, name)
| apache-2.0 |
aferr/TimingCompartments | src/mem/slicc/ast/ChipComponentAccessAST.py | 16 | 6784 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from slicc.ast.ExprAST import ExprAST
from slicc.symbols import Type
class ChipComponentAccessAST(ExprAST):
    """Base AST node for accessing a component of a machine on a chip.

    Subclasses are expected to set self.chip_ver_expr (None selects the
    local chip) and to implement generate_access(), which returns the
    (return_type, accessor-code) pair appended after the component
    expression.
    """

    def __init__(self, slicc, machine, mach_version, component):
        super(ChipComponentAccessAST, self).__init__(slicc)
        self.mach_var = machine
        self.comp_var = component
        self.mach_ver_expr = mach_version

    def __repr__(self):
        return "[ChipAccessExpr: %r]" % self.expr_vec

    def generate(self, code):
        # NOTE: void_type is looked up but never used below.
        void_type = self.symtab.find("void", Type)
        mname = self.mach_var.name
        cname = self.comp_var.name
        var = self.symtab.machine_components[mname][cname]
        vcode = str(var.code)

        if self.chip_ver_expr is not None:
            # replace self.chip with specified chip
            gcode = "g_system.getChip(%s)" % self.chip_ver_expr.inline()
            vcode = re.sub("m_chip", gcode, vcode)

        # replace default "m_version" with the version we really want
        gcode = "(%s)" % self.mach_ver_expr.inline()
        vcode = re.sub("m_version", gcode, vcode)

        # Subclass builds the member/method accessor suffix.
        return_type, gcode = self.generate_access(var)
        code("($vcode)$gcode")
        return return_type
class ChipMethodAccessAST(ChipComponentAccessAST):
    """AST node for a method call on a chip component; type-checks the
    actual parameters against the method's declared signature."""

    def __init__(self, slicc, chip_version, machine, mach_version, component,
                 proc_name, expr_vec):
        s = super(ChipMethodAccessAST, self)
        s.__init__(slicc, machine, mach_version, component)
        self.chip_ver_expr = chip_version
        self.expr_vec = expr_vec
        self.proc_name = proc_name

    def generate_access(self, var):
        # Generate code for each actual parameter, collecting its type.
        paramTypes = []
        gcode = []
        for expr in self.expr_vec:
            t, c = expr.generate()
            paramTypes.append(t)
            gcode.append(c)

        methodId = var.type.methodId(self.proc_name, paramTypes)

        # Verify that this is a method of the object
        if not var.type.methodExist(methodId):
            self.error("%s: Type '%s' does not have a method '%s'" %
                       ("Invalid method call", var.type, methodId))

        expected_size = len(var.type.methodParamType(methodId))
        if len(self.expr_vec) != expected_size:
            # Right number of parameters
            self.error("Wrong number of parameters for function name: " +
                       "'%s', expected: %d, actual: %d",
                       self.proc_name, expected_size, len(self.expr_vec))

        for expr, expected, actual in zip(self.expr_vec,
                                          var.type.methodParamType(methodId),
                                          paramTypes):
            # Check the types of the parameter
            if actual != expected:
                expr.error("Type mismatch: expected: %s actual: %s",
                           expected, actual)

        # method call
        code = ".%s(%s)" % (self.proc_name, ', '.join(gcode))

        # Return the return type of the method
        return var.type.methodReturnType(methodId), code
class LocalChipMethodAST(ChipMethodAccessAST):
    # method call from local chip: chip_version None leaves the m_chip
    # reference in the generated code untouched (see base generate()).
    def __init__(self, slicc, machine, mach_version, component, proc_name,
                 expr_vec):
        s = super(LocalChipMethodAST, self)
        s.__init__(slicc, None, machine, mach_version, component, proc_name,
                   expr_vec)
class SpecifiedChipMethodAST(ChipMethodAccessAST):
    # method call from a specific chip, selected by the chip_version
    # expression (generates a g_system.getChip(...) access).
    def __init__(self, slicc, chip_version, machine, mach_version, component,
                 proc_name, expr_vec):
        s = super(SpecifiedChipMethodAST, self)
        s.__init__(slicc, chip_version, machine, mach_version, component,
                   proc_name, expr_vec)
class ChipMemberAccessAST(ChipComponentAccessAST):
    """AST node for a data-member access on a chip component."""

    def __init__(self, slicc, chip_version, machine, mach_version, component,
                 field_name):
        # BUG FIX: the original signature omitted 'slicc' even though the
        # body passes it to the superclass and both subclasses supply it
        # (LocalChipMemberAST already calls with slicc first); calling the
        # original raised NameError.  Now mirrors ChipMethodAccessAST.
        s = super(ChipMemberAccessAST, self)
        s.__init__(slicc, machine, mach_version, component)
        self.chip_ver_expr = chip_version
        self.field_name = field_name

    def generate_access(self, var):
        # Verify that this is a valid field name for this type
        if not var.type.dataMemberExist(self.field_name):
            self.error("Invalid object field: " +
                       "Type '%s' does not have data member %s",
                       var.type, self.field_name)
        # BUG FIX: 'code' was built with "code += ..." without ever being
        # initialized (UnboundLocalError), and carried a stray ")" --
        # generate() in the base class already supplies the closing paren
        # around the component expression.  Build the accessor suffix the
        # same way the method variant does.
        code = ".m_%s" % self.field_name
        return var.type.dataMemberType(self.field_name), code
class LocalChipMemberAST(ChipMemberAccessAST):
    # member access from local chip (chip_version None).
    def __init__(self, slicc, machine, mach_version, component, field_name):
        s = super(LocalChipMemberAST, self)
        # NOTE(review): forwards 'slicc' as the base constructor's first
        # argument, so the base __init__ must accept it there.
        s.__init__(slicc, None, machine, mach_version, component, field_name)
class SpecifiedChipMemberAST(ChipMemberAccessAST):
    """Member access from a specific chip, selected by chip_version."""

    def __init__(self, slicc, chip_version, machine, mach_version, component,
                 field_name):
        # BUG FIX: the body referenced 'slicc' but the original parameter
        # list did not include it (NameError on any call); added it first,
        # matching SpecifiedChipMethodAST.
        s = super(SpecifiedChipMemberAST, self)
        s.__init__(slicc, chip_version, machine, mach_version, component,
                   field_name)
| bsd-3-clause |
etherkit/OpenBeacon2 | client/win/venv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py | 74 | 8925 | from __future__ import absolute_import, division, unicode_literals
try:
from collections.abc import MutableMapping
except ImportError: # Python 2.7
from collections import MutableMapping
from xml.dom import minidom, Node
import weakref
from . import base
from .. import constants
from ..constants import namespaces
from .._utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(MutableMapping):
    """Mutable-mapping facade over a DOM element's attributes.

    Keys are attribute names; values read as strings.  Tuple keys
    (namespaced attributes) are not supported by this wrapper.
    """

    def __init__(self, element):
        self.element = element

    def __iter__(self):
        return iter(self.element.attributes.keys())

    def __setitem__(self, name, value):
        if isinstance(name, tuple):
            raise NotImplementedError
        attr = self.element.ownerDocument.createAttribute(name)
        attr.value = value
        self.element.attributes[name] = attr

    def __len__(self):
        return len(self.element.attributes)

    def items(self):
        return list(self.element.attributes.items())

    def values(self):
        # NOTE: yields the underlying Attr nodes, not their string values.
        return list(self.element.attributes.values())

    def __getitem__(self, name):
        if isinstance(name, tuple):
            raise NotImplementedError
        return self.element.attributes[name].value

    def __delitem__(self, name):
        if isinstance(name, tuple):
            raise NotImplementedError
        del self.element.attributes[name]
class NodeBuilder(base.Node):
    """Adapter wrapping a DOM node in html5lib's treebuilder Node API."""

    def __init__(self, element):
        base.Node.__init__(self, element.nodeName)
        self.element = element

    # DOM nodes only carry a namespaceURI when created via the *NS APIs.
    namespace = property(lambda self: hasattr(self.element, "namespaceURI") and
                         self.element.namespaceURI or None)

    def appendChild(self, node):
        node.parent = self
        self.element.appendChild(node.element)

    def insertText(self, data, insertBefore=None):
        text = self.element.ownerDocument.createTextNode(data)
        if insertBefore:
            self.element.insertBefore(text, insertBefore.element)
        else:
            self.element.appendChild(text)

    def insertBefore(self, node, refNode):
        self.element.insertBefore(node.element, refNode.element)
        node.parent = self

    def removeChild(self, node):
        if node.element.parentNode == self.element:
            self.element.removeChild(node.element)
        node.parent = None

    def reparentChildren(self, newParent):
        # Move every child of this node onto newParent, preserving order.
        while self.element.hasChildNodes():
            child = self.element.firstChild
            self.element.removeChild(child)
            newParent.element.appendChild(child)
        self.childNodes = []

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes:
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    # Namespaced attribute: (prefix, localName, namespace).
                    if name[0] is not None:
                        qualifiedName = (name[0] + ":" + name[1])
                    else:
                        qualifiedName = name[1]
                    self.element.setAttributeNS(name[2], qualifiedName,
                                                value)
                else:
                    self.element.setAttribute(
                        name, value)
    attributes = property(getAttributes, setAttributes)

    def cloneNode(self):
        # Shallow clone: children are not copied.
        return NodeBuilder(self.element.cloneNode(False))

    def hasContent(self):
        return self.element.hasChildNodes()

    def getNameTuple(self):
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class TreeBuilder(base.TreeBuilder):  # pylint:disable=unused-variable
    """Tree builder producing a document via the captured Dom implementation."""

    def documentClass(self):
        # The builder itself proxies for the document node; a weakref
        # proxy is returned to avoid a reference cycle with the tree.
        self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
        return weakref.proxy(self)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        domimpl = Dom.getDOMImplementation()
        doctype = domimpl.createDocumentType(name, publicId, systemId)
        self.document.appendChild(NodeBuilder(doctype))
        if Dom == minidom:
            doctype.ownerDocument = self.dom

    def elementClass(self, name, namespace=None):
        if namespace is None and self.defaultNamespace is None:
            node = self.dom.createElement(name)
        else:
            node = self.dom.createElementNS(namespace, name)

        return NodeBuilder(node)

    def commentClass(self, data):
        return NodeBuilder(self.dom.createComment(data))

    def fragmentClass(self):
        return NodeBuilder(self.dom.createDocumentFragment())

    def appendChild(self, node):
        self.dom.appendChild(node.element)

    def testSerializer(self, element):
        return testSerializer(element)

    def getDocument(self):
        return self.dom

    def getFragment(self):
        return base.TreeBuilder.getFragment(self).element

    def insertText(self, data, parent=None):
        # FIX: removed a no-op "data = data" self-assignment.
        if parent != self:
            base.TreeBuilder.insertText(self, data, parent)
        else:
            # HACK: allow text nodes as children of the document node
            if hasattr(self.dom, '_child_node_types'):
                # pylint:disable=protected-access
                if Node.TEXT_NODE not in self.dom._child_node_types:
                    self.dom._child_node_types = list(self.dom._child_node_types)
                    self.dom._child_node_types.append(Node.TEXT_NODE)
            self.dom.appendChild(self.dom.createTextNode(data))

    implementation = DomImplementation
    name = None
def testSerializer(element):
    """Serialize a DOM (sub)tree into html5lib's expected-test text format,
    one "|"-prefixed line per node, indented two spaces per depth level."""
    element.normalize()
    rv = []

    def serializeElement(element, indent=0):
        if element.nodeType == Node.DOCUMENT_TYPE_NODE:
            if element.name:
                if element.publicId or element.systemId:
                    publicId = element.publicId or ""
                    systemId = element.systemId or ""
                    rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                              (' ' * indent, element.name, publicId, systemId))
                else:
                    rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
            else:
                rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
        elif element.nodeType == Node.DOCUMENT_NODE:
            rv.append("#document")
        elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
            rv.append("#document-fragment")
        elif element.nodeType == Node.COMMENT_NODE:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
        elif element.nodeType == Node.TEXT_NODE:
            rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
        else:
            # Element: prefix the tag with its namespace abbreviation
            # (from constants.prefixes) when one is present.
            if (hasattr(element, "namespaceURI") and
                    element.namespaceURI is not None):
                name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                  element.nodeName)
            else:
                name = element.nodeName
            rv.append("|%s<%s>" % (' ' * indent, name))
            if element.hasAttributes():
                attributes = []
                for i in range(len(element.attributes)):
                    attr = element.attributes.item(i)
                    name = attr.nodeName
                    value = attr.value
                    ns = attr.namespaceURI
                    if ns:
                        name = "%s %s" % (constants.prefixes[ns], attr.localName)
                    else:
                        name = attr.nodeName
                    attributes.append((name, value))
                # Attributes are emitted in sorted order for stable output.
                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
        # Recurse into children (applies to every node type above).
        indent += 2
        for child in element.childNodes:
            serializeElement(child, indent)
    serializeElement(element, 0)

    return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
| gpl-3.0 |
kobejean/tensorflow | tensorflow/python/compat/compat_test.py | 31 | 2573 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for forward and backwards compatibility utilties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.compat import compat
from tensorflow.python.platform import test
class CompatTest(test.TestCase):
    """Tests for compat.forward_compatible / forward_compatibility_horizon."""

    def _compatibility_date(self):
        # (year, month, day) of the current forward-compatibility horizon.
        date = compat._FORWARD_COMPATIBILITY_HORIZON  # pylint: disable=protected-access
        return (date.year, date.month, date.day)

    def _n_days_after(self, n):
        # Horizon shifted by n days (n may be negative).
        date = compat._FORWARD_COMPATIBILITY_HORIZON + datetime.timedelta(days=n)  # pylint: disable=protected-access
        return (date.year, date.month, date.day)

    def test_basic(self):
        # forward_compatible(d) is True only for dates strictly before
        # the horizon.
        compatibility_date = self._compatibility_date()
        one_day_before = self._n_days_after(-1)
        self.assertTrue(compat.forward_compatible(*one_day_before))
        self.assertFalse(compat.forward_compatible(*compatibility_date))

    def test_decorator(self):
        compatibility_date = self._compatibility_date()
        one_day_after = self._n_days_after(1)
        with compat.forward_compatibility_horizon(*one_day_after):
            self.assertTrue(compat.forward_compatible(*compatibility_date))
            self.assertFalse(compat.forward_compatible(*one_day_after))

        # After exiting context manager, value should be reset.
        self.assertFalse(compat.forward_compatible(*compatibility_date))

    def test_decorator_with_failure(self):
        # The horizon must be restored even when the managed block raises.
        compatibility_date = self._compatibility_date()
        one_day_after = self._n_days_after(1)

        class DummyError(Exception):
            pass

        try:
            with compat.forward_compatibility_horizon(*one_day_after):
                raise DummyError()
        except DummyError:
            pass  # silence DummyError

        # After exiting context manager, value should be reset.
        self.assertFalse(compat.forward_compatible(*compatibility_date))
if __name__ == '__main__':
test.main()
| apache-2.0 |
infoxchange/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/tests/test_spatialrefsys.py | 154 | 6782 | from django.db import connection
from django.contrib.gis.gdal import GDAL_VERSION
from django.contrib.gis.tests.utils import mysql, no_mysql, oracle, postgis, spatialite
from django.utils import unittest
test_srs = ({'srid' : 4326,
'auth_name' : ('EPSG', True),
'auth_srid' : 4326,
'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
'spheroid' : 'WGS 84', 'name' : 'WGS 84',
'geographic' : True, 'projected' : False, 'spatialite' : True,
'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 1, 9),
},
{'srid' : 32140,
'auth_name' : ('EPSG', False),
'auth_srid' : 32140,
'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ',
'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central',
'geographic' : False, 'projected' : True, 'spatialite' : False,
'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 5, 10),
},
)
if oracle:
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
class SpatialRefSysTest(unittest.TestCase):
@no_mysql
def test01_retrieve(self):
    "Testing retrieval of SpatialRefSys model objects."
    # Each entry of test_srs carries the expected column values, keyed
    # by backend-specific quirks (Oracle auth names, PostGIS srtext).
    for sd in test_srs:
        srs = SpatialRefSys.objects.get(srid=sd['srid'])
        self.assertEqual(sd['srid'], srs.srid)

        # Some of the authority names are borked on Oracle, e.g., SRID=32140.
        # also, Oracle Spatial seems to add extraneous info to fields, hence the
        # the testing with the 'startswith' flag.
        auth_name, oracle_flag = sd['auth_name']
        if postgis or (oracle and oracle_flag):
            self.assertEqual(True, srs.auth_name.startswith(auth_name))

        self.assertEqual(sd['auth_srid'], srs.auth_srid)

        # No proj.4 and different srtext on oracle backends :(
        if postgis:
            # PostGIS 1.4 changed the WKT layout; pick the right fixture.
            if connection.ops.spatial_version >= (1, 4, 0):
                srtext = sd['srtext14']
            else:
                srtext = sd['srtext']
            self.assertEqual(srtext, srs.wkt)
            self.assertEqual(sd['proj4'], srs.proj4text)
@no_mysql
def test02_osr(self):
"Testing getting OSR objects from SpatialRefSys model objects."
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
if GDAL_VERSION <= (1, 8):
self.assertEqual(sd['proj4'], srs.proj4)
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite
if not spatialite:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
@no_mysql
def test03_ellipsoid(self):
"Testing the ellipsoid property."
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
param1 = ellps1[i]
param2 = ellps2[i]
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
def suite():
    """Collect this module's test cases into a single TestSuite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(SpatialRefSysTest))
    return tests
def run(verbosity=2):
    """Run this module's suite with a plain-text test runner."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| gpl-3.0 |
eckucukoglu/arm-linux-gnueabihf | lib/python2.7/encodings/iso2022_jp_3.py | 816 | 1061 | #
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_3')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points, bound directly to the C codec.
    encode = codec.encode
    decode = codec.decode
# Incremental encoder: the multibyte base class supplies the buffering logic.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
# Incremental decoder: the multibyte base class supplies the buffering logic.
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
# File-like reader wrapper around the codec.
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
# File-like writer wrapper around the codec.
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo entry consumed by the codecs registry."""
    codec_obj = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_3',
        encode=codec_obj.encode,
        decode=codec_obj.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| gpl-2.0 |
TrampolineRTOS/GTL | build/libpm/python-makefiles/mingw32_on_macosx_gcc_tools.py | 2 | 2932 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
#----------------------------------------------------------------------------------------------------------------------*
import sys, time, os, json
import makefile, default_build_options
import generic_galgas_makefile
import tool_chain_installation_path
import cross_compiler_download
#----------------------------------------------------------------------------------------------------------------------*
def buildForWin32OnMacOSX (dictionary, jsonFilePath, EXECUTABLE, GOAL, maxParallelJobs, displayCommands) :
  """Configure and run a GALGAS makefile cross-compiling for Win32 (mingw32) on Mac OS X.

  Downloads the binutils+gcc mingw32 tool chain on first use, then fills a
  GenericGalgasMakefile with Win32-specific tools/options and runs it.
  """
  #--- Tool chain installation (note: downloaded on demand the first time)
  GCC_VERSION = "7.2.0"
  BINUTILS_VERSION = "2.28"
  TOOL_CHAIN_NAME = "binutils-" + BINUTILS_VERSION + "-gcc-" + GCC_VERSION + "-for-mingw32"
  installDir = tool_chain_installation_path.toolChainInstallationPath ()
  TOOL_CHAIN_INSTALL_PATH = installDir + "/" + TOOL_CHAIN_NAME
  if not os.path.exists (TOOL_CHAIN_INSTALL_PATH):
    cross_compiler_download.downloadToolChain (TOOL_CHAIN_NAME)
  #--- Populate the generic makefile description
  gmf = generic_galgas_makefile.GenericGalgasMakefile ()
  gmf.mJSONfilePath = jsonFilePath
  gmf.mDictionary = dictionary
  gmf.mExecutable = EXECUTABLE
  gmf.mGoal = GOAL
  gmf.mMaxParallelJobs = maxParallelJobs
  gmf.mDisplayCommands = displayCommands
  gmf.mTargetName = "win32"
  gmf.mLinkerOptions = ["-lws2_32", "-lComdlg32"]
  gmf.mExecutableSuffix = ".exe"
  #--- Cross tools (i586-mingw32 gcc/g++/strip from the downloaded tool chain)
  gmf.mCompilerTool = [TOOL_CHAIN_INSTALL_PATH + "/bin/i586-mingw32-gcc", "-m32", "-D_WIN32_WINNT=0x501"]
  gmf.mLinkerTool = [TOOL_CHAIN_INSTALL_PATH + "/bin/i586-mingw32-g++", "-m32", "--enable-auto-import", "-Wl,--gc-sections"]
  gmf.mStripTool = [TOOL_CHAIN_INSTALL_PATH + "/bin/i586-mingw32-strip", "--strip-all"]
  gmf.mCompilationMessage = "Compiling for Win32"
  gmf.mLinkingMessage = "Linking for Win32"
  gmf.mStripMessage = "Stripping"
  #--- Options for all compilers
  gmf.mAllCompilerOptions = default_build_options.allCompilerOptions (["-Wconversion"])
  #--- Options for release mode
  gmf.mCompilerReleaseOptions = default_build_options.compilerReleaseOptions (["-O2"])
  #--- Options for debug mode
  gmf.mCompilerDebugOptions = default_build_options.compilerDebugOptions ([])
  #--- Options for C compiling (.c extension)
  gmf.m_C_CompilerOptions = default_build_options.C_CompilerOptions ([])
  #--- Options for C++ compiling (.cpp extension)
  gmf.m_Cpp_CompilerOptions = default_build_options.Cpp_CompilerOptions (["-Weffc++", "-Wsign-promo"])
  #--- Options for Objective-C compiling (.m extension)
  gmf.m_ObjectiveC_CompilerOptions = default_build_options.ObjectiveC_CompilerOptions ([])
  #--- Options for Objective-C++ compiling (.mm extension)
  gmf.m_ObjectiveCpp_CompilerOptions = default_build_options.ObjectiveCpp_CompilerOptions ([])
  #--- Library to use for gmp
  gmf.mCrossCompilation = "win32"
  #--- Run makefile
  gmf.run ()
#----------------------------------------------------------------------------------------------------------------------*
| gpl-2.0 |
2baOrNot2ba/AntPat | scripts/viewJonespat_dual.py | 1 | 2897 | #!/usr/bin/env python
"""A simple viewer for Jones patterns for dual-polarized representations.
"""
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid
from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM
from antpat.reps.hamaker import convLOFARcc2DPE
import antpat.io.filetypes as antfiles
def plotJonesCanonical(theta, phi, jones, dpelemname):
    """Plot amplitude gain and IXR of a Jones pattern over the hemisphere.

    theta, phi  -- angular grids (as produced by ZenHemisphGrid)
    jones       -- Jones matrices on that grid
    dpelemname  -- title string for the figure
    """
    normalize = True
    dbscale = True
    polarplt = True
    IXRTYPE = 'IXR_J'  # Can be IXR_J or IXR_M
    g, IXRJ = jones2gIXR(jones)
    IXRM = IXRJ2IXRM(IXRJ)
    if IXRTYPE == 'IXR_J':
        IXR = IXRJ
    elif IXRTYPE == 'IXR_M':  # BUG FIX: was 'IXR_J' again; IXR_M branch was unreachable
        IXR = IXRM
    else:
        raise RuntimeError("""Error: IXR type {} unknown.
                           Known types are IXR_J, IXR_M.""".format(IXRTYPE))
    fig = plt.figure()
    fig.suptitle(dpelemname)
    plt.subplot(121, polar=polarplt)
    if normalize:
        g_max = numpy.max(g)
        g = g/g_max
    if dbscale:
        g = 20*numpy.log10(g)
        # nrlvls = 5
        # g_lvls = numpy.max(g) - 3.0*numpy.arange(nrlvls)
    plt.pcolormesh(phi, numpy.rad2deg(theta), g)
    # plt.contour( phi, numpy.rad2deg(theta), g_dress, levels = g_lvls)
    plt.colorbar()
    plt.title('Amp gain')
    plt.subplot(122, polar=polarplt)
    plt.pcolormesh(phi, numpy.rad2deg(theta), 10*numpy.log10(IXR))
    plt.colorbar()
    plt.title('IXR_J')
    plt.show()
def plotFFpat():
    # NOTE(review): relies on module-level globals assigned in the __main__
    # block below (jones, THETA, PHI, args) — only callable after argument
    # parsing and pattern evaluation have run.
    from antpat.reps.sphgridfun import tvecfun
    # Plot the far-field pattern of each polarization channel (p=0, q=1).
    for polchan in [0, 1]:
        E_th = jones[:, :, polchan, 0].squeeze()
        E_ph = jones[:, :, polchan, 1].squeeze()
        tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, args.freq,
                            vcoordlist=['Ludwig3'], projection='orthographic')
if __name__ == "__main__":
    # Parse command line: a frequency plus one (dual-pol) or two
    # (single-pol p and q) far-field pattern files.
    parser = argparse.ArgumentParser()
    parser.add_argument("freq", type=float,
                        help="Frequency in Hertz")
    parser.add_argument("filename", help="""
        Filename of dual-polarization FF, Hamaker-Arts format,
        or a single-polarization FF (p-channel)""")
    parser.add_argument("filename_q", nargs='?',
                        help="""
                        Filename of second (q-channel) single-polarization FF.
                        """)
    args = parser.parse_args()
    # Dispatch on file type: Hamaker-Arts coefficient file vs FEKO ffe pair.
    if args.filename.endswith(antfiles.HamArtsuffix):
        hp = convLOFARcc2DPE(args.filename, [args.freq])
    elif args.filename.endswith(antfiles.FEKOsuffix):
        hp = DualPolElem()
        hp.load_ffes(args.filename, args.filename_q)
    else:
        raise RuntimeError("dual-pol pattern file type not known")
    # Evaluate the Jones pattern on a zenithal hemisphere grid and plot it.
    THETA, PHI = ZenHemisphGrid()
    jones = hp.getJonesAlong([args.freq], (THETA, PHI))
    plotFFpat()
    # plotJonesCanonical(THETA, PHI, jones, os.path.basename(args.filename)
    #                    + ' (' + str(args.freq/1e6) + ' MHz)')
| isc |
sv1jsb/dwitter | auth/management/commands/createsuperuser.py | 2 | 5401 | """
Management utility to create superusers.
"""
import getpass
import re
import sys
from optparse import make_option
from auth.models import User
from auth.management import get_default_username
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.translation import ugettext as _
# Usernames may contain word characters plus ".", "@", "+", "-".
# Raw string avoids the invalid "\w" escape (a warning, later an error,
# on modern Pythons); the compiled pattern is identical.
RE_VALID_USERNAME = re.compile(r'[\w.@+-]+$')
EMAIL_RE = re.compile(
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"' # quoted-string
    r')@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}$', re.IGNORECASE) # domain
def is_valid_email(value):
    """Raise ValidationError unless *value* looks like an e-mail address."""
    if EMAIL_RE.search(value) is None:
        raise exceptions.ValidationError(_('Enter a valid e-mail address.'))
class Command(BaseCommand):
    # Management command: create a superuser either interactively (prompting
    # for username/e-mail/password) or non-interactively via --noinput.
    option_list = BaseCommand.option_list + (
        make_option('--username', dest='username', default=None,
            help='Specifies the username for the superuser.'),
        make_option('--email', dest='email', default=None,
            help='Specifies the email address for the superuser.'),
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help=('Tells Django to NOT prompt the user for input of any kind. '
                'You must use --username and --email with --noinput, and '
                'superusers created with --noinput will not be able to log '
                'in until they\'re given a valid password.')),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
    )
    help = 'Used to create a superuser.'
    def handle(self, *args, **options):
        """Validate the inputs (prompting when interactive) and create the superuser."""
        username = options.get('username', None)
        email = options.get('email', None)
        interactive = options.get('interactive')
        verbosity = int(options.get('verbosity', 1))
        database = options.get('database')
        # Do quick and dirty validation if --noinput
        if not interactive:
            if not username or not email:
                raise CommandError("You must use --username and --email with --noinput.")
            if not RE_VALID_USERNAME.match(username):
                raise CommandError("Invalid username. Use only letters, digits, and underscores")
            try:
                is_valid_email(email)
            except exceptions.ValidationError:
                raise CommandError("Invalid email address.")
        # If not provided, create the user with an unusable password
        password = None
        # Prompt for username/email/password. Enclose this whole thing in a
        # try/except to trap for a keyboard interrupt and exit gracefully.
        if interactive:
            default_username = get_default_username()
            try:
                # Get a username: loop until the input is syntactically valid
                # AND not already taken in the target database.
                while 1:
                    if not username:
                        input_msg = 'Username'
                        if default_username:
                            input_msg += ' (leave blank to use %r)' % default_username
                        username = raw_input(input_msg + ': ')
                    if default_username and username == '':
                        username = default_username
                    if not RE_VALID_USERNAME.match(username):
                        sys.stderr.write("Error: That username is invalid. Use only letters, digits and underscores.\n")
                        username = None
                        continue
                    try:
                        User.objects.using(database).get(username=username)
                    except User.DoesNotExist:
                        # Username is free: accept it.
                        break
                    else:
                        sys.stderr.write("Error: That username is already taken.\n")
                        username = None
                # Get an email
                while 1:
                    if not email:
                        email = raw_input('E-mail address: ')
                    try:
                        is_valid_email(email)
                    except exceptions.ValidationError:
                        sys.stderr.write("Error: That e-mail address is invalid.\n")
                        email = None
                    else:
                        break
                # Get a password: entered twice, must match and be non-blank.
                while 1:
                    if not password:
                        password = getpass.getpass()
                        password2 = getpass.getpass('Password (again): ')
                        if password != password2:
                            sys.stderr.write("Error: Your passwords didn't match.\n")
                            password = None
                            continue
                    if password.strip() == '':
                        sys.stderr.write("Error: Blank passwords aren't allowed.\n")
                        password = None
                        continue
                    break
            except KeyboardInterrupt:
                # Ctrl-C aborts cleanly instead of dumping a traceback.
                sys.stderr.write("\nOperation cancelled.\n")
                sys.exit(1)
        User.objects.db_manager(database).create_superuser(username, email, password)
        if verbosity >= 1:
            self.stdout.write("Superuser created successfully.\n")
| bsd-3-clause |
sanjeevtripurari/hue | desktop/core/ext-py/Django-1.6.10/tests/model_regress/models.py | 134 | 2195 | # coding: utf-8
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
CHOICES = (
(1, 'first'),
(2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
    # Basic article model used throughout the model_regress test suite.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    status = models.IntegerField(blank=True, null=True, choices=CHOICES)
    misc_data = models.CharField(max_length=100, blank=True)
    article_text = models.TextField()
    class Meta:
        ordering = ('pub_date', 'headline')
        # A utf-8 verbose name (Ångström's Articles) to test they are valid.
        verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
    def __str__(self):
        return self.headline
class Movie(models.Model):
    #5218: Test models with non-default primary keys / AutoFields
    movie_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)
# Nullable DateField (date-only counterpart of Event).
class Party(models.Model):
    when = models.DateField(null=True)
# DateTimeField counterpart of Party.
class Event(models.Model):
    when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
    # Explicit (non-auto) integer primary key.
    id = models.PositiveIntegerField(primary_key=True)
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Worker(models.Model):
    # Simple FK onto the non-auto-PK Department model.
    department = models.ForeignKey(Department)
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
    # Deliberately broken __str__ — the test suite checks error handling.
    name = models.CharField(max_length=7)
    def __str__(self):
        # Intentionally broken (invalid start byte in byte string).
        return b'Name\xff: %s'.decode() % self.name
# Model whose primary key is a CharField rather than an AutoField.
class NonAutoPK(models.Model):
    name = models.CharField(max_length=10, primary_key=True)
#18432: Chained foreign keys with to_field produce incorrect query
class Model1(models.Model):
    # Unique non-PK field targeted by Model2's to_field FK.
    pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
    # FK to Model1 via the non-PK 'pkey' column (first link of the chain).
    model1 = models.ForeignKey(Model1, unique=True, to_field='pkey')
class Model3(models.Model):
    # FK chained through Model2's 'model1' column (second link of the chain).
    model2 = models.ForeignKey(Model2, unique=True, to_field='model1')
| apache-2.0 |
kelvindk/Video-Stabilization | boost_1_42_0/tools/build/v2/test/indirect_conditional.py | 20 | 1389 | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester()
t.write("jamroot.jam", """
exe a1 : a1.cpp : <conditional>@a1-rule ;
rule a1-rule ( properties * )
{
if <variant>debug in $(properties)
{
return <define>OK ;
}
}
exe a2 : a2.cpp : <conditional>@$(__name__).a2-rule
<variant>debug:<optimization>speed ;
rule a2-rule ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
exe a3 : a3.cpp : <conditional>@$(__name__).a3-rule-1
<conditional>@$(__name__).a3-rule-2 ;
rule a3-rule-1 ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
rule a3-rule-2 ( properties * )
{
if <variant>debug in $(properties)
{
return <optimization>speed ;
}
}
""")
t.write("a1.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.write("a2.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.write("a3.cpp", """
#ifdef OK
int main() {}
#endif
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a1.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a2.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a3.exe")
t.cleanup()
| gpl-3.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.7.2/Lib/hashlib.py | 110 | 5013 | # $Id$
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
algorithms = __always_supported
# Public API: the guaranteed constructors plus new() and the algorithms tuple.
__all__ = __always_supported + ('new', 'algorithms')
def __get_builtin_constructor(name):
    """Return the pure-builtin constructor for *name*.

    Falls back on the C extension modules (_sha, _md5, _sha256, _sha512)
    bundled with Python 2; raises ValueError when the algorithm is unknown
    or its extension module is missing.
    """
    try:
        if name in ('SHA1', 'sha1'):
            import _sha
            return _sha.new
        elif name in ('MD5', 'md5'):
            import _md5
            return _md5.new
        elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
            import _sha256
            bs = name[3:]  # remaining digits select the digest size
            if bs == '256':
                return _sha256.sha256
            elif bs == '224':
                return _sha256.sha224
        elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
            import _sha512
            bs = name[3:]
            if bs == '512':
                return _sha512.sha512
            elif bs == '384':
                return _sha512.sha384
    except ImportError:
        pass # no extension module, this hash is unsupported.
    raise ValueError('unsupported hash type %s' % name)
def __get_openssl_constructor(name):
    """Return the OpenSSL-backed constructor for *name*, or the builtin one
    when OpenSSL does not actually provide that algorithm."""
    try:
        f = getattr(_hashlib, 'openssl_' + name)
        # Allow the C module to raise ValueError. The function will be
        # defined but the hash not actually available thanks to OpenSSL.
        f()
        # Use the C function directly (very fast)
        return f
    except (AttributeError, ValueError):
        return __get_builtin_constructor(name)
def __py_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    constructor = __get_builtin_constructor(name)
    return constructor(string)
def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    # Only installed as new() when the _hashlib (OpenSSL) module imported.
    try:
        return _hashlib.new(name, string)
    except ValueError:
        # If the _hashlib module (OpenSSL) doesn't support the named
        # hash, try using our builtin implementations.
        # This allows for SHA224/256 and SHA384/512 support even though
        # the OpenSSL library prior to 0.9.8 doesn't provide them.
        return __get_builtin_constructor(name)(string)
# Prefer the OpenSSL-backed implementations when _hashlib is importable;
# otherwise fall back to the pure-builtin extension modules.
try:
    import _hashlib
    new = __hash_new
    __get_hash = __get_openssl_constructor
except ImportError:
    new = __py_new
    __get_hash = __get_builtin_constructor
# Bind a module-level constructor (md5, sha1, ...) for every guaranteed name.
for __func_name in __always_supported:
    # try them all, some may not work due to the OpenSSL
    # version not supporting that algorithm.
    try:
        globals()[__func_name] = __get_hash(__func_name)
    except ValueError:
        import logging
        logging.exception('code for hash %s was not found.', __func_name)
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| mit |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains all the 2D line class which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
    """Deprecated: use matplotlib.cbook.unmasked_index_ranges instead."""
    # BUG FIX: 'warnings' was referenced without ever being imported in this
    # module, so calling this function raised NameError instead of warning.
    import warnings
    warnings.warn("Import this directly from matplotlib.cbook",
                  DeprecationWarning)
    # Warning added 2008/07/22
    from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
    return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
    """
    Return the indices of the vertices / line segments of the polyline
    (*x*, *y*) that lie within *radius* of the point (*cx*, *cy*).
    """
    r_sq = radius ** 2
    # Squared distance from the query point to every vertex.
    vert_dist_sq = (cx - x) ** 2 + (cy - y) ** 2
    # Degenerate polyline: a single vertex is tested directly.
    if len(x) < 2:
        hit_idx, = np.nonzero(vert_dist_sq <= r_sq)
        return hit_idx
    # Segment start points and direction vectors.
    x0, y0 = x[:-1], y[:-1]
    seg_dx = x[1:] - x0
    seg_dy = y[1:] - y0
    seg_len_sq = seg_dx ** 2 + seg_dy ** 2  # possibly want to eliminate zero-length
    # Parameter of the foot of the perpendicular on each (infinite) line;
    # only t in [0, 1] means the foot lies within the segment.
    t = ((cx - x0) * seg_dx + (cy - y0) * seg_dy) / seg_len_sq
    near_seg = (t >= 0) & (t <= 1)
    # There is a small area near one side of each vertex that is close to
    # neither segment, and another close to both, depending on the angle of
    # the lines.  Excluding segments whose endpoints already hit resolves
    # the ambiguity.
    vert_hits = vert_dist_sq <= r_sq
    near_seg = near_seg & ~(vert_hits[:-1] | vert_hits[1:])
    # Distance from the query point to the foot of the perpendicular.
    foot_x = x0 + t * seg_dx
    foot_y = y0 + t * seg_dy
    seg_hits = ((cx - foot_x) ** 2 + (cy - foot_y) ** 2 <= r_sq) & near_seg
    vert_idx, = vert_hits.ravel().nonzero()
    seg_idx, = seg_hits.ravel().nonzero()
    return np.concatenate((vert_idx, seg_idx))
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, eg one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
markers = _markers = { # hidden names deprecated
'.' : '_draw_point',
',' : '_draw_pixel',
'o' : '_draw_circle',
'v' : '_draw_triangle_down',
'^' : '_draw_triangle_up',
'<' : '_draw_triangle_left',
'>' : '_draw_triangle_right',
'1' : '_draw_tri_down',
'2' : '_draw_tri_up',
'3' : '_draw_tri_left',
'4' : '_draw_tri_right',
's' : '_draw_square',
'p' : '_draw_pentagon',
'*' : '_draw_star',
'h' : '_draw_hexagon1',
'H' : '_draw_hexagon2',
'+' : '_draw_plus',
'x' : '_draw_x',
'D' : '_draw_diamond',
'd' : '_draw_thin_diamond',
'|' : '_draw_vline',
'_' : '_draw_hline',
TICKLEFT : '_draw_tickleft',
TICKRIGHT : '_draw_tickright',
TICKUP : '_draw_tickup',
TICKDOWN : '_draw_tickdown',
CARETLEFT : '_draw_caretleft',
CARETRIGHT : '_draw_caretright',
CARETUP : '_draw_caretup',
CARETDOWN : '_draw_caretdown',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
filled_markers = ('o', '^', 'v', '<', '>',
's', 'd', 'D', 'h', 'H', 'p', '*')
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
%(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
    def __init__(self, xdata, ydata,
                 linewidth = None, # all Nones default to rc
                 linestyle = None,
                 color = None,
                 marker = None,
                 markersize = None,
                 markeredgewidth = None,
                 markeredgecolor = None,
                 markerfacecolor = None,
                 antialiased = None,
                 dash_capstyle = None,
                 solid_capstyle = None,
                 dash_joinstyle = None,
                 solid_joinstyle = None,
                 pickradius = 5,
                 drawstyle = None,
                 **kwargs
                 ):
        """
        Create a :class:`~matplotlib.lines.Line2D` instance with *x*
        and *y* data in sequences *xdata*, *ydata*.
        The kwargs are :class:`~matplotlib.lines.Line2D` properties:
        %(Line2D)s
        See :meth:`set_linestyle` for a decription of the line styles,
        :meth:`set_marker` for a description of the markers, and
        :meth:`set_drawstyle` for a description of the draw styles.
        """
        Artist.__init__(self)
        #convert sequences to numpy arrays
        if not iterable(xdata):
            raise RuntimeError('xdata must be a sequence')
        if not iterable(ydata):
            raise RuntimeError('ydata must be a sequence')
        # Every None style argument falls back to the matplotlib rc defaults.
        if linewidth is None   : linewidth=rcParams['lines.linewidth']
        if linestyle is None   : linestyle=rcParams['lines.linestyle']
        if marker is None      : marker=rcParams['lines.marker']
        if color is None       : color=rcParams['lines.color']
        if markersize is None  : markersize=rcParams['lines.markersize']
        if antialiased is None : antialiased=rcParams['lines.antialiased']
        if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
        if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
        if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
        if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
        if drawstyle is None : drawstyle='default'
        self.set_dash_capstyle(dash_capstyle)
        self.set_dash_joinstyle(dash_joinstyle)
        self.set_solid_capstyle(solid_capstyle)
        self.set_solid_joinstyle(solid_joinstyle)
        self.set_linestyle(linestyle)
        self.set_drawstyle(drawstyle)
        self.set_linewidth(linewidth)
        self.set_color(color)
        self.set_marker(marker)
        self.set_antialiased(antialiased)
        self.set_markersize(markersize)
        self._dashSeq = None
        self.set_markerfacecolor(markerfacecolor)
        self.set_markeredgecolor(markeredgecolor)
        self.set_markeredgewidth(markeredgewidth)
        self._point_size_reduction = 0.5
        self.verticalOffset = None
        # update kwargs before updating data to give the caller a
        # chance to init axes (and hence unit support)
        self.update(kwargs)
        self.pickradius = pickradius
        # A numeric picker doubles as the pick radius.
        if is_numlike(self._picker):
            self.pickradius = self._picker
        # Original data is cached; recache() converts it lazily on demand.
        self._xorig = np.asarray([])
        self._yorig = np.asarray([])
        self._invalid = True
        self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalid:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
if self.figure == None:
warning.warn('no figure set when check if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
    def get_pickradius(self):
        'return the pick radius used for containment tests'
        return self.pickradius
    def setpickradius(self,d):
        """Sets the pick radius used for containment tests
        ACCEPTS: float distance in points
        """
        self.pickradius = d
    def set_picker(self,p):
        """Sets the event picker details for the line.
        ACCEPTS: float distance in points or callable pick function
        ``fn(artist, event)``
        """
        # A callable replaces the default containment test entirely;
        # a number is treated as the pick radius in points.
        if callable(p):
            self._contains = p
        else:
            self.pickradius = p
        self._picker = p
    def get_window_extent(self, renderer):
        # Bounding box of the data in display (pixel) space, padded by half
        # the marker size so edge markers are fully contained.
        bbox = Bbox.unit()
        bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
                                 ignore=True)
        # correct for marker size, if any
        if self._marker is not None:
            ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
            bbox = bbox.padded(ms)
        return bbox
    def set_axes(self, ax):
        # Hook unit-change callbacks so cached data is re-converted (recache)
        # whenever either axis changes its units.
        Artist.set_axes(self, ax)
        if ax.xaxis is not None:
            self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
        if ax.yaxis is not None:
            self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
    set_axes.__doc__ = Artist.set_axes.__doc__
    def set_data(self, *args):
        """
        Set the x and y data
        ACCEPTS: 2D array
        """
        # Accept either set_data(xy_pair) or set_data(x, y).
        if len(args)==1:
            x, y = args[0]
        else:
            x, y = args
        not_masked = 0
        if not ma.isMaskedArray(x):
            x = np.asarray(x)
            not_masked += 1
        if not ma.isMaskedArray(y):
            y = np.asarray(y)
            not_masked += 1
        # Only invalidate the cached path when the data actually changed
        # (masked arrays are always treated as changed).
        if (not_masked < 2 or
            (x is not self._xorig and
             (x.shape != self._xorig.shape or np.any(x != self._xorig))) or
            (y is not self._yorig and
             (y.shape != self._yorig.shape or np.any(y != self._yorig)))):
            self._xorig = x
            self._yorig = y
            self._invalid = True
    def recache(self):
        # Rebuild the cached, unit-converted path from the original data.
        #if self.axes is None: print 'recache no axes'
        #else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
        if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
            x = ma.asarray(self.convert_xunits(self._xorig), float)
            y = ma.asarray(self.convert_yunits(self._yorig), float)
            x = ma.ravel(x)
            y = ma.ravel(y)
        else:
            x = np.asarray(self.convert_xunits(self._xorig), float)
            y = np.asarray(self.convert_yunits(self._yorig), float)
            x = np.ravel(x)
            y = np.ravel(y)
        # Broadcast a scalar against the other coordinate's length.
        if len(x)==1 and len(y)>1:
            x = x * np.ones(y.shape, float)
        if len(y)==1 and len(x)>1:
            y = y * np.ones(x.shape, float)
        if len(x) != len(y):
            raise RuntimeError('xdata and ydata must be the same length')
        # Stack into an (N, 2) vertex array; _x/_y remain views into it.
        x = x.reshape((len(x), 1))
        y = y.reshape((len(y), 1))
        if ma.isMaskedArray(x) or ma.isMaskedArray(y):
            self._xy = ma.concatenate((x, y), 1)
        else:
            self._xy = np.concatenate((x, y), 1)
        self._x = self._xy[:, 0] # just a view
        self._y = self._xy[:, 1] # just a view
        # Masked arrays are now handled by the Path class itself
        self._path = Path(self._xy)
        self._transformed_path = TransformedPath(self._path, self.get_transform())
        self._invalid = False
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalid = True
# self._transformed_path = TransformedPath(self._path, self.get_transform())
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
if self._invalid:
self.recache()
renderer.open_group('line2d')
if not self._visible: return
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
funcname = self._markers.get(self._marker, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
markerFunc = getattr(self, funcname)
markerFunc(renderer, gc, tpath, affine.frozen())
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker in self.filled_markers:
return 'k'
else:
return self._color
else:
return self._markeredgecolor
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
    def get_markerfacecolor(self):
        """Return the marker face color.

        'none'/None pass through unchanged (no fill); 'auto' resolves to
        the line color; anything else is returned as stored.
        """
        if (self._markerfacecolor is None or
            (is_string_like(self._markerfacecolor) and
             self._markerfacecolor.lower()=='none') ):
            return self._markerfacecolor
        elif (is_string_like(self._markerfacecolor) and
              self._markerfacecolor.lower() == 'auto'):
            return self._color
        else:
            return self._markerfacecolor
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
    def get_xdata(self, orig=True):
        """
        Return the xdata.
        If *orig* is *True*, return the original data, else the
        processed data.
        """
        if orig:
            return self._xorig
        # Processed data may be stale; rebuild the cache first.
        if self._invalid:
            self.recache()
        return self._x
    def get_ydata(self, orig=True):
        """
        Return the ydata.
        If *orig* is *True*, return the original data, else the
        processed data.
        """
        if orig:
            return self._yorig
        # Processed data may be stale; rebuild the cache first.
        if self._invalid:
            self.recache()
        return self._y
    def get_path(self):
        """
        Return the :class:`~matplotlib.path.Path` object associated
        with this line.
        """
        # Rebuild the cached path if the data has been invalidated.
        if self._invalid:
            self.recache()
        return self._path
    def get_xydata(self):
        """
        Return the *xy* data as a Nx2 numpy array.
        """
        # Rebuild the cached array if the data has been invalidated.
        if self._invalid:
            self.recache()
        return self._xy
def set_antialiased(self, b):
"""
True if line should be drawin with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
    def set_linestyle(self, linestyle):
        """
        Set the linestyle of the line (also accepts drawstyles)
        ================ =================
        linestyle description
        ================ =================
        '-' solid
        '--' dashed
        '-.' dash_dot
        ':' dotted
        'None' draw nothing
        ' ' draw nothing
        '' draw nothing
        ================ =================
        'steps' is equivalent to 'steps-pre' and is maintained for
        backward-compatibility.
        .. seealso::
            :meth:`set_drawstyle`
        ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and
        any drawstyle in combination with a linestyle, e.g. 'steps--'.
        """
        # A combined spec like 'steps--' encodes drawstyle + linestyle;
        # peel off a leading drawstyle prefix first.
        # handle long drawstyle names before short ones !
        for ds in flatten([k.keys() for k in (self._drawStyles_l,
                self._drawStyles_s)], is_string_like):
            if linestyle.startswith(ds):
                self.set_drawstyle(ds)
                if len(linestyle) > len(ds):
                    linestyle = linestyle[len(ds):]
                else:
                    # Drawstyle only: fall back to a solid line.
                    linestyle = '-'
        if linestyle not in self._lineStyles:
            # Accept the long names ('solid', 'dashed', ...) via ls_mapper.
            if linestyle in ls_mapper:
                linestyle = ls_mapper[linestyle]
            else:
                verbose.report('Unrecognized line style %s, %s' %
                                            (linestyle, type(linestyle)))
        # Blank styles are normalized to the canonical 'None'.
        if linestyle in [' ','']:
            linestyle = 'None'
        self._linestyle = linestyle
    def set_marker(self, marker):
        """
        Set the line marker
        ========== ==========================
        marker description
        ========== ==========================
        '.' point
        ',' pixel
        'o' circle
        'v' triangle_down
        '^' triangle_up
        '<' triangle_left
        '>' triangle_right
        '1' tri_down
        '2' tri_up
        '3' tri_left
        '4' tri_right
        's' square
        'p' pentagon
        '*' star
        'h' hexagon1
        'H' hexagon2
        '+' plus
        'x' x
        'D' diamond
        'd' thin_diamond
        '|' vline
        '_' hline
        TICKLEFT tickleft
        TICKRIGHT tickright
        TICKUP tickup
        TICKDOWN tickdown
        CARETLEFT caretleft
        CARETRIGHT caretright
        CARETUP caretup
        CARETDOWN caretdown
        'None' nothing
        ' ' nothing
        '' nothing
        ========== ==========================
        ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
        | '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
        | 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
        | TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
        | 'None' | ' ' | '' ]
        """
        if marker not in self._markers:
            # Unknown markers are reported, and blanks normalized to 'None'.
            verbose.report('Unrecognized marker style %s, %s' %
                                            (marker, type(marker)))
        if marker in [' ','']:
            marker = 'None'
        self._marker = marker
        # Cache the draw method for this marker.
        self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
    def _draw_steps_pre(self, renderer, gc, path, trans):
        # 'steps-pre': y changes before each new x, so interleave each
        # point with an intermediate vertex at (previous x, new y).
        vertices = self._xy
        steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
        steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
        steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
        # Transform in data space here, then draw with identity.
        path = Path(steps)
        path = path.transformed(self.get_transform())
        self._lineFunc(renderer, gc, path, IdentityTransform())
    def _draw_steps_post(self, renderer, gc, path, trans):
        # 'steps-post': y changes after each x, so interleave each point
        # with an intermediate vertex at (next x, previous y).
        vertices = self._xy
        steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
        steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
        steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
        # Transform in data space here, then draw with identity.
        path = Path(steps)
        path = path.transformed(self.get_transform())
        self._lineFunc(renderer, gc, path, IdentityTransform())
    def _draw_steps_mid(self, renderer, gc, path, trans):
        # 'steps-mid': vertical transitions occur at the midpoint
        # between consecutive x values.
        vertices = self._xy
        steps = ma.zeros((2*len(vertices), 2), np.float_)
        steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
        steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
        steps[0, 0] = vertices[0, 0]
        steps[-1, 0] = vertices[-1, 0]
        steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
        # Transform in data space here, then draw with identity.
        path = Path(steps)
        path = path.transformed(self.get_transform())
        self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_nothing(self, *args, **kwargs):
pass
    def _draw_solid(self, renderer, gc, path, trans):
        # Draw the path with a solid stroke.
        gc.set_linestyle('solid')
        renderer.draw_path(gc, path, trans)
    def _draw_dashed(self, renderer, gc, path, trans):
        # Draw the path dashed, honoring a custom dash sequence if set.
        gc.set_linestyle('dashed')
        if self._dashSeq is not None:
            gc.set_dashes(0, self._dashSeq)
        renderer.draw_path(gc, path, trans)
    def _draw_dash_dot(self, renderer, gc, path, trans):
        # Draw the path with the renderer's built-in dash-dot pattern.
        gc.set_linestyle('dashdot')
        renderer.draw_path(gc, path, trans)
    def _draw_dotted(self, renderer, gc, path, trans):
        # Draw the path with the renderer's built-in dotted pattern.
        gc.set_linestyle('dotted')
        renderer.draw_path(gc, path, trans)
    def _draw_point(self, renderer, gc, path, path_trans):
        # '.' marker: a circle shrunk by _point_size_reduction.
        w = renderer.points_to_pixels(self._markersize) * \
            self._point_size_reduction * 0.5
        gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
        rgbFace = self._get_rgb_face()
        transform = Affine2D().scale(w)
        renderer.draw_markers(
            gc, Path.unit_circle(), transform, path, path_trans,
            rgbFace)
    # Centers the unit rectangle on the data point (',' pixel marker).
    _draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
    def _draw_pixel(self, renderer, gc, path, path_trans):
        # ',' marker: a single unscaled pixel at each point.
        rgbFace = self._get_rgb_face()
        gc.set_snap(False)
        renderer.draw_markers(gc, Path.unit_rectangle(),
                              self._draw_pixel_transform,
                              path, path_trans, rgbFace)
    def _draw_circle(self, renderer, gc, path, path_trans):
        # 'o' marker: unit circle scaled to half the marker size (radius).
        w = renderer.points_to_pixels(self._markersize) * 0.5
        gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
        rgbFace = self._get_rgb_face()
        transform = Affine2D().scale(w, w)
        renderer.draw_markers(
            gc, Path.unit_circle(), transform, path, path_trans,
            rgbFace)
    # Unit triangle pointing up; the four orientations below reuse it
    # via scaling (flip) and rotation.
    _triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
    def _draw_triangle_up(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset, offset)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, self._triangle_path, transform,
                              path, path_trans, rgbFace)
    def _draw_triangle_down(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        # Negative y scale flips the up-triangle to point down.
        transform = Affine2D().scale(offset, -offset)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, self._triangle_path, transform,
                              path, path_trans, rgbFace)
    def _draw_triangle_left(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset, offset).rotate_deg(90)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, self._triangle_path, transform,
                              path, path_trans, rgbFace)
    def _draw_triangle_right(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset, offset).rotate_deg(-90)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, self._triangle_path, transform,
                              path, path_trans, rgbFace)
    def _draw_square(self, renderer, gc, path, path_trans):
        # 's' marker: unit rectangle centered on the point.
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
        side = renderer.points_to_pixels(self._markersize)
        transform = Affine2D().translate(-0.5, -0.5).scale(side)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, Path.unit_rectangle(), transform,
                              path, path_trans, rgbFace)
    def _draw_diamond(self, renderer, gc, path, path_trans):
        # 'D' marker: the square rotated 45 degrees.
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        side = renderer.points_to_pixels(self._markersize)
        transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, Path.unit_rectangle(), transform,
                              path, path_trans, rgbFace)
    def _draw_thin_diamond(self, renderer, gc, path, path_trans):
        # 'd' marker: diamond narrowed to 60% width.
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = renderer.points_to_pixels(self._markersize)
        transform = Affine2D().translate(-0.5, -0.5) \
            .rotate_deg(45).scale(offset * 0.6, offset)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, Path.unit_rectangle(), transform,
                              path, path_trans, rgbFace)
    def _draw_pentagon(self, renderer, gc, path, path_trans):
        # 'p' marker: regular pentagon scaled to half the marker size.
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5 * renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
                              path, path_trans, rgbFace)
    def _draw_star(self, renderer, gc, path, path_trans):
        # '*' marker: 5-point star; 0.381966 = 1/phi^2 gives the classic
        # pentagram inner radius.
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5 * renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        rgbFace = self._get_rgb_face()
        _starpath = Path.unit_regular_star(5, innerCircle=0.381966)
        renderer.draw_markers(gc, _starpath, transform,
                              path, path_trans, rgbFace)
    def _draw_hexagon1(self, renderer, gc, path, path_trans):
        # 'h' marker: regular hexagon, point-up.
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5 * renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
                              path, path_trans, rgbFace)
    def _draw_hexagon2(self, renderer, gc, path, path_trans):
        # 'H' marker: hexagon rotated 30 degrees (flat-top).
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5 * renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(30)
        rgbFace = self._get_rgb_face()
        renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
                              path, path_trans, rgbFace)
    # Vertical unit segment; hline reuses it via a 90 degree rotation.
    _line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
    def _draw_vline(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        renderer.draw_markers(gc, self._line_marker_path, transform,
                              path, path_trans)
    def _draw_hline(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(90)
        renderer.draw_markers(gc, self._line_marker_path, transform,
                              path, path_trans)
    # Unit tick extending from the point; the four tick markers scale it
    # (with sign flips) along the relevant axis.
    _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
    def _draw_tickleft(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
        offset = renderer.points_to_pixels(self._markersize)
        marker_transform = Affine2D().scale(-offset, 1.0)
        renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
                              path, path_trans)
    def _draw_tickright(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
        offset = renderer.points_to_pixels(self._markersize)
        marker_transform = Affine2D().scale(offset, 1.0)
        renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
                              path, path_trans)
    _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
    def _draw_tickup(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
        offset = renderer.points_to_pixels(self._markersize)
        marker_transform = Affine2D().scale(1.0, offset)
        renderer.draw_markers(gc, self._tickvert_path, marker_transform,
                              path, path_trans)
    def _draw_tickdown(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
        offset = renderer.points_to_pixels(self._markersize)
        marker_transform = Affine2D().scale(1.0, -offset)
        renderer.draw_markers(gc, self._tickvert_path, marker_transform,
                              path, path_trans)
    # '+' marker: two crossing unit segments (MOVETO/LINETO pairs keep
    # them disconnected).
    _plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
                       [0.0, -1.0], [0.0, 1.0]],
                      [Path.MOVETO, Path.LINETO,
                       Path.MOVETO, Path.LINETO])
    def _draw_plus(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        renderer.draw_markers(gc, self._plus_path, transform,
                              path, path_trans)
    # 'Y'-shaped three-spoke path for the tri_* markers; the variants
    # are rotations of this base shape.
    _tri_path = Path([[0.0, 0.0], [0.0, -1.0],
                      [0.0, 0.0], [0.8, 0.5],
                      [0.0, 0.0], [-0.8, 0.5]],
                     [Path.MOVETO, Path.LINETO,
                      Path.MOVETO, Path.LINETO,
                      Path.MOVETO, Path.LINETO])
    def _draw_tri_down(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        renderer.draw_markers(gc, self._tri_path, transform,
                              path, path_trans)
    def _draw_tri_up(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(180)
        renderer.draw_markers(gc, self._tri_path, transform,
                              path, path_trans)
    def _draw_tri_left(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(90)
        renderer.draw_markers(gc, self._tri_path, transform,
                              path, path_trans)
    def _draw_tri_right(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(270)
        renderer.draw_markers(gc, self._tri_path, transform,
                              path, path_trans)
    # Downward-pointing open 'V'; the caret variants are rotations.
    _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
    def _draw_caretdown(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        renderer.draw_markers(gc, self._caret_path, transform,
                              path, path_trans)
    def _draw_caretup(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(180)
        renderer.draw_markers(gc, self._caret_path, transform,
                              path, path_trans)
    def _draw_caretleft(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(270)
        renderer.draw_markers(gc, self._caret_path, transform,
                              path, path_trans)
    def _draw_caretright(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset).rotate_deg(90)
        renderer.draw_markers(gc, self._caret_path, transform,
                              path, path_trans)
    # 'x' marker: two crossing diagonal unit segments.
    _x_path = Path([[-1.0, -1.0], [1.0, 1.0],
                    [-1.0, 1.0], [1.0, -1.0]],
                   [Path.MOVETO, Path.LINETO,
                    Path.MOVETO, Path.LINETO])
    def _draw_x(self, renderer, gc, path, path_trans):
        gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
        offset = 0.5*renderer.points_to_pixels(self._markersize)
        transform = Affine2D().scale(offset)
        renderer.draw_markers(gc, self._x_path, transform,
                              path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = other._marker
self._drawstyle = other._drawstyle
    def _get_rgb_face(self):
        # Resolve the marker face color to an RGB tuple, or None when the
        # marker should not be filled ('none').
        facecolor = self.get_markerfacecolor()
        if is_string_like(facecolor) and facecolor.lower()=='none':
            rgbFace = None
        else:
            rgbFace = colorConverter.to_rgb(facecolor)
        return rgbFace
    # some aliases....
    # Short-form setter aliases mirroring the plot() kwargs (aa, c, ls,
    # lw, mec, mew, mfc, ms).
    def set_aa(self, val):
        'alias for set_antialiased'
        self.set_antialiased(val)
    def set_c(self, val):
        'alias for set_color'
        self.set_color(val)
    def set_ls(self, val):
        'alias for set_linestyle'
        self.set_linestyle(val)
    def set_lw(self, val):
        'alias for set_linewidth'
        self.set_linewidth(val)
    def set_mec(self, val):
        'alias for set_markeredgecolor'
        self.set_markeredgecolor(val)
    def set_mew(self, val):
        'alias for set_markeredgewidth'
        self.set_markeredgewidth(val)
    def set_mfc(self, val):
        'alias for set_markerfacecolor'
        self.set_markerfacecolor(val)
    def set_ms(self, val):
        'alias for set_markersize'
        self.set_markersize(val)
    # Short-form getter aliases matching the setter aliases above.
    def get_aa(self):
        'alias for get_antialiased'
        return self.get_antialiased()
    def get_c(self):
        'alias for get_color'
        return self.get_color()
    def get_ls(self):
        'alias for get_linestyle'
        return self.get_linestyle()
    def get_lw(self):
        'alias for get_linewidth'
        return self.get_linewidth()
    def get_mec(self):
        'alias for get_markeredgecolor'
        return self.get_markeredgecolor()
    def get_mew(self):
        'alias for get_markeredgewidth'
        return self.get_markeredgewidth()
    def get_mfc(self):
        'alias for get_markerfacecolor'
        return self.get_markerfacecolor()
    def get_ms(self):
        'alias for get_markersize'
        return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
    """
    Manage the callbacks to maintain a list of selected vertices for
    :class:`matplotlib.lines.Line2D`. Derived classes should override
    :meth:`~matplotlib.lines.VertexSelector.process_selected` to do
    something with the picks.
    Here is an example which highlights the selected verts with red
    circles::
        import numpy as np
        import matplotlib.pyplot as plt
        import matplotlib.lines as lines
        class HighlightSelected(lines.VertexSelector):
            def __init__(self, line, fmt='ro', **kwargs):
                lines.VertexSelector.__init__(self, line)
                self.markers, = self.axes.plot([], [], fmt, **kwargs)
            def process_selected(self, ind, xs, ys):
                self.markers.set_data(xs, ys)
                self.canvas.draw()
        fig = plt.figure()
        ax = fig.add_subplot(111)
        x, y = np.random.rand(2, 30)
        line, = ax.plot(x, y, 'bs-', picker=5)
        selector = HighlightSelected(line)
        plt.show()
    """
    def __init__(self, line):
        """
        Initialize the class with a :class:`matplotlib.lines.Line2D`
        instance. The line should already be added to some
        :class:`matplotlib.axes.Axes` instance and should have the
        picker property set.
        """
        # Both preconditions are required for pick events to reach us.
        if not hasattr(line, 'axes'):
            raise RuntimeError('You must first add the line to the Axes')
        if line.get_picker() is None:
            raise RuntimeError('You must first set the picker property of the line')
        self.axes = line.axes
        self.line = line
        self.canvas = self.axes.figure.canvas
        self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
        # Set of currently selected vertex indices.
        self.ind = set()
    def process_selected(self, ind, xs, ys):
        """
        Default "do nothing" implementation of the
        :meth:`process_selected` method.
        *ind* are the indices of the selected vertices. *xs* and *ys*
        are the coordinates of the selected vertices.
        """
        pass
    def onpick(self, event):
        'When the line is picked, update the set of selected indicies.'
        if event.artist is not self.line: return
        # Picking a vertex toggles its membership in the selection.
        for i in event.ind:
            if i in self.ind:
                self.ind.remove(i)
            else:
                self.ind.add(i)
        ind = list(self.ind)
        ind.sort()
        xdata, ydata = self.line.get_data()
        self.process_selected(ind, xdata[ind], ydata[ind])
# Module-level aliases for the Line2D style lookup tables.
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
# NOTE(review): ``im_func`` is Python 2 only; this file is Python 2 code
# (see ``print >>`` usage elsewhere in this dump).
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
| agpl-3.0 |
chirilo/remo | vendor-local/lib/python/south/utils.py | 32 | 1969 | """
Generally helpful utility functions.
"""
def _ask_for_it_by_name(name):
"Returns an object referenced by absolute path."
bits = name.split(".")
## what if there is no absolute reference?
if len(bits)>1:
modulename = ".".join(bits[:-1])
else:
modulename=bits[0]
module = __import__(modulename, {}, {}, bits[-1])
if len(bits) == 1:
return module
else:
return getattr(module, bits[-1])
def ask_for_it_by_name(name):
"Returns an object referenced by absolute path. (Memoised outer wrapper)"
if name not in ask_for_it_by_name.cache:
ask_for_it_by_name.cache[name] = _ask_for_it_by_name(name)
return ask_for_it_by_name.cache[name]
ask_for_it_by_name.cache = {}
def get_attribute(item, attribute):
    """
    Like getattr, but recursive (i.e. you can ask for 'foo.bar.yay'.)
    """
    # Walk the dotted path one attribute at a time.
    result = item
    for name in attribute.split("."):
        result = getattr(result, name)
    return result
def auto_through(field):
    "Returns if the M2M class passed in has an autogenerated through table or not."
    # Django 1.0/1.1 set ``rel.through`` falsy for auto tables; 1.2+
    # exposes an ``auto_created`` flag on the through model's _meta.
    return (
        # Django 1.0/1.1
        (not field.rel.through)
        or
        # Django 1.2+
        getattr(getattr(field.rel.through, "_meta", None), "auto_created", False)
    )
def auto_model(model):
    "Returns if the given model was automatically generated."
    # Older Django versions may not define the flag at all.
    meta = model._meta
    return getattr(meta, "auto_created", False)
def memoize(function):
    "Standard memoization decorator."
    # Caches the result of a zero-argument method on the instance under
    # the attribute name ``_<name>``.
    name = function.__name__
    _name = '_' + name
    def method(self):
        if not hasattr(self, _name):
            value = function(self)
            setattr(self, _name, value)
        return getattr(self, _name)
    def invalidate():
        # NOTE(review): this checks/deletes the attribute on the wrapper
        # *function* object, but the cached value is stored on the
        # instance -- as written, invalidate looks like a no-op.
        # Confirm intended behavior before relying on _invalidate().
        if hasattr(method, _name):
            delattr(method, _name)
    method.__name__ = function.__name__
    method.__doc__ = function.__doc__
    method._invalidate = invalidate
    return method
| bsd-3-clause |
cluckmaster/MissionPlanner | Lib/abc.py | 88 | 7330 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class
# (Python 2: all classic-class instances share one type, ``instance``;
# captured here for the old-style fast path in __instancecheck__.)
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A class
    whose metaclass derives from ABCMeta cannot be instantiated until
    every abstract method has been overridden.  The abstract methods may
    still be invoked through the usual 'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # ABCMeta.__new__ collects the names of attributes carrying this flag.
    funcobj.__isabstractmethod__ = True
    return funcobj
class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it.  A class
    whose metaclass derives from ABCMeta cannot be instantiated until
    every abstract property has been overridden.  The abstract
    properties may still be invoked through the usual 'super' call
    mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; a read-write abstract property
    uses the 'long' form of property declaration:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    # Set at class level so every instance reports as abstract to ABCMeta.
    __isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances all share type(instance) is InstanceType, so
        # fall back to their __class__ attribute for the real class.
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            # Negative cache is only trusted if its version matches the
            # global invalidation counter (no new registrations since).
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        # __class__ and type() disagree (e.g. proxy objects): accept the
        # instance if either of them passes the subclass check.
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))
    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache of known-positive answers first.
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate if any class was
        # registered anywhere since the cache was last valid.
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook: lets ABCs accept structural matches
        # (e.g. "has __hash__") without explicit registration.
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass (real inheritance via the MRO)
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
| gpl-3.0 |
sumanthha/kannadaflix | django/core/mail/__init__.py | 80 | 4678 | """
Tools for sending email.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.utils import CachedDnsName, DNS_NAME
from django.core.mail.message import (
EmailMessage, EmailMultiAlternatives,
SafeMIMEText, SafeMIMEMultipart,
DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid,
BadHeaderError, forbid_multi_line_headers)
def get_connection(backend=None, fail_silently=False, **kwds):
    """Load an email backend and return an instance of it.

    If backend is None (default) settings.EMAIL_BACKEND is used.
    Both fail_silently and other keyword arguments are used in the
    constructor of the backend.

    Raises ImproperlyConfigured when the backend module cannot be
    imported or does not define the named class.
    """
    path = backend or settings.EMAIL_BACKEND
    try:
        # Split "package.module.ClassName" into module path and class name.
        mod_name, klass_name = path.rsplit('.', 1)
        mod = import_module(mod_name)
    except ImportError, e:
        raise ImproperlyConfigured(('Error importing email backend module %s: "%s"'
                                    % (mod_name, e)))
    try:
        klass = getattr(mod, klass_name)
    except AttributeError:
        raise ImproperlyConfigured(('Module "%s" does not define a '
                                    '"%s" class' % (mod_name, klass_name)))
    return klass(fail_silently=fail_silently, **kwds)
def send_mail(subject, message, from_email, recipient_list,
              fail_silently=False, auth_user=None, auth_password=None,
              connection=None):
    """
    Easy wrapper for sending a single message to a recipient list. All members
    of the recipient list will see the other recipients in the 'To' field.

    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    if not connection:
        connection = get_connection(username=auth_user,
                                    password=auth_password,
                                    fail_silently=fail_silently)
    mail = EmailMessage(subject, message, from_email, recipient_list,
                        connection=connection)
    return mail.send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
                   auth_password=None, connection=None):
    """
    Given a datatuple of (subject, message, from_email, recipient_list), sends
    each message to each recipient list. Returns the number of emails sent.

    If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
    If auth_user and auth_password are set, they're used to log in.
    If auth_user is None, the EMAIL_HOST_USER setting is used.
    If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.

    Note: The API for this method is frozen. New code wanting to extend the
    functionality should use the EmailMessage class directly.
    """
    if not connection:
        connection = get_connection(username=auth_user,
                                    password=auth_password,
                                    fail_silently=fail_silently)
    messages = []
    for subject, message, sender, recipient in datatuple:
        messages.append(EmailMessage(subject, message, sender, recipient))
    return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False, connection=None,
                html_message=None):
    """Sends a message to the admins, as defined by the ADMINS setting."""
    recipients = settings.ADMINS
    if not recipients:
        return
    full_subject = u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject)
    mail = EmailMultiAlternatives(full_subject, message,
        settings.SERVER_EMAIL, [address for name, address in recipients],
        connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
                  html_message=None):
    """Sends a message to the managers, as defined by the MANAGERS setting."""
    recipients = settings.MANAGERS
    if not recipients:
        return
    full_subject = u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject)
    mail = EmailMultiAlternatives(full_subject, message,
        settings.SERVER_EMAIL, [address for name, address in recipients],
        connection=connection)
    if html_message:
        mail.attach_alternative(html_message, 'text/html')
    mail.send(fail_silently=fail_silently)
| bsd-3-clause |
bgmerrell/desmod | tests/test_timescale.py | 1 | 1953 | import pytest
from desmod.timescale import parse_time, scale_time
# Table of (input string, expected (magnitude, unit)) pairs covering signs,
# decimal/exponent notation, all supported SI prefixes, and bare units.
@pytest.mark.parametrize('test_input, expected', [
    ('12 s', (12, 's')),
    ('12s', (12, 's')),
    ('+12s', (12, 's')),
    ('-12s', (-12, 's')),
    ('12.0 s', (12.0, 's')),
    ('12. s', (12.0, 's')),
    ('+12.0 s', (12.0, 's')),
    ('-12.0 s', (-12.0, 's')),
    ('12.000 s', (12.0, 's')),
    ('1.2e1 s', (12.0, 's')),
    ('1.2e+1 s', (12.0, 's')),
    ('1.2e-1 s', (0.12, 's')),
    ('-1.2e-1 s', (-0.12, 's')),
    ('12.s', (12.0, 's')),
    ('12.0s', (12.0, 's')),
    ('12.000s', (12.0, 's')),
    ('1.2e1s', (12.0, 's')),
    ('.12e+2s', (12.0, 's')),
    ('.12s', (0.12, 's')),
    ('12 fs', (12, 'fs')),
    ('12 ps', (12, 'ps')),
    ('12 ns', (12, 'ns')),
    ('12 us', (12, 'us')),
    ('12 ms', (12, 'ms')),
    ('12.0ms', (12.0, 'ms')),
    ('s', (1, 's')),
    ('fs', (1, 'fs')),
])
def test_parse_time(test_input, expected):
    """parse_time() splits a time string into (magnitude, unit)."""
    m, u = parse_time(test_input)
    assert (m, u) == expected
    # The magnitude must preserve numeric type: int stays int, float stays
    # float (e.g. '12s' -> int 12, '12.0s' -> float 12.0).
    assert isinstance(m, type(expected[0]))
# Malformed inputs: unknown/unsupported units, bad exponents, doubled signs,
# and bare numbers without a default unit must all raise ValueError.
@pytest.mark.parametrize('test_input', [
    '',
    '123 s',
    '123',
    '123.0',
    '123 S',
    '123 Ms',
    '123e1.3 s',
    '+-123 s',
    '123 ks',
    '. s',
    '1-.1 s',
    '1e1.2 s',
])
def test_parse_time_except(test_input):
    """Invalid time strings raise ValueError with a parse_time message."""
    with pytest.raises(ValueError) as exc_info:
        parse_time(test_input)
    # The error should come from parse_time's own validation, not from a
    # raw float() conversion leaking through (presumably why 'float' must
    # not appear in the message -- TODO confirm against parse_time docs).
    assert 'float' not in str(exc_info.value)
def test_parse_time_default():
    """A bare magnitude picks up the caller-supplied default unit."""
    magnitude, unit = parse_time('123', default_unit='ms')
    assert (magnitude, unit) == (123, 'ms')
# Each case converts a (magnitude, unit) time into multiples of a target
# (magnitude, unit) timescale.
@pytest.mark.parametrize('input_t, input_tscale, expected', [
    ((1, 'us'), (1, 'us'), 1),
    ((1, 'us'), (10, 'us'), 0.1),
    ((1000, 'us'), (1, 'ms'), 1),
    ((1, 'us'), (100, 'ms'), 1e-5),
    ((50, 'ms'), (1, 'ns'), 50000000),
    ((5.2, 'ms'), (1, 'us'), 5200),
])
def test_scale_time(input_t, input_tscale, expected):
    """scale_time() expresses a time as a multiple of a timescale."""
    scaled = scale_time(input_t, input_tscale)
    assert expected == scaled
    # The result type matters too: whole multiples come back as int,
    # fractional ones as float.
    assert isinstance(scaled, type(expected))
| mit |
bourguet/operator_precedence_parsing | operator_precedence.py | 1 | 7999 | #! /usr/bin/env python3
import sys
import lexer
from tree import Node, CompositeNode
class SymbolDesc:
    """Descriptor for a parser symbol: the lexem itself, its left and
    right binding priorities, and the callback used to reduce it."""
    def __init__(self, symbol, lprio, rprio, evaluator):
        self.symbol = symbol
        self.lprio = lprio
        self.rprio = rprio
        self.evaluator = evaluator
        self.value = None
    def __repr__(self):
        fields = (self.symbol, self.lprio, self.rprio, self.value)
        return '<Symbol {} {}/{}: {}>'.format(*fields)
def identity_evaluator(parts):
    """Reduce a single identifier/number symbol to a leaf Node."""
    if len(parts) == 1 and type(parts[0]) == SymbolDesc:
        return Node(parts[0].symbol)
    return CompositeNode('ID ERROR', parts)
def binary_evaluator(parts):
    """Reduce [operand, operator, operand] into a binary CompositeNode."""
    if (len(parts) == 3
            and type(parts[0]) != SymbolDesc
            and type(parts[1]) == SymbolDesc
            and type(parts[2]) != SymbolDesc):
        return CompositeNode(parts[1].symbol, [parts[0], parts[2]])
    return CompositeNode('BINARY ERROR', parts)
class Parser:
    """Operator-precedence parser.

    The stack holds alternating SymbolDesc entries (operators, delimited by
    the $soi$/$eoi$ sentinels) and already-reduced tree nodes.  Each symbol
    carries a left priority (how strongly it binds to what is on the stack)
    and a right priority (how strongly it holds onto what follows).
    """
    def __init__(self):
        self.symbols = {}
        # Start-of-input and end-of-input sentinels with the lowest priority.
        self.symbols['$soi$'] = SymbolDesc('$soi$', 0, 0, None)
        self.symbols['$eoi$'] = SymbolDesc('$eoi$', 0, 0, None)
        self.reset()
    def register_symbol(self, oper, lprio, rprio, evaluator=None):
        """Register one operator (str) or several (iterable of str) with the
        given priorities; defaults to the binary evaluator."""
        if evaluator is None:
            evaluator = binary_evaluator
        if type(oper) is str:
            self.symbols[oper] = SymbolDesc(oper, lprio, rprio, evaluator)
        else:
            for op in oper:
                self.symbols[op] = SymbolDesc(op, lprio, rprio, evaluator)
    def reset(self):
        """Clear the stack down to the start-of-input sentinel."""
        self.stack = [self.symbols['$soi$']]
    def id_symbol(self, id):
        """Wrap an identifier/number token as a maximum-priority symbol."""
        return SymbolDesc(id, 1000, 1000, identity_evaluator)
    def evaluate(self):
        """Reduce the topmost handle on the stack into a single tree node."""
        idx = len(self.stack)-1
        # Skip a trailing operand so idx points at the topmost operator.
        if type(self.stack[idx]) != SymbolDesc:
            idx -= 1
        curprio = self.stack[idx].lprio
        # Extend the handle leftwards over operands and over operators whose
        # right priority matches the current left priority (this is how
        # multi-part constructs like '( expr )' or '? :' stay together).
        while type(self.stack[idx-1]) != SymbolDesc or self.stack[idx-1].rprio == curprio:
            idx -= 1
            if type(self.stack[idx]) == SymbolDesc:
                curprio = self.stack[idx].lprio
        args = self.stack[idx:]
        self.stack = self.stack[:idx]
        # The first operator symbol in the handle supplies the evaluator;
        # its result replaces the whole handle on the stack.
        for i in args:
            if type(i) == SymbolDesc:
                self.stack.append(i.evaluator(args))
                return
        raise RuntimeError('Internal error: no evaluator found in {}'.format(args))
    def tos_symbol(self):
        """Return the topmost SymbolDesc (skipping one reduced operand)."""
        idx = len(self.stack)-1
        if type(self.stack[idx]) != SymbolDesc:
            idx -= 1
        return self.stack[idx]
    def shift(self, sym):
        """Reduce while the stack binds tighter than sym, then push sym."""
        while self.tos_symbol().rprio > sym.lprio:
            self.evaluate()
        self.stack.append(sym)
    def push_eoi(self):
        """Force reduction of everything by shifting the end sentinel."""
        self.shift(self.symbols['$eoi$'])
    def parse(self, s):
        """Parse string s and return the resulting expression tree."""
        self.reset()
        for tk in lexer.tokenize(s):
            if tk.lexem in self.symbols:
                self.shift(self.symbols[tk.lexem])
            elif tk.kind == 'ID':
                self.shift(self.id_symbol(tk))
            elif tk.kind == 'NUMBER':
                self.shift(self.id_symbol(tk))
            else:
                raise RuntimeError('Unexpected symbol: {}'.format(tk))
        self.push_eoi()
        # A fully reduced parse leaves exactly [$soi$, tree, $eoi$].
        if len(self.stack) != 3:
            raise RuntimeError('Internal error: bad state of stack at end')
        return self.stack[1]
    def dump(self):
        """Debug helper: print the current parse stack."""
        print('Stack')
        for oper in self.stack:
            print(' {}'.format(oper))
def open_parenthesis_evaluator(args):
    """Reduce a handle that starts at a '(' symbol.

    Three valid shapes are recognised:
      ['(' expr ')']            -> grouping: the inner expression itself
      [callee '(' ')']          -> zero-argument call
      [callee '(' args ')']     -> call with one argument or a ','-list
    Anything else reduces to an error node.
    """
    # Grouping parentheses: drop them and keep the inner expression.
    if (len(args) == 3
            and type(args[0]) == SymbolDesc and args[0].symbol == '('
            and type(args[1]) != SymbolDesc
            and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
        return args[1]
    # Zero-argument call: operand directly followed by '(' ')'.
    elif (len(args) == 3
          and type(args[0]) != SymbolDesc
          and type(args[1]) == SymbolDesc and args[1].symbol == '('
          and type(args[2]) == SymbolDesc and args[2].symbol == ')'):
        return CompositeNode('call', [args[0]])
    # Call with arguments: operand '(' expr ')'.
    elif (len(args) == 4
          and type(args[0]) != SymbolDesc
          and type(args[1]) == SymbolDesc and args[1].symbol == '('
          and type(args[2]) != SymbolDesc
          and type(args[3]) == SymbolDesc and args[3].symbol == ')'):
        # A ',' node holds the already-flattened argument list in its
        # children (see coma_evaluator); otherwise there is one argument.
        # NOTE(review): relies on CompositeNode exposing .token/.children --
        # defined in the external 'tree' module.
        if args[2].token == ',':
            callargs = args[2].children
        else:
            callargs = [args[2]]
        callargs.insert(0, args[0])
        return CompositeNode('call', callargs)
    else:
        return CompositeNode('( ERROR', args)
def close_parenthesis_evaluator(parts):
    """A ')' is always absorbed by the matching '(' handle, so being asked
    to reduce one on its own is a parse error."""
    return CompositeNode(') ERROR', parts)
def open_bracket_evaluator(parts):
    """Reduce 'obj [ index ]' into a 'get' node."""
    obj = parts[0]
    index = parts[2]
    return CompositeNode('get', [obj, index])
def close_bracket_evaluator(parts):
    """A ']' is always absorbed by the matching '[' handle, so being asked
    to reduce one on its own is a parse error."""
    return CompositeNode('] ERROR', parts)
def coma_evaluator(parts):
    """Collapse a ','-separated handle into a single ',' node that keeps
    only the operand subtrees (the ',' symbols themselves are dropped)."""
    operands = [item for item in parts if type(item) != SymbolDesc]
    return CompositeNode(',', operands)
def unary_evaluator(parts):
    """Reduce a prefix ('op x') or postfix ('x op') unary application."""
    if len(parts) != 2:
        return CompositeNode('UNARY ERROR', parts)
    first_is_op = type(parts[0]) == SymbolDesc
    second_is_op = type(parts[1]) == SymbolDesc
    if first_is_op and not second_is_op:
        return CompositeNode(parts[0].symbol, [parts[1]])
    if second_is_op and not first_is_op:
        return CompositeNode('post' + parts[1].symbol, [parts[0]])
    return CompositeNode('UNARY ERROR', parts)
def unary_or_binary_evaluator(parts):
    """Reduce an operator usable both as unary and binary (+, -, *, &).

    The shape of the handle decides: (op, x) is prefix, (x, op) is postfix,
    (x, op, y) is binary; any other shape is an error.
    """
    shape = tuple(type(p) == SymbolDesc for p in parts)
    if shape == (True, False):
        return CompositeNode(parts[0].symbol, [parts[1]])
    if shape == (False, True):
        return CompositeNode('post' + parts[1].symbol, [parts[0]])
    if shape == (False, True, False):
        return CompositeNode(parts[1].symbol, [parts[0], parts[2]])
    return CompositeNode('1,2-ARY ERROR', parts)
def question_evaluator(parts):
    """Reduce 'cond ? if_true : if_false' into a ternary '?' node."""
    shape_ok = (
        len(parts) == 5
        and type(parts[0]) != SymbolDesc
        and type(parts[1]) == SymbolDesc and parts[1].symbol == '?'
        and type(parts[2]) != SymbolDesc
        and type(parts[3]) == SymbolDesc and parts[3].symbol == ':'
        and type(parts[4]) != SymbolDesc)
    if not shape_ok:
        return CompositeNode('? ERROR', parts)
    return CompositeNode('?', [parts[0], parts[2], parts[4]])
def colon_evaluator(parts):
    """A ':' is always absorbed by the matching '?' handle, so being asked
    to reduce one on its own is a parse error."""
    return CompositeNode(': ERROR', parts)
def cexp_parser():
    """Build a Parser configured with C expression operators.

    Priority encoding: for a left-associative binary operator rprio is
    lprio + 1 (an equal-priority operator on the stack reduces first); for
    right-associative ones ('=', '**', unary prefix, '?:') rprio is lower
    than lprio so the new operator shifts instead.
    """
    parser = Parser()
    parser.register_symbol(',', 2, 2, coma_evaluator)
    # Assignment operators: right-associative.
    parser.register_symbol(['=', '*=', '/=', '%=', '+=', '-=', '<<=', '>>=', '&=', '|=', '^='], 5, 4)
    # '?' and ':' priorities are chosen so the whole 'a ? b : c' handle is
    # reduced at once by question_evaluator.
    parser.register_symbol('?', 7, 1.5, question_evaluator)
    parser.register_symbol(':', 1.5, 6, colon_evaluator)
    parser.register_symbol('||', 8, 9)
    parser.register_symbol('&&', 10, 11)
    parser.register_symbol('|', 12, 13)
    parser.register_symbol('^', 14, 15)
    # '&' is also the unary address-of operator.
    parser.register_symbol('&', 16, 17, unary_or_binary_evaluator)
    parser.register_symbol(['==', '!='], 18, 19)
    parser.register_symbol(['<', '>', '<=', '>='], 20, 21)
    parser.register_symbol(['<<', '>>'], 22, 23)
    # '+' and '-' double as unary prefix operators.
    parser.register_symbol(['+', '-'], 24, 25, unary_or_binary_evaluator)
    parser.register_symbol(['/', '%'], 26, 27)
    # '*' is also the unary dereference operator.
    parser.register_symbol(['*'], 26, 27, unary_or_binary_evaluator)
    # Exponentiation: right-associative (rprio < lprio).
    parser.register_symbol('**', 29, 28)
    parser.register_symbol(['++', '--', '~', '!'], 31, 30, unary_evaluator) # +, -, *, & should be here
    parser.register_symbol(['.', '->'], 32, 33)
    # Bracketing symbols: very high priority on the outside, very low on
    # the inside, so they delimit a complete sub-expression.
    parser.register_symbol('(', 100, 1, open_parenthesis_evaluator)
    parser.register_symbol(')', 1, 100, close_parenthesis_evaluator)
    parser.register_symbol('[', 100, 1, open_bracket_evaluator)
    parser.register_symbol(']', 1, 100, close_bracket_evaluator)
    return parser
def main(args):
    """Parse each command-line argument as a C expression and print the
    resulting tree, or a message if parsing fails."""
    parser = cexp_parser()
    for source in args[1:]:
        try:
            exp = parser.parse(source)
            print('{} -> {}'.format(source, exp))
        except RuntimeError as run_error:
            print('Unable to parse {}: {}'.format(source, run_error))
# Script entry point: parse each argv expression when run directly.
if __name__ == "__main__":
    main(sys.argv)
| bsd-2-clause |
ansible/ansible-modules-core | packaging/os/rhn_register.py | 25 | 15623 | #!/usr/bin/python
# (c) James Laska
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, rhnreg_ks requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
options:
state:
description:
- whether to register (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_url:
description:
- Specify an alternative Red Hat Network server URL
required: False
default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
profilename:
description:
- supply an profilename for use with registration
required: False
default: null
version_added: "2.0"
sslcacert:
description:
- supply a custom ssl CA certificate file for use with registration
required: False
default: None
version_added: "2.1"
systemorgid:
description:
- supply an organizational id for use with registration
required: False
default: None
version_added: "2.1"
channels:
description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration.
required: false
default: []
'''
EXAMPLES = '''
# Unregister system from RHN.
- rhn_register:
state: absent
username: joe_user
password: somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- rhn_register:
state: present
username: joe_user
password: somepass
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register:
state: present
activationkey: 1-222333444
enable_eus: true
# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
- rhn_register:
state: present
activationkey: 1-222333444
profilename: host.example.com.custom
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register:
state: present
username: joe_user
    password: somepass
server_url: https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
- rhn_register:
state: present
username: joe_user
password: somepass
channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
import sys
import types
import xmlrpclib
import urlparse
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
HAS_UP2DATE_CLIENT = True
except ImportError:
HAS_UP2DATE_CLIENT = False
# INSERT REDHAT SNIPPETS
from ansible.module_utils.redhat import *
# INSERT COMMON SNIPPETS
from ansible.module_utils.basic import *
class Rhn(RegistrationBase):
    """Manage Red Hat Network registration state for the local system.

    Wraps the rhn-client-tools configuration (/etc/sysconfig/rhn/up2date),
    the rhnreg_ks command line tool and the RHN XMLRPC API.
    """

    def __init__(self, username=None, password=None):
        RegistrationBase.__init__(self, username, password)
        self.config = self.load_config()

    def load_config(self):
        '''
        Read configuration from /etc/sysconfig/rhn/up2date.

        Returns None when rhn-client-tools is not installed.
        '''
        if not HAS_UP2DATE_CLIENT:
            return None

        self.config = up2date_client.config.initUp2dateConfig()

        # Add support for specifying a default value w/o having to standup some
        # configuration. Yeah, I know this should be subclassed ... but, oh
        # well
        def get_option_default(self, key, default=''):
            # the class in rhn-client-tools that this comes from didn't
            # implement __contains__() until 2.5.x. That's why we check if
            # the key is present in the dictionary that is the actual storage
            if key in self.dict:
                return self[key]
            else:
                return default

        # Bind the helper onto the Config instance (Python 2 three-argument
        # MethodType: function, instance, class).
        self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config)

        return self.config

    @property
    def hostname(self):
        '''
        Return the non-xmlrpc RHN hostname. This is a convenience method
        used for displaying a more readable RHN hostname.

        Returns: str
        '''
        url = urlparse.urlparse(self.config['serverURL'])
        return url[1].replace('xmlrpc.', '')

    @property
    def systemid(self):
        '''
        Return the numeric system id parsed from the systemIdPath XML file.

        Tries libxml2 first, then lxml.  Raises TypeError (int(None)) when
        the file is absent or neither XML library is available.
        '''
        systemid = None
        xpath_str = "//member[name='system_id']/value/string"

        if os.path.isfile(self.config['systemIdPath']):
            fd = open(self.config['systemIdPath'], 'r')
            xml_data = fd.read()
            fd.close()

            # Ugh, xml parsing time ...
            # First, try parsing with libxml2 ...
            if systemid is None:
                try:
                    import libxml2
                    doc = libxml2.parseDoc(xml_data)
                    ctxt = doc.xpathNewContext()
                    systemid = ctxt.xpathEval(xpath_str)[0].content
                    doc.freeDoc()
                    ctxt.xpathFreeContext()
                except ImportError:
                    pass

            # m-kay, let's try with lxml now ...
            if systemid is None:
                try:
                    from lxml import etree
                    root = etree.fromstring(xml_data)
                    systemid = root.xpath(xpath_str)[0].text
                except ImportError:
                    pass

            # Strip the 'ID-' prefix
            if systemid is not None and systemid.startswith('ID-'):
                systemid = systemid[3:]

        return int(systemid)

    @property
    def is_registered(self):
        '''
        Determine whether the current system is registered.

        Returns: True|False
        '''
        return os.path.isfile(self.config['systemIdPath'])

    def configure(self, server_url):
        '''
        Configure system for registration by persisting the server URL.
        '''
        self.config.set('serverURL', server_url)
        self.config.save()

    def enable(self):
        '''
        Prepare the system for RHN registration.  This includes ...
         * enabling the rhnplugin yum plugin
         * disabling the subscription-manager yum plugin
        '''
        RegistrationBase.enable(self)
        self.update_plugin_conf('rhnplugin', True)
        self.update_plugin_conf('subscription-manager', False)

    def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None):
        '''
        Register system to RHN by invoking rhnreg_ks.  If enable_eus=True,
        extended update support will be requested.
        '''
        register_cmd = ['/usr/sbin/rhnreg_ks', '--username', self.username, '--password', self.password, '--force']
        if self.module.params.get('server_url', None):
            register_cmd.extend(['--serverUrl', self.module.params.get('server_url')])
        if enable_eus:
            register_cmd.append('--use-eus-channel')
        if activationkey is not None:
            register_cmd.extend(['--activationkey', activationkey])
        if profilename is not None:
            register_cmd.extend(['--profilename', profilename])
        if sslcacert is not None:
            register_cmd.extend(['--sslCACert', sslcacert])
        if systemorgid is not None:
            register_cmd.extend(['--systemorgid', systemorgid])
        rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)

    def api(self, method, *args):
        '''
        Convenience RPC wrapper.  Lazily creates the XMLRPC server proxy
        and logs in; the session is cached for subsequent calls.
        '''
        if not hasattr(self, 'server') or self.server is None:
            if self.hostname != 'rhn.redhat.com':
                url = "https://%s/rpc/api" % self.hostname
            else:
                url = "https://xmlrpc.%s/rpc/api" % self.hostname
            self.server = xmlrpclib.Server(url, verbose=0)
            self.session = self.server.auth.login(self.username, self.password)
        func = getattr(self.server, method)
        return func(self.session, *args)

    def unregister(self):
        '''
        Unregister a previously registered system.
        '''
        # Initiate RPC connection and delete the system record server-side.
        self.api('system.deleteSystems', [self.systemid])
        # Remove systemid file
        os.unlink(self.config['systemIdPath'])

    def subscribe(self, channels=None):
        '''
        Subscribe the system to the given channels through the RHN API.

        Bug fix: the default used to be a shared mutable list (channels=[]);
        use None and normalize instead.
        '''
        channels = channels or []
        if len(channels) <= 0:
            return
        if self._is_hosted():
            # Hosted RHN: append to the existing channel list in one call.
            current_channels = self.api('channel.software.listSystemChannels', self.systemid)
            new_channels = [item['channel_label'] for item in current_channels]
            new_channels.extend(channels)
            return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
        else:
            # Satellite/Spacewalk: base and child channels are set through
            # separate API calls.
            current_channels = self.api('channel.software.listSystemChannels', self.systemid)
            current_channels = [item['label'] for item in current_channels]
            new_base = None
            new_childs = []
            for ch in channels:
                if ch in current_channels:
                    continue
                # A channel with no parent is a base channel.
                if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
                    new_base = ch
                else:
                    if ch not in new_childs:
                        new_childs.append(ch)
            out_base = 0
            out_childs = 0
            if new_base:
                out_base = self.api('system.setBaseChannel', self.systemid, new_base)
            if new_childs:
                out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
            # NOTE(review): truthy only when *both* a base and child channels
            # were changed; preserved as-is from the original logic.
            return out_base and out_childs

    def _subscribe(self, channels=None):
        '''
        Subscribe to requested yum repositories using the 'rhn-channel'
        command line tool.  Each entry in channels is treated as a regexp
        matched against the available channel names.
        '''
        channels = channels or []
        rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
        rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)

        # Enable requested repoid's
        for wanted_channel in channels:
            # Each inserted repo regexp will be matched. If no match, no success.
            for available_channel in stdout.rstrip().split('\n'):  # .rstrip() because of \n at the end -> empty string at the end
                # Bug fix: this previously referenced the undefined name
                # 'wanted_repo', raising NameError at runtime.
                if re.search(wanted_channel, available_channel):
                    # NOTE(review): run_command rebinds 'stdout' here, which
                    # also changes the list scanned for subsequent channels --
                    # verify this is intended.
                    rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)

    def _is_hosted(self):
        '''
        Return True if we are running against Hosted (rhn.redhat.com) or
        False otherwise (when running against Satellite or Spacewalk)
        '''
        return 'rhn.redhat.com' in self.hostname
def main():
    """Ansible module entry point.

    Reads the module parameters, then registers or unregisters the local
    system with RHN depending on the requested state.
    """
    # Read system RHN configuration
    rhn = Rhn()

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent']),
            username=dict(default=None, required=False),
            password=dict(default=None, required=False, no_log=True),
            server_url=dict(default=None, required=False),
            activationkey=dict(default=None, required=False, no_log=True),
            profilename=dict(default=None, required=False),
            sslcacert=dict(default=None, required=False, type='path'),
            systemorgid=dict(default=None, required=False),
            enable_eus=dict(default=False, type='bool'),
            channels=dict(default=[], type='list'),
        )
    )

    if not HAS_UP2DATE_CLIENT:
        module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")

    # Fall back to the serverURL from the local up2date configuration.
    if not module.params['server_url']:
        module.params['server_url'] = rhn.config.get_option('serverURL')

    state = module.params['state']
    rhn.username = module.params['username']
    rhn.password = module.params['password']
    rhn.configure(module.params['server_url'])
    activationkey = module.params['activationkey']
    profilename = module.params['profilename']
    sslcacert = module.params['sslcacert']
    systemorgid = module.params['systemorgid']
    channels = module.params['channels']
    rhn.module = module

    # Ensure system is registered
    if state == 'present':

        # Check for missing parameters ...
        if not (activationkey or rhn.username or rhn.password):
            module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
        if not activationkey and not (rhn.username and rhn.password):
            module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")

        # Register system
        if rhn.is_registered:
            module.exit_json(changed=False, msg="System already registered.")
        else:
            try:
                rhn.enable()
                # 'enable_eus' is already coerced to bool by argument_spec,
                # so no '== True' comparison is needed.
                rhn.register(module.params['enable_eus'], activationkey, profilename, sslcacert, systemorgid)
                rhn.subscribe(channels)
            except Exception:
                e = get_exception()
                module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))

            module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)

    # Ensure system is *not* registered
    if state == 'absent':
        if not rhn.is_registered:
            module.exit_json(changed=False, msg="System already unregistered.")
        else:
            try:
                rhn.unregister()
            except Exception:
                e = get_exception()
                module.fail_json(msg="Failed to unregister: %s" % e)

            module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
| gpl-3.0 |
khkaminska/bokeh | bokeh/_legacy_charts/builder/tests/test_histogram_builder.py | 6 | 4247 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
from mock import patch
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
from bokeh._legacy_charts import Histogram
from ._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestHistogram(unittest.TestCase):
    """Tests for the legacy Histogram chart builder."""
    def test_supported_input(self):
        """All supported input containers (OrderedDict, DataFrame, list of
        lists, ndarray) must produce the same binned data."""
        normal = [1, 2, 3, 1]
        lognormal = [5, 4, 4, 1]
        xyvalues = OrderedDict(normal=normal, lognormal=lognormal)
        xyvaluesdf = pd.DataFrame(xyvalues)
        # Expected per-series bin edges/left/right/bottom and normalized
        # histogram heights for bins=5.  ('exptected' spelling kept as-is.)
        exptected = dict(
            leftnormal=[1., 1.4, 1.8, 2.2, 2.6],
            rightnormal=[1.4, 1.8, 2.2, 2.6, 3.],
            lognormal=[5, 4, 4, 1],
            edgeslognormal=[1., 1.8, 2.6, 3.4, 4.2, 5.],
            bottomlognormal=[0, 0, 0, 0, 0],
            bottomnormal=[0, 0, 0, 0, 0],
            edgesnormal=[1., 1.4, 1.8, 2.2, 2.6, 3.],
            histlognormal=[0.3125, 0., 0., 0.625, 0.3125],
            leftlognormal=[1., 1.8, 2.6, 3.4, 4.2],
            normal=[1, 2, 3, 1],
            rightlognormal=[1.8, 2.6, 3.4, 4.2, 5.],
            histnormal=[1.25, 0., 0.625, 0., 0.625],
        )
        # Named inputs: groups come from the dict/DataFrame column names.
        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            hm = create_chart(Histogram, _xy, bins=5)
            builder = hm._builders[0]
            self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
            for key, expected_v in exptected.items():
                assert_array_almost_equal(builder._data[key], expected_v, decimal=2)
        # Unnamed inputs: groups fall back to stringified indices '0', '1'.
        lvalues = [[1, 2, 3, 1], [5, 4, 4, 1]]
        for i, _xy in enumerate([lvalues, np.array(lvalues)]):
            hm = create_chart(Histogram, _xy, bins=5)
            builder = hm._builders[0]
            self.assertEqual(builder._groups, ['0', '1'])
            for key, expected_v in exptected.items():
                # replace the keys because we have 0, 1 instead of normal and lognormal
                key = key.replace('lognormal', '1').replace('normal', '0')
                assert_array_almost_equal(builder._data[key], expected_v, decimal=2)
    @patch('bokeh._legacy_charts.builder.histogram_builder.np.histogram', return_value=([1, 3, 4], [2.4, 4]))
    def test_histogram_params(self, histogram_mock):
        """The builder must store its keyword args and forward bins/density
        to numpy.histogram (mocked here) for each series."""
        inputs = [[5, 0, 0.5, True], [3, 1, 0, False]]
        normal = [1, 2, 3, 1]
        lognormal = [5, 4, 4, 1]
        xyvalues = OrderedDict()
        xyvalues['normal'] = normal
        xyvalues['lognormal'] = lognormal
        for (bins, mu, sigma, dens) in inputs:
            histogram_mock.reset_mock()
            kws = dict(bins=bins, mu=mu, sigma=sigma, density=dens)
            hm = create_chart(Histogram, xyvalues, compute_values=False, **kws)
            builder = hm._builders[0]
            # ensure all class attributes have been correctly set
            for key, value in kws.items():
                self.assertEqual(getattr(builder, key), value)
            builder._process_data()
            # ensure we are calling numpy.histogram with the right args
            calls = histogram_mock.call_args_list
            assert_array_equal(calls[0][0][0], np.array([1, 2, 3, 1]))
            assert_array_equal(calls[1][0][0], np.array([5, 4, 4, 1]))
            self.assertEqual(calls[0][1]['bins'], bins)
            self.assertEqual(calls[1][1]['bins'], bins)
            self.assertEqual(calls[0][1]['density'], dens)
            self.assertEqual(calls[1][1]['density'], dens)
| bsd-3-clause |
tszym/ansible | lib/ansible/modules/network/avi/avi_applicationprofile.py | 50 | 6539 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_applicationprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ApplicationProfile Avi RESTful Object
description:
- This module is used to configure ApplicationProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_service_profile:
description:
- Specifies various dns service related controls for virtual service.
dos_rl_profile:
description:
- Specifies various security related controls for virtual service.
http_profile:
description:
- Specifies the http application proxy profile parameters.
name:
description:
- The name of the application profile.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved for backend connection.
- Not compatible with connection multiplexing.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
tcp_app_profile:
description:
- Specifies the tcp application proxy profile parameters.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Specifies which application layer proxy is enabled for the virtual service.
- Enum options - APPLICATION_PROFILE_TYPE_L4, APPLICATION_PROFILE_TYPE_HTTP, APPLICATION_PROFILE_TYPE_SYSLOG, APPLICATION_PROFILE_TYPE_DNS,
- APPLICATION_PROFILE_TYPE_SSL.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the application profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create an Application Profile for HTTP application enabled for SSL traffic
avi_applicationprofile:
controller: ''
username: ''
password: ''
http_profile:
cache_config:
age_header: true
aggressive: false
date_header: true
default_expire: 600
enabled: false
heuristic_expire: false
max_cache_size: 0
max_object_size: 4194304
mime_types_group_refs:
- admin:System-Cacheable-Resource-Types
min_object_size: 100
query_cacheable: false
xcache_header: true
client_body_timeout: 0
client_header_timeout: 10000
client_max_body_size: 0
client_max_header_size: 12
client_max_request_size: 48
compression_profile:
compressible_content_ref: admin:System-Compressible-Content-Types
compression: false
remove_accept_encoding_header: true
type: AUTO_COMPRESSION
connection_multiplexing_enabled: true
hsts_enabled: false
hsts_max_age: 365
http_to_https: false
httponly_enabled: false
keepalive_header: false
keepalive_timeout: 30000
max_bad_rps_cip: 0
max_bad_rps_cip_uri: 0
max_bad_rps_uri: 0
max_rps_cip: 0
max_rps_cip_uri: 0
max_rps_unknown_cip: 0
max_rps_unknown_uri: 0
max_rps_uri: 0
post_accept_timeout: 30000
secure_cookie_enabled: false
server_side_redirect_to_https: false
spdy_enabled: false
spdy_fwd_proxy_mode: false
ssl_client_certificate_mode: SSL_CLIENT_CERTIFICATE_NONE
ssl_everywhere_enabled: false
websockets_enabled: true
x_forwarded_proto_enabled: false
xff_alternate_name: X-Forwarded-For
xff_enabled: true
name: System-HTTP
tenant_ref: admin
type: APPLICATION_PROFILE_TYPE_HTTP
'''
RETURN = '''
obj:
description: ApplicationProfile (api/applicationprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: build the argument spec for the Avi
    ApplicationProfile object and hand control to the generic Avi API driver."""
    # Options specific to the applicationprofile object.
    spec = {
        'state': dict(default='present',
                      choices=['absent', 'present']),
        'description': dict(type='str',),
        'dns_service_profile': dict(type='dict',),
        'dos_rl_profile': dict(type='dict',),
        'http_profile': dict(type='dict',),
        'name': dict(type='str', required=True),
        'preserve_client_ip': dict(type='bool',),
        'tcp_app_profile': dict(type='dict',),
        'tenant_ref': dict(type='str',),
        'type': dict(type='str', required=True),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    # Merge in the options shared by every Avi module (controller, auth, ...).
    spec.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=spec, supports_check_mode=True)
    # Bail out early when the optional SDK could not be imported.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'applicationprofile',
                           set([]))
# Allow execution as a standalone module (how Ansible invokes it).
if __name__ == '__main__':
    main()
| gpl-3.0 |
gianrubio/prometheus-operator | contrib/kube-prometheus/assets/grafana/statefulset.dashboard.py | 2 | 15125 | import sys
import os.path
sys.path.insert(0, os.path.dirname(__file__))
from _grafanalib import *
# Declarative Grafana dashboard (grafanalib) for monitoring one Kubernetes
# StatefulSet, scoped by the two template variables defined below.
dashboard = Dashboard(
    title='StatefulSet',
    version=1,
    graphTooltip=1,
    time=Time(start='now-6h'),
    # Drop-downs that restrict every query to a single namespace + StatefulSet.
    templating=Templating(list=[
        {
            'allValue': '.*',
            'current': {},
            'datasource': 'prometheus',
            'hide': 0,
            'includeAll': False,
            'label': 'Namespace',
            'multi': False,
            'name': 'statefulset_namespace',
            'options': [],
            'query': 'label_values(kube_statefulset_metadata_generation, '
                     'namespace)',
            'refresh': 1,
            'regex': '',
            'sort': 0,
            'tagValuesQuery': None,
            'tags': [],
            'tagsQuery': '',
            'type': 'query',
            'useTags': False,
        },
        {
            'allValue': None,
            'current': {},
            'datasource': 'prometheus',
            'hide': 0,
            'includeAll': False,
            'label': 'StatefulSet',
            'multi': False,
            'name': 'statefulset_name',
            'options': [],
            'query': 'label_values(kube_statefulset_metadata_generation'
                     '{namespace="$statefulset_namespace"}, statefulset)',
            'refresh': 1,
            'regex': '',
            'sort': 0,
            'tagValuesQuery': '',
            'tags': [],
            'tagsQuery': 'statefulset',
            'type': 'query',
            'useTags': False,
        },
    ]),
    rows=[
        # Row 1: aggregate CPU / memory / network usage across the set's pods.
        Row(panels=[
            SingleStat(
                title='CPU',
                id=8,
                gauge=Gauge(show=False),
                postfix='cores',
                span=4,
                valueFontSize='110%',
                mappingType=1,
                mappingTypes=[
                    {
                        'name': 'value to text',
                        'value': 1,
                    },
                    {
                        'name': 'range to text',
                        'value': 2,
                    },
                ],
                valueMaps=[
                    {
                        'op': '=',
                        'text': 'N/A',
                        'value': 'null',
                    },
                ],
                rangeMaps=[
                    {
                        'from': 'null',
                        'text': 'N/A',
                        'to': 'null',
                    },
                ],
                colors=[
                    (245, 54, 54, 0.9),
                    (237, 129, 40, 0.89),
                    (50, 172, 45, 0.97),
                ],
                sparkline=SparkLine(
                    fillColor=(31, 118, 189, 0.18),
                    lineColor=(31, 120, 193),
                    show=True,
                ),
                targets=[
                    {
                        'expr': 'sum(rate(container_cpu_usage_seconds_total'
                                '{namespace=\"$statefulset_namespace\",pod_name=~\"'
                                '$statefulset_name.*\"}[3m]))',
                    },
                ],
            ),
            SingleStat(
                title='Memory',
                id=9,
                postfix='GB',
                prefixFontSize='80%',
                gauge=Gauge(show=False),
                span=4,
                valueFontSize='110%',
                mappingType=1,
                mappingTypes=[
                    {
                        'name': 'value to text',
                        'value': 1,
                    },
                    {
                        'name': 'range to text',
                        'value': 2,
                    },
                ],
                sparkline=SparkLine(
                    fillColor=(31, 118, 189, 0.18),
                    lineColor=(31, 120, 193),
                    show=True,
                ),
                valueMaps=[
                    {
                        'op': '=',
                        'text': 'N/A',
                        'value': 'null',
                    },
                ],
                rangeMaps=[
                    {
                        'from': 'null',
                        'text': 'N/A',
                        'to': 'null',
                    },
                ],
                colors=[
                    (245, 54, 54, 0.9),
                    (237, 129, 40, 0.89),
                    (50, 172, 45, 0.97),
                ],
                targets=[
                    {
                        'expr': 'sum(container_memory_usage_bytes{namespace='
                                '\"$statefulset_namespace\",pod_name=~\"$'
                                'statefulset_name.*\"}) / 1024^3',
                        'intervalFactor': 2,
                        'refId': 'A',
                        'step': 600,
                    },
                ],
            ),
            SingleStat(
                title='Network',
                format='Bps',
                gauge=Gauge(thresholdMarkers=False),
                id=7,
                postfix='',
                span=4,
                mappingType=1,
                mappingTypes=[
                    {
                        'name': 'value to text',
                        'value': 1,
                    },
                    {
                        'name': 'range to text',
                        'value': 2,
                    },
                ],
                sparkline=SparkLine(
                    fillColor=(31, 118, 189, 0.18),
                    lineColor=(31, 120, 193),
                    show=True,
                ),
                valueMaps=[
                    {
                        'op': '=',
                        'text': 'N/A',
                        'value': 'null',
                    },
                ],
                rangeMaps=[
                    {
                        'from': 'null',
                        'text': 'N/A',
                        'to': 'null',
                    },
                ],
                colors=[
                    (245, 54, 54, 0.9),
                    (237, 129, 40, 0.89),
                    (50, 172, 45, 0.97),
                ],
                targets=[
                    {
                        'expr': 'sum(rate(container_network_transmit_'
                                'bytes_total'
                                '{namespace=\"$statefulset_namespace\",pod_name=~\"'
                                '$statefulset_name.*\"}[3m])) + '
                                'sum(rate(container_network_receive_bytes_total'
                                '{namespace=\"$statefulset_namespace\",pod_name=~'
                                '\"$statefulset_name.*\"}[3m]))',
                    },
                ],
            ),
            ],
            height=200,
        ),
        # Row 2: replica counts and generation counters from kube-state-metrics.
        Row(
            height=100, panels=[
                SingleStat(
                    title='Desired Replicas',
                    id=5,
                    mappingType=1,
                    mappingTypes=[
                        {
                            'name': 'value to text',
                            'value': 1,
                        },
                        {
                            'name': 'range to text',
                            'value': 2,
                        },
                    ],
                    span=3,
                    colors=[
                        (245, 54, 54, 0.9),
                        (237, 129, 40, 0.89),
                        (50, 172, 45, 0.97),
                    ],
                    targets=[
                        {
                            'metric': 'kube_statefulset_replicas',
                            'expr': 'max(kube_statefulset_replicas'
                                    '{statefulset="$statefulset_name",namespace='
                                    '"$statefulset_namespace"}) without '
                                    '(instance, pod)',
                        },
                    ],
                    valueMaps=[
                        {
                            'op': '=',
                            'text': 'N/A',
                            'value': 'null',
                        },
                    ],
                    gauge=Gauge(thresholdMarkers=False, show=False),
                    rangeMaps=[
                        {
                            'from': 'null',
                            'text': 'N/A',
                            'to': 'null',
                        },
                    ],
                ),
                SingleStat(
                    title='Available Replicas',
                    colors=[
                        (245, 54, 54, 0.9),
                        (237, 129, 40, 0.89),
                        (50, 172, 45, 0.97),
                    ],
                    gauge=Gauge(show=False),
                    id=6,
                    mappingType=1,
                    mappingTypes=[
                        {
                            'name': 'value to text',
                            'value': 1,
                        },
                        {
                            'name': 'range to text',
                            'value': 2,
                        },
                    ],
                    targets=[
                        {
                            'expr': 'min(kube_statefulset_status_replicas'
                                    '{statefulset=\"$statefulset_name\",'
                                    'namespace=\"$statefulset_namespace\"}) without '
                                    '(instance, pod)',
                        },
                    ],
                    rangeMaps=[
                        {
                            'from': 'null',
                            'text': 'N/A',
                            'to': 'null',
                        },
                    ],
                    span=3,
                    sparkline=SparkLine(),
                    valueMaps=[
                        {
                            'op': '=',
                            'text': 'N/A',
                            'value': 'null',
                        }
                    ],
                ),
                SingleStat(
                    title='Observed Generation',
                    colors=[
                        (245, 54, 54, 0.9),
                        (237, 129, 40, 0.89),
                        (50, 172, 45, 0.97),
                    ],
                    gauge=Gauge(),
                    id=3,
                    mappingType=1,
                    mappingTypes=[
                        {
                            'name': 'value to text',
                            'value': 1,
                        },
                        {
                            'name': 'range to text',
                            'value': 2,
                        },
                    ],
                    targets=[
                        {
                            'expr': 'max(kube_statefulset_status_observed_'
                                    'generation{statefulset=\"$statefulset_name\",'
                                    'namespace=\"$statefulset_namespace\"}) without '
                                    '(instance, pod)',
                        },
                    ],
                    rangeMaps=[
                        {
                            'from': "null",
                            'text': 'N/A',
                            'to': 'null',
                        },
                    ],
                    span=3,
                    sparkline=SparkLine(),
                    valueMaps=[
                        {
                            'op': '=',
                            'text': 'N/A',
                            'value': 'null',
                        }
                    ],
                ),
                SingleStat(
                    title='Metadata Generation',
                    colors=[
                        (245, 54, 54, 0.9),
                        (237, 129, 40, 0.89),
                        (50, 172, 45, 0.97),
                    ],
                    gauge=Gauge(show=False),
                    id=2,
                    mappingType=1,
                    mappingTypes=[
                        {
                            'name': 'value to text',
                            'value': 1,
                        },
                        {
                            'name': 'range to text',
                            'value': 2,
                        },
                    ],
                    targets=[
                        {
                            'expr': 'max(kube_statefulset_metadata_generation'
                                    '{statefulset=\"$statefulset_name\",namespace=\"'
                                    '$statefulset_namespace\"}) without (instance, '
                                    'pod)',
                        },
                    ],
                    rangeMaps=[
                        {
                            'from': 'null',
                            'text': 'N/A',
                            'to': 'null',
                        },
                    ],
                    span=3,
                    sparkline=SparkLine(),
                    valueMaps=[
                        {
                            'op': '=',
                            'text': 'N/A',
                            'value': 'null',
                        },
                    ],
                ),
            ],
        ),
        # Row 3: desired-vs-available replicas plotted over time.
        Row(
            height=350, panels=[
                Graph(
                    title='Replicas',
                    dashLength=10,
                    dashes=False,
                    id=1,
                    spaceLength=10,
                    targets=[
                        {
                            'expr': 'min(kube_statefulset_status_replicas'
                                    '{statefulset=\"$statefulset_name\",'
                                    'namespace=\"$statefulset_namespace\"}) without '
                                    '(instance, pod)',
                            'legendFormat': 'available',
                            'refId': 'B',
                            'step': 30,
                        },
                        {
                            'expr': 'max(kube_statefulset_replicas'
                                    '{statefulset=\"$statefulset_name\",namespace=\"'
                                    '$statefulset_namespace\"}) without '
                                    '(instance, pod)',
                            'legendFormat': 'desired',
                            'refId': 'E',
                            'step': 30,
                        }
                    ],
                    xAxis=XAxis(mode='time'),
                    yAxes=YAxes(
                        YAxis(min=None),
                        YAxis(format='short', min=None, show=False),
                    ),
                ),
            ]
        ),
    ],
)
| apache-2.0 |
sda2b/youtube-dl | youtube_dl/extractor/eighttracks.py | 121 | 5868 | # coding: utf-8
from __future__ import unicode_literals
import json
import random
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
)
class EightTracksIE(InfoExtractor):
    # Extractor for 8tracks.com mixes: reads the embedded PAGE.mix JSON from
    # the playlist page, then walks the site's play/next JSON API one track at
    # a time to build a playlist of audio entries.
    IE_NAME = '8tracks'
    _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
    _TEST = {
        "name": "EightTracks",
        "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
        "info_dict": {
            'id': '1336550',
            'display_id': 'youtube-dl-test-tracks-a',
            "description": "test chars: \"'/\\ä↭",
            "title": "youtube-dl test tracks \"'/\\ä↭<>",
        },
        "playlist": [
            {
                "md5": "96ce57f24389fc8734ce47f4c1abcc55",
                "info_dict": {
                    "id": "11885610",
                    "ext": "m4a",
                    "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "4ab26f05c1f7291ea460a3920be8021f",
                "info_dict": {
                    "id": "11885608",
                    "ext": "m4a",
                    "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "d30b5b5f74217410f4689605c35d1fd7",
                "info_dict": {
                    "id": "11885679",
                    "ext": "m4a",
                    "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "4eb0a669317cd725f6bbd336a29f923a",
                "info_dict": {
                    "id": "11885680",
                    "ext": "m4a",
                    "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "1893e872e263a2705558d1d319ad19e8",
                "info_dict": {
                    "id": "11885682",
                    "ext": "m4a",
                    "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "b673c46f47a216ab1741ae8836af5899",
                "info_dict": {
                    "id": "11885683",
                    "ext": "m4a",
                    "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "1d74534e95df54986da7f5abf7d842b7",
                "info_dict": {
                    "id": "11885684",
                    "ext": "m4a",
                    "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            },
            {
                "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
                "info_dict": {
                    "id": "11885685",
                    "ext": "m4a",
                    "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
                    "uploader_id": "ytdl"
                }
            }
        ]
    }
    def _real_extract(self, url):
        """Fetch the mix page, then resolve each track via the play/next API
        and return a playlist info dict."""
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        # Mix metadata is embedded in the page as a JS assignment (PAGE.mix = {...};).
        data = self._parse_json(
            self._search_regex(
                r"(?s)PAGE\.mix\s*=\s*({.+?});\n", webpage, 'trax information'),
            playlist_id)
        # Random session token required by the play/next endpoints.
        session = str(random.randint(0, 1000000000))
        mix_id = data['id']
        track_count = data['tracks_count']
        duration = data['duration']
        avg_song_duration = float(duration) / track_count
        # duration is sometimes negative, use predefined avg duration
        if avg_song_duration <= 0:
            avg_song_duration = 300
        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
        next_url = first_url
        entries = []
        for i in range(track_count):
            api_json = None
            download_tries = 0
            # Retry with a song-length backoff: the API rate-limits requests
            # that arrive faster than real playback would.
            while api_json is None:
                try:
                    api_json = self._download_webpage(
                        next_url, playlist_id,
                        note='Downloading song information %d/%d' % (i + 1, track_count),
                        errnote='Failed to download song information')
                except ExtractorError:
                    if download_tries > 3:
                        raise
                    else:
                        download_tries += 1
                        self._sleep(avg_song_duration, playlist_id)
            api_data = json.loads(api_json)
            track_data = api_data['set']['track']
            info = {
                'id': compat_str(track_data['id']),
                'url': track_data['track_file_stream_url'],
                'title': track_data['performer'] + ' - ' + track_data['name'],
                'raw_title': track_data['name'],
                'uploader_id': data['user']['login'],
                'ext': 'm4a',
            }
            entries.append(info)
            # The 'next' endpoint needs the id of the track just resolved.
            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
                session, mix_id, track_data['id'])
        return {
            '_type': 'playlist',
            'entries': entries,
            'id': compat_str(mix_id),
            'display_id': playlist_id,
            'title': data.get('name'),
            'description': data.get('description'),
        }
| unlicense |
bl4ckdu5t/registron | PyInstaller/lib/unittest2/test/test_loader.py | 11 | 49503 | import sys
import types
import unittest2
if sys.version_info[:2] == (2,3):
from sets import Set as set
from sets import ImmutableSet as frozenset
class Test_TestLoader(unittest2.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest2.TestCase):
def foo_bar(self): pass
empty_suite = unittest2.TestSuite()
loader = unittest2.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest2.TestSuite):
pass
loader = unittest2.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
loader = unittest2.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest2.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest2.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest2.TestSuite)
self.assertEquals(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEquals(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest2.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegexp(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('unittest2.sdasfasfasdf')
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('', unittest2)
except AttributeError:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest2)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignorning the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testsuite = unittest2.TestSuite([MyTestCase('test')])
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
testcase_2 = unittest2.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest2.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
    def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
        """A callable's TestCase result is wrapped in the overridden suiteClass."""
        class SubTestSuite(unittest2.TestSuite):
            pass
        m = types.ModuleType('m')
        testcase_1 = unittest2.FunctionTestCase(lambda: None)
        def return_TestCase():
            return testcase_1
        m.return_TestCase = return_TestCase
        loader = unittest2.TestLoader()
        # Override suiteClass to verify the loader actually uses it.
        loader.suiteClass = SubTestSuite
        suite = loader.loadTestsFromName('return_TestCase', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest2.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
    def test_loadTestsFromName__module_not_loaded(self):
        """loadTestsFromName imports a not-yet-loaded module as a side effect."""
        # We're going to try to load this module as a side-effect, so it
        # better not be loaded before we try.
        #
        module_name = 'unittest2.test.dummy'
        sys.modules.pop(module_name, None)
        loader = unittest2.TestLoader()
        try:
            suite = loader.loadTestsFromName(module_name)
            self.assertIsInstance(suite, loader.suiteClass)
            self.assertEqual(list(suite), [])
            # module should now be loaded, thanks to loadTestsFromName()
            self.assertIn(module_name, sys.modules)
        finally:
            # Restore sys.modules so the import side effect doesn't leak.
            if module_name in sys.modules:
                del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames([], unittest2)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
    def test_loadTestsFromNames__empty_name(self):
        """An empty string among the names raises ValueError."""
        loader = unittest2.TestLoader()
        try:
            loader.loadTestsFromNames([''])
        except ValueError, e:
            self.assertEqual(str(e), "Empty module name")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
    def test_loadTestsFromNames__malformed_name(self):
        """An impossible module name raises ValueError or ImportError."""
        loader = unittest2.TestLoader()
        # XXX Should this raise ValueError or ImportError?
        try:
            loader.loadTestsFromNames(['abc () //'])
        except ValueError:
            pass
        except ImportError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
    def test_loadTestsFromNames__unknown_module_name(self):
        """A nonexistent module name raises ImportError with a clear message."""
        loader = unittest2.TestLoader()
        try:
            loader.loadTestsFromNames(['sdasfasfasdf'])
        except ImportError, e:
            self.assertEqual(str(e), "No module named sdasfasfasdf")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
    def test_loadTestsFromNames__unknown_attr_name(self):
        """A known module with an unknown attribute raises AttributeError."""
        loader = unittest2.TestLoader()
        try:
            loader.loadTestsFromNames(['unittest2.sdasfasfasdf', 'unittest2'])
        except AttributeError, e:
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
    def test_loadTestsFromNames__relative_empty_name(self):
        """An empty relative name currently raises AttributeError.

        XXX ValueError would arguably be more appropriate; the test documents
        the current behaviour.
        """
        loader = unittest2.TestLoader()
        try:
            loader.loadTestsFromNames([''], unittest2)
        except AttributeError:
            pass
        else:
            self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
    def test_loadTestsFromNames__relative_malformed_name(self):
        """An impossible relative attribute name raises AttributeError or
        ValueError."""
        loader = unittest2.TestLoader()
        # XXX Should this raise AttributeError or ValueError?
        try:
            loader.loadTestsFromNames(['abc () //'], unittest2)
        except AttributeError:
            pass
        except ValueError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest2.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testsuite = unittest2.TestSuite([MyTestCase('test')])
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
    def test_loadTestsFromNames__relative_invalid_testmethod(self):
        """A dotted name naming a missing test method raises AttributeError."""
        m = types.ModuleType('m')
        class MyTestCase(unittest2.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest2.TestLoader()
        try:
            loader.loadTestsFromNames(['testcase_1.testfoo'], m)
        except AttributeError, e:
            # The error message pinpoints the missing attribute on the class.
            self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
testcase_2 = unittest2.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest2.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest2.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
    def test_loadTestsFromNames__callable__call_staticmethod(self):
        """A staticmethod returning a TestCase is resolved and called."""
        m = types.ModuleType('m')
        class Test1(unittest2.TestCase):
            def test(self):
                pass
        testcase_1 = Test1('test')
        class Foo(unittest2.TestCase):
            def foo():
                return testcase_1
            # Old-style staticmethod declaration (pre-decorator syntax).
            foo = staticmethod(foo)
        m.Foo = Foo
        loader = unittest2.TestLoader()
        suite = loader.loadTestsFromNames(['Foo.foo'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        ref_suite = unittest2.TestSuite([testcase_1])
        self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
    def test_loadTestsFromNames__callable__wrong_type(self):
        """A callable returning neither TestCase nor TestSuite raises TypeError."""
        m = types.ModuleType('m')
        def return_wrong():
            return 6
        m.return_wrong = return_wrong
        loader = unittest2.TestLoader()
        try:
            loader.loadTestsFromNames(['return_wrong'], m)
        except TypeError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
    def test_loadTestsFromNames__module_not_loaded(self):
        """loadTestsFromNames imports a not-yet-loaded module as a side effect."""
        # We're going to try to load this module as a side-effect, so it
        # better not be loaded before we try.
        #
        module_name = 'unittest2.test.dummy'
        sys.modules.pop(module_name, None)
        loader = unittest2.TestLoader()
        try:
            suite = loader.loadTestsFromNames([module_name])
            self.assertIsInstance(suite, loader.suiteClass)
            self.assertEqual(list(suite), [unittest2.TestSuite()])
            # module should now be loaded, thanks to loadTestsFromName()
            self.assertIn(module_name, sys.modules)
        finally:
            # Restore sys.modules so the import side effect doesn't leak.
            if module_name in sys.modules:
                del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest2.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest2.TestCase):
def foobar(self): pass
loader = unittest2.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
    def test_getTestCaseNames__not_a_TestCase(self):
        """Non-TestCase classes are still scanned for matching method names.

        XXX This should arguably raise TypeError; the test documents the
        current lenient behaviour.
        """
        class BadCase(int):
            def test_foo(self):
                pass
        loader = unittest2.TestLoader()
        names = loader.getTestCaseNames(BadCase)
        self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest2.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromTestCase(self):
        """loadTestsFromTestCase honours the loader's testMethodPrefix."""
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        tests_1 = unittest2.TestSuite([Foo('foo_bar')])
        tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
        loader = unittest2.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromModule(self):
        """loadTestsFromModule honours the loader's testMethodPrefix."""
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests_1 = [unittest2.TestSuite([Foo('foo_bar')])]
        tests_2 = [unittest2.TestSuite([Foo('test_1'), Foo('test_2')])]
        loader = unittest2.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromName(self):
        """loadTestsFromName honours the loader's testMethodPrefix."""
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests_1 = unittest2.TestSuite([Foo('foo_bar')])
        tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
        loader = unittest2.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromNames(self):
        """loadTestsFromNames honours the loader's testMethodPrefix."""
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        # loadTestsFromNames nests each name's suite inside an outer suite.
        tests_1 = unittest2.TestSuite([unittest2.TestSuite([Foo('foo_bar')])])
        tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
        tests_2 = unittest2.TestSuite([tests_2])
        loader = unittest2.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.testMethodPrefix == 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
        """loadTestsFromTestCase orders tests with sortTestMethodsUsing."""
        def reversed_cmp(x, y):
            return -cmp(x, y)
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        loader = unittest2.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        # Reverse comparator => tests come out in reverse name order.
        tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromModule(self):
        """loadTestsFromModule orders tests with sortTestMethodsUsing."""
        def reversed_cmp(x, y):
            return -cmp(x, y)
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        m.Foo = Foo
        loader = unittest2.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
        self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromName(self):
        """loadTestsFromName orders tests with sortTestMethodsUsing."""
        def reversed_cmp(x, y):
            return -cmp(x, y)
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        m.Foo = Foo
        loader = unittest2.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromNames(self):
        """loadTestsFromNames orders tests with sortTestMethodsUsing."""
        def reversed_cmp(x, y):
            return -cmp(x, y)
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        m.Foo = Foo
        loader = unittest2.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
        self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
def test_sortTestMethodsUsing__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.sortTestMethodsUsing is cmp)
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
    def test_suiteClass__loadTestsFromModule(self):
        """loadTestsFromModule builds its result with the loader's suiteClass."""
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        # With suiteClass = list the result is a list of lists of tests.
        tests = [[Foo('test_1'), Foo('test_2')]]
        loader = unittest2.TestLoader()
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
    def test_suiteClass__loadTestsFromName(self):
        """loadTestsFromName builds its result with the loader's suiteClass."""
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests = [Foo('test_1'), Foo('test_2')]
        loader = unittest2.TestLoader()
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
    def test_suiteClass__loadTestsFromNames(self):
        """loadTestsFromNames builds its result with the loader's suiteClass."""
        m = types.ModuleType('m')
        class Foo(unittest2.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        # With suiteClass = list the result is a list of lists of tests.
        tests = [[Foo('test_1'), Foo('test_2')]]
        loader = unittest2.TestLoader()
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.suiteClass is unittest2.TestSuite)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest2.main()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.