repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
LowieHuyghe/edmunds | edmunds/log/drivers/timedfile.py | 1 | 1853 |
from logging.handlers import TimedRotatingFileHandler
from logging import WARNING, Formatter
import os
class TimedFile(TimedRotatingFileHandler):
    """
    Timed File Driver

    A logging handler that rotates its file on a time schedule, but obtains
    the underlying stream from the application's filesystem abstraction
    instead of opening the path directly.
    """

    def __init__(self, app, log_path, filename, prefix='', when='D', interval=1, backup_count=0, level=WARNING, format=None):
        """
        Initiate the instance
        :param app: The application
        :type app: Application
        :param log_path: The log path
        :type log_path: str
        :param filename: The filename
        :type filename: str
        :param prefix: The prefix for storing
        :type prefix: str
        :param when: Store when?
        :type when: str
        :param interval: The interval for storing
        :type interval: int
        :param backup_count: The max number of files stored
        :type backup_count: int
        :param level: The minimum level to log
        :type level: int
        :param format: The format for the formatter
        :type format: str
        """
        # _app must be assigned BEFORE calling the parent constructor:
        # TimedRotatingFileHandler.__init__ opens the log file through the
        # overridden _open() below, which reads self._app.
        self._app = app

        filename = os.path.join(log_path, prefix + filename)

        super(TimedFile, self).__init__(filename, when=when, interval=interval, backupCount=backup_count, utc=True)

        self.setLevel(level)
        if format is None:
            format = '[%(asctime)s] %(levelname)-8s: %(message)s [in %(pathname)s:%(lineno)d]'
        self.setFormatter(Formatter(format))

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        # The handler's configured mode and encoding (self.mode,
        # self.encoding) are intentionally not used here: the stream comes
        # from the application's filesystem abstraction, always in append
        # mode.
        # self.encoding
        # self.mode
        return self._app.fs().write_stream(self.baseFilename, append=True)
| apache-2.0 |
jtgans/squish | lib/squish/command.py | 1 | 8698 | #!/usr/bin/env python
# -*- python -*-
#
# Copyright (C) 2008 Google, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
Squish: The stupid bug tracker.
'''
import os
import os.path
import sys
import hashlib
import glob
import optparse
import yaml
from . import progName
from . import __version__
from debuggable import Debuggable
from registeredcommand import RegisteredCommand
from registeredcommand import commands
import bug
import config
import userconfig
class Command(Debuggable):
'''
Base class for all squish commands. Contains methods and helpers that are
common to all commands.
'''
__metaclass__ = RegisteredCommand
command_name = ''
synopsis = ''
usage = ''
# Override this to allow a command to work without a squish bug repository
# being present.
requireSiteConfig = True
_parser = None
_args = None
_flags = None
_siteDir = None
_config = None
_userConfig = None
def __init__(self):
self._parseArguments()
self._siteDir = self._findSiteDir()
self._loadSiteConfig()
self._loadUserConfig()
# Do some final cleanup with the options to make sure they're sane.
if self._flags.use_pager:
if (not sys.stdout.isatty() or
not os.path.exists(self._flags.pager_cmd)):
self._flags.use_pager = False
self._debug_mode = self._flags.debug
self._setName(self.command_name)
def _findSiteDir(self):
'''
Walk the directory tree from the current directory to the root directory,
looking for a directory called bugs with a file in it called config.yaml.
'''
curpath = os.path.abspath('.')
while curpath != '/':
if (os.path.isdir(curpath + '/bugs') and
os.path.isfile(curpath + '/bugs/config.yaml')):
return curpath + '/bugs'
curpath = os.path.abspath(curpath + '/..')
return None
def _loadUserConfig(self):
'''
Load in the user's preferences config. If none exists, just initialize to
the default and write it out to ~/.squishrc.yaml
'''
homedir = os.environ['HOME']
rcfile = '%s/.squishrc' % homedir
if os.path.isfile(rcfile):
try:
stream = file(rcfile, 'r')
self._userConfig = yaml.load(stream)
stream.close()
except Exception, e:
sys.stderr.write('Unable to read from ~/.squishrc: %s\n' % str(e))
sys.exit(1)
else:
self._userConfig = userconfig.UserConfig()
try:
stream = file(rcfile, 'w')
yaml.dump(self._userConfig, stream, default_flow_style=False)
stream.close()
except Exception, e:
sys.stderr.write('Unable to create ~/.squishrc: %s\n' % str(e))
sys.exit(1)
def _loadSiteConfig(self):
'''
Load in the site-specific config file. Should be in self._siteDir +
'/config.yaml'. If no config.yaml file exists, throw an error and exit.
'''
if self._siteDir:
if not os.path.isfile(self._siteDir + '/config.yaml'):
sys.stderr.write('Directory %s is not a squish bug repository.\n' %
self._siteDir)
sys.exit(1)
try:
stream = file(self._siteDir + '/config.yaml', 'r')
self._config = yaml.load(stream)
stream.close()
except Exception, e:
sys.stderr.write('Unable to load %s/config.yaml: %s\n' % (self._siteDir,
str(e)))
sys.exit(1)
elif self.requireSiteConfig:
sys.stderr.write('Unable to find squish bug repository.\n')
sys.exit(1)
def _parseArguments(self):
# Generate our default values for the options
pager_cmd = (os.environ.has_key('PAGER') and os.environ['PAGER']) or ''
self._parser = optparse.OptionParser(usage='',
version=self._getVersionString())
self._parser.add_option('--pager', dest='pager_cmd',
action='store', default=pager_cmd,
help=('The path to the pager command to use. '
'Defaults to the environment variable PAGER '
'if set. If PAGER is not set, or the '
'controlling terminal is not a tty, the '
'pager will not be used.'))
self._parser.add_option('--no-pager', dest='use_pager',
action='store_false', default=True,
help='Don\'t use the pager.')
self._parser.add_option('--debug', dest='debug',
action='store_true', default=False,
help='Turn on debugging information.')
# Let our subclasses alter the opt parser if needed.
self._setupOptParse()
# Go!
(self._flags, self._args) = self._parser.parse_args()
# Strip off the command name from the args -- we don't need it.
self._args = self._args[1:]
def spawnUserEditor(self, template, filename):
'''
Spawn the user's editor on a template given in a specific filename.
'''
# Write out the bug report template so the editor can actually hack on it.
if not os.path.isfile(filename):
try:
stream = file(filename, 'w')
stream.write(template)
stream.close()
except OSError, e:
sys.stderr.write('Unable to open %s for writing: %s'
% (filename, str(e)))
sys.exit(1)
# Take the hash of the template so that we know if it's been changed we can
# go ahead and use it for the report.
orig_hash = sha1(template).hexdigest()
# Spawn the user's editor here
os.system('%s %s' % (self._userConfig.editor, filename))
# Read it back in
try:
stream = file(filename, 'r')
report = ''.join(stream.readlines())
stream.close()
except OSError, e:
sys.stderr.write('Unable to open %s for reading: %s'
% (filename, str(e)))
sys.stderr.write('%s has been left behind.\n' % filename)
sys.exit(1)
# Generate the new hash of the report
new_hash = sha1(report).hexdigest()
# Verify the hash changed
if orig_hash == new_hash:
sys.stderr.write('Template unchanged -- aborting.\n')
sys.stderr.write('%s has been left behind.\n' % filename)
sys.exit(1)
return report
def findBugsByNumOrPartial(self, bugnum_or_partial, states=None):
if not bug.BUG_PATTERN.match(bugnum_or_partial):
raise TypeError('%s is not a valid bug number or partial.'
% bugnum_or_partial)
filenames = []
if not '*' in bugnum_or_partial:
partial = bugnum_or_partial + '*'
else:
partial = bugnum_or_partial
if states == None:
states = bug.STATES
elif not isinstance(states, list):
raise TypeError('states must be a list or None')
for state in states:
if state not in bug.STATES:
raise TypeError('%s is not a valid state.' % state)
for state in states:
globbed_names = glob.glob('%s/%s/%s' % (self._siteDir, state, partial))
for name in globbed_names:
basename = os.path.basename(name)
if bug.BUG_PATTERN.match(basename):
filenames.append(name)
return filenames
def _getVersionString(self):
version = '.'.join(map(str, __version__))
return 'squish %s' % version
def generateHelp(self):
'''
Abstract method to generate the help string for this command.
'''
raise NotImplementedError, ('generateHelp must be implemented by '
'subclasses of Command')
def _setupOptParse(self):
'''
Abstract method to allow subclasses to alter the opt parser.
'''
raise NotImplementedError, ('_setupOptParse must be implemented by '
'subclasses of Command')
# Make sure the abstract Command class doesn't show up in the registered command
# list. The RegisteredCommand metaclass records every class it creates keyed by
# command_name, and the abstract base registers under the empty string.
del commands['']
| gpl-2.0 |
stonebig/bokeh | bokeh/application/handlers/tests/test___init__.py | 2 | 2005 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
import bokeh.application.handlers as bah
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names that bokeh.application.handlers is expected to export publicly;
# verify_all (below) checks this tuple against the module's actual exports.
ALL = (
    'CodeHandler',
    'DirectoryHandler',
    'FunctionHandler',
    'Handler',
    'NotebookHandler',
    'ScriptHandler',
    'ServerLifecycleHandler',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

# verify_all produces a pytest-collectable test asserting bah exports exactly
# the names listed in ALL.
Test___all__ = verify_all(bah, ALL)
| bsd-3-clause |
arthurchan1111/EventPlanner | node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 1524 | 22178 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP. Keys are mode names enabled via -d/--debug
# (see gyp_main); DebugOutput consults this mapping before printing.
debug = {}

# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
  """Print |message| (with optional %-style |args|) when |mode| debugging
  is enabled via gyp.debug. Output is prefixed with the mode name and the
  caller's file, line and function, recovered from the stack.
  """
  if 'all' in gyp.debug or mode in gyp.debug:
    ctx = ('unknown', 0, 'unknown')
    try:
      f = traceback.extract_stack(limit=2)
      if f:
        ctx = f[0][:3]
    # Best-effort: debug output must never crash the caller, so any failure
    # to recover the stack context falls back to the 'unknown' placeholder.
    except:
      pass
    if args:
      message %= args
    print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                              ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of all '.gyp' files in the current working directory."""
  return [name for name in os.listdir(os.getcwd())
          if name.endswith('.gyp')]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True, duplicate_basename_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.
  """
  # NOTE: the mutable default arguments are safe here -- default_variables
  # is copied below and includes is sliced (includes[:]) before use.
  if params is None:
    params = {}

  # A format like 'ninja-linux' carries a flavor suffix; split it off into
  # params['flavor'].
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)

  default_variables = copy.copy(default_variables)

  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format
  default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')

  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)

    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done.  Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format

  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)

  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)

  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)

  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
    'generator_filelist_paths':
        getattr(generator, 'generator_filelist_paths', None),
  }

  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          duplicate_basename_check,
                          params['parallel'], params['root_targets'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Convert an array of 'NAME=VALUE' strings into a dictionary of the pairs.
  A bare NAME (no '=') maps to True; a VALUE that parses as an integer is
  stored as an int, otherwise it is kept as a string.
  """
  result = {}
  for entry in name_value_list:
    name, sep, value = entry.partition('=')
    if not sep:
      # Bare NAME with no value: treat it as a boolean flag.
      result[name] = True
      continue
    # Prefer an integer when the value parses as one, otherwise keep the
    # raw string.
    try:
      result[name] = int(value)
    except ValueError:
      result[name] = value
  return result
def ShlexEnv(env_name):
  """Shell-split the environment variable |env_name| into a list of tokens.

  When the variable is unset (or empty), the falsy default is returned
  unchanged, matching the historical behavior of this helper.
  """
  raw = os.environ.get(env_name, [])
  if not raw:
    return raw
  return shlex.split(raw)
def FormatOpt(opt, value):
  """Join a flag and its value: '--flag=value' for long options,
  '-fvalue' (no separator) for short ones."""
  separator = '=' if opt.startswith('--') else ''
  return opt + separator + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  The |env_name|, if given, is checked in the environment and used to generate
  an initial list of options, then the options that were specified on the
  command line (given in |values|) are appended. This matches the handling of
  environment variables and command line flags where command line flags override
  the environment, while not requiring the environment to be set when the flags
  are used again.
  """
  regenerated = []
  if options.use_environment and env_name:
    for env_value in ShlexEnv(env_name):
      formatted = FormatOpt(flag, predicate(env_value))
      # De-duplicate while keeping the most recent occurrence last.
      if formatted in regenerated:
        regenerated.remove(formatted)
      regenerated.append(formatted)
  for cmdline_value in (values or []):
    regenerated.append(FormatOpt(flag, predicate(cmdline_value)))
  return regenerated
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.
  """
  def FixPath(path):
    # Normalize a path-typed flag value relative to options.depth.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path

  def Noop(value):
    # Identity predicate used for non-path flag values.
    return value

  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    value = getattr(options, name)
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))

  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """An OptionParser that records, per destination, the metadata needed to
  later regenerate an equivalent command line (see RegenerateFlags)."""

  def __init__(self):
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    Accepts the same arguments as OptionParser.add_option, plus:

    regenerate: set to False to exclude this option from regeneration.
    env_name: environment variable that supplies additional values for
        this option.
    type: 'path' tells the regenerator the values must be made relative
        to options.depth.
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      destination = kw['dest']
      opt_type = kw.get('type')
      # optparse does not know the 'path' type; it only matters to the
      # regenerator, so present it to optparse as a plain string.
      if opt_type == 'path':
        kw['type'] = 'string'
      self.__regeneratable_options[destination] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    # Expose the recorded metadata on the values object for RegenerateFlags.
    values._regeneration_metadata = self.__regeneratable_options
    return values, args
def gyp_main(args):
  """Parse the gyp command line |args|, locate and load the .gyp build files,
  and run every requested generator over them. Returns the process exit code
  (0 on success); raises GypError for user-visible failures.
  """
  my_name = os.path.basename(sys.argv[0])

  parser = RegeneratableOptionParser()
  usage = 'usage: %s [options ...] [build_file ...]'
  parser.set_usage(usage.replace('%s', '%prog'))
  parser.add_option('--build', dest='configs', action='append',
                    help='configuration for build after project generation')
  parser.add_option('--check', dest='check', action='store_true',
                    help='check format of gyp files')
  parser.add_option('--config-dir', dest='config_dir', action='store',
                    env_name='GYP_CONFIG_DIR', default=None,
                    help='The location for configuration files like '
                    'include.gypi.')
  parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
                    action='append', default=[], help='turn on a debugging '
                    'mode for debugging GYP.  Supported modes are "variables", '
                    '"includes" and "general" or "all" for all of them.')
  parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
                    env_name='GYP_DEFINES',
                    help='sets variable VAR to value VAL')
  parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
                    help='set DEPTH gyp variable to a relative path to PATH')
  parser.add_option('-f', '--format', dest='formats', action='append',
                    env_name='GYP_GENERATORS', regenerate=False,
                    help='output formats to generate')
  parser.add_option('-G', dest='generator_flags', action='append', default=[],
                    metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
                    help='sets generator flag FLAG to VAL')
  parser.add_option('--generator-output', dest='generator_output',
                    action='store', default=None, metavar='DIR', type='path',
                    env_name='GYP_GENERATOR_OUTPUT',
                    help='puts generated build files under DIR')
  parser.add_option('--ignore-environment', dest='use_environment',
                    action='store_false', default=True, regenerate=False,
                    help='do not read options from environment variables')
  parser.add_option('-I', '--include', dest='includes', action='append',
                    metavar='INCLUDE', type='path',
                    help='files to include in all loaded .gyp files')
  # --no-circular-check disables the check for circular relationships between
  # .gyp files.  These relationships should not exist, but they've only been
  # observed to be harmful with the Xcode generator.  Chromium's .gyp files
  # currently have some circular relationships on non-Mac platforms, so this
  # option allows the strict behavior to be used on Macs and the lenient
  # behavior to be used elsewhere.
  # TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
  parser.add_option('--no-circular-check', dest='circular_check',
                    action='store_false', default=True, regenerate=False,
                    help="don't check for circular relationships between files")
  # --no-duplicate-basename-check disables the check for duplicate basenames
  # in a static_library/shared_library project. Visual C++ 2008 generator
  # doesn't support this configuration. Libtool on Mac also generates warnings
  # when duplicate basenames are passed into Make generator on Mac.
  # TODO(yukawa): Remove this option when these legacy generators are
  # deprecated.
  parser.add_option('--no-duplicate-basename-check',
                    dest='duplicate_basename_check', action='store_false',
                    default=True, regenerate=False,
                    help="don't check for duplicate basenames")
  parser.add_option('--no-parallel', action='store_true', default=False,
                    help='Disable multiprocessing')
  parser.add_option('-S', '--suffix', dest='suffix', default='',
                    help='suffix to add to generated files')
  parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
                    default=None, metavar='DIR', type='path',
                    help='directory to use as the root of the source tree')
  parser.add_option('-R', '--root-target', dest='root_targets',
                    action='append', metavar='TARGET',
                    help='include only TARGET and its deep dependencies')

  options, build_files_arg = parser.parse_args(args)
  build_files = build_files_arg

  # Set up the configuration directory (defaults to ~/.gyp)
  if not options.config_dir:
    home = None
    home_dot_gyp = None
    if options.use_environment:
      home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
      if home_dot_gyp:
        home_dot_gyp = os.path.expanduser(home_dot_gyp)

    if not home_dot_gyp:
      home_vars = ['HOME']
      if sys.platform in ('cygwin', 'win32'):
        home_vars.append('USERPROFILE')
      for home_var in home_vars:
        home = os.getenv(home_var)
        if home != None:
          home_dot_gyp = os.path.join(home, '.gyp')
          if not os.path.exists(home_dot_gyp):
            home_dot_gyp = None
          else:
            break
  else:
    home_dot_gyp = os.path.expanduser(options.config_dir)

  if home_dot_gyp and not os.path.exists(home_dot_gyp):
    home_dot_gyp = None

  if not options.formats:
    # If no format was given on the command line, then check the env variable.
    generate_formats = []
    if options.use_environment:
      generate_formats = os.environ.get('GYP_GENERATORS', [])
    if generate_formats:
      generate_formats = re.split(r'[\s,]', generate_formats)
    if generate_formats:
      options.formats = generate_formats
    else:
      # Nothing in the variable, default based on platform.
      if sys.platform == 'darwin':
        options.formats = ['xcode']
      elif sys.platform in ('win32', 'cygwin'):
        options.formats = ['msvs']
      else:
        options.formats = ['make']

  if not options.generator_output and options.use_environment:
    g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
    if g_o:
      options.generator_output = g_o

  options.parallel = not options.no_parallel

  # Enable the requested debug modes in the module-level gyp.debug mapping.
  for mode in options.debug:
    gyp.debug[mode] = 1

  # Do an extra check to avoid work when we're not debugging.
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL, 'running with these options:')
    for option, value in sorted(options.__dict__.items()):
      if option[0] == '_':
        continue
      if isinstance(value, basestring):
        DebugOutput(DEBUG_GENERAL, "  %s: '%s'", option, value)
      else:
        DebugOutput(DEBUG_GENERAL, "  %s: %s", option, value)

  if not build_files:
    build_files = FindBuildFiles()
  if not build_files:
    raise GypError((usage + '\n\n%s: error: no build_file') %
                   (my_name, my_name))

  # TODO(mark): Chromium-specific hack!
  # For Chromium, the gyp "depth" variable should always be a relative path
  # to Chromium's top-level "src" directory.  If no depth variable was set
  # on the command line, try to find a "src" directory by looking at the
  # absolute path to each build file's directory.  The first "src" component
  # found will be treated as though it were the path used for --depth.
  if not options.depth:
    for build_file in build_files:
      build_file_dir = os.path.abspath(os.path.dirname(build_file))
      build_file_dir_components = build_file_dir.split(os.path.sep)
      components_len = len(build_file_dir_components)
      # Scan path components from the deepest outward, truncating the path
      # until a 'src' component is found.
      for index in xrange(components_len - 1, -1, -1):
        if build_file_dir_components[index] == 'src':
          options.depth = os.path.sep.join(build_file_dir_components)
          break
        del build_file_dir_components[index]

      # If the inner loop found something, break without advancing to another
      # build file.
      if options.depth:
        break

    if not options.depth:
      raise GypError('Could not automatically locate src directory.  This is'
                     'a temporary Chromium feature that will be removed.  Use'
                     '--depth as a workaround.')

  # If toplevel-dir is not set, we assume that depth is the root of our source
  # tree.
  if not options.toplevel_dir:
    options.toplevel_dir = options.depth

  # -D on the command line sets variable defaults - D isn't just for define,
  # it's for default.  Perhaps there should be a way to force (-F?) a
  # variable's value so that it can't be overridden by anything else.
  cmdline_default_variables = {}
  defines = []
  if options.use_environment:
    defines += ShlexEnv('GYP_DEFINES')
  if options.defines:
    defines += options.defines
  cmdline_default_variables = NameValueListToDict(defines)
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL,
                "cmdline_default_variables: %s", cmdline_default_variables)

  # Set up includes.
  includes = []

  # If ~/.gyp/include.gypi exists, it'll be forcibly included into every
  # .gyp file that's loaded, before anything else is included.
  if home_dot_gyp != None:
    default_include = os.path.join(home_dot_gyp, 'include.gypi')
    if os.path.exists(default_include):
      print 'Using overrides found in ' + default_include
      includes.append(default_include)

  # Command-line --include files come after the default include.
  if options.includes:
    includes.extend(options.includes)

  # Generator flags should be prefixed with the target generator since they
  # are global across all generator runs.
  gen_flags = []
  if options.use_environment:
    gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
  if options.generator_flags:
    gen_flags += options.generator_flags
  generator_flags = NameValueListToDict(gen_flags)
  if DEBUG_GENERAL in gyp.debug.keys():
    DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)

  # Generate all requested formats (use a set in case we got one format request
  # twice)
  for format in set(options.formats):
    params = {'options': options,
              'build_files': build_files,
              'generator_flags': generator_flags,
              'cwd': os.getcwd(),
              'build_files_arg': build_files_arg,
              'gyp_binary': sys.argv[0],
              'home_dot_gyp': home_dot_gyp,
              'parallel': options.parallel,
              'root_targets': options.root_targets,
              'target_arch': cmdline_default_variables.get('target_arch', '')}

    # Start with the default variables from the command line.
    [generator, flat_list, targets, data] = Load(
        build_files, format, cmdline_default_variables, includes, options.depth,
        params, options.check, options.circular_check,
        options.duplicate_basename_check)

    # TODO(mark): Pass |data| for now because the generator needs a list of
    # build files that came in.  In the future, maybe it should just accept
    # a list, and not the whole data dict.
    # NOTE: flat_list is the flattened dependency graph specifying the order
    # that targets may be built.  Build systems that operate serially or that
    # need to have dependencies defined before dependents reference them should
    # generate targets in the order specified in flat_list.
    generator.GenerateOutput(flat_list, targets, data, params)

    if options.configs:
      valid_configs = targets[flat_list[0]]['configurations'].keys()
      for conf in options.configs:
        if conf not in valid_configs:
          raise GypError('Invalid config specified via --build: %s' % conf)
      generator.PerformBuild(data, options.configs, params)

  # Done
  return 0
def main(args):
  """Top-level entry point: run gyp_main, mapping GypError to a message on
  stderr and exit code 1."""
  try:
    return gyp_main(args)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  """Console-script entry point: run main() on the process arguments."""
  return main(sys.argv[1:])

if __name__ == '__main__':
  sys.exit(script_main())
| mit |
sebadiaz/rethinkdb | test/rql_test/connections/http_support/werkzeug/serving.py | 145 | 27668 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get an
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import time
import signal
import subprocess
try:
import thread
except ImportError:
import _thread as thread
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import iteritems, PY2, reraise, text_type, \
wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError, BadRequest
class WSGIRequestHandler(BaseHTTPRequestHandler, object):

    """A request handler that implements WSGI dispatching."""

    @property
    def server_version(self):
        # Value reported in the ``Server`` response header.
        return 'Werkzeug/' + werkzeug.__version__

    def make_environ(self):
        """Build and return the WSGI environ dict for the current request."""
        request_url = url_parse(self.path)

        def shutdown_server():
            # Closure handed to applications under
            # 'werkzeug.server.shutdown' so they can stop the dev server.
            self.server.shutdown_signal = True

        url_scheme = self.server.ssl_context is None and 'http' or 'https'
        path_info = url_unquote(request_url.path)

        environ = {
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': url_scheme,
            'wsgi.input': self.rfile,
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': self.server.multithread,
            'wsgi.multiprocess': self.server.multiprocess,
            'wsgi.run_once': False,
            'werkzeug.server.shutdown':
                shutdown_server,
            'SERVER_SOFTWARE': self.server_version,
            'REQUEST_METHOD': self.command,
            'SCRIPT_NAME': '',
            'PATH_INFO': wsgi_encoding_dance(path_info),
            'QUERY_STRING': wsgi_encoding_dance(request_url.query),
            'CONTENT_TYPE': self.headers.get('Content-Type', ''),
            'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
            'REMOTE_ADDR': self.client_address[0],
            'REMOTE_PORT': self.client_address[1],
            'SERVER_NAME': self.server.server_address[0],
            'SERVER_PORT': str(self.server.server_address[1]),
            'SERVER_PROTOCOL': self.request_version
        }

        # Remaining HTTP headers become HTTP_* keys; content type/length
        # are already present under their CGI names, so skip those.
        for key, value in self.headers.items():
            key = 'HTTP_' + key.upper().replace('-', '_')
            if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                environ[key] = value

        if request_url.netloc:
            environ['HTTP_HOST'] = request_url.netloc

        return environ

    def run_wsgi(self):
        """Run the WSGI application for one request and send its response."""
        if self.headers.get('Expect', '').lower().strip() == '100-continue':
            self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')

        environ = self.make_environ()
        # headers_set holds [status, headers] once start_response ran;
        # headers_sent gets the same content once they hit the wire.
        headers_set = []
        headers_sent = []

        def write(data):
            assert headers_set, 'write() before start_response'
            if not headers_sent:
                # First body write: flush status line and headers first.
                status, response_headers = headers_sent[:] = headers_set
                try:
                    code, msg = status.split(None, 1)
                except ValueError:
                    code, msg = status, ""
                self.send_response(int(code), msg)
                header_keys = set()
                for key, value in response_headers:
                    self.send_header(key, value)
                    key = key.lower()
                    header_keys.add(key)
                if 'content-length' not in header_keys:
                    # Without a length we cannot keep the connection alive.
                    self.close_connection = True
                    self.send_header('Connection', 'close')
                if 'server' not in header_keys:
                    self.send_header('Server', self.version_string())
                if 'date' not in header_keys:
                    self.send_header('Date', self.date_time_string())
                self.end_headers()

            assert type(data) is bytes, 'applications must write bytes'
            self.wfile.write(data)
            self.wfile.flush()

        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                try:
                    if headers_sent:
                        # Headers already on the wire: re-raise so the
                        # error propagates instead of producing a broken
                        # response (per PEP 3333).
                        reraise(*exc_info)
                finally:
                    exc_info = None
            elif headers_set:
                raise AssertionError('Headers already set')
            headers_set[:] = [status, response_headers]
            return write

        def execute(app):
            application_iter = app(environ, start_response)
            try:
                for data in application_iter:
                    write(data)
                if not headers_sent:
                    # Force headers out even for an empty response body.
                    write(b'')
            finally:
                if hasattr(application_iter, 'close'):
                    application_iter.close()
                application_iter = None

        try:
            execute(self.server.app)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e, environ)
        except Exception:
            if self.server.passthrough_errors:
                raise
            from werkzeug.debug.tbtools import get_current_traceback
            traceback = get_current_traceback(ignore_system_exceptions=True)
            try:
                # if we haven't yet sent the headers but they are set
                # we roll back to be able to set them again.
                if not headers_sent:
                    del headers_set[:]
                execute(InternalServerError())
            except Exception:
                pass
            self.server.log('error', 'Error on request:\n%s',
                            traceback.plaintext)

    def handle(self):
        """Handles a request ignoring dropped connections."""
        rv = None
        try:
            rv = BaseHTTPRequestHandler.handle(self)
        except (socket.error, socket.timeout) as e:
            self.connection_dropped(e)
        except Exception:
            # Swallow SSL handshake noise only; everything else propagates.
            if self.server.ssl_context is None or not is_ssl_error():
                raise
        if self.server.shutdown_signal:
            self.initiate_shutdown()
        return rv

    def initiate_shutdown(self):
        """A horrible, horrible way to kill the server for Python 2.6 and
        later.  It's the best we can do.
        """
        # Windows does not provide SIGKILL, go with SIGTERM then.
        sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
        # reloader active
        if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
            os.kill(os.getpid(), sig)
        # python 2.7
        self.server._BaseServer__shutdown_request = True
        # python 2.6
        self.server._BaseServer__serving = False

    def connection_dropped(self, error, environ=None):
        """Called if the connection was closed by the client.  By default
        nothing happens.
        """

    def handle_one_request(self):
        """Handle a single HTTP request."""
        self.raw_requestline = self.rfile.readline()
        if not self.raw_requestline:
            self.close_connection = 1
        elif self.parse_request():
            return self.run_wsgi()

    def send_response(self, code, message=None):
        """Send the response header and log the response code."""
        self.log_request(code)
        if message is None:
            message = code in self.responses and self.responses[code][0] or ''
        if self.request_version != 'HTTP/0.9':
            hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
            self.wfile.write(hdr.encode('ascii'))

    def version_string(self):
        # Strip the trailing whitespace left by the base implementation.
        return BaseHTTPRequestHandler.version_string(self).strip()

    def address_string(self):
        # Avoid the reverse DNS lookup done by the base class.
        return self.client_address[0]

    def log_request(self, code='-', size='-'):
        self.log('info', '"%s" %s %s', self.requestline, code, size)

    def log_error(self, *args):
        self.log('error', *args)

    def log_message(self, format, *args):
        self.log('info', format, *args)

    def log(self, type, message, *args):
        """Write a common-log-style line through werkzeug's _log helper."""
        _log(type, '%s - - [%s] %s\n' % (self.address_string(),
                                         self.log_date_time_string(),
                                         message % args))
#: backwards compatible name if someone is subclassing it
#: (old code imports/extends ``BaseRequestHandler``).
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
    """Generate a throw-away self-signed certificate/private-key pair.

    :param cn: optional common name for the certificate subject; defaults
               to the wildcard ``'*'``.
    :return: an ``(OpenSSL.crypto.X509, OpenSSL.crypto.PKey)`` tuple.
    """
    from random import random
    from OpenSSL import crypto

    # pretty damn sure that this is not actually accepted by anyone
    if cn is None:
        cn = '*'

    cert = crypto.X509()
    # BUGFIX: ``sys.maxint`` does not exist on Python 3 and this module
    # otherwise supports Python 3; ``sys.maxsize`` exists on both and
    # equals ``maxint`` on CPython 2, so behavior is unchanged there.
    cert.set_serial_number(int(random() * sys.maxsize))
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)

    subject = cert.get_subject()
    subject.CN = cn
    subject.O = 'Dummy Certificate'

    issuer = cert.get_issuer()
    issuer.CN = 'Untrusted Authority'
    issuer.O = 'Self-Signed'

    pkey = crypto.PKey()
    pkey.generate_key(crypto.TYPE_RSA, 768)
    cert.set_pubkey(pkey)
    # NOTE(review): md5 and a 768-bit key are weak; acceptable only for a
    # throw-away development certificate.
    cert.sign(pkey, 'md5')

    return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
    """Creates an SSL key for development.  This should be used instead of
    the ``'adhoc'`` key which generates a new cert on each server start.
    It accepts a path for where it should store the key and cert and
    either a host or CN.  If a host is given it will use the CN
    ``*.host/CN=host``.

    For more information see :func:`run_simple`.

    .. versionadded:: 0.9

    :param base_path: the path to the certificate and key.  The extension
                      ``.crt`` is added for the certificate, ``.key`` is
                      added for the key.
    :param host: the name of the host.  This can be used as an alternative
                 for the `cn`.
    :param cn: the `CN` to use.
    """
    from OpenSSL import crypto
    if host is not None:
        cn = '*.%s/CN=%s' % (host, host)
    cert, pkey = generate_adhoc_ssl_pair(cn=cn)

    cert_file = base_path + '.crt'
    pkey_file = base_path + '.key'

    # BUGFIX: dump_certificate()/dump_privatekey() return PEM *bytes*, so
    # the files must be opened in binary mode to work on Python 3 (binary
    # mode is also correct for PEM data on Python 2).
    with open(cert_file, 'wb') as f:
        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(pkey_file, 'wb') as f:
        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))

    return cert_file, pkey_file
def generate_adhoc_ssl_context():
    """Create a fresh SSL context backed by a throw-away self-signed
    certificate/key pair, for use by the development server.
    """
    from OpenSSL import SSL
    certificate, private_key = generate_adhoc_ssl_pair()
    context = SSL.Context(SSL.SSLv23_METHOD)
    context.use_privatekey(private_key)
    context.use_certificate(certificate)
    return context
def load_ssl_context(cert_file, pkey_file):
    """Build an SSL context from a certificate file and a private key file."""
    from OpenSSL import SSL
    context = SSL.Context(SSL.SSLv23_METHOD)
    context.use_certificate_file(cert_file)
    context.use_privatekey_file(pkey_file)
    return context
def is_ssl_error(error=None):
    """Check whether *error* (or the currently handled exception when
    *error* is ``None``) is an OpenSSL error.
    """
    from OpenSSL import SSL
    if error is None:
        error = sys.exc_info()[1]
    return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
def shutdown(self, arg=None):
try:
self._con.shutdown()
except Exception:
pass
def select_ip_version(host, port):
    """Return the socket address family (AF_INET or AF_INET6) suitable
    for binding to *host*.
    """
    # IPv6 is picked only for explicit IPv6 literals (they contain ':').
    # A getaddrinfo()-based probe was left disabled here historically
    # because of inconsistent behaviour across operating systems and
    # ipv6 implementations.
    if ':' in host and hasattr(socket, 'AF_INET6'):
        return socket.AF_INET6
    return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):

    """Simple single-threaded, single-process WSGI server."""

    #: advertised to applications via the WSGI environ
    multithread = False
    multiprocess = False
    #: backlog passed to socket.listen() by the socketserver machinery
    request_queue_size = 128

    def __init__(self, host, port, app, handler=None,
                 passthrough_errors=False, ssl_context=None):
        """
        :param app: the WSGI application to serve.
        :param handler: request handler class; defaults to
                        :class:`WSGIRequestHandler`.
        :param passthrough_errors: if true, application errors propagate
                                   (see :meth:`handle_error`).
        :param ssl_context: ``None`` for plain HTTP, an OpenSSL context,
                            a ``(cert_file, pkey_file)`` tuple, or the
                            string ``'adhoc'`` for a throw-away cert.
        """
        if handler is None:
            handler = WSGIRequestHandler
        self.address_family = select_ip_version(host, port)
        HTTPServer.__init__(self, (host, int(port)), handler)
        self.app = app
        self.passthrough_errors = passthrough_errors
        self.shutdown_signal = False

        if ssl_context is not None:
            try:
                from OpenSSL import tsafe
            except ImportError:
                raise TypeError('SSL is not available if the OpenSSL '
                                'library is not installed.')
            if isinstance(ssl_context, tuple):
                ssl_context = load_ssl_context(*ssl_context)
            if ssl_context == 'adhoc':
                ssl_context = generate_adhoc_ssl_context()
            # Wrap the listening socket in a thread-safe SSL connection.
            self.socket = tsafe.Connection(ssl_context, self.socket)
            self.ssl_context = ssl_context
        else:
            self.ssl_context = None

    def log(self, type, message, *args):
        _log(type, message, *args)

    def serve_forever(self):
        """Serve requests until interrupted; Ctrl-C exits quietly."""
        self.shutdown_signal = False
        try:
            HTTPServer.serve_forever(self)
        except KeyboardInterrupt:
            pass

    def handle_error(self, request, client_address):
        # Honour passthrough_errors instead of always swallowing errors.
        if self.passthrough_errors:
            raise
        else:
            return HTTPServer.handle_error(self, request, client_address)

    def get_request(self):
        con, info = self.socket.accept()
        if self.ssl_context is not None:
            # Give the accepted SSL connection a usable makefile().
            con = _SSLConnectionFix(con)
        return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):

    """A WSGI server that does threading."""
    #: each request runs in its own thread; reported via wsgi.multithread
    multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):

    """A WSGI server that does forking."""
    #: each request runs in a forked child; reported via wsgi.multiprocess
    multiprocess = True

    def __init__(self, host, port, app, processes=40, handler=None,
                 passthrough_errors=False, ssl_context=None):
        """Like :class:`BaseWSGIServer`, plus *processes*: the maximum
        number of concurrently forked children (ForkingMixIn's
        ``max_children``).
        """
        BaseWSGIServer.__init__(self, host, port, app, handler,
                                passthrough_errors, ssl_context)
        self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
                request_handler=None, passthrough_errors=False,
                ssl_context=None):
    """Create a new server instance that is either threaded, or forks
    or just processes one request after another.
    """
    # Threading and forking are mutually exclusive.
    if threaded and processes > 1:
        raise ValueError("cannot have a multithreaded and "
                         "multi process server.")
    if threaded:
        return ThreadedWSGIServer(host, port, app, request_handler,
                                  passthrough_errors, ssl_context)
    if processes > 1:
        return ForkingWSGIServer(host, port, app, processes, request_handler,
                                 passthrough_errors, ssl_context)
    return BaseWSGIServer(host, port, app, request_handler,
                          passthrough_errors, ssl_context)
def _iter_module_files():
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _reloader_stat_loop(extra_files=None, interval=1):
    """Poll the mtimes of all loaded modules (plus *extra_files*) and
    ``sys.exit(3)`` as soon as any of them changes; run from the main
    thread this forces the other threads to exit.

    Copyright notice.  This function is based on the autoreload.py from
    the CherryPy trac which originated from WSGIKit which is now dead.

    :param extra_files: a list of additional files it should watch.
    """
    from itertools import chain
    last_mtimes = {}
    while 1:
        for name in chain(_iter_module_files(), extra_files or ()):
            try:
                current = os.stat(name).st_mtime
            except OSError:
                # File vanished or is unreadable: just skip it this round.
                continue
            previous = last_mtimes.get(name)
            if previous is None:
                # First sighting: remember the baseline mtime.
                last_mtimes[name] = current
            elif current > previous:
                _log('info', ' * Detected change in %r, reloading' % name)
                sys.exit(3)
        time.sleep(interval)
def _reloader_inotify(extra_files=None, interval=None):
    """Reloader driven by pyinotify file-system events instead of polling
    (not currently selected; see the ``reloader_loop`` assignment below).
    """
    # Mutated by inotify loop when changes occur.
    changed = [False]

    # Setup inotify watches
    from pyinotify import WatchManager, Notifier

    # this API changed at one point, support both
    try:
        from pyinotify import EventsCodes as ec
        ec.IN_ATTRIB
    except (ImportError, AttributeError):
        import pyinotify as ec

    wm = WatchManager()
    mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB

    def signal_changed(event):
        # Record only the first change; later events are redundant.
        if changed[0]:
            return
        _log('info', ' * Detected change in %r, reloading' % event.path)
        changed[:] = [True]

    for fname in extra_files or ():
        wm.add_watch(fname, mask, signal_changed)

    # ... And now we wait...
    notif = Notifier(wm)
    try:
        while not changed[0]:
            # always reiterate through sys.modules, adding them
            for fname in _iter_module_files():
                wm.add_watch(fname, mask, signal_changed)
            notif.process_events()
            if notif.check_events(timeout=interval):
                notif.read_events()
            # TODO Set timeout to something small and check parent liveliness
    finally:
        notif.stop()
    # Exit code 3 tells restart_with_reloader() to spawn a fresh process.
    sys.exit(3)
# currently we always use the stat loop reloader for the simple reason
# that the inotify one does not respond to added files properly.  Also
# it's quite buggy and the API is a mess.
#: the reloader implementation used by run_with_reloader()
reloader_loop = _reloader_stat_loop
def restart_with_reloader():
    """Re-run this script in a child interpreter (with the reloader
    thread enabled via WERKZEUG_RUN_MAIN) until the child exits with a
    code other than 3; return that exit code.
    """
    while True:
        _log('info', ' * Restarting with reloader')
        command = [sys.executable] + sys.argv
        child_env = os.environ.copy()
        child_env['WERKZEUG_RUN_MAIN'] = 'true'

        # a weird bug on windows. sometimes unicode strings end up in the
        # environment and subprocess.call does not like this, encode them
        # to latin1 and continue.
        if os.name == 'nt' and PY2:
            for key, value in iteritems(child_env):
                if isinstance(value, text_type):
                    child_env[key] = value.encode('iso-8859-1')

        exit_code = subprocess.call(command, env=child_env)
        if exit_code != 3:
            # Exit code 3 means "source changed, restart"; anything else
            # is a real termination.
            return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
    """Run *main_func* in a child interpreter that is restarted whenever
    a watched source file changes.
    """
    import signal
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))

    # In the child process (marked by WERKZEUG_RUN_MAIN) run the actual
    # application in a background thread and watch files in this one.
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
        thread.start_new_thread(main_func, ())
        try:
            reloader_loop(extra_files, interval)
        except KeyboardInterrupt:
            return
    # In the parent process keep respawning the child until it exits.
    try:
        sys.exit(restart_with_reloader())
    except KeyboardInterrupt:
        pass
def run_simple(hostname, port, application, use_reloader=False,
               use_debugger=False, use_evalex=True,
               extra_files=None, reloader_interval=1, threaded=False,
               processes=1, request_handler=None, static_files=None,
               passthrough_errors=False, ssl_context=None):
    """Start an application using wsgiref and with an optional reloader.  This
    wraps `wsgiref` to fix the wrong default reporting of the multithreaded
    WSGI variable and adds optional multithreading and fork support.

    This function has a command-line interface too::

        python -m werkzeug.serving --help

    .. versionadded:: 0.5
       `static_files` was added to simplify serving of static files as well
       as `passthrough_errors`.

    .. versionadded:: 0.6
       support for SSL was added.

    .. versionadded:: 0.8
       Added support for automatically loading a SSL context from certificate
       file and private key.

    .. versionadded:: 0.9
       Added command-line interface.

    :param hostname: The host for the application.  eg: ``'localhost'``
    :param port: The port for the server.  eg: ``8080``
    :param application: the WSGI application to execute
    :param use_reloader: should the server automatically restart the python
                         process if modules were changed?
    :param use_debugger: should the werkzeug debugging system be used?
    :param use_evalex: should the exception evaluation feature be enabled?
    :param extra_files: a list of files the reloader should watch
                        additionally to the modules.  For example configuration
                        files.
    :param reloader_interval: the interval for the reloader in seconds.
    :param threaded: should the process handle each request in a separate
                     thread?
    :param processes: if greater than 1 then handle each request in a new process
                      up to this maximum number of concurrent processes.
    :param request_handler: optional parameter that can be used to replace
                            the default one.  You can use this to replace it
                            with a different
                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
                            subclass.
    :param static_files: a dict of paths for static files.  This works exactly
                         like :class:`SharedDataMiddleware`, it's actually
                         just wrapping the application in that middleware before
                         serving.
    :param passthrough_errors: set this to `True` to disable the error catching.
                               This means that the server will die on errors but
                               it can be useful to hook debuggers in (pdb etc.)
    :param ssl_context: an SSL context for the connection. Either an OpenSSL
                        context, a tuple in the form ``(cert_file, pkey_file)``,
                        the string ``'adhoc'`` if the server should
                        automatically create one, or `None` to disable SSL
                        (which is the default).
    """
    # Optional middleware wrapping: debugger first, then static files.
    if use_debugger:
        from werkzeug.debug import DebuggedApplication
        application = DebuggedApplication(application, use_evalex)
    if static_files:
        from werkzeug.wsgi import SharedDataMiddleware
        application = SharedDataMiddleware(application, static_files)

    def inner():
        # Build the server and block serving requests.
        make_server(hostname, port, application, threaded,
                    processes, request_handler,
                    passthrough_errors, ssl_context).serve_forever()

    # Only the initial process (not the reloader child) prints the banner.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        display_hostname = hostname != '*' and hostname or 'localhost'
        if ':' in display_hostname:
            display_hostname = '[%s]' % display_hostname
        _log('info', ' * Running on %s://%s:%d/', ssl_context is None
             and 'http' or 'https', display_hostname, port)
    if use_reloader:
        # Create and destroy a socket so that any exceptions are raised before
        # we spawn a separate Python interpreter and lose this ability.
        address_family = select_ip_version(hostname, port)
        test_socket = socket.socket(address_family, socket.SOCK_STREAM)
        test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        test_socket.bind((hostname, port))
        test_socket.close()
        run_with_reloader(inner, extra_files, reloader_interval)
    else:
        inner()
def main():
    '''A simple command-line interface for :py:func:`run_simple`.'''

    # in contrast to argparse, this works at least under Python < 2.7
    import optparse
    from werkzeug.utils import import_string

    parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
    parser.add_option('-b', '--bind', dest='address',
                      help='The hostname:port the app should listen on.')
    parser.add_option('-d', '--debug', dest='use_debugger',
                      action='store_true', default=False,
                      help='Use Werkzeug\'s debugger.')
    parser.add_option('-r', '--reload', dest='use_reloader',
                      action='store_true', default=False,
                      help='Reload Python process if modules change.')
    options, args = parser.parse_args()

    # Split an optional "host:port" pair; port stays None when absent.
    hostname, port = None, None
    if options.address:
        address = options.address.split(':')
        hostname = address[0]
        if len(address) > 1:
            port = address[1]

    if len(args) != 1:
        sys.stdout.write('No application supplied, or too much. See --help\n')
        sys.exit(1)
    app = import_string(args[0])

    run_simple(
        hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
        application=app, use_reloader=options.use_reloader,
        use_debugger=options.use_debugger
    )

if __name__ == '__main__':
    main()
| agpl-3.0 |
mfherbst/spack | var/spack/repos/builtin/packages/libuv/package.py | 5 | 1850 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libuv(AutotoolsPackage):
    """Multi-platform library with a focus on asynchronous IO"""
    homepage = "http://libuv.org"
    url = "https://github.com/libuv/libuv/archive/v1.9.0.tar.gz"

    version('1.9.0', '14737f9c76123a19a290dabb7d1cd04c')

    # autogen.sh regenerates the build system, so the GNU autotools must
    # be present at build time.
    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('libtool', type='build')

    def autoreconf(self, spec, prefix):
        """Run libuv's autogen.sh in place of plain autoreconf.

        This is needed because autogen.sh generates on-the-fly
        an m4 macro needed during configuration.
        """
        bash = which("bash")
        bash('autogen.sh')
| lgpl-2.1 |
capecchi/capecchi.github.io | posts/AmericaByTrain/length_builder.py | 1 | 2840 | #pep : current path endpoint (for finding next segment(s))
#ipath : array of indices on this path
#iredund : if available, indices to ignore (from previous juncture analyses)
#pl : path length to reach
def main(pep, ipath, iredund, pl, index, strt, end, cid, rb):
import numpy as np
import length_builder
#IDEA: at junction, scan all paths out for certain distance (100m?)
# and if any reconnect, add one path to iarr to eliminate quick doubles
#IF LENGTH REACHED
if len(ipath) == pl:
#print('path finished, length = ',len(ipath))
paths = np.load(rb+'path_temp.npy')
if len(paths) == 0: paths = [ipath]
else: paths = np.append(paths,[ipath],axis=0)
np.save(rb+'path_temp',paths)
#If not, keep going
else:
building = 1
while building:
strt_dist = [(pep[0]-p[0])**2+(pep[1]-p[1])**2 for p in strt]
end_dist = [(pep[0]-p[0])**2+(pep[1]-p[1])**2 for p in end]
isps = np.array([],dtype=int)
ieps = np.array([],dtype=int)
for i in index:
if strt_dist[i] < 1.e-25: isps = np.append(isps,i)
if end_dist[i] < 1.e-25: ieps = np.append(ieps,i)
iredund = np.load(rb+'redundant.npy')
if len(isps) > 0:
isps2 = np.array([],dtype=int)
for i in isps:
if i not in iredund and i not in ipath: isps2 = np.append(isps2,i)
isps = isps2
if len(ieps) > 0:
ieps2 = np.array([],dtype=int)
for i in ieps:
if i not in iredund and i not in ipath: ieps2 = np.append(ieps2,i)
ieps = ieps2
isegs = np.append(isps,-(ieps+1))
npts = len(isegs) #number of segments found
if npts == 0: #end of route found
building = 0
if npts == 1: #no bifurcation
ii = isegs[0]
if ii >= 0: #was a start-pt
pep = end[ii]
ipath = np.append(ipath,ii)
else: #was an end-pt
pep = strt[abs(ii)-1]
ipath = np.append(ipath,abs(ii)-1)
if len(ipath) == pl:
building = 0
length_builder.main(pep,ipath,iredund,pl,index,strt,end,cid,rb)
if npts > 1: #track bifurcation
building = 0
for ii in isegs:
if ii >= 0:
pep = end[ii]
ipath2 = np.append(ipath,ii)
else:
pep = strt[abs(ii)-1]
ipath2 = np.append(ipath,abs(ii)-1)
length_builder.main(pep,ipath2,iredund,pl,index,strt,end,cid,rb)
| mit |
gdimitris/ChessPuzzler | Virtual_Environment/lib/python2.7/site-packages/sqlalchemy/util/deprecations.py | 60 | 4403 | # util/deprecations.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
from .. import exc
import warnings
import re
from .langhelpers import decorator
def warn_deprecated(msg, stacklevel=3):
    """Emit *msg* as a :class:`~sqlalchemy.exc.SADeprecationWarning`."""
    warnings.warn(msg, category=exc.SADeprecationWarning,
                  stacklevel=stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
    """Emit *msg* as a :class:`~sqlalchemy.exc.SAPendingDeprecationWarning`."""
    warnings.warn(msg, category=exc.SAPendingDeprecationWarning,
                  stacklevel=stacklevel)
def deprecated(version, message=None, add_deprecation_to_docstring=True):
    """Decorator factory: mark a function as deprecated.

    Each call of the wrapped function emits a
    :class:`.SADeprecationWarning`.

    :param version: version in which the deprecation took effect; used
      for the docstring directive.

    :param message: warning text; a sensible default mentioning the
      function name is used when omitted.

    :param add_deprecation_to_docstring: when True (default), a
      ``.. deprecated::`` header is injected into the wrapped
      function's docstring.
    """
    header = None
    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s %s" % (version, message or '')

    if message is None:
        message = "Call to deprecated function %(func)s"

    def decorate(fn):
        return _decorate_with_warning(
            fn, exc.SADeprecationWarning,
            message % {'func': fn.__name__}, header)
    return decorate
def pending_deprecation(version, message=None,
                        add_deprecation_to_docstring=True):
    """Decorator factory: mark a function as pending deprecation.

    Each call of the wrapped function emits a
    :class:`.SAPendingDeprecationWarning`.

    :param version: approximate future version at which the pending
      deprecation will become a real deprecation; used only in the
      docstring directive, not in messaging.

    :param message: warning text; a sensible default mentioning the
      function name is used when omitted.

    :param add_deprecation_to_docstring: when True (default), a
      ``.. deprecated:: (pending)`` header is injected into the wrapped
      function's docstring.
    """
    header = None
    if add_deprecation_to_docstring:
        header = ".. deprecated:: %s (pending) %s" % (version, message or '')

    if message is None:
        message = "Call to deprecated function %(func)s"

    def decorate(fn):
        return _decorate_with_warning(
            fn, exc.SAPendingDeprecationWarning,
            message % {'func': fn.__name__}, header)
    return decorate
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text)
def _decorate_with_warning(func, wtype, message, docstring_header=None):
    """Wrap *func* so each call emits a *wtype* warning with *message*,
    optionally injecting *docstring_header* into its docstring.
    """
    message = _sanitize_restructured_text(message)

    @decorator
    def warned(fn, *args, **kwargs):
        warnings.warn(message, wtype, stacklevel=3)
        return fn(*args, **kwargs)

    doc = func.__doc__ or ''
    if docstring_header is not None:
        docstring_header %= {'func': func.__name__}
        # Insert the deprecation directive after the docstring summary.
        doc = inject_docstring_text(doc, docstring_header, 1)

    wrapped = warned(func)
    wrapped.__doc__ = doc
    return wrapped
import textwrap
def _dedent_docstring(text):
split_text = text.split("\n", 1)
if len(split_text) == 1:
return text
else:
firstline, remaining = split_text
if not firstline.startswith(" "):
return firstline + "\n" + textwrap.dedent(remaining)
else:
return textwrap.dedent(text)
def inject_docstring_text(doctext, injecttext, pos):
    """Insert *injecttext* into *doctext* at the *pos*-th blank line
    (clamped to the last blank line; position 0 is the very top).
    """
    doctext = _dedent_docstring(doctext or "")
    lines = doctext.split('\n')

    inject_lines = textwrap.dedent(injecttext).split("\n")
    if inject_lines[0]:
        # Keep a separating blank line before the injected text.
        inject_lines.insert(0, "")

    blank_positions = [idx for idx, line in enumerate(lines)
                       if not line.strip()]
    blank_positions.insert(0, 0)
    where = blank_positions[min(pos, len(blank_positions) - 1)]

    return "\n".join(lines[:where] + inject_lines + lines[where:])
| mit |
tlemoult/spectroDb | spectrum/RR/get-spc-RR.py | 1 | 3788 | import datetime
import time
import json
from datetime import datetime
import sys
import os
import shutil
import glob
import libsdb.dbSpectro as dbSpectro
import libsdb.cds as cds # mes modules
from modEphem import *
def createPath(racine, path):
    """Create the directory chain *path* (slash separated) under *racine*.

    Every intermediate directory is created when missing, so the function
    is safe to call repeatedly.

    :param racine: root directory (must already exist).
    :param path: relative path such as ``"a/b/c"``.
    :return: tuple ``(deepest_absolute_path, "/relative/path")``.
    """
    dirs = path.split("/")
    current = racine   # absolute path being built
    partDir = ''       # relative path being built
    for d in dirs:
        current = current + "/" + d
        partDir = partDir + "/" + d
        # BUGFIX: the original used ``os.stat`` inside a bare ``except:``
        # that swallowed *every* error (permission problems included)
        # before blindly calling mkdir.  Test explicitly instead; a real
        # mkdir failure now surfaces as an OSError.
        if not os.path.isdir(current):
            os.mkdir(current)
    return (current, partDir)
print("Robot extrait les fichiers spectres RR Lyr traite")

objectId = 226 # RR Lyr in my data Base

# Pulsation-phase window to extract (fraction of the cycle).
#phiMin=0.2
#phiMax=0.4
phiMin = 0.85
phiMax = 0.96
# When True, one output sub-directory is created per pulsation cycle.
directoryPerCycle = True

if len(sys.argv) != 7:
    print(" ordre 34 (Ha), nomé Ha")
    print(" ordre 38 (He 5876), nomé He5876")
    print(" ordre 48 (He 4686), nomé He4686")
    print("")
    print("nombre d'argument incorrect")
    print("utiliser: ")
    print(" python get-spc-RR.py orderNo 34 ./spectrum/RRlyr/34/ date 2019-12-31 2020-12-31")
    exit(1)

configFilePath = "../../config/config.json"
db = dbSpectro.init_connection(configFilePath)
print("len(sys.argv)=", len(sys.argv))

if len(sys.argv) == 7 and sys.argv[1] == 'orderNo' and sys.argv[4] == 'date':
    orderNo = sys.argv[2]
    destPath = sys.argv[3]
    dateStart = sys.argv[5]
    dateStop = sys.argv[6]
    fileList = dbSpectro.getFilesSpcPerObjIdDate(db, objectId, orderNo, dateStart, dateStop)

    json_text = open(configFilePath).read()
    config = json.loads(json_text)
    PathBaseSpectro = config['path']['archive'] + '/archive'

    # Map the spectral order number to a human-readable directory name.
    if orderNo == '34':
        orderDirPath = "Ha"
    elif orderNo == '38':
        orderDirPath = "He5876_Na"
    elif orderNo == '48':
        orderDirPath = "He4686"
    else:
        orderDirPath = str(orderNo)

    print(f" objectId = {objectId}")
    print(f" orderNo = {orderNo}")
    print(f" phi = [{phiMin} ... {phiMax}]")
    print(f" dateStart = {dateStart} dateStop = {dateStop}")
    print((" dossier source = "+ PathBaseSpectro))
    print((" dossier destination = "+ destPath))
    if directoryPerCycle:
        print(f" sous dossier par cycle: orderDirPath = {orderDirPath}")
    else:
        print(f" tous les cycles dans le meme dossier")

    # Author note (French): "I usually take 0.87 to 0.95."
    """
    Je prends usuellement 0.87 a 0.95.
    """
    # Group the matching spectra by pulsation-cycle number.
    i = 0
    pulseDict = {}
    for f in fileList:
        fileSource = PathBaseSpectro + f[0] + '/' + f[1]
        fileDest = destPath + f[1]
        dateUTC = f[2]
        jd = Time(dateUTC, scale='utc').jd
        phi = phase_RR_jd(jd)
        psi = phase_RR_blasko_jd(jd)
        # NOTE(review): looks like a cycle counter based on a 0.566793 d
        # period (RR Lyr?) — confirm against modEphem.
        nMax = int(jd/0.566793)-4333000
        if phi > phiMin and phi < phiMax:
            i = i + 1
            # print(f"n = {nMax}, jd = {jd}, phi = {phi}, fileSource = {fileSource} --> fileDest = {fileDest}")
            if not str(nMax) in pulseDict:
                pathDirDatePsi = str(dateUTC)[0:10].replace("-","")+"_TLE_RC36_psi"+formatPhase(psi)[2:4]
                pulseDict[str(nMax)] = {}
                if directoryPerCycle:
                    pulseDict[str(nMax)]['pathDir'] = pathDirDatePsi+"/"+orderDirPath
                else:
                    pulseDict[str(nMax)]['pathDir'] = orderDirPath
                pulseDict[str(nMax)]['files'] = []
            pulseDict[str(nMax)]['files'].append({"dirSrc":f[0] , "file": f[1] , "phi": phi})

    print((str(i) + " files extracted"))
    db.close()

    # Copy each selected spectrum into its cycle directory, tagging the
    # destination file name with the phase.
    for n in pulseDict.keys():
        print(f"n = {n}, pathDir = {pulseDict[n]['pathDir']}")
        createPath(destPath, pulseDict[n]['pathDir'])
        for oneFile in pulseDict[n]['files']:
            print(f" dirSrc = {oneFile['dirSrc']} file = {oneFile['file']}")
            fileSource = PathBaseSpectro + oneFile['dirSrc'] + '/' + oneFile['file']
            fileDest = destPath + '/' + pulseDict[n]['pathDir'] + '/' + oneFile['file'].replace(".fits","_phi"+formatPhase(oneFile['phi'])[2:4]+".fit")
            print(f"fileSource = {fileSource} --> fileDest = {fileDest}")
            shutil.copy(fileSource, fileDest)
nick-thompson/servo | tests/wpt/web-platform-tests/webdriver/element_state/method_test.py | 65 | 3455 | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class GetElementAttributeTest(base_test.WebDriverBaseTest):
    """Checks for WebDriver's Get Element Attribute command."""

    def _find_on_page(self, page, selector):
        # Navigate to the given fixture page and return the first element
        # matching the CSS selector.
        self.driver.get(self.webserver.where_is(page))
        return self.driver.find_element_by_css(selector)

    def test_get_element_attribute(self):
        el = self._find_on_page("element_state/res/element-with-id-attribute.html", "div")
        self.assertEqual("myId", el.get_attribute("id"))

    def test_style_attribute(self):
        el = self._find_on_page("element_state/res/element-with-style-attribute.html", "div")
        expected_style = """
        font-family: \"Gill Sans Extrabold\",Helvetica,sans-serif;
        line-height: 1.2; font-weight: bold;
        """
        self.assertEqual(expected_style, el.get_attribute("style"))

    def test_color_serialization_of_style_attribute(self):
        el = self._find_on_page("element_state/res/element-with-color-style-attribute.html", "div")
        self.assertEqual("color: rgba(255, 0, 0, 1.0);", el.get_attribute("style"))

    def test_true_if_boolean_attribute_present(self):
        el = self._find_on_page("element_state/res/input-with-checked-attribute.html", "input")
        self.assertEqual("true", el.get_attribute("checked"))

    def test_none_if_boolean_attribute_absent(self):
        el = self._find_on_page("element_state/res/input-without-checked-attribute.html", "input")
        self.assertIsNone(el.get_attribute("checked"))

    def test_option_with_attribute_value(self):
        el = self._find_on_page("element_state/res/option-with-value-attribute.html", "option")
        self.assertEqual("value1", el.get_attribute("value"))

    def test_option_without_value_attribute(self):
        el = self._find_on_page("element_state/res/option-without-value-attribute.html", "option")
        self.assertEqual("Value 1", el.get_attribute("value"))

    def test_a_href_attribute(self):
        el = self._find_on_page("element_state/res/a-with-href-attribute.html", "a")
        self.assertEqual("http://web-platform.test:8000/path#fragment", el.get_attribute("href"))

    def test_img_src_attribute(self):
        el = self._find_on_page("element_state/res/img-with-src-attribute.html", "img")
        self.assertEqual("http://web-platform.test:8000/images/blue.png", el.get_attribute("src"))

    def test_custom_attribute(self):
        el = self._find_on_page("element_state/res/element-with-custom-attribute.html", "div")
        self.assertEqual("attribute value", el.get_attribute("webdriver-custom-attribute"))

    def test_attribute_not_present(self):
        el = self._find_on_page("element_state/res/element-without-attribute.html", "div")
        self.assertIsNone(el.get_attribute("class"))
# Run the tests in this module when it is executed directly.
if __name__ == "__main__":
    unittest.main()
| mpl-2.0 |
schwarz/youtube-dl | youtube_dl/extractor/scivee.py | 150 | 1894 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class SciVeeIE(InfoExtractor):
    """Extractor for scivee.tv video/audio pages."""

    _VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.scivee.tv/node/62352',
        'md5': 'b16699b74c9e6a120f6772a44960304f',
        'info_dict': {
            'id': '62352',
            'ext': 'mp4',
            'title': 'Adam Arkin at the 2014 DOE JGI Genomics of Energy & Environment Meeting',
            'description': 'md5:81f1710638e11a481358fab1b11059d7',
        },
    }

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')

        # The annotations XML the site serves is malformed, so fetch it as a
        # plain webpage and pull the fields out with regexes instead.
        annotations = self._download_webpage(
            'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations')

        title = self._html_search_regex(r'<title>([^<]+)</title>', annotations, 'title')
        description = self._html_search_regex(r'<abstract>([^<]+)</abstract>', annotations, 'abstract', fatal=False)
        filesize = int_or_none(self._html_search_regex(
            r'<filesize>([^<]+)</filesize>', annotations, 'filesize', fatal=False))

        audio_format = {
            'url': 'http://www.scivee.tv/assets/audio/%s' % video_id,
            'ext': 'mp3',
            'format_id': 'audio',
        }
        video_format = {
            'url': 'http://www.scivee.tv/assets/video/%s' % video_id,
            'ext': 'mp4',
            'format_id': 'video',
            'filesize': filesize,
        }

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id,
            'formats': [audio_format, video_format],
        }
| unlicense |
DebugBill/domoticz | plugins/examples/DenonMarantz.py | 15 | 23003 | #
# Denon AVR 4306 Plugin
#
# Author: Dnpwwo, 2016 - 2017
#
# Mode4 ("Sources") needs to have '|' delimited names of sources that the Denon knows about. The Selector can be changed afterwards to any text and the plugin will still map to the actual Denon name.
#
"""
<plugin key="Denon4306" version="3.2.0" name="Denon/Marantz Amplifier" author="dnpwwo" wikilink="" externallink="http://www.denon.co.uk/uk">
<description>
Denon (& Marantz) AVR Plugin.<br/><br/>
"Sources" need to have '|' delimited names of sources that the Denon knows about from the technical manual.<br/>
The Sources Selector(s) can be changed after initial creation to any text and the plugin will still map to the actual Denon name.<br/><br/>
Devices will be created in the Devices Tab only and will need to be manually made active.<br/><br/>
Auto-discovery is known to work on Linux but may not on Windows.
</description>
<params>
<param field="Port" label="Port" width="30px" required="true" default="23"/>
<param field="Mode1" label="Auto-Detect" width="75px">
<options>
<option label="True" value="Discover" default="true"/>
<option label="False" value="Fixed" />
</options>
</param>
<param field="Address" label="IP Address" width="200px"/>
<param field="Mode2" label="Discovery Match" width="250px" default="SDKClass=Receiver"/>
<param field="Mode3" label="Startup Delay" width="50px" required="true">
<options>
<option label="2" value="2"/>
<option label="3" value="3"/>
<option label="4" value="4" default="true" />
<option label="5" value="5"/>
<option label="6" value="6"/>
<option label="7" value="7"/>
<option label="10" value="10"/>
</options>
</param>
<param field="Mode4" label="Sources" width="550px" required="true" default="Off|DVD|VDP|TV|CD|DBS|Tuner|Phono|VCR-1|VCR-2|V.Aux|CDR/Tape|AuxNet|AuxIPod"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import base64
import datetime
class BasePlugin:
    """Domoticz plugin for Denon/Marantz AVR amplifiers.

    Talks to the amplifier over a telnet line protocol, optionally locating it
    first via AMX Device Discovery UDP beacons.  State for the main zone and
    (once they respond) zones 2/3 is mirrored into Domoticz devices.
    """

    DenonConn = None          # active Domoticz.Connection ("Beacon" or "Telnet"); None when disconnected
    oustandingPings = 0       # heartbeats sent without any reply ("oustanding" sic - kept for compatibility)
    powerOn = False           # amplifier master power state (PW)
    mainOn = False            # main zone on/off (ZM)
    mainSource = 0            # main zone selector level (multiples of 10, see selectorMap)
    mainVolume1 = 0           # main zone volume; a negative value encodes "muted"
    zone2On = False
    zone2Source = 0
    zone2Volume = 0           # zone 2 volume; negative encodes "muted"
    zone3On = False
    zone3Source = 0
    zone3Volume = 0           # zone 3 volume; negative encodes "muted"
    ignoreMessages = "|SS|SV|SD|MS|PS|CV|SY|TP|"  # reply prefixes deliberately not handled
    selectorMap = {}          # selector level (0, 10, 20, ...) -> Denon source name from Mode4
    pollingDict = {"PW":"ZM?\r", "ZM":"SI?\r", "SI":"MV?\r", "MV":"MU?\r", "MU":"PW?\r" }  # reply prefix -> next status query (round-robin polling)
    lastMessage = "PW"        # prefix of the last polled reply; keys into pollingDict
    lastHeartbeat = datetime.datetime.now()
    SourceOptions = {}        # Domoticz selector-switch Options dict, built in onStart

    def onStart(self):
        """Create/restore Domoticz devices, build the source map and connect."""
        if Parameters["Mode6"] == "Debug":
            Domoticz.Debugging(1)
        # Selector options are derived from the '|' separated source list (Mode4).
        self.SourceOptions = {'LevelActions': '|'*Parameters["Mode4"].count('|'),
                              'LevelNames': Parameters["Mode4"],
                              'LevelOffHidden': 'false',
                              'SelectorStyle': '1'}
        if (len(Devices) == 0):
            # First run: create the main-zone devices (zone 2/3 are added lazily
            # in onMessage once the amplifier reports them).
            Domoticz.Device(Name="Power", Unit=1, TypeName="Switch", Image=5).Create()
            Domoticz.Device(Name="Main Zone", Unit=2, TypeName="Selector Switch", Switchtype=18, Image=5, Options=self.SourceOptions).Create()
            Domoticz.Device(Name="Main Volume", Unit=3, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
        else:
            # Restore the cached state from the existing Domoticz devices.
            if (2 in Devices and (len(Devices[2].sValue) > 0)):
                self.mainSource = int(Devices[2].sValue)
                self.mainOn = (Devices[2].nValue != 0)
            if (3 in Devices and (len(Devices[3].sValue) > 0)):
                self.mainVolume1 = int(Devices[3].sValue) if (Devices[3].nValue != 0) else int(Devices[3].sValue)*-1
            if (4 in Devices and (len(Devices[4].sValue) > 0)):
                self.zone2Source = int(Devices[4].sValue)
                self.zone2On = (Devices[4].nValue != 0)
            if (5 in Devices and (len(Devices[5].sValue) > 0)):
                self.zone2Volume = int(Devices[5].sValue) if (Devices[5].nValue != 0) else int(Devices[5].sValue)*-1
            if (6 in Devices and (len(Devices[6].sValue) > 0)):
                self.zone3Source = int(Devices[6].sValue)
                self.zone3On = (Devices[6].nValue != 0)
            if (7 in Devices and (len(Devices[7].sValue) > 0)):
                self.zone3Volume = int(Devices[7].sValue) if (Devices[7].nValue != 0) else int(Devices[7].sValue)*-1
            if (1 in Devices):
                self.powerOn = (self.mainOn or self.zone2On or self.zone3On)
        DumpConfigToLog()
        # Map selector levels (0, 10, 20, ...) onto the configured source names.
        dictValue=0
        for item in Parameters["Mode4"].split('|'):
            self.selectorMap[dictValue] = item
            dictValue = dictValue + 10
        self.handleConnect()
        return

    def onConnect(self, Connection, Status, Description):
        """Handle a completed (or failed) telnet connection attempt.

        On success an initial status query for power and all zones is sent.
        """
        if (Connection == self.DenonConn):
            if (Status == 0):
                Domoticz.Log("Connected successfully to: "+Connection.Address+":"+Connection.Port)
                self.DenonConn.Send('PW?\r')
                self.DenonConn.Send('ZM?\r', Delay=1)
                self.DenonConn.Send('Z2?\r', Delay=2)
                self.DenonConn.Send('Z3?\r', Delay=3)
            else:
                if (Description.find("Only one usage of each socket address") > 0):
                    Domoticz.Log(Connection.Address+":"+Connection.Port+" is busy, waiting.")
                else:
                    Domoticz.Log("Failed to connect ("+str(Status)+") to: "+Connection.Address+":"+Connection.Port+" with error: "+Description)
                # Drop the connection object; onHeartbeat will retry later.
                self.DenonConn = None
                self.powerOn = False
                self.SyncDevices(1)

    def onMessage(self, Connection, Data):
        """Process either a discovery beacon or a telnet status reply.

        Beacon messages trigger the telnet connection; telnet replies update
        the cached zone state, which is then pushed to the Domoticz devices.
        """
        strData = Data.decode("utf-8", "ignore")
        Domoticz.Debug("onMessage called with Data: '"+str(strData)+"'")
        self.oustandingPings = 0
        try:
            # Beacon messages to find the amplifier
            if (Connection.Name == "Beacon"):
                dictAMXB = DecodeDDDMessage(strData)
                if (strData.find(Parameters["Mode2"]) >= 0):
                    # Matching device found: switch from discovery to telnet.
                    self.DenonConn = None
                    self.DenonConn = Domoticz.Connection(Name="Telnet", Transport="TCP/IP", Protocol="Line", Address=Connection.Address, Port=Parameters["Port"])
                    self.DenonConn.Connect()
                    try:
                        Domoticz.Log(dictAMXB['Make']+", "+dictAMXB['Model']+" Receiver discovered successfully at address: "+Connection.Address)
                    except KeyError:
                        Domoticz.Log("'Unknown' Receiver discovered successfully at address: "+Connection.Address)
                else:
                    try:
                        Domoticz.Log("Discovery message for Class: '"+dictAMXB['SDKClass']+"', Make '"+dictAMXB['Make']+"', Model '"+dictAMXB['Model']+"' seen at address: "+Connection.Address)
                    except KeyError:
                        Domoticz.Log("Discovery message '"+str(strData)+"' seen at address: "+Connection.Address)
            # Otherwise handle amplifier
            else:
                # Telnet replies are '<2-char prefix><detail>' lines.
                strData = strData.strip()
                action = strData[0:2]
                detail = strData[2:]
                if (action in self.pollingDict): self.lastMessage = action

                if (action == "PW"):        # Power Status
                    if (detail == "STANDBY"):
                        self.powerOn = False
                    elif (detail == "ON"):
                        self.powerOn = True
                    else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "ZM"):      # Main Zone on/off
                    if (detail == "ON"):
                        self.mainOn = True
                    elif (detail == "OFF"):
                        self.mainOn = False
                    else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "SI"):      # Main Zone Source Input
                    for key, value in self.selectorMap.items():
                        if (detail == value): self.mainSource = key
                elif (action == "MV"):      # Master Volume
                    if (detail.isdigit()):
                        # Only the first two digits are used (half-dB replies are 3 digits).
                        if (abs(self.mainVolume1) != int(detail[0:2])): self.mainVolume1 = int(detail[0:2])
                    elif (detail[0:3] == "MAX"): Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                    else: Domoticz.Log("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "MU"):      # Overall Mute
                    # Mute is encoded as a negative volume value.
                    if (detail == "ON"): self.mainVolume1 = abs(self.mainVolume1)*-1
                    elif (detail == "OFF"): self.mainVolume1 = abs(self.mainVolume1)
                    else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "Z2"):      # Zone 2
                    # Zone 2 response, make sure we have Zone 2 devices in Domoticz and they are polled
                    if (4 not in Devices):
                        LevelActions = '|'*Parameters["Mode4"].count('|')
                        Domoticz.Device(Name="Zone 2", Unit=4, TypeName="Selector Switch", Switchtype=18, Image=5, Options=self.SourceOptions).Create()
                        Domoticz.Log("Zone 2 responded, devices added.")
                    if (5 not in Devices):
                        Domoticz.Device(Name="Volume 2", Unit=5, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
                    if ("Z2" not in self.pollingDict):
                        self.pollingDict = {"PW":"ZM?\r", "ZM":"SI?\r", "SI":"MV?\r", "MV":"MU?\r", "MU":"Z2?\r", "Z2":"PW?\r" }
                    if (detail == "ON"):
                        self.zone2On = True
                    elif (detail == "OFF"):
                        self.zone2On = False
                    elif (detail == "MUON"):
                        self.zone2Volume = abs(self.zone2Volume)*-1
                    elif (detail == "MUOFF"):
                        self.zone2Volume = abs(self.zone2Volume)
                    elif (detail.isdigit()):
                        if (abs(self.zone2Volume) != int(detail[0:2])): self.zone2Volume = int(detail[0:2])
                    else:
                        # Anything else should be a source name for this zone.
                        for key, value in self.selectorMap.items():
                            if (detail == value): self.zone2Source = key
                elif (action == "Z3"):      # Zone 3
                    # Zone 3 response, make sure we have Zone 3 devices in Domoticz and they are polled
                    if (6 not in Devices):
                        LevelActions = '|'*Parameters["Mode4"].count('|')
                        Domoticz.Device(Name="Zone 3", Unit=6, TypeName="Selector Switch", Switchtype=18, Image=5, Options=self.SourceOptions).Create()
                        Domoticz.Log("Zone 3 responded, devices added.")
                    if (7 not in Devices):
                        Domoticz.Device(Name="Volume 3", Unit=7, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
                    if ("Z3" not in self.pollingDict):
                        self.pollingDict = {"PW":"ZM?\r", "ZM":"SI?\r", "SI":"MV?\r", "MV":"MU?\r", "MU":"Z2?\r", "Z2":"Z3?\r", "Z3":"PW?\r" }
                    if (detail == "ON"):
                        self.zone3On = True
                    elif (detail == "OFF"):
                        self.zone3On = False
                    elif (detail == "MUON"):
                        self.zone3Volume = abs(self.zone3Volume)*-1
                    elif (detail == "MUOFF"):
                        self.zone3Volume = abs(self.zone3Volume)
                    elif (detail.isdigit()):
                        if (abs(self.zone3Volume) != int(detail[0:2])): self.zone3Volume = int(detail[0:2])
                    else:
                        for key, value in self.selectorMap.items():
                            if (detail == value): self.zone3Source = key
                else:
                    if (self.ignoreMessages.find(action) < 0):
                        Domoticz.Debug("Unknown message '"+action+"' ignored.")
                self.SyncDevices(0)
        except Exception as inst:
            Domoticz.Error("Exception in onMessage, called with Data: '"+str(strData)+"'")
            Domoticz.Error("Exception detail: '"+str(inst)+"'")
            raise

    def onCommand(self, Unit, Command, Level, Hue):
        """Translate a Domoticz device command into amplifier protocol sends.

        Commands are delayed while the amplifier powers up (Mode3 seconds) or
        when a heartbeat poll was sent very recently.
        """
        Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))

        Command = Command.strip()
        action, sep, params = Command.partition(' ')
        action = action.capitalize()
        params = params.capitalize()
        delay = 0
        if (self.powerOn == False):
            # Allow the amplifier time to power up before follow-on commands.
            delay = int(Parameters["Mode3"])
        else:
            # Amp will ignore commands if it is responding to a heartbeat so delay send
            lastHeartbeatDelta = (datetime.datetime.now()-self.lastHeartbeat).total_seconds()
            if (lastHeartbeatDelta < 0.5):
                delay = 1
                Domoticz.Log("Last heartbeat was "+str(lastHeartbeatDelta)+" seconds ago, delaying command send.")

        if (Unit == 1):     # Main power switch
            if (action == "On"):
                self.DenonConn.Send(Message='PWON\r')
            elif (action == "Off"):
                self.DenonConn.Send(Message='PWSTANDBY\r', Delay=delay)
        # Main Zone devices
        elif (Unit == 2):   # Main selector
            if (action == "On"):
                self.DenonConn.Send(Message='ZMON\r')
            elif (action == "Set"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                self.DenonConn.Send(Message='SI'+self.selectorMap[Level]+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='ZMOFF\r', Delay=delay)
        elif (Unit == 3):   # Main Volume control
            if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
            if (action == "On"):
                self.DenonConn.Send(Message='MUOFF\r', Delay=delay)
            elif (action == "Set"):
                self.DenonConn.Send(Message='MV'+str(Level)+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='MUON\r', Delay=delay)
        # Zone 2 devices
        elif (Unit == 4):   # Zone 2 selector
            if (action == "On"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                self.DenonConn.Send(Message='Z2ON\r', Delay=delay)
            elif (action == "Set"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                if (self.zone2On == False):
                    self.DenonConn.Send(Message='Z2ON\r', Delay=delay)
                    delay += 1
                self.DenonConn.Send(Message='Z2'+self.selectorMap[Level]+'\r', Delay=delay)
                delay += 1
                # Query the zone afterwards to confirm the change took effect.
                self.DenonConn.Send(Message='Z2?\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z2OFF\r', Delay=delay)
        elif (Unit == 5):   # Zone 2 Volume control
            if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
            if (self.zone2On == False):
                self.DenonConn.Send(Message='Z2ON\r', Delay=delay)
                delay += 1
            if (action == "On"):
                self.DenonConn.Send(Message='Z2MUOFF\r', Delay=delay)
            elif (action == "Set"):
                self.DenonConn.Send(Message='Z2'+str(Level)+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z2MUON\r', Delay=delay)
        # Zone 3 devices
        elif (Unit == 6):   # Zone 3 selector
            if (action == "On"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                self.DenonConn.Send(Message='Z3ON\r', Delay=delay)
            elif (action == "Set"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                if (self.zone3On == False):
                    self.DenonConn.Send(Message='Z3ON\r', Delay=delay)
                    delay += 1
                self.DenonConn.Send(Message='Z3'+self.selectorMap[Level]+'\r', Delay=delay)
                delay += 1
                self.DenonConn.Send(Message='Z3?\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z3OFF\r', Delay=delay)
        elif (Unit == 7):   # Zone 3 Volume control
            if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
            if (self.zone3On == False):
                self.DenonConn.Send(Message='Z3ON\r', Delay=delay)
                delay += 1
            if (action == "On"):
                self.DenonConn.Send(Message='Z3MUOFF\r', Delay=delay)
            elif (action == "Set"):
                self.DenonConn.Send(Message='Z3'+str(Level)+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z3MUON\r', Delay=delay)
        return

    def onDisconnect(self, Connection):
        """Mark devices as unreachable when the telnet session drops."""
        Domoticz.Error("Disconnected from: "+Connection.Address+":"+Connection.Port)
        self.SyncDevices(1)
        return

    def onHeartbeat(self):
        """Reconnect if needed, otherwise send the next round-robin status poll.

        After 5 unanswered polls the connection is abandoned so the next
        heartbeat can start a fresh discovery/connection attempt.
        """
        Domoticz.Debug("onHeartbeat called, last response seen "+str(self.oustandingPings)+" heartbeats ago.")
        if (self.DenonConn == None):
            self.handleConnect()
        else:
            if (self.DenonConn.Name == "Telnet") and (self.DenonConn.Connected()):
                self.DenonConn.Send(self.pollingDict[self.lastMessage])
                Domoticz.Debug("onHeartbeat: self.lastMessage "+self.lastMessage+", Sending '"+self.pollingDict[self.lastMessage][0:2]+"'.")
            if (self.oustandingPings > 5):
                Domoticz.Error(self.DenonConn.Name+" has not responded to 5 pings, terminating connection.")
                self.DenonConn = None
                self.powerOn = False
                self.oustandingPings = -1
            self.oustandingPings = self.oustandingPings + 1
        self.lastHeartbeat = datetime.datetime.now()

    def handleConnect(self):
        """Start either UDP beacon discovery or a direct telnet connection."""
        self.SyncDevices(1)
        self.DenonConn = None
        if Parameters["Mode1"] == "Discover":
            Domoticz.Log("Using auto-discovery mode to detect receiver as specified in parameters.")
            # 239.255.250.250:9131 is the AMX Device Discovery multicast group.
            self.DenonConn = Domoticz.Connection(Name="Beacon", Transport="UDP/IP", Address="239.255.250.250", Port=str(9131))
            self.DenonConn.Listen()
        else:
            self.DenonConn = Domoticz.Connection(Name="Telnet", Transport="TCP/IP", Protocol="Line", Address=Parameters["Address"], Port=Parameters["Port"])
            self.DenonConn.Connect()

    def SyncDevices(self, TimedOut):
        """Push the cached amplifier state to all existing Domoticz devices.

        :param TimedOut: 1 marks devices as unreachable (red banner), 0 clears it
        """
        if (self.powerOn == False):
            UpdateDevice(1, 0, "Off", TimedOut)
            UpdateDevice(2, 0, "0", TimedOut)
            UpdateDevice(3, 0, str(abs(self.mainVolume1)), TimedOut)
            UpdateDevice(4, 0, "0", TimedOut)
            UpdateDevice(5, 0, str(abs(self.zone2Volume)), TimedOut)
            UpdateDevice(6, 0, "0", TimedOut)
            UpdateDevice(7, 0, str(abs(self.zone3Volume)), TimedOut)
        else:
            UpdateDevice(1, 1, "On", TimedOut)
            UpdateDevice(2, self.mainSource if self.mainOn else 0, str(self.mainSource if self.mainOn else 0), TimedOut)
            # Negative cached volume means muted -> switch state 0, level kept.
            if (self.mainVolume1 <= 0 or self.mainOn == False): UpdateDevice(3, 0, str(abs(self.mainVolume1)), TimedOut)
            else: UpdateDevice(3, 2, str(self.mainVolume1), TimedOut)
            UpdateDevice(4, self.zone2Source if self.zone2On else 0, str(self.zone2Source if self.zone2On else 0), TimedOut)
            if (self.zone2Volume <= 0 or self.zone2On == False): UpdateDevice(5, 0, str(abs(self.zone2Volume)), TimedOut)
            else: UpdateDevice(5, 2, str(self.zone2Volume), TimedOut)
            UpdateDevice(6, self.zone3Source if self.zone3On else 0, str(self.zone3Source if self.zone3On else 0), TimedOut)
            if (self.zone3Volume <= 0 or self.zone3On == False): UpdateDevice(7, 0, str(abs(self.zone3Volume)), TimedOut)
            else: UpdateDevice(7, 2, str(self.zone3Volume), TimedOut)
        return
# Single module-level plugin instance; Domoticz calls the free functions
# below and each one simply forwards to the matching BasePlugin method.
global _plugin
_plugin = BasePlugin()

def onStart():
    # Called once by Domoticz when the hardware is started.
    global _plugin
    _plugin.onStart()

def onConnect(Connection, Status, Description):
    # Called when a connection attempt completes (successfully or not).
    global _plugin
    _plugin.onConnect(Connection, Status, Description)

def onMessage(Connection, Data):
    # Called when data arrives on one of the plugin's connections.
    global _plugin
    _plugin.onMessage(Connection, Data)

def onCommand(Unit, Command, Level, Hue):
    # Called when the user operates one of the plugin's devices.
    global _plugin
    _plugin.onCommand(Unit, Command, Level, Hue)

def onDisconnect(Connection):
    # Called when a connection is closed.
    global _plugin
    _plugin.onDisconnect(Connection)

def onHeartbeat():
    # Called periodically by Domoticz; drives polling and reconnection.
    global _plugin
    _plugin.onHeartbeat()
def UpdateDevice(Unit, nValue, sValue, TimedOut):
    """Update a Domoticz device, but only when something actually changed.

    :param Unit: plugin unit number of the device
    :param nValue: numeric state to store
    :param sValue: string state to store (converted with str())
    :param TimedOut: unreachable flag to store
    """
    # Devices can be deleted from the Domoticz GUI at any time, so check
    # that this one still exists before touching it.
    if Unit not in Devices:
        return
    device = Devices[Unit]
    changed = (device.nValue != nValue
               or device.sValue != sValue
               or device.TimedOut != TimedOut)
    if changed:
        device.Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
        Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+device.Name+")")
    return
def DumpConfigToLog():
    """Write all non-empty plugin parameters and every device to the debug log."""
    for key in Parameters:
        if Parameters[key] != "":
            Domoticz.Debug("'" + key + "':'" + str(Parameters[key]) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug("Device: " + str(unit) + " - " + str(device))
        Domoticz.Debug("Internal ID: '" + str(device.ID) + "'")
        Domoticz.Debug("External ID: '" + str(device.DeviceID) + "'")
        Domoticz.Debug("Device Name: '" + device.Name + "'")
        Domoticz.Debug("Device nValue: " + str(device.nValue))
        Domoticz.Debug("Device sValue: '" + device.sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(device.LastLevel))
    return
def DecodeDDDMessage(Message):
    """Parse an AMX Device Discovery beacon into a key/value dictionary.

    Sample discovery message:
        AMXB<-SDKClass=Receiver><-Make=DENON><-Model=AVR-4306>

    :param Message: raw beacon payload as received over UDP
    :returns: dict of beacon fields, e.g. {'Make': 'DENON', ...}
    """
    payload = Message.strip()
    # Drop the leading 'AMXB' tag and the trailing '>', then remove the '<-'
    # markers, leaving '>'-separated 'key=value' chunks.
    payload = payload[4:len(payload)-1].replace("<-","")
    result = {}
    for chunk in payload.split(">"):
        # Split on the first '=' only, so values that themselves contain '='
        # (e.g. URLs with query strings) survive instead of raising ValueError
        # as the previous dict(item.split("=")) did; chunks without '=' are
        # silently skipped rather than crashing onMessage.
        key, sep, value = chunk.partition("=")
        if sep:
            result[key] = value
    return result
| gpl-3.0 |
sinuos/FreeNOS | site_scons/phony.py | 15 | 1319 | #
# Copyright (C) 2010 Niek Linnenbank
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from SCons.Script import *
import os
#
# Always execute the given commands as SCons targets.
#
# @author GregNoel
# @see http://www.scons.org/wiki/PhonyTargets
#
def Targets(env, **kw):
    """Register each keyword argument as an always-built (phony) target.

    :param env: SCons construction environment; falls back to the default one
    :param kw: mapping of target name -> action to execute
    """
    # Fall back to the default construction environment when none was given.
    env = env or DefaultEnvironment()
    # The actions need the caller's shell environment (PATH etc.) to run.
    env.Append(ENV=os.environ)
    # Turn every name/action pair into an alias that is rebuilt on each run.
    for name, command in kw.items():
        alias = env.Alias(name, [], command)
        env.AlwaysBuild(alias)
#
# Add ourselves to the given environment.
#
def generate(env):
    """SCons tool hook: attach Targets() as a method on the environment."""
    env.AddMethod(Targets)
#
# We always exist.
#
def exists(env):
    """SCons tool hook: this tool has no external requirements, so it always exists."""
    return True
| gpl-3.0 |
whutch/atria | cwmud/core/protocols/__init__.py | 2 | 2361 | # -*- coding: utf-8 -*-
"""Transport protocol implementations."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from time import sleep
from ..logs import get_logger
from ..messages import get_pubsub
log = get_logger("protocols")
class ProtocolServer:
    """Abstract base class for a transport-protocol server.

    Concrete subclasses implement ``poll`` to pump the protocol's IO.
    """

    def __init__(self):
        """Create a new, initially stopped server with no handlers."""
        self._handlers = set()
        self._started = False

    @property
    def is_started(self):
        """Whether the server is currently running."""
        return self._started

    def get_handler(self, uid):
        """Return the handler whose UID matches ``uid``.

        :param uid: The UID to search for
        :returns: The matching handler, or None when no handler has that UID
        """
        matches = (handler for handler in self._handlers
                   if handler.uid == uid)
        return next(matches, None)

    def start(self):
        """Mark the server as started."""
        self._started = True

    def stop(self):
        """Mark the server as stopped."""
        self._started = False

    def poll(self):
        """Process any queued IO; subclasses must override this."""
        raise NotImplementedError

    def serve(self):
        """Serve protocol IO until the server is stopped.

        Blocks, polling roughly 40 times a second; starts the server if it
        is not already running and stops it again on exit (including a
        KeyboardInterrupt, which is swallowed).
        """
        if not self.is_started:
            self.start()
        try:
            while self.is_started:
                self.poll()
                sleep(0.025)
        except KeyboardInterrupt:
            pass
        finally:
            self.stop()
class ProtocolHandler:
    """Abstract base class for a transport-protocol client handler."""

    def __init__(self, uid=None):
        """Create a handler, optionally tagged with a unique identifier."""
        self._uid = uid
        self._messages = get_pubsub()

    @property
    def uid(self):
        """The unique identifier assigned to this client (may be None)."""
        return self._uid

    @property
    def alive(self):
        """Whether this handler's client is alive; the base class says no."""
        return False

    def poll(self):
        """Process any queued IO for this client; subclasses must override."""
        raise NotImplementedError
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/test_fileio.py | 80 | 15460 | # Adapted from test_file.py by Daniel Stutzbach
import sys
import os
import io
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.support import TESTFN, check_warnings, run_unittest, make_bad_fd, cpython_only
from collections import UserList
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    # file tests for which a test file is automatically set up
    # Each test gets a fresh _FileIO opened for writing on TESTFN; tearDown
    # closes it and removes the file.

    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # With the file gone, the proxy must raise rather than dangle.
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        # seek() with whence 0 (absolute), 1 (relative) and 2 (from end).
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)

        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testReadinto(self):
        # verify readinto
        self.f.write(bytes([1, 2]))
        self.f.close()
        a = array('b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array('b', [1, 2]), a[:n])

    def testWritelinesList(self):
        l = [b'123', b'456']
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesUserList(self):
        # writelines() must accept any iterable, not just builtin lists.
        l = UserList([b'123', b'456'])
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesError(self):
        # Non-bytes items (including str) must be rejected.
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
        self.assertRaises(TypeError, self.f.writelines, None)
        self.assertRaises(TypeError, self.f.writelines, "abc")

    def test_none_args(self):
        # Passing None for size/hint arguments means "no limit".
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def test_reject(self):
        # Raw binary IO must not accept str data.
        self.assertRaises(TypeError, self.f.write, "Hello!")

    def testRepr(self):
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode=%r>"
                         % (self.f.name, self.f.mode))
        # Without a name the repr falls back to the file descriptor.
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode=%r>"
                         % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")

    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10) # Open for reading
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        # Every IO method must raise ValueError once the file is closed.
        methods = ['fileno', 'isatty', 'read', 'readinto',
                   'seek', 'tell', 'truncate', 'write', 'seekable',
                   'readable', 'writable']

        self.f.close()
        self.assertTrue(self.f.closed)

        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except OSError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised OSError")

    @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
    def testOpenDirFD(self):
        # Opening an fd that refers to a directory must fail with EISDIR.
        fd = os.open('.', os.O_RDONLY)
        with self.assertRaises(OSError) as cm:
            _FileIO(fd, 'r')
        os.close(fd)
        self.assertEqual(cm.exception.errno, errno.EISDIR)

    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor.  First, a decorator:
    def ClosedFD(func):
        # Wrap a test so it runs with self.f's descriptor already closed;
        # the wrapped test receives the (now broken) file as `f`.
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except OSError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        # Like ClosedFD, but additionally asserts that the wrapped test
        # raises OSError with errno EBADF (bad file descriptor).
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except OSError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised OSError")
            finally:
                try:
                    self.f.close()
                except OSError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write(b'a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        # Capability queries must still work on a closed descriptor.
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # Reopen TESTFN for reading and close its descriptor behind the
        # object's back; used by the read-path EBADF tests below.
        try:
            self.f.close()
        except OSError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array('b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    """Tests that construct their own _FileIO objects (modes, fds,
    truncate semantics, append, constructor validation)."""
    def testAbles(self):
        # readable()/writable()/seekable()/isatty() for each open mode.
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()
            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()
            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()
            if sys.platform != "win32":
                try:
                    f = _FileIO("/dev/tty", "a")
                except OSError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test. This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    self.assertEqual(f.readable(), False)
                    self.assertEqual(f.writable(), True)
                    if sys.platform != "darwin" and \
                       'bsd' not in sys.platform and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEqual(f.seekable(), False)
                    self.assertEqual(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)
    def testInvalidModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)
    def testModeStrings(self):
        # test that the mode attribute is correct for various mode strings
        # given as init args
        try:
            for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
                          ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
                          ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
                          ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
                # read modes are last so that TESTFN will exist first
                with _FileIO(TESTFN, modes[0]) as f:
                    self.assertEqual(f.mode, modes[1])
        finally:
            if os.path.exists(TESTFN):
                os.unlink(TESTFN)
    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)
    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest('could not encode %r to ascii' % TESTFN)
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)
    def testConstructorHandlesNULChars(self):
        # Embedded NUL bytes in a filename must be rejected, str or bytes.
        fn_with_NUL = 'foo\0bar'
        self.assertRaises(TypeError, _FileIO, fn_with_NUL, 'w')
        self.assertRaises(TypeError, _FileIO, bytes(fn_with_NUL, 'ascii'), 'w')
    def testInvalidFd(self):
        # Negative fds raise ValueError; stale-but-valid-range fds raise OSError.
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(OSError, msvcrt.get_osfhandle, make_bad_fd())
    @cpython_only
    def testInvalidFd_overflow(self):
        # Issue 15989
        import _testcapi
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MAX + 1)
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MIN - 1)
    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)
    def testTruncate(self):
        # truncate() must not move the file position; seek(0, END) reveals
        # the new size after both shrinking and growing.
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, io.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, io.SEEK_END), 15)
        f.close()
    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()
            f = _FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())
            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)
    def testAppend(self):
        # Append mode must add to the end of an existing file.
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass
    def testInvalidInit(self):
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)
    def testWarnings(self):
        # Constructor failures must not emit warnings.
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])
    def testUnclosedFDOnException(self):
        # If __init__ fails partway through, the fd we passed in must stay
        # owned by the caller (closing it afterwards must succeed).
        class MyException(Exception): pass
        class MyFileIO(_FileIO):
            def __setattr__(self, name, value):
                if name == "name":
                    raise MyException("blocked setting name")
                return super(MyFileIO, self).__setattr__(name, value)
        fd = os.open(__file__, os.O_RDONLY)
        self.assertRaises(MyException, MyFileIO, fd)
        os.close(fd)  # should not raise OSError(EBADF)
def test_main():
    """Run both FileIO test suites, always removing TESTFN afterwards."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
| gpl-2.0 |
studiomobile/protobuf-objc | python/google/protobuf/internal/type_checkers.py | 9 | 12239 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
  FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
    corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
  """Return a type checker for a message field of the specified types.

  Args:
    cpp_type: C++ type of the field (see descriptor.py).
    field_type: Protocol message field type (see descriptor.py).

  Returns:
    An instance of TypeChecker which can be used to verify the types
    of values assigned to a field of the specified type.
  """
  # Proto "string" fields get the ASCII/unicode-aware checker; every other
  # scalar is covered by the shared per-CPPTYPE table.
  is_proto_string = (cpp_type == _FieldDescriptor.CPPTYPE_STRING
                     and field_type == _FieldDescriptor.TYPE_STRING)
  if is_proto_string:
    return UnicodeValueChecker()
  return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
  """Catches type errors as early as possible when a client assigns a
  value to a scalar field of a protocol message.
  """

  def __init__(self, *acceptable_types):
    self._acceptable_types = acceptable_types

  def CheckValue(self, proposed_value):
    """Raise TypeError unless proposed_value has an acceptable type."""
    if isinstance(proposed_value, self._acceptable_types):
      return
    raise TypeError('%.1024r has type %s, but expected one of: %s' %
                    (proposed_value, type(proposed_value),
                     self._acceptable_types))
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
  """Checker used for integer fields. Performs type-check and range check."""
  # Subclasses provide _MIN/_MAX class attributes for the concrete width.
  # NOTE: Python 2 code -- accepts both `int` and `long`.
  def CheckValue(self, proposed_value):
    if not isinstance(proposed_value, (int, long)):
      message = ('%.1024r has type %s, but expected one of: %s' %
                 (proposed_value, type(proposed_value), (int, long)))
      raise TypeError(message)
    if not self._MIN <= proposed_value <= self._MAX:
      raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):
  """Checker used for string fields."""
  # NOTE: Python 2 code -- accepts `unicode` directly, and `str` only when
  # it is pure 7-bit ASCII (anything else must be decoded by the caller).
  def CheckValue(self, proposed_value):
    if not isinstance(proposed_value, (str, unicode)):
      message = ('%.1024r has type %s, but expected one of: %s' %
                 (proposed_value, type(proposed_value), (str, unicode)))
      raise TypeError(message)
    # If the value is of type 'str' make sure that it is in 7-bit ASCII
    # encoding.
    if isinstance(proposed_value, str):
      try:
        unicode(proposed_value, 'ascii')
      except UnicodeDecodeError:
        raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
                         'encoding. Non-ASCII strings must be converted to '
                         'unicode objects before being added.' %
                         (proposed_value))
class Int32ValueChecker(IntValueChecker):
  # We're sure to use ints instead of longs here since comparison may be more
  # efficient.
  _MIN = -2147483648
  _MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
  # Range of an unsigned 32-bit integer: [0, 2**32 - 1].
  _MIN = 0
  _MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
  # Range of a signed 64-bit integer: [-2**63, 2**63 - 1].
  _MIN = -(1 << 63)
  _MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
  # Range of an unsigned 64-bit integer: [0, 2**64 - 1].
  _MIN = 0
  _MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
# Maps each scalar CPPTYPE to its checker instance.  DOUBLE/FLOAT also accept
# int/long values (implicit widening); BOOL accepts int for historical reasons.
_VALUE_CHECKERS = {
    _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
    _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
        float, int, long),
    _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
        float, int, long),
    _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
    _FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
    }
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
# Each size function takes (field_number, value) and returns the total
# serialized size including the tag bytes.
TYPE_TO_BYTE_SIZE_FN = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
    _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
    _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
    _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
    _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
    _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
    _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
    _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
    _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
    _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
    _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
    _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
    _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
    _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
    _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
    _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
    }
# Maps from field type to an unbound Encoder method F, such that
# F(encoder, field_number, value) will append the serialization
# of a value of this type to the encoder.
# Unbound Encoder methods: F(encoder, field_number, value) appends the
# tagged serialization of `value` to the encoder.
_Encoder = encoder.Encoder
TYPE_TO_SERIALIZE_METHOD = {
    _FieldDescriptor.TYPE_DOUBLE: _Encoder.AppendDouble,
    _FieldDescriptor.TYPE_FLOAT: _Encoder.AppendFloat,
    _FieldDescriptor.TYPE_INT64: _Encoder.AppendInt64,
    _FieldDescriptor.TYPE_UINT64: _Encoder.AppendUInt64,
    _FieldDescriptor.TYPE_INT32: _Encoder.AppendInt32,
    _FieldDescriptor.TYPE_FIXED64: _Encoder.AppendFixed64,
    _FieldDescriptor.TYPE_FIXED32: _Encoder.AppendFixed32,
    _FieldDescriptor.TYPE_BOOL: _Encoder.AppendBool,
    _FieldDescriptor.TYPE_STRING: _Encoder.AppendString,
    _FieldDescriptor.TYPE_GROUP: _Encoder.AppendGroup,
    _FieldDescriptor.TYPE_MESSAGE: _Encoder.AppendMessage,
    _FieldDescriptor.TYPE_BYTES: _Encoder.AppendBytes,
    _FieldDescriptor.TYPE_UINT32: _Encoder.AppendUInt32,
    _FieldDescriptor.TYPE_ENUM: _Encoder.AppendEnum,
    _FieldDescriptor.TYPE_SFIXED32: _Encoder.AppendSFixed32,
    _FieldDescriptor.TYPE_SFIXED64: _Encoder.AppendSFixed64,
    _FieldDescriptor.TYPE_SINT32: _Encoder.AppendSInt32,
    _FieldDescriptor.TYPE_SINT64: _Encoder.AppendSInt64,
    }
# Tag-less variants, used for packed repeated fields (length-delimited types
# like string/bytes/message/group have no NoTag form).
TYPE_TO_NOTAG_SERIALIZE_METHOD = {
    _FieldDescriptor.TYPE_DOUBLE: _Encoder.AppendDoubleNoTag,
    _FieldDescriptor.TYPE_FLOAT: _Encoder.AppendFloatNoTag,
    _FieldDescriptor.TYPE_INT64: _Encoder.AppendInt64NoTag,
    _FieldDescriptor.TYPE_UINT64: _Encoder.AppendUInt64NoTag,
    _FieldDescriptor.TYPE_INT32: _Encoder.AppendInt32NoTag,
    _FieldDescriptor.TYPE_FIXED64: _Encoder.AppendFixed64NoTag,
    _FieldDescriptor.TYPE_FIXED32: _Encoder.AppendFixed32NoTag,
    _FieldDescriptor.TYPE_BOOL: _Encoder.AppendBoolNoTag,
    _FieldDescriptor.TYPE_UINT32: _Encoder.AppendUInt32NoTag,
    _FieldDescriptor.TYPE_ENUM: _Encoder.AppendEnumNoTag,
    _FieldDescriptor.TYPE_SFIXED32: _Encoder.AppendSFixed32NoTag,
    _FieldDescriptor.TYPE_SFIXED64: _Encoder.AppendSFixed64NoTag,
    _FieldDescriptor.TYPE_SINT32: _Encoder.AppendSInt32NoTag,
    _FieldDescriptor.TYPE_SINT64: _Encoder.AppendSInt64NoTag,
    }
# Maps from field type to expected wiretype.
# Expected wire type per field type (used when decoding to validate tags).
FIELD_TYPE_TO_WIRE_TYPE = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_STRING:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
    _FieldDescriptor.TYPE_MESSAGE:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_BYTES:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
    }
# Maps from field type to an unbound Decoder method F,
# such that F(decoder) will read a field of the requested type.
#
# Note that Message and Group are intentionally missing here.
# They're handled by _RecursivelyMerge().
# Unbound Decoder methods: F(decoder) reads a field of the requested type.
# Message and Group are intentionally absent (handled by _RecursivelyMerge()).
_Decoder = decoder.Decoder
TYPE_TO_DESERIALIZE_METHOD = {
    _FieldDescriptor.TYPE_DOUBLE: _Decoder.ReadDouble,
    _FieldDescriptor.TYPE_FLOAT: _Decoder.ReadFloat,
    _FieldDescriptor.TYPE_INT64: _Decoder.ReadInt64,
    _FieldDescriptor.TYPE_UINT64: _Decoder.ReadUInt64,
    _FieldDescriptor.TYPE_INT32: _Decoder.ReadInt32,
    _FieldDescriptor.TYPE_FIXED64: _Decoder.ReadFixed64,
    _FieldDescriptor.TYPE_FIXED32: _Decoder.ReadFixed32,
    _FieldDescriptor.TYPE_BOOL: _Decoder.ReadBool,
    _FieldDescriptor.TYPE_STRING: _Decoder.ReadString,
    _FieldDescriptor.TYPE_BYTES: _Decoder.ReadBytes,
    _FieldDescriptor.TYPE_UINT32: _Decoder.ReadUInt32,
    _FieldDescriptor.TYPE_ENUM: _Decoder.ReadEnum,
    _FieldDescriptor.TYPE_SFIXED32: _Decoder.ReadSFixed32,
    _FieldDescriptor.TYPE_SFIXED64: _Decoder.ReadSFixed64,
    _FieldDescriptor.TYPE_SINT32: _Decoder.ReadSInt32,
    _FieldDescriptor.TYPE_SINT64: _Decoder.ReadSInt64,
    }
| bsd-3-clause |
madmax983/h2o-3 | h2o-py/h2o/model/dim_reduction.py | 2 | 2505 | from model_base import ModelBase
from metrics_base import *
class H2ODimReductionModel(ModelBase):
def num_iterations(self):
"""
Get the number of iterations that it took to converge or reach max iterations.
:return: number of iterations (integer)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('number_of_iterations')]
def objective(self):
"""
Get the final value of the objective function from the GLRM model.
:return: final objective value (double)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_objective_value')]
def final_step(self):
"""
Get the final step size from the GLRM model.
:return: final step size (double)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_step_size')]
def archetypes(self):
"""
:return: the archetypes (Y) of the GLRM model.
"""
o = self._model_json["output"]
yvals = o["archetypes"].cell_values
archetypes = []
for yidx, yval in enumerate(yvals):
archetypes.append(list(yvals[yidx])[1:])
return archetypes
def screeplot(self, type="barplot", **kwargs):
"""
Produce the scree plot
:param type: type of plot. "barplot" and "lines" currently supported
:param show: if False, the plot is not shown. matplotlib show method is blocking.
:return: None
"""
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in kwargs.keys() and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib is required for this function!"
return
variances = [s**2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
plt.xlabel('Components')
plt.ylabel('Variances')
plt.title('Scree Plot')
plt.xticks(range(1,len(variances)+1))
if type == "barplot": plt.bar(range(1,len(variances)+1), variances)
elif type == "lines": plt.plot(range(1,len(variances)+1), variances, 'b--')
if not ('server' in kwargs.keys() and kwargs['server']): plt.show() | apache-2.0 |
awslabs/chalice | chalice/cli/reloader.py | 1 | 4475 | """Automatically reload chalice app when files change.
How It Works
============
This approach borrows from what django, flask, and other frameworks do.
Essentially, with reloading enabled ``chalice local`` will start up
a worker process that runs the dev http server. This means there will
be a total of two processes running (both will show as ``chalice local``
in ps). One process is the parent process. Its job is to start up a child
process and restart it if it exits (due to a restart request). The child
process is the process that actually starts up the web server for local mode.
The child process also sets up a watcher thread. Its job is to monitor
directories for changes. If a change is encountered it sys.exit()s the process
with a known RC (the RESTART_REQUEST_RC constant in the module).
The parent process runs in an infinite loop. If the child process exits with
an RC of RESTART_REQUEST_RC the parent process starts up another child process.
The child worker is denoted by setting the ``CHALICE_WORKER`` env var.
If this env var is set, the process is intended to be a worker process (as
opposed to the parent process which just watches for restart requests from the
worker process).
"""
import subprocess
import logging
import copy
import sys
from typing import MutableMapping, Type, Callable, Optional # noqa
from chalice.cli.filewatch import RESTART_REQUEST_RC, WorkerProcess
from chalice.local import LocalDevServer, HTTPServerThread # noqa
LOGGER = logging.getLogger(__name__)
WorkerProcType = Optional[Type[WorkerProcess]]
def get_best_worker_process():
    # type: () -> Type[WorkerProcess]
    """Pick the best available file-watcher worker implementation.

    Prefers the event-based watchdog watcher; falls back to the
    stat()-polling watcher when watchdog is not installed.
    """
    try:
        from chalice.cli.filewatch.eventbased import WatchdogWorkerProcess
        LOGGER.debug("Using watchdog worker process.")
        chosen = WatchdogWorkerProcess
    except ImportError:
        from chalice.cli.filewatch.stat import StatWorkerProcess
        LOGGER.debug("Using stat() based worker process.")
        chosen = StatWorkerProcess
    return chosen
def start_parent_process(env):
    # type: (MutableMapping) -> None
    """Run the supervising parent loop (spawns/restarts child workers)."""
    ParentProcess(env, subprocess.Popen).main()
def start_worker_process(server_factory, root_dir, worker_process_cls=None):
    # type: (Callable[[], LocalDevServer], str, WorkerProcType) -> int
    """Run the dev server in a daemon thread under a file watcher.

    Returns the worker's exit code; RESTART_REQUEST_RC signals that a file
    change requested a restart.
    """
    if worker_process_cls is None:
        worker_process_cls = get_best_worker_process()
    server_thread = HTTPServerThread(server_factory)
    watcher = worker_process_cls(server_thread)
    LOGGER.debug("Starting worker...")
    rc = watcher.main(root_dir)
    LOGGER.info("Restarting local dev server.")
    return rc
class ParentProcess(object):
    """Spawns a child worker process and restarts it as needed."""

    def __init__(self, env, popen):
        # type: (MutableMapping, Type[subprocess.Popen]) -> None
        # Copy the environment so the CHALICE_WORKER marker we add does not
        # leak into the caller's mapping.
        self._env = copy.copy(env)
        self._popen = popen

    def main(self):
        # type: () -> None
        """Supervise child workers until one exits without asking to restart.

        Re-launches this same command with CHALICE_WORKER set; respawns the
        child every time it exits with RESTART_REQUEST_RC.  Ctrl-C terminates
        the current child and propagates to the caller.
        """
        while True:
            self._env['CHALICE_WORKER'] = 'true'
            LOGGER.debug("Parent process starting child worker process...")
            child = self._popen(sys.argv, env=self._env)
            try:
                child.communicate()
            except KeyboardInterrupt:
                child.terminate()
                raise
            if child.returncode != RESTART_REQUEST_RC:
                return
def run_with_reloader(server_factory, env, root_dir, worker_process_cls=None):
    # type: (Callable, MutableMapping, str, WorkerProcType) -> int
    """Entry point for autoreload: dispatch to parent or worker mode.

    The same command runs in two modes.  When CHALICE_WORKER is present in
    the environment this process is a worker: it starts the dev server under
    a file watcher and returns the watcher's exit code.  Otherwise it is the
    parent, whose job is to spawn an identical process with CHALICE_WORKER
    set and restart it whenever it exits with RESTART_REQUEST_RC.
    """
    try:
        if env.get('CHALICE_WORKER') is None:
            # Parent mode: supervise worker copies of this command.
            start_parent_process(env)
        else:
            # Worker mode: serve until a file change requests a restart.
            return start_worker_process(server_factory, root_dir,
                                        worker_process_cls)
    except KeyboardInterrupt:
        pass
    return 0
yamila-moreno/django | django/middleware/csrf.py | 155 | 8477 | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import same_origin
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
    """
    Returns the view to be used for CSRF rejections
    """
    # Resolved lazily so CSRF_FAILURE_VIEW can be a dotted-path setting.
    return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
    # A fresh random alphanumeric secret of CSRF_KEY_LENGTH characters.
    return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
    """
    Returns the CSRF token required for a POST form. The token is an
    alphanumeric value. A new token is created if one is not already set.

    A side effect of calling this function is to make the csrf_protect
    decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
    header to the outgoing response. For this reason, you may need to use this
    function lazily, as is done by the csrf context processor.
    """
    meta = request.META
    if "CSRF_COOKIE" not in meta:
        meta["CSRF_COOKIE"] = _get_new_csrf_key()
    # Mark the token as used so process_response() sends the cookie.
    meta["CSRF_COOKIE_USED"] = True
    return meta["CSRF_COOKIE"]
def rotate_token(request):
    """
    Changes the CSRF token in use for a request - should be done on login
    for security purposes.
    """
    meta = request.META
    meta["CSRF_COOKIE"] = _get_new_csrf_key()
    # Ensure the new cookie is actually written out by process_response().
    meta["CSRF_COOKIE_USED"] = True
def _sanitize_token(token):
    """Reduce a submitted token to alphanumerics; replace bad tokens.

    Oversized tokens and tokens that sanitize down to nothing (e.g. a cookie
    truncated by the browser) are replaced with a brand-new random key.
    """
    if len(token) > CSRF_KEY_LENGTH:
        return _get_new_csrf_key()
    cleaned = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
    if not cleaned:
        return _get_new_csrf_key()
    return cleaned
class CsrfViewMiddleware(object):
    """
    Middleware that requires a present and correct csrfmiddlewaretoken
    for POST requests that have a CSRF cookie, and sets an outgoing
    CSRF cookie.
    This middleware should be used in conjunction with the csrf_token template
    tag.
    """
    # The _accept and _reject methods currently only exist for the sake of the
    # requires_csrf_token decorator.
    def _accept(self, request):
        # Avoid checking the request twice by adding a custom attribute to
        # request. This will be relevant when both decorator and middleware
        # are used.
        request.csrf_processing_done = True
        return None
    def _reject(self, request, reason):
        # Log the rejection, then delegate to the configured failure view.
        logger.warning('Forbidden (%s): %s', reason, request.path,
            extra={
                'status_code': 403,
                'request': request,
            }
        )
        return _get_failure_view()(request, reason=reason)
    def process_view(self, request, callback, callback_args, callback_kwargs):
        """Validate the CSRF token for unsafe methods; None means accepted,
        otherwise the 403 failure-view response is returned."""
        if getattr(request, 'csrf_processing_done', False):
            return None
        try:
            csrf_token = _sanitize_token(
                request.COOKIES[settings.CSRF_COOKIE_NAME])
            # Use same token next time
            request.META['CSRF_COOKIE'] = csrf_token
        except KeyError:
            csrf_token = None
        # Wait until request.META["CSRF_COOKIE"] has been manipulated before
        # bailing out, so that get_token still works
        if getattr(callback, 'csrf_exempt', False):
            return None
        # Assume that anything not defined as 'safe' by RFC2616 needs protection
        if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if getattr(request, '_dont_enforce_csrf_checks', False):
                # Mechanism to turn off CSRF checks for test suite.
                # It comes after the creation of CSRF cookies, so that
                # everything else continues to work exactly the same
                # (e.g. cookies are sent, etc.), but before any
                # branches that call reject().
                return self._accept(request)
            if request.is_secure():
                # Suppose user visits http://example.com/
                # An active network attacker (man-in-the-middle, MITM) sends a
                # POST form that targets https://example.com/detonate-bomb/ and
                # submits it via JavaScript.
                #
                # The attacker will need to provide a CSRF cookie and token, but
                # that's no problem for a MITM and the session-independent
                # nonce we're using. So the MITM can circumvent the CSRF
                # protection. This is true for any HTTP connection, but anyone
                # using HTTPS expects better! For this reason, for
                # https://example.com/ we need additional protection that treats
                # http://example.com/ as completely untrusted. Under HTTPS,
                # Barth et al. found that the Referer header is missing for
                # same-domain requests in only about 0.2% of cases or less, so
                # we can use strict Referer checking.
                referer = force_text(
                    request.META.get('HTTP_REFERER'),
                    strings_only=True,
                    errors='replace'
                )
                if referer is None:
                    return self._reject(request, REASON_NO_REFERER)
                # Note that request.get_host() includes the port.
                good_referer = 'https://%s/' % request.get_host()
                if not same_origin(referer, good_referer):
                    reason = REASON_BAD_REFERER % (referer, good_referer)
                    return self._reject(request, reason)
            if csrf_token is None:
                # No CSRF cookie. For POST requests, we insist on a CSRF cookie,
                # and in this way we can avoid all CSRF attacks, including login
                # CSRF.
                return self._reject(request, REASON_NO_CSRF_COOKIE)
            # Check non-cookie token for match.
            request_csrf_token = ""
            if request.method == "POST":
                try:
                    request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
                except IOError:
                    # Handle a broken connection before we've completed reading
                    # the POST data. process_view shouldn't raise any
                    # exceptions, so we'll ignore and serve the user a 403
                    # (assuming they're still listening, which they probably
                    # aren't because of the error).
                    pass
            if request_csrf_token == "":
                # Fall back to X-CSRFToken, to make things easier for AJAX,
                # and possible for PUT/DELETE.
                request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
            # constant_time_compare avoids leaking token bytes via timing.
            if not constant_time_compare(request_csrf_token, csrf_token):
                return self._reject(request, REASON_BAD_TOKEN)
        return self._accept(request)
    def process_response(self, request, response):
        """Set/refresh the CSRF cookie on the response when the token was
        used during this request, and add 'Vary: Cookie'."""
        if getattr(response, 'csrf_processing_done', False):
            return response
        if not request.META.get("CSRF_COOKIE_USED", False):
            return response
        # Set the CSRF cookie even if it's already set, so we renew
        # the expiry timer.
        response.set_cookie(settings.CSRF_COOKIE_NAME,
                            request.META["CSRF_COOKIE"],
                            max_age=settings.CSRF_COOKIE_AGE,
                            domain=settings.CSRF_COOKIE_DOMAIN,
                            path=settings.CSRF_COOKIE_PATH,
                            secure=settings.CSRF_COOKIE_SECURE,
                            httponly=settings.CSRF_COOKIE_HTTPONLY
                            )
        # Content varies with the CSRF cookie, so set the Vary header.
        patch_vary_headers(response, ('Cookie',))
        response.csrf_processing_done = True
        return response
softlayer/jumpgate | jumpgate/common/sl/auth.py | 4 | 4421 | import logging
import time
from jumpgate.common import exceptions
from jumpgate.common import utils
from jumpgate.identity.drivers import core as identity
from oslo.config import cfg
import SoftLayer
USER_MASK = 'id, username, accountId'
LOG = logging.getLogger(__name__)
def get_token_details(token, tenant_id=None):
    """Decode a token id and validate its expiry and (optionally) tenant.

    Raises exceptions.Unauthorized for malformed, expired, or mismatched
    tokens; otherwise returns the decoded token details dict.
    """
    try:
        details = identity.token_id_driver().token_from_id(token)
    except (TypeError, ValueError):
        raise exceptions.Unauthorized('Invalid Key')
    if time.time() > details['expires']:
        raise exceptions.Unauthorized('Expired Key')
    if tenant_id and str(details['tenant_id']) != tenant_id:
        raise exceptions.Unauthorized('Tenant/token Mismatch')
    return details
def get_new_token_v3(credentials):
    """Authenticate a Keystone-v3-style credentials payload against SoftLayer.

    Three paths: (1) an existing token id, which is validated and re-issued;
    (2) a 64-character password treated as a SoftLayer API key; (3) a real
    password, exchanged for a SoftLayer token hash.
    Returns a (userinfo, user) tuple: userinfo is the dict persisted by the
    token driver, user is the SoftLayer current-user record (USER_MASK fields).
    """
    token_driver = identity.token_driver()
    token_id = utils.lookup(credentials, 'auth', 'identity', 'token', 'id')
    if token_id:
        # Path 1: re-validate an existing token and rebuild its userinfo.
        token = identity.token_id_driver().token_from_id(token_id)
        LOG.debug("token details are: %s", str(token))
        token_driver.validate_token(token)
        username = token_driver.username(token)
        credential = token_driver.credential(token)
        userinfo = {'username': username,
                    'auth_type': str(token['auth_type']),
                    'tenant_id': str(token['tenant_id']),
                    'expires': token['expires']}
        # The credential is stored under a type-specific key.
        if token['auth_type'] == 'token':
            userinfo['tokenHash'] = credential
        if token['auth_type'] == 'api_key':
            userinfo['api_key'] = credential
        user = {'id': token['user_id'],
                'username': username,
                'accountId': token['tenant_id']}
        return userinfo, user
    username = utils.lookup(credentials,
                            'auth',
                            'identity',
                            'password',
                            'user',
                            'name')
    credential = utils.lookup(credentials,
                              'auth',
                              'identity',
                              'password',
                              'user',
                              'password')
    # If the 'password' is the right length, treat it as an API api_key
    if len(credential) == 64:
        # Path 2: SoftLayer API keys are exactly 64 characters.
        endpoint = cfg.CONF['softlayer']['endpoint']
        client = SoftLayer.Client(username=username,
                                  api_key=credential,
                                  endpoint_url=endpoint,
                                  proxy=cfg.CONF['softlayer']['proxy'])
        user = client['Account'].getCurrentUser(mask=USER_MASK)
        username = token_driver.username(user)
        # Tokens are valid for 24 hours.
        return {'username': username,
                'api_key': credential,
                'auth_type': 'api_key',
                'tenant_id': str(user['accountId']),
                'expires': time.time() + (60 * 60 * 24)}, user
    else:
        # Path 3: real password -> exchange for a SoftLayer token hash.
        endpoint = cfg.CONF['softlayer']['endpoint']
        client = SoftLayer.Client(endpoint_url=endpoint,
                                  proxy=cfg.CONF['softlayer']['proxy'])
        client.auth = None
        try:
            userId, tokenHash = client.authenticate_with_password(username,
                                                                  credential)
            user = client['Account'].getCurrentUser(mask=USER_MASK)
            username = token_driver.username(user)
            return {'userId': userId,
                    'username': username,
                    'tokenHash': tokenHash,
                    'auth_type': 'token',
                    'tenant_id': str(user['accountId']),
                    'expires': time.time() + (60 * 60 * 24)}, user
        except SoftLayer.SoftLayerAPIError as e:
            # Surface login failures as 401s; re-raise anything else.
            if e.faultCode == 'SoftLayer_Exception_User_Customer_LoginFailed':
                raise exceptions.Unauthorized(e.faultString)
            raise
def get_auth(token_details):
    """Build a SoftLayer authentication object from stored token details.

    :param dict token_details: must contain 'auth_type' plus the
        credential fields for that type ('username'/'api_key' for API-key
        auth, 'user_id'/'api_key' for token auth).
    :returns: a BasicAuthentication for 'api_key' credentials, a
        TokenAuthentication for 'token' credentials, or None when the
        auth type is not recognized.
    """
    auth_type = token_details['auth_type']
    if auth_type == 'token':
        return SoftLayer.TokenAuthentication(token_details['user_id'],
                                             token_details['api_key'])
    if auth_type == 'api_key':
        return SoftLayer.BasicAuthentication(token_details['username'],
                                             token_details['api_key'])
    return None
| mit |
shacker6868/signatumclassicd | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False

    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new message starts; flush the previous one if it was complete.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line: belongs to whichever part is being read.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)

    # Flush the final message (a message only counts once its msgstr began).
    if reading_str:
        entries.append((current_id, current_str))
    return entries
# Collect all C++ sources and headers under src/ for string extraction.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')

# xgettext -n --keyword=_ $FILES
# Run xgettext over the sources, writing PO-format output to stdout;
# only strings wrapped in _("...") are extracted.
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

# NOTE(review): on Python 3, `out` is bytes but parse_po expects str;
# this script appears to assume Python 2 — confirm target interpreter.
messages = parse_po(out)

# Emit the extracted strings as QT_TRANSLATE_NOOP entries so Qt linguist
# can pick them up; UNUSED silences GCC's unused-variable warning.
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
    # Skip the header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
bwhitelock/garmon-ng | plugins/dtc_connected/dtc_connected.py | 1 | 4480 | #!/usr/bin/python
#
# dtc_connected.py
#
# Copyright (C) Ben Van Mechelen 2007-2009 <me@benvm.be>
#
# This file is part of Garmon
#
# Garmon is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
from gettext import gettext as _
import gobject
import gtk
import garmon
from garmon.plugin import Plugin
import garmon.obd_device
from garmon.obd_device import OBDPortError
__name = _('DTC Connected')
__version = '0.2'
__author = 'Ben Van Mechelen'
__description = _('illuminates an indicator showing connection status.')
__class = 'DTCConnected'
class DTCConnected (Plugin):
    """Garmon plugin adding a toolbar indicator that reflects the OBD
    device connection state and lets the user disconnect the device.
    """
    __gtype_name__='DTCConnected'

    def __init__(self, app):
        # :param app: the main Garmon application object (provides .ui,
        #             .device and .scheduler).
        Plugin.__init__(self)
        self.app = app

        # UI-manager XML merged into the main toolbar by the application.
        self.ui_info = '''<ui>
            <toolbar  name='ToolBar'>
                <placeholder name='DeviceToolItems'>
                    <separator/>
                    <toolitem action='ConnectedDTC'/>
                </placeholder>
            </toolbar>
        </ui>'''

        self._create_action_group()

    def _sensitize_action(self):
        # Enable the toolbar item only while the OBD device is connected.
        sensitive = self.app.device.connected
        self.app.ui.get_widget('/ToolBar/DeviceToolItems/ConnectedDTC').set_sensitive(sensitive)

    def _scheduler_notify_working_cb(self, scheduler, monitoring):
        # Scheduler state changed: refresh the indicator sensitivity.
        self._sensitize_action()

    def _obd_connected_cb(self, obd, connected):
        # Device (dis)connected: refresh the indicator sensitivity.
        self._sensitize_action()

    def _create_action_group(self):
        # Register the 'ConnectedDTC' toolbar action with GTK.
        entries = (
            ( 'ConnectedDTC', gtk.STOCK_YES,
                _('_Connected DTC'), '',
                _('Connection status indicator'), self.activate_connected_dtc ),)

        self.action_group = gtk.ActionGroup("ConnectedDTCActionGroup")
        self.action_group.add_actions(entries)

    def activate_connected_dtc(self, action):
        """Toolbar callback: confirm with the user, then close the OBD port."""
        # NOTE(review): success_cb and err_cb are defined but never passed to
        # anything in this method — possibly leftovers; verify before removal.
        def success_cb(cmd, result, args):
            if result:
                self.app.reset()

        def err_cb(cmd, err, args):
            dialog = gtk.MessageDialog(self.app, gtk.DIALOG_DESTROY_WITH_PARENT,
                                gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
                                _("""An error occurred while trying to disconnect.\n
                                Please make sure your device is connected.
                                The ignition must be turned on but the engine should not be running"""))
            dialog.run()
            dialog.destroy()

        # Ask for confirmation before dropping the connection.
        dialog = gtk.MessageDialog(self.app, gtk.DIALOG_DESTROY_WITH_PARENT,
                            gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL,
                            _("""Are You Sure You want to disconnect the OBD device?"""))
        dialog.show()
        res = dialog.run()
        dialog.destroy()
        if res == gtk.RESPONSE_OK:
            try:
                self.app.device.close()
                #MOVE THIS
            # Python 2 except syntax; OBDPortError carries (err, msg).
            except OBDPortError, e:
                err, msg = e
                dialog = gtk.MessageDialog(self.app, gtk.DIALOG_DESTROY_WITH_PARENT,
                                    gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
                                    err + '\n\n' + msg)
                dialog.run()
                dialog.destroy()
                raise

    def load(self):
        """Plugin entry point: wire up signal handlers and initial state."""
        self._sensitize_action()
        #self.app.scheduler.connect('notify::working', self._scheduler_notify_working_cb)
        self.app.device.connect('connected', self._obd_connected_cb)

    def unload(self):
        pass

    def start(self):
        pass

    def stop(self):
        pass
| gpl-3.0 |
secimTools/SECIMTools | src/secimtools/dataManager/interface.py | 2 | 19566 | #!/usr/bin/env python
"""
Secim Tools data interface library.
"""
# Built-in packages
import re
import sys
# Add-on packages
import numpy as np
import pandas as pd
class wideToDesign:
    """ Class to handle generic data in a wide format with an associated design file. """
    def __init__(self, wide, design, uniqID, group=False, runOrder=False, anno=False, clean_string=True,
                 infer_sampleID=True, keepSample=True, logger=None):
        """ Import and set-up data.

        Import data both wide formated data and a design file. Set-up basic
        attributes.

        :Arguments:
            wide (TSV): A table in wide format with compounds/genes as rows and
                samples as columns.

                Name     sample1   sample2   sample3
                ------------------------------------
                one      10        20        10
                two      10        20        10

            design (TSV): A table relating samples ('sampleID') to groups or
                treatments.

                sampleID   group1  group2
                -------------------------
                sample1    g1      t1
                sample2    g1      t1
                sample3    g1      t1

            uniqID (str): The name of the unique identifier column in 'wide'
                (i.e. The column with compound/gene names).

            group (str): The name of column names in 'design' that give
                group information. For example: treatment

            runOrder (str): Name of the design column holding run order.

            clean_string (bool): If True remove special characters from strings
                in dataset.

            infer_sampleID (bool): If True infer "sampleID" from different capitalizations.

            anno (list): A list of additional annotations that can be used to group
                items.

            keepSample (bool): If True drop wide columns not listed in the design.

            logger: Optional logger; when absent messages go to stdout.

        :Returns:
            **Attribute**

            self.uniqID (str): The name of the unique identifier column in 'wide'
                (i.e. The column with compound/gene names).

            self.wide (pd.DataFrame): A wide formatted table with compound/gene
                as row and sample as columns.

            self.sampleIDs (list): A list of sampleIDs. These will correspond
                to columns in self.wide.

            self.design (pd.DataFrame): A table relating sampleID to groups.

            self.group (list): A list of column names in self.design that give
                group information. For example: treatment, tissue

            anno (list): A list of additional annotations that can be used to group
                items.

            self.levels (list): A list of levels in self.group. For example:
                trt1, tr2, control.
        """
        # Setting logger
        if logger is None:
            self.logger = False
        else:
            self.logger = logger

        # Saving original str (cleaned name -> original name, see _cleanStr)
        self.origString = dict()

        # Import wide formatted data file
        # NOTE(review): pd.read_table is deprecated (removed in pandas 2.x);
        # pd.read_csv(..., sep='\t') is the modern equivalent — verify the
        # pinned pandas version before upgrading.
        try:
            self.uniqID = uniqID
            self.wide = pd.read_table(wide)
            if clean_string:
                self.wide[self.uniqID] = self.wide[self.uniqID].apply(lambda x: self._cleanStr(str(x)))
                self.wide.rename(columns=lambda x: self._cleanStr(x), inplace=True)

            # Make sure index is a string and not numeric
            self.wide[self.uniqID] = self.wide[self.uniqID].astype(str)

            # Set index to uniqID column
            self.wide.set_index(self.uniqID, inplace=True)
        except ValueError:
            if self.logger:
                self.logger.error("Please make sure that your data file has a column called '{0}'.".format(uniqID))
            else:
                print(("Please make sure that your data file has a column called '{0}'.".format(uniqID)))
            raise ValueError

        # Import design file
        try:
            self.design = pd.read_table(design)

            # This part of the script allows the user to use any capitalization of "sampleID"
            # ie. "sample Id" would be converted to "sampleID".
            # If you want to accept only the exact capitalization turn infer_sampleID to Fake
            ## AMM added additional backslash to \s in regex below
            # NOTE(review): inside this raw string, `\\s` is an escaped literal
            # backslash + 's', NOT the whitespace class — so "sample id" with a
            # space may no longer be matched. Verify the AMM change was intended.
            if infer_sampleID:
                renamed = {column: re.sub(r"[s|S][a|A][m|M][p|P][l|L][e|E][\\s?|_?][I|i][d|D]",
                                          "sampleID", column) for column in self.design.columns}
                self.design.rename(columns=renamed, inplace=True)
                log_msg = "Inferring 'sampleID' from data. This will accept different capitalizations of the word"
                if self.logger:
                    self.logger.info(log_msg)
                else:
                    print(log_msg)

            # Make sure index is a string and not numeric
            self.design['sampleID'] = self.design['sampleID'].astype(str)
            self.design.set_index('sampleID', inplace=True)
            #print(self.design)

            # Cleaning design file
            if clean_string:
                self.design.rename(index=lambda x: self._cleanStr(x), inplace=True)

            # Create a list of sampleIDs, but first check that they are present
            # in the wide data.
            self.sampleIDs = list()

            for sample in self.design.index.tolist():
                if sample in self.wide.columns:
                    self.sampleIDs.append(sample)
                else:
                    if self.logger:
                        self.logger.warn("Sample {0} missing in wide dataset".format(sample))
                    else:
                        print(("WARNING - Sample {0} missing in wide dataset".format(sample)))
            for sample in self.wide.columns.tolist():
                if not (sample in self.design.index):
                    if keepSample:
                        if self.logger:
                            self.logger.warn("Sample {0} missing in design file".format(sample))
                        else:
                            print(("WARNING - Sample {0} missing in design file".format(sample)))
                    else:
                        if self.logger:
                            self.logger.error("Sample {0} missing in design file".format(sample))
                            # NOTE(review): bare `raise` with no active
                            # exception raises RuntimeError — confirm intended.
                            raise
                        else:
                            print(("ERROR - Sample {0} missing in design file".format(sample)))
                            raise

            # Drop design rows that are not in the wide data set
            self.design = self.design[self.design.index.isin(self.sampleIDs)]
            #print("DEBUG: design")
            #print(self.design)

            # Removing characters from data!!!!!!(EXPERIMENTAL)
            # Any cell containing a non-digit character (regex \D) becomes NaN.
            self.wide.replace(r'\D', np.nan, regex=True, inplace=True)

        # Possible bad design, bare except should not be used
        except SystemError:
            print(("Error:", sys.exc_info()[0]))
            raise

        # Save annotations
        self.anno = anno

        # Save runOrder
        self.runOrder = runOrder

        # Set up group information
        if group:
            if clean_string:
                self.group = self._cleanStr(group)
                self.design.columns = [self._cleanStr(x) for x in self.design.columns]
            else:
                self.group = group

            keep = self.group.split(",")

            # combine group, anno and runorder
            if self.runOrder and self.anno:
                keep = keep + [self.runOrder, ] + self.anno
            elif self.runOrder and not self.anno:
                keep = keep + [self.runOrder, ]
            elif not self.runOrder and self.anno:
                keep = keep + self.anno

            # Check if groups, runOrder and levels columns exist in the design file
            # NOTE(review): this is an exact, order-sensitive list comparison —
            # any extra design column (or different order) silently disables
            # grouping by setting self.group = None. Verify this is intended
            # rather than a subset check like set(keep) <= set(designCols).
            designCols = self.design.columns.tolist()
            if keep == designCols:
                # Check if columns exist on design file.
                self.design = self.design[keep]  # Only keep group columns in the design file
                self.design[self.group] = self.design[self.group].astype(str)  # Make sure groups are strings

                # Create list of group levels
                grp = self.design.groupby(self.group)
                self.levels = sorted(grp.groups.keys())  # Get a list of group levels
        else:
            self.group = None

        # Keep samples listed in design file
        if keepSample:
            self.keep_sample(self.sampleIDs)

    def _cleanStr(self, x):
        """ Clean strings so they behave.

        For some modules, uniqIDs and groups cannot contain spaces, '-', '*',
        '/', '+', or '()'. For example, statsmodel parses the strings and interprets
        them in the model.

        :Arguments:
            x (str): A string that needs cleaning

        :Returns:
            x (str): The cleaned string.

            self.origString (dict): A dictionary where the key is the new
                string and the value is the original string. This will be useful
                for reverting back to original values.
        """
        if isinstance(x, str):
            val = x
            # Leading '-digit' becomes '__digit' so the later rules keep it distinct.
            x = re.sub(r'^-([0-9].*)', r'__\1', x)
            x = x.replace(' ', '_')
            x = x.replace('.', '_')
            x = x.replace('-', '_')
            x = x.replace('*', '_')
            x = x.replace('/', '_')
            x = x.replace('+', '_')
            x = x.replace('(', '_')
            x = x.replace(')', '_')
            x = x.replace('[', '_')
            x = x.replace(']', '_')
            x = x.replace('{', '_')
            x = x.replace('}', '_')
            x = x.replace('"', '_')
            x = x.replace('\'', '_')
            # Identifiers cannot start with a digit; prefix with '_'.
            x = re.sub(r'^([0-9].*)', r'_\1', x)
            self.origString[x] = val

        return x

    def revertStr(self, x):
        """ Revert strings back to their original value so they behave well.

        Clean strings may need to be reverted back to original values for
        convience.

        :Arguments:
            x (str): A string that needs cleaning

            self.origString (dict): A dictionary where the key is the cleaned
                string and the value is the original string.

        :Returns:
            x (str): Original string.
        """
        if isinstance(x, str) and x in self.origString:
            x = self.origString[x]

        return x

    def melt(self):
        """ Convert a wide formated table to a long formated table.

        :Arguments:
            self.wide (pd.DataFrame): A wide formatted table with compound/gene
                as row and sample as columns.

            self.uniqID (str): The name of the unique identifier column in 'wide'
                (i.e. The column with compound/gene names).

            self.sampleIDs (list): An list of sampleIDs. These will correspond
                to columns in self.wide.

        :Returns:
            **Attributes**

            self.long (pd.DataFrame): Creates a new attribute called self.long
                that also has group information merged to the dataset.
        """
        melted = pd.melt(self.wide.reset_index(), id_vars=self.uniqID, value_vars=self.sampleIDs,
                         var_name='sampleID')
        melted.set_index('sampleID', inplace=True)
        self.long = melted.join(self.design).reset_index()  # merge on group information using sampleIDs as key

    def transpose(self):
        """ Transpose the wide table and merge on treatment information.

        :Arguments:
            self.wide (pd.DataFrame): A wide formatted table with compound/gene
                as row and sample as columns.

            self.design (pd.DataFrame): A table relating sampleID to groups.

        :Returns:
            merged (pd.DataFrame): A wide formatted table with sampleID as row
                and compound/gene as column. Also has column with group ID.
        """
        trans = self.wide[self.sampleIDs].T

        # Merge on group information using table index (aka 'sampleID')
        merged = trans.join(self.design)
        merged.index.name = 'sampleID'
        return merged

    def getRow(self, ID):
        """ Get a row corresponding to a uniqID.

        :Arguments:
            self.wide (pd.DataFrame): A wide formatted table with compound/gene
                as row and sample as columns.

            self.uniqID (str): The name of the unique identifier column in 'wide'
                (i.e. The column with compound/gene names).

            ID (str): A string referring to a uniqID in the dataset.

        :Returns:
            (pd.DataFrame): with only the corresponding rows from the uniqID.
        """
        # NOTE(review): __init__ sets uniqID as the index of self.wide, so this
        # column lookup would raise KeyError unless the index was reset by the
        # caller — verify against actual callers.
        return self.wide[self.wide[self.uniqID] == ID]

    def keep_sample(self, sampleIDs):
        """
        Keep only the given sampleIDs in the wide and design file.

        :Arguments:
            :param list sampleIDs: A list of sampleIDs to keep.

        :Returns:
            :rtype: wideToDesign
            :return: Updates the wideToDesign object to only have those sampleIDs.
        """
        self.sampleIDs = sampleIDs
        self.wide = self.wide[self.sampleIDs]
        self.design = self.design[self.design.index.isin(self.sampleIDs)]

    def removeSingle(self):
        """
        Removes groups with just one sample
        """
        if self.group:
            for level, current in self.design.groupby(self.group):
                if len(current) < 2:
                    # Drop the sample row from the design and the matching
                    # sample column from the wide data.
                    self.design.drop(current.index, inplace=True)
                    self.wide.drop(current.index, axis=1, inplace=True)
                    log_msg = """Your group '{0}' has only one element,"
                            "this group is going to be removed from"
                            "further calculations.""".format(level)
                    if self.logger:
                        self.logger.warn(log_msg)
                    else:
                        print(log_msg)

    def dropMissing(self):
        """
        Drops rows with missing data
        """
        # Asks if any missing value
        if np.isnan(self.wide.values).any():
            # Count original number of rows
            n_rows = len(self.wide.index)

            # Drop missing values
            self.wide.dropna(inplace=True)

            # Count the dropped rows
            n_rows_kept = len(self.wide.index)

            # Logging!!!
            log_msg = """Missing values were found in wide data.
                        [{0}] rows were dropped""".format(n_rows - n_rows_kept)
            if self.logger:
                self.logger.warn(log_msg)
            else:
                print(log_msg)
class annoFormat:
    """ Class to handle annotation files with m/z and retention-time columns. """
    def __init__(self, data, uniqID, mz, rt, anno=False, clean_string=True):
        """ Import and set-up annotation data.

        :Arguments:
            data (TSV): An annotation table with one row per compound/feature.

            uniqID (str): The name of the unique identifier column in 'data'
                (i.e. The column with compound/gene names).

            mz (str): The name of the mass-to-charge (m/z) column in 'data'.

            rt (str): The name of the retention-time column in 'data'.

            anno (bool): If True, keep all remaining columns as annotations;
                if False, only the m/z and retention-time columns are kept.

            clean_string (bool): If True remove special characters from strings
                in dataset.

        :Returns:
            **Attribute**

            self.data (pd.DataFrame): Annotation table indexed by uniqID.

            self.anno (list or None): Names of the extra annotation columns
                (everything except mz and rt) when anno is True, else None.
        """
        self.origString = dict()

        # Import anno formatted data file
        try:
            self.uniqID = uniqID
            self.mz = mz
            self.rt = rt

            # Trying to import
            # NOTE(review): pd.read_table is deprecated (removed in pandas 2.x).
            self.data = pd.read_table(data)
            if clean_string:
                self.data[self.uniqID] = self.data[self.uniqID].apply(lambda x: self._cleanStr(x))
                self.data.rename(columns=lambda x: self._cleanStr(x), inplace=True)

            # Make sure index is a string and not numeric
            self.data[self.uniqID] = self.data[self.uniqID].astype(str)

            # Set index to uniqID column
            self.data.set_index(self.uniqID, inplace=True)

            # If not annotation then ignoring additional columns
            self.anno = None
            if not(anno):
                self.data = self.data[[self.mz, self.rt]]
            else:
                self.anno = self.data.columns.tolist()
                self.anno.remove(self.mz)
                self.anno.remove(self.rt)
        except ValueError:
            print(("Data file must have columns called '{0}','{1}' and '{2}'.".format(uniqID, mz, rt)))
            raise ValueError

    def _cleanStr(self, x):
        """ Clean strings so they behave.

        For some modules, uniqIDs and groups cannot contain spaces, '-', '*',
        '/', '+', or '()'. For example, statsmodel parses the strings and interprets
        them in the model.

        NOTE(review): unlike wideToDesign._cleanStr, this version does not
        special-case a leading '-digit' — confirm whether the two should agree.

        :Arguments:
            x (str): A string that needs cleaning

        :Returns:
            x (str): The cleaned string.

            self.origString (dict): A dictionary where the key is the new
                string and the value is the original string. This will be useful
                for reverting back to original values.
        """
        if isinstance(x, str):
            val = x
            x = x.replace(' ', '_')
            x = x.replace('.', '_')
            x = x.replace('-', '_')
            x = x.replace('*', '_')
            x = x.replace('/', '_')
            x = x.replace('+', '_')
            x = x.replace('(', '_')
            x = x.replace(')', '_')
            x = x.replace('[', '_')
            x = x.replace(']', '_')
            x = x.replace('{', '_')
            x = x.replace('}', '_')
            x = x.replace('"', '_')
            x = x.replace('\'', '_')
            # Identifiers cannot start with a digit; prefix with '_'.
            x = re.sub(r'^([0-9].*)', r'_\1', x)
            self.origString[x] = val

        return x
if __name__ == '__main__':
pass
| mit |
tchellomello/home-assistant | homeassistant/components/rflink/switch.py | 7 | 2285 | """Support for Rflink switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import (
CONF_ALIASES,
CONF_DEVICE_DEFAULTS,
CONF_DEVICES,
CONF_FIRE_EVENT,
CONF_GROUP,
CONF_GROUP_ALIASES,
CONF_NOGROUP_ALIASES,
CONF_SIGNAL_REPETITIONS,
DEVICE_DEFAULTS_SCHEMA,
SwitchableRflinkDevice,
)
_LOGGER = logging.getLogger(__name__)

# Rflink pushes state updates; no parallel polling is needed.
PARALLEL_UPDATES = 0

# Per-device configuration schema. Each entry under CONF_DEVICES may override
# the platform-wide CONF_DEVICE_DEFAULTS (merged in devices_from_config).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(
            CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})
        ): DEVICE_DEFAULTS_SCHEMA,
        vol.Optional(CONF_DEVICES, default={}): {
            cv.string: vol.Schema(
                {
                    vol.Optional(CONF_NAME): cv.string,
                    vol.Optional(CONF_ALIASES, default=[]): vol.All(
                        cv.ensure_list, [cv.string]
                    ),
                    vol.Optional(CONF_GROUP_ALIASES, default=[]): vol.All(
                        cv.ensure_list, [cv.string]
                    ),
                    vol.Optional(CONF_NOGROUP_ALIASES, default=[]): vol.All(
                        cv.ensure_list, [cv.string]
                    ),
                    vol.Optional(CONF_FIRE_EVENT): cv.boolean,
                    vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
                    vol.Optional(CONF_GROUP, default=True): cv.boolean,
                }
            )
        },
    },
    extra=vol.ALLOW_EXTRA,
)
def devices_from_config(domain_config):
    """Parse configuration and add Rflink switch devices."""
    defaults = domain_config[CONF_DEVICE_DEFAULTS]
    # Merge platform-wide defaults with each device's own config;
    # the per-device settings win on conflicts.
    return [
        RflinkSwitch(device_id, **dict(defaults, **config))
        for device_id, config in domain_config[CONF_DEVICES].items()
    ]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Rflink platform."""
    entities = devices_from_config(config)
    async_add_entities(entities)
class RflinkSwitch(SwitchableRflinkDevice, SwitchEntity):
    """Representation of a Rflink switch.

    All on/off command handling is inherited from SwitchableRflinkDevice;
    this class only binds it to Home Assistant's SwitchEntity interface.
    """
Osmose/trephub | vendor-local/lib/python/requests/packages/charade/langthaimodel.py | 206 | 11475 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor consumed by the charset prober:
#  - charToOrderMap: byte value -> frequency-order index for TIS-620
#  - precedenceMatrix: character-pair frequency classes (values 0-3)
#  - mTypicalPositiveRatio: corpus coverage of the top 512 sequences
#    (matches the "first 512 sequences: 92.6386%" statistic above)
#  - keepEnglishLetter: whether ASCII letters take part in scoring
#  - charsetName: canonical charset name reported on a match
TIS620ThaiModel = {
    'charToOrderMap': TIS620CharToOrderMap,
    'precedenceMatrix': ThaiLangModel,
    'mTypicalPositiveRatio': 0.926386,
    'keepEnglishLetter': False,
    'charsetName': "TIS-620"
}
# flake8: noqa
| bsd-3-clause |
anbangr/trusted-nova | nova/ipv6/account_identifier.py | 20 | 1970 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""IPv6 address generation with account identifier embedded"""
import hashlib
import netaddr
def to_global(prefix, mac, project_id):
    """Build a global IPv6 address embedding the project id and MAC suffix.

    The interface identifier is assembled from three pieces XOR-ed
    together and OR-ed onto the network prefix:
    - bits 32..63: first 32 bits of SHA-1(project_id)
    - bits 24..31: fixed 0xff marker byte
    - bits 0..23: last three octets of the MAC address

    :param prefix: IPv6 network prefix, e.g. '2001:db8::/64'
    :param mac: MAC address whose low three octets are embedded
    :param project_id: project identifier hashed into the address
    :returns: the resulting address as a formatted string
    :raises TypeError: for a malformed mac, prefix or project_id
    """
    # First 8 hex digits (32 bits) of the SHA-1 digest, shifted into
    # bits 32..63 of the interface identifier.
    project_hash = netaddr.IPAddress(
        int(hashlib.sha1(project_id).hexdigest()[:8], 16) << 32)
    static_num = netaddr.IPAddress(0xff << 24)
    try:
        # Low three octets of the MAC become the low 24 bits.
        mac_suffix = netaddr.EUI(mac).words[3:]
        int_addr = int(''.join(['%02x' % i for i in mac_suffix]), 16)
        mac_addr = netaddr.IPAddress(int_addr)
        maskIP = netaddr.IPNetwork(prefix).ip
        return (project_hash ^ static_num ^ mac_addr | maskIP).format()
    except netaddr.AddrFormatError:
        raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
    except TypeError:
        raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
    except NameError:
        # NOTE(review): it is unclear what inside the try block can raise
        # NameError for a bad project_id — verify before relying on it.
        raise TypeError(_('Bad project_id for to_global_ipv6: %s') %
                        project_id)
def to_mac(ipv6_address):
    """Recover the MAC address that to_global() embedded in an address.

    Only the low 24 bits survive the round trip, so the OUI is replaced
    with the fixed locally-administered prefix 02:16:3e.
    """
    # Keep only the low 24 bits: the embedded MAC suffix.
    low_bits = netaddr.IPAddress(ipv6_address) & netaddr.IPAddress('::ff:ffff')
    suffix_words = netaddr.EUI(int(low_bits)).words[3:6]
    octets = ['02', '16', '3e']
    octets.extend('%02x' % word for word in suffix_words)
    return ':'.join(octets)
witcxc/scipy | scipy/io/arff/tests/test_arffread.py | 26 | 7733 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import datetime
import os
import sys
from os.path import join as pjoin
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal, assert_array_equal, assert_equal,
assert_, assert_raises, dec, run_module_suite)
from scipy.io.arff.arffread import loadarff
from scipy.io.arff.arffread import read_header, parse_type, ParseArffError
from scipy._lib._version import NumpyVersion
# Paths of the sample ARFF files shipped next to this test module.
data_path = pjoin(os.path.dirname(__file__), 'data')
test1 = os.path.join(data_path, 'test1.arff')
test2 = os.path.join(data_path, 'test2.arff')
test3 = os.path.join(data_path, 'test3.arff')
test4 = pjoin(data_path, 'test4.arff')
test5 = pjoin(data_path, 'test5.arff')
test6 = pjoin(data_path, 'test6.arff')
test7 = pjoin(data_path, 'test7.arff')
test8 = pjoin(data_path, 'test8.arff')
# Rows shared by test4/test5/test6: four numeric columns and a class label.
expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
                (-0.1, -0.2, -0.3, -0.4, 'class2'),
                (1, 2, 3, 4, 'class3')]
expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
# missing.arff: '?' entries must come back as NaN.
missing = pjoin(data_path, 'missing.arff')
expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
expect_missing = np.empty(3, [('yop', np.float), ('yap', np.float)])
expect_missing['yop'] = expect_missing_raw[:, 0]
expect_missing['yap'] = expect_missing_raw[:, 1]
class DataTest(TestCase):
    """Check that loadarff parses the trivial sample files correctly."""

    def test1(self):
        # Parsing trivial file with nothing.
        self._test(test4)

    def test2(self):
        # Parsing trivial file with some comments in the data section.
        self._test(test5)

    def test3(self):
        # Parsing trivial file with nominal attribute of 1 character.
        self._test(test6)

    def _test(self, test_file):
        # All three sample files encode the same table; compare cell by cell.
        data, meta = loadarff(test_file)
        for i in range(len(data)):
            for j in range(4):
                assert_array_almost_equal(expect4_data[i][j], data[i][j])
        assert_equal(meta.types(), expected_types)

    def test_filelike(self):
        # Test reading from a file-like object (StringIO).
        # BUG FIX: the original open()/close() pairs leaked the handles
        # when an assertion failed; context managers always close them.
        with open(test1) as f1:
            data1, meta1 = loadarff(f1)
        with open(test1) as f2:
            data2, meta2 = loadarff(StringIO(f2.read()))
        assert_(data1 == data2)
        assert_(repr(meta1) == repr(meta2))
class MissingDataTest(TestCase):
    """loadarff must map '?' entries in missing.arff to NaN."""

    def test_missing(self):
        data, meta = loadarff(missing)
        for field in ('yop', 'yap'):
            assert_array_almost_equal(data[field], expect_missing[field])
class HeaderTest(TestCase):
    """Tests for read_header / parse_type on the sample ARFF headers.

    BUG FIX: every test used open()/close() around read_header, leaking
    the file handle whenever an assertion failed in between; the
    with-blocks below always close the file.
    """

    def test_type_parsing(self):
        # Test parsing type of attribute from their value.
        with open(test2) as ofile:
            rel, attrs = read_header(ofile)
        expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',
                    'numeric', 'string', 'string', 'nominal', 'nominal']
        for i in range(len(attrs)):
            assert_(parse_type(attrs[i][1]) == expected[i])

    def test_badtype_parsing(self):
        # Test parsing wrong type of attribute from their value.
        with open(test3) as ofile:
            rel, attrs = read_header(ofile)
        for name, value in attrs:
            assert_raises(ParseArffError, parse_type, value)

    def test_fullheader1(self):
        # Parsing trivial header with nothing.
        with open(test1) as ofile:
            rel, attrs = read_header(ofile)
        # Test relation
        assert_(rel == 'test1')
        # Test numerical attributes
        assert_(len(attrs) == 5)
        for i in range(4):
            assert_(attrs[i][0] == 'attr%d' % i)
            assert_(attrs[i][1] == 'REAL')
        # Test nominal attribute
        assert_(attrs[4][0] == 'class')
        assert_(attrs[4][1] == '{class0, class1, class2, class3}')

    def test_dateheader(self):
        with open(test7) as ofile:
            rel, attrs = read_header(ofile)
        assert_(rel == 'test7')
        assert_(len(attrs) == 5)
        assert_(attrs[0][0] == 'attr_year')
        assert_(attrs[0][1] == 'DATE yyyy')
        assert_(attrs[1][0] == 'attr_month')
        assert_(attrs[1][1] == 'DATE yyyy-MM')
        assert_(attrs[2][0] == 'attr_date')
        assert_(attrs[2][1] == 'DATE yyyy-MM-dd')
        assert_(attrs[3][0] == 'attr_datetime_local')
        assert_(attrs[3][1] == 'DATE "yyyy-MM-dd HH:mm"')
        assert_(attrs[4][0] == 'attr_datetime_missing')
        assert_(attrs[4][1] == 'DATE "yyyy-MM-dd HH:mm"')

    def test_dateheader_unsupported(self):
        # Headers with timezone-bearing date formats still parse here;
        # loadarff itself rejects them (see test_datetime_timezone).
        with open(test8) as ofile:
            rel, attrs = read_header(ofile)
        assert_(rel == 'test8')
        assert_(len(attrs) == 2)
        assert_(attrs[0][0] == 'attr_datetime_utc')
        assert_(attrs[0][1] == 'DATE "yyyy-MM-dd HH:mm Z"')
        assert_(attrs[1][0] == 'attr_datetime_full')
        assert_(attrs[1][1] == 'DATE "yy-MM-dd HH:mm:ss z"')
class DateAttributeTest(TestCase):
    """Checks for DATE attribute support (requires np.datetime64)."""

    # NOTE(review): skipif on setUp relies on the runner treating a skip
    # raised during setUp as skipping the test; the per-test decorators
    # below guard the same condition.
    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
    def setUp(self):
        self.data, self.meta = loadarff(test7)

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
    def test_year_attribute(self):
        expected = np.array([
            '1999',
            '2004',
            '1817',
            '2100',
            '2013',
            '1631'
        ], dtype='datetime64[Y]')
        assert_array_equal(self.data["attr_year"], expected)

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
    def test_month_attribute(self):
        expected = np.array([
            '1999-01',
            '2004-12',
            '1817-04',
            '2100-09',
            '2013-11',
            '1631-10'
        ], dtype='datetime64[M]')
        assert_array_equal(self.data["attr_month"], expected)

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
    def test_date_attribute(self):
        expected = np.array([
            '1999-01-31',
            '2004-12-01',
            '1817-04-28',
            '2100-09-10',
            '2013-11-30',
            '1631-10-15'
        ], dtype='datetime64[D]')
        assert_array_equal(self.data["attr_date"], expected)

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
    def test_datetime_local_attribute(self):
        expected = np.array([
            datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),
            datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),
            datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),
            datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),
            datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),
            datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)
        ], dtype='datetime64[m]')
        assert_array_equal(self.data["attr_datetime_local"], expected)

    @dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
    def test_datetime_missing(self):
        # '?' entries become NaT.
        expected = np.array([
            'nat',
            '2004-12-01T23:59Z',
            'nat',
            'nat',
            '2013-11-30T04:55Z',
            '1631-10-15T20:04Z'
        ], dtype='datetime64[m]')
        assert_array_equal(self.data["attr_datetime_missing"], expected)

    def test_datetime_timezone(self):
        # Timezone-qualified DATE formats are rejected by loadarff.
        assert_raises(ValueError, loadarff, test8)
# Allow running this test module directly with python.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
ewandor/home-assistant | homeassistant/components/sensor/glances.py | 6 | 6483 | """
Support gathering system information of hosts which are running glances.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.glances/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_RESOURCES, TEMP_CELSIUS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Glances REST endpoint (API v2) that returns every stat in one payload.
_RESOURCE = 'api/2/all'

DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Glances'
DEFAULT_PORT = '61208'

# Throttle for GlancesData.update(): poll the host at most once a minute.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)

# sensor key -> [friendly name suffix, unit of measurement, icon]
SENSOR_TYPES = {
    'disk_use_percent': ['Disk used', '%', 'mdi:harddisk'],
    'disk_use': ['Disk used', 'GiB', 'mdi:harddisk'],
    'disk_free': ['Disk free', 'GiB', 'mdi:harddisk'],
    'memory_use_percent': ['RAM used', '%', 'mdi:memory'],
    'memory_use': ['RAM used', 'MiB', 'mdi:memory'],
    'memory_free': ['RAM free', 'MiB', 'mdi:memory'],
    'swap_use_percent': ['Swap used', '%', 'mdi:memory'],
    'swap_use': ['Swap used', 'GiB', 'mdi:memory'],
    'swap_free': ['Swap free', 'GiB', 'mdi:memory'],
    'processor_load': ['CPU load', '15 min', 'mdi:memory'],
    'process_running': ['Running', 'Count', 'mdi:memory'],
    'process_total': ['Total', 'Count', 'mdi:memory'],
    'process_thread': ['Thread', 'Count', 'mdi:memory'],
    'process_sleeping': ['Sleeping', 'Count', 'mdi:memory'],
    'cpu_temp': ['CPU Temp', TEMP_CELSIUS, 'mdi:thermometer'],
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_RESOURCES, default=['disk_use']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up one GlancesSensor per configured resource."""
    api_url = 'http://{}:{}/{}'.format(
        config.get(CONF_HOST), config.get(CONF_PORT), _RESOURCE)

    # All sensors share a single (throttled) data fetcher.
    rest = GlancesData(api_url)
    rest.update()

    sensor_name = config.get(CONF_NAME)
    devices = [GlancesSensor(rest, sensor_name, resource)
               for resource in config.get(CONF_RESOURCES)]
    add_devices(devices, True)
class GlancesSensor(Entity):
    """One Glances statistic exposed as a Home Assistant sensor."""

    def __init__(self, rest, name, sensor_type):
        """Initialize the sensor.

        :param rest: shared GlancesData fetcher
        :param name: user-visible name prefix
        :param sensor_type: key into SENSOR_TYPES
        """
        self.rest = rest
        self._name = name
        self.type = sensor_type
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0])

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return SENSOR_TYPES[self.type][2]

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def available(self):
        """Could the device be accessed during the last update call."""
        return self.rest.data is not None

    @property
    def state(self):
        """Return the state of the resources."""
        return self._state

    def update(self):
        """Get the latest data from the REST API and derive the state."""
        self.rest.update()
        value = self.rest.data

        if value is not None:
            if self.type == 'disk_use_percent':
                self._state = value['fs'][0]['percent']
            elif self.type == 'disk_use':
                self._state = round(value['fs'][0]['used'] / 1024**3, 1)
            elif self.type == 'disk_free':
                try:
                    self._state = round(value['fs'][0]['free'] / 1024**3, 1)
                except KeyError:
                    # Some Glances versions only report size and used.
                    self._state = round((value['fs'][0]['size'] -
                                         value['fs'][0]['used']) / 1024**3, 1)
            elif self.type == 'memory_use_percent':
                self._state = value['mem']['percent']
            elif self.type == 'memory_use':
                self._state = round(value['mem']['used'] / 1024**2, 1)
            elif self.type == 'memory_free':
                self._state = round(value['mem']['free'] / 1024**2, 1)
            elif self.type == 'swap_use_percent':
                self._state = value['memswap']['percent']
            elif self.type == 'swap_use':
                self._state = round(value['memswap']['used'] / 1024**3, 1)
            elif self.type == 'swap_free':
                self._state = round(value['memswap']['free'] / 1024**3, 1)
            elif self.type == 'processor_load':
                # Windows systems don't provide load details
                try:
                    self._state = value['load']['min15']
                except KeyError:
                    self._state = value['cpu']['total']
            elif self.type == 'process_running':
                self._state = value['processcount']['running']
            elif self.type == 'process_total':
                self._state = value['processcount']['total']
            elif self.type == 'process_thread':
                self._state = value['processcount']['thread']
            elif self.type == 'process_sleeping':
                self._state = value['processcount']['sleeping']
            elif self.type == 'cpu_temp':
                for sensor in value['sensors']:
                    if sensor['label'] == 'CPU':
                        self._state = sensor['value']
        else:
            # BUG FIX: the reset to None used to run unconditionally after
            # the cpu_temp sensor loop, wiping the freshly stored state on
            # every update; it belongs only to the "no data" branch.
            self._state = None
class GlancesData(object):
    """Throttled fetcher for the Glances REST API, shared by all sensors."""

    def __init__(self, resource):
        """Initialize the data object.

        :param resource: full URL of the Glances 'all stats' endpoint
        """
        self._resource = resource
        # None signals "last fetch failed" (sensors report unavailable).
        self.data = {}

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from the Glances REST API."""
        try:
            response = requests.get(self._resource, timeout=10)
            self.data = response.json()
        except requests.exceptions.RequestException:
            # BUG FIX: only ConnectionError was caught before, so timeouts
            # and other transport errors propagated and broke the update
            # cycle; RequestException covers them all.
            _LOGGER.error("Connection error: %s", self._resource)
            self.data = None
        except ValueError:
            # Body was not valid JSON.
            _LOGGER.error("Invalid response from: %s", self._resource)
            self.data = None
| apache-2.0 |
DistroSeed/DistroSeed-Dashboard | distroseed/dashboard/views.py | 2 | 13958 | import re
import os
import ast
import json
import requests
import subprocess
import transmissionrpc
from hurry.filesize import size
from urlparse import urljoin
from django.db.models import *
from django.template import Context, loader, RequestContext
from django.shortcuts import render_to_response, get_object_or_404, render, redirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django import forms
from django.utils import timezone
from .forms import AutoTorrentForm, NewAutoTorrentForm, TransmissionSettingForm
from .models import *
def auth_login(request):
    """Log the user in from POSTed credentials.

    Every outcome — success, bad credentials, inactive account, or a
    non-POST request — redirects to the dashboard index.

    BUG FIX: a GET request previously fell off the end of the function
    and returned None, which makes Django raise "The view didn't return
    an HttpResponse object".
    """
    if request.method == "POST":
        user = authenticate(username=request.POST['username'],
                            password=request.POST['password'])
        if user is not None and user.is_active:
            login(request, user)
    return HttpResponseRedirect(reverse('index'))
def auth_logout(request):
    """End the current session and send the user back to the index page."""
    logout(request)
    return redirect('index')
def index(request):
torrents = []
tc = transmissionrpc.Client('127.0.0.1', port=9091)
session_stats = tc.session_stats()
cumulative_stats = session_stats.cumulative_stats
uploaded = size(cumulative_stats['uploadedBytes'])
downloaded = size(cumulative_stats['downloadedBytes'])
active_torrents = session_stats.activeTorrentCount
torrent_count = session_stats.torrentCount
free_space = size(session_stats.download_dir_free_space)
current_torrents = tc.get_torrents()
for t in current_torrents:
percent = t.progress
name = t.name.replace('.iso','').replace('.img','')
if 'ubuntu' in name:
name_array = name.split('-')
distro = name_array[0].capitalize()
version = name_array[1]
if name_array[3] == 'amd64':
arch = 'x64'
if name_array[3] == 'i386':
arch = 'x32'
type = name_array[2].capitalize() + ' ' + arch
elif 'centos' in name.lower():
name_array = name.split('-')
distro = name_array[0]
version = name_array[1]
if name_array[2] == 'x86_64':
arch = 'x64'
if name_array[2] == 'x86':
arch = 'x32'
type = name_array[3].capitalize() + ' ' + arch
elif 'fedora' in name.lower():
name_array = name.split('-')
distro = name_array[0]
try:
version = re.sub("_", " ", name_array[4] + ' ' + name_array[5])
except:
version = re.sub("_", " ", name_array[4])
if name_array[3] == 'x86_64':
arch = 'x64'
if name_array[3] == 'x86':
arch = 'x32'
if name_array[3] == 'i686':
arch = 'x32'
type = name_array[1] + ' ' + re.sub("_", " ", name_array[2].capitalize()).title() + ' ' + arch
elif 'raspbian' in name.lower():
name_array = name.split('-')
distro = name_array[3].capitalize()
try:
version = re.sub(".zip", "", name_array[4] + ' ' + name_array[5]).capitalize()
except:
version = re.sub(".zip", "", name_array[4]).capitalize()
arch = 'ARM'
type = arch
elif 'archlinux' in name.lower():
name_array = name.split('-')
distro = name_array[0].capitalize()
version = name_array[1]
if name_array[2] == 'x86_64':
arch = 'x64'
if name_array[2] == 'amd64':
arch = 'x64'
if name_array[2] == 'x86':
arch = 'x32'
if name_array[2] == 'i686':
arch = 'x32'
if name_array[2] == 'dual':
arch = 'x32 & x64'
type = arch
elif 'kali' in name.lower():
name_array = name.split('-')
distro = name_array[0].capitalize()
v = name_array[2]
try:
float(v)
version = name_array[2]
arch_item = name_array[3]
except ValueError:
version = name_array[3] + ' ' + name_array[2]
arch_item = name_array[4]
if arch_item == 'x86_64':
arch = 'x64'
if arch_item == 'amd64':
arch = 'x64'
if arch_item == 'x86':
arch = 'x32'
if arch_item == 'i686':
arch = 'x32'
if arch_item == 'i386':
arch = 'x32'
if arch_item == 'armel':
arch = 'ARMEL'
if arch_item == 'armhf':
arch = 'ARMHF'
type = arch
elif 'slackware' in name.lower():
name_array = name.split('-')
distro = re.sub("64", "", name_array[0].capitalize())
version = name_array[1]
if '64' in name_array[0]:
arch = 'x64'
else:
arch = 'x32'
type = 'Install'
elif 'debian' in name.lower():
name_array = name.split('-')
distro = name_array[0].capitalize()
e2 = name_array[1]
if 'update' in e2:
version = name_array[2]
if name_array[3] == 'x86_64':
arch = 'x64'
if name_array[3] == 'amd64':
arch = 'x64'
if name_array[3] == 'x86':
arch = 'x32'
if name_array[3] == 'i686':
arch = 'x32'
if name_array[3] == 'dual':
arch = 'x32 & x64'
type = name_array[1].title() + ' ' + name_array[4] + ' ' + name_array[5] + ' ' + arch
else:
version = name_array[1]
if name_array[2] == 'x86_64':
arch = 'x64'
if name_array[2] == 'amd64':
arch = 'x64'
if name_array[2] == 'x86':
arch = 'x32'
if name_array[2] == 'i686':
arch = 'x32'
if name_array[2] == 'dual':
arch = 'x32 & x64'
type = name_array[3] + ' ' + name_array[4] + ' ' + arch
elif 'mint' in name.lower():
name_array = name.split('-')
distro = 'Linux Mint'
if name_array[3] == '64bit':
arch = 'x64'
if name_array[3] == '32bit':
arch = 'x86'
if len(name_array) == 5:
version = name_array[1] + ' ' + name_array[4].title()
else:
version = name_array[1]
type = name_array[2].title() + ' ' + arch
elif 'lmde' in name.lower():
name_array = name.split('-')
distro = 'Linux Mint'
if name_array[3] == '64bit':
arch = 'x64'
if name_array[3] == '32bit':
arch = 'x86'
version = name_array[1]
type = name_array[3].title() + ' ' + arch
elif 'tails' in name.lower():
name_array = name.split('-')
distro = 'Tails'
if name_array[1] == 'amd64':
arch = 'x64'
if name_array[1] == 'i386':
arch = 'x86'
version = name_array[2]
type = 'Live CD'
elif 'opensuse' in name.lower():
name_array = name.split('-')
distro = name_array[0].capitalize()
version = name_array[1] + ' ' + name_array[2]
if name_array[4] == 'x86_64':
arch = 'x64'
if name_array[4] == 'amd64':
arch = 'x64'
if name_array[4] == 'x86':
arch = 'x32'
if name_array[4] == 'i686':
arch = 'x32'
if name_array[4] == 'dual':
arch = 'x32 & x64'
type = name_array[3] + ' ' + arch
else:
name_array = name.split('-')
distro = name_array[0].capitalize()
version = 'unknown'
arch = 'unknown'
type = 'unknown'
dic = {
'name' : t.name,
'distro' : distro,
'version' : version,
'type' : type,
'size' : size(t.sizeWhenDone),
'upload' : size(t.rateUpload),
'download' : size(t.rateDownload),
'percent' : percent,
}
torrents.append(dic)
return render_to_response('index.html', {
'username' : request.user,
'torrents' : torrents,
'uploaded' : uploaded,
'downloaded' : downloaded,
'active_torrents' : active_torrents,
'torrent_count' : torrent_count,
'free_space' : free_space,
}, context_instance=RequestContext(request))
def logs(request):
    """Render the log viewer page."""
    context = {'username': request.user}
    return render_to_response('logs.html', context,
                              context_instance=RequestContext(request))
def newdistro(request):
    """Create an AutoTorrent entry and download its .torrent files.

    On a valid POST: save the model, scrape the configured URL for
    links ending in '.torrent', drop any that match an exclude phrase,
    and write each one into the Transmission watch directory.

    NOTE(review): verify=False disables TLS certificate checking on
    both requests; confirm this is intentional for the mirrored hosts.
    NOTE(review): an invalid form falls through to a fresh blank form,
    so validation errors are never shown to the user.
    """
    if request.method == "POST":
        form = NewAutoTorrentForm(request.POST)
        if form.is_valid():
            model_instance = form.save(commit=False)
            model_instance.save()
            form.save_m2m()
            link = AutoTorrent.objects.get(id=model_instance.id).url
            exclude_list = AutoTorrent.objects.get(id=model_instance.id).excludes.all().values_list('phrase', flat=True)
            r = requests.get(link, verify=False)
            # Collect every src/href target, keep only .torrent links.
            data = [x[1] for x in re.findall('(src|href)="(\S+)"',r.content)]
            links = filter(lambda x:x.endswith(".torrent"), data)
            # Resolve relative links against the scraped page URL.
            torrent_links = [urljoin(link,l) if 'http' not in l else l for l in links]
            torrent_links = [l for l in torrent_links if not any(ex.lower() in l.lower() for ex in exclude_list)]
            for torrent in torrent_links:
                # Hard-coded watch directory for the Transmission daemon.
                with open('/data/downloads/torrents/' + torrent.split('/')[-1], 'wb') as f:
                    response = requests.get(torrent, stream=True, verify=False)
                    for block in response.iter_content(1024):
                        f.write(block)
            return HttpResponseRedirect(reverse('newdistro'))
    form = NewAutoTorrentForm()
    return render_to_response('newdistro.html', {
        'username' : request.user,
        'form' : form,
    }, context_instance=RequestContext(request))
def currentdistro(request):
    """Edit an existing AutoTorrent entry and re-fetch its .torrent files.

    Mirrors newdistro() but binds the form to an existing instance
    (identified by the POSTed 'id') and also matches upper-case
    SRC/HREF attributes when scraping.

    NOTE(review): the POSTed id is used unvalidated; a missing id
    raises AutoTorrent.DoesNotExist (HTTP 500). verify=False disables
    TLS verification — confirm intentional.
    """
    if request.method == "POST":
        instance = AutoTorrent.objects.get(id=request.POST['id'])
        form = AutoTorrentForm(request.POST or None, instance=instance)
        if form.is_valid():
            model_instance = form.save(commit=False)
            model_instance.save()
            form.save_m2m()
            link = AutoTorrent.objects.get(id=model_instance.id).url
            exclude_list = AutoTorrent.objects.get(id=model_instance.id).excludes.all().values_list('phrase', flat=True)
            r = requests.get(link, verify=False)
            data = [x[1] for x in re.findall('(src|href|HREF|SRC)="(\S+)"',r.content)]
            links = filter(lambda x:x.endswith(".torrent"), data)
            torrent_links = [urljoin(link,l) if 'http' not in l else l for l in links]
            torrent_links = [l for l in torrent_links if not any(ex.lower() in l.lower() for ex in exclude_list)]
            for torrent in torrent_links:
                filedl = requests.get(torrent, stream=True, verify=False)
                with open('/data/downloads/torrents/' + torrent.split('/')[-1], 'wb') as f:
                    for chunk in filedl.iter_content(chunk_size=1024):
                        if chunk: # filter out keep-alive new chunks
                            f.write(chunk)
            return HttpResponseRedirect(reverse('currentdistro'))
    # One bound form per existing AutoTorrent for the edit page.
    forms = []
    current_autotorrents = AutoTorrent.objects.all()
    for torrent in current_autotorrents:
        forms.append(AutoTorrentForm(None, instance=torrent))
    return render_to_response('currentdistro.html', {
        'username' : request.user,
        'forms': forms
    }, context_instance=RequestContext(request))
def notifications(request):
    """Render the notifications page."""
    context = {'username': request.user}
    return render_to_response('notifications.html', context,
                              context_instance=RequestContext(request))
def settings(request):
    """Edit the Transmission daemon configuration.

    On a valid POST: persist the form to the TransmissionSetting row,
    serialize that row back into Transmission's settings.json (field
    names use '_' in the model but '-' in the JSON file), and restart
    the daemon around the write so it does not clobber the file.

    NOTE(review): the str()/literal_eval/re.sub round-trip also turns
    underscores inside *values* into dashes — verify no setting value
    can contain '_'.
    NOTE(review): an invalid form silently falls through to a freshly
    loaded form, discarding validation errors.
    """
    if request.method == "POST":
        instance = TransmissionSetting.objects.get(id=request.POST['id'])
        form = TransmissionSettingForm(request.POST, instance=instance)
        if form.is_valid():
            model_instance = form.save(commit=False)
            model_instance.save()
            # Model row -> dict repr -> dash-cased JSON payload.
            qs = json.dumps(ast.literal_eval(re.sub("_", "-", str(TransmissionSetting.objects.all()[:1].values()[0]))))
            transmissionobj = json.loads(qs)
            # Stop the daemon first: it rewrites settings.json on exit.
            subprocess.call(['systemctl', 'stop', 'transmission-daemon'])
            with open('/var/lib/transmission/.config/transmission-daemon/settings.json', 'wb') as f:
                json.dump(transmissionobj, f, indent=4, sort_keys=True)
            subprocess.call(['systemctl', 'start', 'transmission-daemon'])
            return HttpResponseRedirect(reverse('settings'))
    current_settings = TransmissionSetting.objects.all()[:1][0]
    form = TransmissionSettingForm(None, instance=current_settings)
    return render_to_response('settings.html', {'username' : request.user, 'form': form,}, context_instance=RequestContext(request))
def timeline(request):
    """Render the timeline page."""
    context = {'username': request.user}
    return render_to_response('timeline.html', context,
                              context_instance=RequestContext(request))
| gpl-3.0 |
gijzelaerr/python-libchan | setup.py | 1 | 1364 | import os
from setuptools import setup, find_packages
import imp
# Runtime dependencies (the libchan protocol rides on offset's channels).
REQUIREMENTS = ["offset"]

# Trove classifiers published to PyPI.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Topic :: Software Development :: Libraries']

# Long description for PyPI comes straight from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
    long_description = f.read()

DATA_FILES = [('libchan', ["LICENSE", "MANIFEST.in", "README.rst"])]
def load_module(name, path):
    """Import module *name* from directory *path* without touching sys.path.

    :param name: bare module name (no package prefix)
    :param path: directory containing the module file
    :returns: the imported module object

    BUG FIX: imp.find_module returns an open file object for plain
    modules; the previous version leaked it. Close it once
    imp.load_module has consumed it (it is None for packages).
    """
    f, pathname, description = imp.find_module(name, [path])
    try:
        return imp.load_module(name, f, pathname, description)
    finally:
        if f is not None:
            f.close()
# Single-source the version from libchan/version.py without importing
# the package (which would pull in its dependencies at build time).
VERSION = load_module('version', './libchan').__version__

setup(name='libchan',
      version=VERSION,
      # TODO: add a one-line summary of the package.
      description='',
      long_description=long_description,
      classifiers=CLASSIFIERS,
      license='MIT',
      # BUG FIX: the url previously pointed at benoitc/offset — the
      # repository of this package's *dependency*, a leftover from the
      # setup.py it was copied from.
      url='http://github.com/gijzelaerr/python-libchan',
      author='Gijs Molenaar',
      author_email='gijs@pythonic.nl',
      packages=find_packages(),
      install_requires=REQUIREMENTS,
      setup_requires=REQUIREMENTS,
      tests_require=['pytest'],
      data_files=DATA_FILES)
TUB-Control/PaPI | papi/yapsy/VersionedPluginManager.py | 1 | 4203 | #!/usr/bin/python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t; python-indent: 4 -*-
"""
Role
====
Defines the basic interface for a plugin manager that also keeps track
of versions of plugins
API
===
"""
from distutils.version import StrictVersion
from papi.yapsy.PluginInfo import PluginInfo
from papi.yapsy.PluginManagerDecorator import PluginManagerDecorator
from papi.yapsy.IPlugin import IPlugin
class VersionedPluginInfo(PluginInfo):
    """Plugin metadata holder that additionally carries a comparable version.

    The version is always a distutils StrictVersion so that instances of
    the same plugin can be ordered reliably.
    """

    def __init__(self, plugin_name, plugin_path):
        """Store name/path and start from the default version "0.0"."""
        PluginInfo.__init__(self, plugin_name, plugin_path)
        # Must be a StrictVersion, never a raw string, for comparisons.
        self.version = StrictVersion("0.0")

    def setVersion(self, vstring):
        """Parse *vstring* and replace the current version."""
        self.version = StrictVersion(vstring)
class VersionedPluginManager(PluginManagerDecorator):
    """
    Handle plugin versioning by making sure that when several
    versions are present for a same plugin, only the latest version is
    manipulated via the standard methods (eg for activation and
    deactivation)

    More precisely, for operations that must be applied on a single
    named plugin at a time (``getPluginByName``,
    ``activatePluginByName``, ``deactivatePluginByName`` etc) the
    targetted plugin will always be the one with the latest version.

    .. note:: The older versions of a given plugin are still reachable
              via the ``getPluginsOfCategoryFromAttic`` method.
    """

    def __init__(self,
                 decorated_manager=None,
                 categories_filter=None,
                 directories_list=None,
                 plugin_info_ext="yapsy-plugin"):
        """
        Create the plugin manager, force VersionedPluginInfo as the
        plugin-info class and prepare the attic for outdated versions.

        ``categories_filter`` defaults to ``{"Default": IPlugin}``.
        (BUG FIX: the default used to be a mutable dict literal shared
        by every instance — the classic mutable-default pitfall. The
        previous docstring was also copy-pasted from a configuration
        manager and described parameters this class never had.)
        """
        if categories_filter is None:
            categories_filter = {"Default": IPlugin}
        # Create the base decorator class
        PluginManagerDecorator.__init__(self, decorated_manager,
                                        categories_filter,
                                        directories_list,
                                        plugin_info_ext)
        self.setPluginInfoClass(VersionedPluginInfo)
        # prepare the storage for the early version of the plugins,
        # for which only the latest version is the one that will be
        # kept in the "core" plugin storage.
        self._prepareAttic()

    def _prepareAttic(self):
        """
        Create and correctly initialize the storage where the wrong
        version of the plugins will be stored.
        """
        self._attic = {}
        for categ in self.getCategories():
            self._attic[categ] = []

    def getLatestPluginsOfCategory(self, category_name):
        """
        DEPRECATED(>1.8): Please consider using getPluginsOfCategory
        instead.

        Return the list of all plugins belonging to a category.
        """
        return self.getPluginsOfCategory(category_name)

    def loadPlugins(self, callback=None):
        """
        Load the candidate plugins that have been identified through a
        previous call to locatePlugins.

        In addition to the baseclass functionality, this subclass also
        keeps only the latest version of each plugin in the category
        storage and moves the older ones to the attic.
        """
        self._component.loadPlugins(callback)
        for categ in self.getCategories():
            latest_plugins = {}
            allPlugins = self.getPluginsOfCategory(categ)
            # identify the latest version of each plugin
            for plugin in allPlugins:
                name = plugin.name
                version = plugin.version
                if name in latest_plugins:
                    if version > latest_plugins[name].version:
                        # Newer than the current champion: demote it.
                        older_plugin = latest_plugins[name]
                        latest_plugins[name] = plugin
                        self.removePluginFromCategory(older_plugin, categ)
                        self._attic[categ].append(older_plugin)
                    else:
                        # Older or equal: straight to the attic.
                        self.removePluginFromCategory(plugin, categ)
                        self._attic[categ].append(plugin)
                else:
                    latest_plugins[name] = plugin

    def getPluginsOfCategoryFromAttic(self, categ):
        """
        Access the older version of plugins for which only the latest
        version is available through standard methods.
        """
        return self._attic[categ]
| gpl-3.0 |
sidartaoliveira/ansible | lib/ansible/module_utils/six/_six.py | 28 | 30097 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)

# Per-version aliases for the types whose names changed between 2 and 3.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):

            def __len__(self):
                return 1 << 31
        try:
            # len() raises OverflowError when the result exceeds
            # Py_ssize_t, which reveals whether we are on a 32-bit build.
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package for dotted names, so the
    # leaf module is fetched from sys.modules instead.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first attribute access.

    Subclasses implement _resolve(); the result is cached on the owning
    instance, after which the descriptor deletes itself from the class
    so later lookups hit the instance attribute directly.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy placeholder for a module renamed between Python 2 and 3.

    *old* is the Python 2 name, *new* the Python 3 name (defaults to
    *name* when omitted); the appropriate one is imported on demand.
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Attribute access on the unresolved placeholder: import the
        # real module, cache the looked-up attribute on self and return it.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module whose public attributes are lazily resolved descriptors."""

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # Advertise the lazily provided attributes alongside the basics.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy placeholder for an attribute that moved between Python 2 and 3.

    Records which module/attribute pair to use for the running interpreter;
    the actual import happens on first access via ``_resolve``.
    """
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Attribute name defaults to the old name, then to *name* itself.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        # Import the owning module and fetch the attribute off it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps full dotted names (e.g. "six.moves.urllib") to module objects
        # or MovedModule placeholders.
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        """Register *mod* under one or more names relative to this package."""
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        """Return the module registered under the relative name *fullname*."""
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # PEP 302 finder: only claim modules that were explicitly registered.
        if fullname in self.known_modules:
            return self
        return None
    def __get_module(self, fullname):
        # Lookup by absolute name; translate KeyError into ImportError as
        # required by the import protocol.
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        # PEP 302 loader entry point.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Resolve lazy placeholders to the real module before publishing.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item (a MovedAttribute or MovedModule) to six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not on the class; it may have been resolved and cached on the
        # `moves` module instance instead.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest unordered-equality assert."""
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertRaisesRegex(p)."""
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest assertRegex(pMatches)."""
    return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # functools.wraps before Python 3.4 did not set __wrapped__; emulate it
    # so introspection tools can always find the original function.
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    The returned throwaway class carries a one-shot metaclass: the first time
    it is used as a base in a ``class`` statement, the dummy metaclass
    intercepts class creation and rebuilds the class with the real *meta* and
    the requested *bases*, so the dummy never survives into the result.
    """
    class _OneShotMeta(meta):
        def __new__(mcs, name, this_bases, namespace):
            # Discard `this_bases` (the temporary class) and substitute the
            # bases the caller actually asked for.
            return meta(name, bases, namespace)
    # Build the temporary class via type.__new__ directly so _OneShotMeta's
    # own __new__ is not triggered during construction.
    return type.__new__(_OneShotMeta, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    The decorated class is rebuilt by calling *metaclass* with the original
    name, bases, and a filtered copy of its namespace, so the result behaves
    as if it had been declared with that metaclass in the first place.
    """
    def wrapper(cls):
        namespace = dict(cls.__dict__)
        declared_slots = namespace.get('__slots__')
        if declared_slots is not None:
            # __slots__ may be a bare string naming a single slot.
            slot_names = [declared_slots] if isinstance(declared_slots, str) else declared_slots
            for slot_name in slot_names:
                # Drop the slot descriptors; the metaclass recreates them.
                namespace.pop(slot_name)
        # These entries are created automatically by class construction and
        # must not be passed through to the new class.
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On Python 2, __unicode__ returns text and __str__ returns UTF-8
        # encoded bytes, mirroring the Python 3 str contract.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| gpl-3.0 |
andrewd18/eve-wspace | evewspace/account/models.py | 7 | 3471 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm
from Map.models import Map, System
from django.db.models.signals import post_save
import pytz
import datetime
# Create your models here.
class PlayTime(models.Model):
    """PlayTime represents a choice of play times for use in several forms."""
    # Start and end of the play window (times of day only, no date).
    fromtime = models.TimeField()
    totime = models.TimeField()
class UserProfile(models.Model):
    """UserProfile defines custom fields tied to each User record in the Django auth DB."""
    user = models.ForeignKey(User, unique=True)
    # Optional Jabber/XMPP address — presumably for notifications; confirm against callers.
    jabberid = models.EmailField(blank=True, null=True)
    # Map shown by default for this user.
    defaultmap = models.ForeignKey(Map, related_name = "defaultusers", blank=True, null=True)
    playtimes = models.ManyToManyField(PlayTime)
    # Last known in-game location; kept fresh via update_location().
    currentsystem = models.ForeignKey(System, related_name="activepilots", blank=True, null=True)
    lastactive = models.DateTimeField()
    def update_location(self, system):
        """
        updates the current location and last active timestamp for this user
        """
        self.currentsystem = system
        # Timestamp is stored timezone-aware in UTC.
        self.lastactive = datetime.datetime.now(pytz.utc)
        self.save()
class GroupProfile(models.Model):
    """GroupProfile defines custom fields tied to each Group record."""
    group = models.ForeignKey(Group, related_name='profile', unique=True)
    description = models.CharField(max_length=200, blank=True, null=True)
    # Registration code — presumably matched against RegistrationForm.regcode
    # at signup; verify where it is consumed.
    regcode = models.CharField(max_length=64, blank=True, null=True)
def create_user_profile(sender, instance, created, **kwargs):
    """Handle user creation event and create a new profile to match the new user"""
    # Only fire on initial creation, not on subsequent saves of the same user.
    if created:
        UserProfile.objects.create(user=instance, lastactive=datetime.datetime.utcnow().replace(tzinfo=pytz.UTC))
# Register the handler so every newly created User gets a UserProfile.
post_save.connect(create_user_profile, sender=User)
def create_group_profile(sender, instance, created, **kwargs):
    """Handle group creation event and create a new group profile."""
    # Only fire on initial creation, not on subsequent saves of the same group.
    if created:
        GroupProfile.objects.create(group=instance)
# Register the handler so every newly created Group gets a GroupProfile.
post_save.connect(create_group_profile, sender=Group)
class RegistrationForm(UserCreationForm):
    """Extends the django registration form to add fields."""
    username = forms.CharField(max_length=30, label="Username")
    email = forms.EmailField(required=False, label="E-Mail Address (Optional)")
    password2 = forms.CharField(widget=forms.PasswordInput, label="Confirm Password:")
    # NOTE(review): presumably checked against GroupProfile.regcode in the
    # registration view — validation is not performed in this form.
    regcode = forms.CharField(max_length=64, label="Registration Code")
| gpl-3.0 |
srikantbmandal/ansible | lib/ansible/modules/cloud/webfaction/webfaction_site.py | 63 | 7118 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
address. You can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
    """Ansible entry point: create, update, or delete a Webfaction website.

    Talks to the Webfaction XML-RPC API via the module-level ``webfaction``
    proxy. Exits through module.exit_json/fail_json in every path.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=False, choices=['present', 'absent'], default='present'),
            # You can specify an IP address or hostname.
            host = dict(required=True),
            https = dict(required=False, type='bool', default=False),
            subdomains = dict(required=False, type='list', default=[]),
            site_apps = dict(required=False, type='list', default=[]),
            login_name = dict(required=True),
            login_password = dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )
    site_name = module.params['name']
    site_state = module.params['state']
    site_host = module.params['host']
    # The Webfaction API identifies hosts by IP, so resolve the given name.
    site_ip = socket.gethostbyname(site_host)
    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )
    site_list = webfaction.list_websites(session_id)
    # Index existing sites by name for O(1) lookup.
    site_map = dict([(i['name'], i) for i in site_list])
    existing_site = site_map.get(site_name)
    result = {}
    # Here's where the real stuff happens
    if site_state == 'present':
        # Does a site with this name already exist?
        if existing_site:
            # If yes, but it's on a different IP address, then fail.
            # If we wanted to allow relocation, we could add a 'relocate=true' option
            # which would get the existing IP address, delete the site there, and create it
            # at the new address.  A bit dangerous, perhaps, so for now we'll require manual
            # deletion if it's on another host.
            if existing_site['ip'] != site_ip:
                module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
            # If it's on this host and the key parameters are the same, nothing needs to be done.
            if (existing_site['https'] == module.boolean(module.params['https'])) and \
               (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
               (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
                module.exit_json(
                    changed = False
                )
        positional_args = [
            session_id, site_name, site_ip,
            module.boolean(module.params['https']),
            module.params['subdomains'],
        ]
        # The API expects each app mapping as an (app_name, mount_point) pair.
        for a in module.params['site_apps']:
            positional_args.append( (a[0], a[1]) )
        if not module.check_mode:
            # If this isn't a dry run, create or modify the site
            result.update(
                webfaction.create_website(
                    *positional_args
                ) if not existing_site else webfaction.update_website (
                    *positional_args
                )
            )
    elif site_state == 'absent':
        # If the site's already not there, nothing changed.
        if not existing_site:
            module.exit_json(
                changed = False,
            )
        if not module.check_mode:
            # If this isn't a dry run, delete the site
            result.update(
                webfaction.delete_website(session_id, site_name, site_ip)
            )
    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))
    # Reached only when a change was made (or would be made in check mode).
    module.exit_json(
        changed = True,
        result = result
    )
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
onyxfish/journalism | agate/mapped_sequence.py | 2 | 4458 | #!/usr/bin/env python
"""
This module contains the :class:`MappedSequence` class that forms the foundation
for agate's :class:`.Row` and :class:`.Column` as well as for named sequences of
rows and columns.
"""
from collections import OrderedDict, Sequence
import six
from six.moves import range # pylint: disable=W0622
from agate.utils import memoize
class MappedSequence(Sequence):
    """
    A generic container for immutable data that can be accessed either by
    numeric index or by key. This is similar to an
    :class:`collections.OrderedDict` except that the keys are optional and
    iteration over it returns the values instead of keys.

    This is the base class for both :class:`.Column` and :class:`.Row`.

    :param values:
        A sequence of values.
    :param keys:
        A sequence of keys.
    """
    __slots__ = ['_values', '_keys']

    def __init__(self, values, keys=None):
        self._values = tuple(values)

        if keys is not None:
            self._keys = keys
        else:
            self._keys = None

    def __getstate__(self):
        """
        Return state values to be pickled.

        This is necessary on Python2.7 when using :code:`__slots__`.
        """
        return {
            '_values': self._values,
            '_keys': self._keys
        }

    def __setstate__(self, data):
        """
        Restore pickled state.

        This is necessary on Python2.7 when using :code:`__slots__`.
        """
        self._values = data['_values']
        self._keys = data['_keys']

    def __unicode__(self):
        """
        Print a unicode sample of the contents of this sequence.
        """
        sample = u', '.join(repr(d) for d in self.values()[:5])

        if len(self) > 5:
            sample = u'%s, ...' % sample

        return u'<agate.%s: (%s)>' % (type(self).__name__, sample)

    def __str__(self):
        """
        Print an ascii sample of the contents of this sequence.
        """
        if six.PY2:  # pragma: no cover
            return str(self.__unicode__().encode('utf8'))

        return str(self.__unicode__())

    def __getitem__(self, key):
        """
        Retrieve values from this array by index, slice or key.
        """
        if isinstance(key, slice):
            indices = range(*key.indices(len(self)))
            values = self.values()

            return tuple(values[i] for i in indices)
        # Note: can't use isinstance because bool is a subclass of int
        elif type(key) is int:
            return self.values()[key]
        else:
            return self.dict()[key]

    def __setitem__(self, key, value):
        """
        Set values by index, which we want to fail loudly.
        """
        raise TypeError('Rows and columns can not be modified directly. You probably need to compute a new column.')

    def __iter__(self):
        """
        Iterate over values.
        """
        return iter(self.values())

    @memoize
    def __len__(self):
        return len(self.values())

    def __eq__(self, other):
        """
        Equality test with other sequences.
        """
        if not isinstance(other, Sequence):
            return False

        return self.values() == tuple(other)

    def __ne__(self, other):
        """
        Inequality test with other sequences.
        """
        return not self.__eq__(other)

    def __contains__(self, value):
        return self.values().__contains__(value)

    def keys(self):
        """
        Equivalent to :meth:`collections.OrderedDict.keys`.
        """
        return self._keys

    def values(self):
        """
        Equivalent to :meth:`collections.OrderedDict.values`.
        """
        return self._values

    @memoize
    def items(self):
        """
        Equivalent to :meth:`collections.OrderedDict.items`.
        """
        return tuple(zip(self.keys(), self.values()))

    def get(self, key, default=None):
        """
        Equivalent to :meth:`collections.OrderedDict.get`.

        Returns ``default`` whenever ``key`` is missing. (The previous
        implementation returned ``None`` for any falsy ``default`` such as
        ``0`` or ``''``, which contradicted the documented dict.get
        semantics.)
        """
        try:
            return self.dict()[key]
        except KeyError:
            return default

    @memoize
    def dict(self):
        """
        Retrieve the contents of this sequence as an
        :class:`collections.OrderedDict`.
        """
        if self.keys() is None:
            raise KeyError

        return OrderedDict(self.items())
| mit |
ml-lab/neon | neon/optimizers/learning_rule.py | 4 | 2997 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Generic parent class used to control how updates are applied to coefficients
i.e. how the learning should proceed.
"""
from neon.util.param import opt_param
import logging
import numpy as np
logger = logging.getLogger(__name__)
class LearningRule(object):

    """
    Base class for rules that apply parameter updates during training.

    Attributes:
        name (str): Used to identify this LearningRule when logging.
        batch_size (int): Number of examples presented at this iteration
    """

    def __init__(self, name, lr_params):
        self.name = name
        # Default all dtypes to float32; opt_param only fills in attributes
        # that lr_params did not already set.
        opt_param(self, ['velocity_dtype', 'param_dtype', 'gradient_dtype'],
                  np.float32)
        opt_param(self, ['backend_type'], 'np.float32')
        if self.backend_type == 'np.float16':
            # Downgrade every dtype attribute to half precision.
            logger.info("Setting learning rule dtypes to float16")
            for attr_name in ('velocity_dtype', 'param_dtype',
                              'gradient_dtype'):
                setattr(self, attr_name, np.float16)

    def initialize(self, backend):
        self.backend = backend

    def __str__(self):
        backend_note = ''
        if hasattr(self, 'backend'):
            backend_note = ", utilizing {} backend".format(
                self.backend.__class__.__name__)
        return ("LearningRule {upd_nm}: {upd_tp} upd_rl{be_nm}\n\t".format(
                upd_nm=self.name, upd_tp=self.__class__.__name__,
                be_nm=backend_note))

    def allocate_state(self, params):
        pass

    def set_pretrain_mode(self, pretrain_mode):
        pass

    def apply_rule(self, params, updates, epoch):
        raise NotImplementedError()

    def get_params(self):
        """Copy each tracked parameter tensor out to a numpy array."""
        np_params = dict()
        for attr in self.param_names:
            if not hasattr(self, attr):
                continue
            tensors = getattr(self, attr)
            converted = []
            for tensor in tensors:
                converted.append(np.array(
                    tensor.asnumpyarray(),
                    dtype=tensor.dtype).reshape(tensor.shape))
            np_params[attr] = converted
        return np_params

    def set_params(self, params_dict):
        """Copy values from params_dict back into the tracked tensors."""
        for attr in self.param_names:
            if attr not in params_dict:
                continue
            for i, values in enumerate(params_dict[attr]):
                getattr(self, attr)[i][:] = values
| apache-2.0 |
Khaon/android_external_skia | platform_tools/android/gyp_gen/vars_dict_lib.py | 146 | 4422 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import types
# The goal of this class is to store a set of unique items in the order in
# which they are inserted. This is important for the final makefile, where
# we want to make sure the image decoders are in a particular order. See
# images.gyp for more information.
class OrderedSet(object):
    """Ordered collection of unique items supporting addition and removal.

    Uniqueness is enforced like a set, but insertion order is remembered.
    This matters for the final makefile, where the image decoders must be
    emitted in a particular order (see images.gyp).
    """

    def __init__(self):
        self.__items = []

    def add(self, item):
        """Append item to the end unless it is already present.

        Args:
          item: The item to add.
        """
        if item not in self.__items:
            self.__items.append(item)

    def __contains__(self, item):
        """Return whether item is in the set.

        Args:
          item: The item to search for in the set.
        Returns:
          bool: Whether the item is in the set.
        """
        return item in self.__items

    def __iter__(self):
        """Iterate over the items in insertion order."""
        return iter(self.__items)

    def remove(self, item):
        """Remove item from the set.

        Args:
          item: Item to be removed.
        Raises:
          ValueError if item is not in the set.
        """
        self.__items.remove(item)

    def __len__(self):
        """Number of items in the set."""
        return len(self.__items)

    def __getitem__(self, index):
        """Return item at index."""
        return self.__items[index]

    def reset(self):
        """Discard all items, leaving the set empty."""
        self.__items = []

    def set(self, other):
        """Replace this ordered set's contents with a copy of other's.

        Args:
          other: OrderedSet to replace this one. After this call, this
              OrderedSet will contain exactly the same elements as other.
        """
        self.__items = list(other.__items)
# Names of the Android.mk variables tracked per target. The order here
# defines the field order of the VarsDict namedtuple below, so do not
# reorder without checking downstream consumers.
VAR_NAMES = ['LOCAL_CFLAGS',
             'LOCAL_CPPFLAGS',
             'LOCAL_SRC_FILES',
             'LOCAL_SHARED_LIBRARIES',
             'LOCAL_STATIC_LIBRARIES',
             'LOCAL_C_INCLUDES',
             'LOCAL_EXPORT_C_INCLUDE_DIRS',
             'DEFINES',
             'KNOWN_TARGETS',
             # These are not parsed by gyp, but set manually.
             'LOCAL_MODULE_TAGS',
             'LOCAL_MODULE']
class VarsDict(collections.namedtuple('VarsDict', VAR_NAMES)):
    """Custom class for storing the arguments to Android.mk variables.

    Each field holds an OrderedSet. Can also be treated as a dictionary
    with fixed keys: both ``d[0]`` and ``d['LOCAL_CFLAGS']`` work.
    """

    __slots__ = ()

    def __new__(cls):
        lists = []
        # TODO (scroggo): Is there a better way add N items?
        for __unused__ in range(len(VAR_NAMES)):
            lists.append(OrderedSet())
        return tuple.__new__(cls, lists)

    def keys(self):
        """Return the field names as strings.
        """
        return self._fields

    def __getitem__(self, index):
        """Return an item, indexed by a number or a string.

        Any other index type returns None (preserving the original
        behaviour).
        """
        # type() rather than isinstance() so that bool indices (a subclass
        # of int) are not treated as tuple positions.
        if type(index) is int:
            # Treat the index as an array index into a tuple.
            return tuple.__getitem__(self, index)
        if type(index) is str:
            # Treat the index as a key into a dictionary. getattr replaces
            # the previous eval('self.%s' % index), which would execute
            # arbitrary code embedded in the key string.
            return getattr(self, index)
        return None
def intersect(var_dict_list):
  """Compute intersection of VarsDicts.

  Find the entries common to every VarsDict in var_dict_list, and trim
  each input down to its unique entries.

  Args:
    var_dict_list: list of VarsDicts. WARNING: each VarsDict will be
        modified in place, to remove the common elements!
  Returns:
    VarsDict containing list entries common to all VarsDicts in
    var_dict_list
  """
  common = VarsDict()

  first = var_dict_list[0]
  rest = var_dict_list[1:]

  for key in first.keys():
    # Iterate over a copy, since we remove from the original as we go.
    for candidate in list(first[key]):
      if all(candidate in var_dict[key] for var_dict in rest):
        # Present everywhere: record it and strip it from every input.
        common[key].add(candidate)
        for var_dict in var_dict_list:
          var_dict[key].remove(candidate)

  return common
| bsd-3-clause |
chrishavlin/nyc_taxi_viz | src/taxi_main.py | 1 | 12620 | """
taxi_main.py
module for loading the raw csv taxi files.
Copyright (C) 2016 Chris Havlin, <https://chrishavlin.wordpress.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The database is NOT distributed with the code here.
Data source:
NYC Taxi & Limousine Commision, TLC Trip Record Data
<http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml>
"""
"""--------------
Import libraries:
-----------------"""
import numpy as np
import time,os
import matplotlib.pyplot as plt
from matplotlib import cm
import taxi_plotmod as tpm
import datetime as dt
"""---------
Functions
------------"""
def read_all_variables(f,there_is_a_header,VarImportList):
"""
reads in the raw data from a single file
input:
f file object
there_is_a_header logical flag
VarImportList a list of strings identifying which
data to read in and save
possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
'drop_lat','elapsed_time_min'
output:
Vars a 2D array, each row is a single taxi
pickup instance, each column is a different
Var_list a list of strings where the index of each
entry corresponds to the column of Vars
"""
# count number of lines
indx=0
for line in f:
indx=indx+1
if there_is_a_header:
indx = indx-1
Nlines = indx
# Initizialize Variable Array and List
N_VarImport=len(VarImportList)
Date=np.empty(Nlines,dtype='datetime64[D]')
Vars=np.zeros((indx,N_VarImport))
Var_list=[None] * N_VarImport
# Go back to start of file, loop again to read variables
f.seek(0)
if there_is_a_header:
headerline=f.readline()
indx=0
# loop over lines, store variables
prevprog=0
zero_lines=0
for line in f:
prog= round(float(indx) / float(Nlines-1) * 100)
if prog % 5 == 0 and prog != prevprog and Nlines > 500:
print ' ',int(prog),'% of file read ...'
prevprog=prog
line = line.rstrip()
line = line.split(',')
var_indx = 0
if len(line) == 19:
dates=line[1].split()[0] # the date string, "yyyy-mm-dd"
#dates=dates.split('-')
#dtim=dt.date(int(dates[0]),int(dates[1]),int(dates[2]))
#Date.append(dtim)
Date[indx]=np.datetime64(dates)
if 'pickup_time_hr' in VarImportList:
Vars[indx,var_indx]=datetime_string_to_time(line[1],'hr')
Var_list[var_indx]='pickup_time_hr'
var_indx=var_indx+1
# Vars[indx,var_indx]=np.datetime64(dates)
# Var_list[var_indx]='date'
# var_indx=var_indx+1
if 'dropoff_time_hr' in VarImportList:
Vars[indx,var_indx]=datetime_string_to_time(line[2],'hr')
Var_list[var_indx]='dropoff_time_hr'
var_indx=var_indx+1
if 'dist_mi' in VarImportList:
Vars[indx,var_indx]=float(line[4]) # distance travelled [mi]
Var_list[var_indx]='dist_mi'
var_indx=var_indx+1
if 'elapsed_time_min' in VarImportList:
pickup=datetime_string_to_time(line[1],'hr')*60.0
drop=datetime_string_to_time(line[2],'hr')*60.0
if drop >= pickup:
Vars[indx,var_indx]=drop - pickup
elif drop < pickup:
#print 'whoops:',pickup/60,drop/60,(drop+24*60.-pickup)/60
Vars[indx,var_indx]=drop+24.0*60.0 - pickup
Var_list[var_indx]='elapsed_time_min'
var_indx=var_indx+1
if 'speed_mph' in VarImportList:
pickup=datetime_string_to_time(line[1],'min')
drop=datetime_string_to_time(line[2],'min')
dist=float(line[4]) # [mi]
if drop > pickup:
speed=dist / ((drop - pickup)/60.0) # [mi/hr]
elif drop < pickup:
dT=(drop+24.0*60.0 - pickup)/60.0
speed=dist / dT # [mi/hr]
else:
speed=0
Vars[indx,var_indx]=speed
Var_list[var_indx]='speed_mph'
var_indx=var_indx+1
if 'pickup_lat' in VarImportList:
Vars[indx,var_indx]=float(line[6])
Var_list[var_indx]='pickup_lat'
var_indx=var_indx+1
if 'pickup_lon' in VarImportList:
Vars[indx,var_indx]=float(line[5])
Var_list[var_indx]='pickup_lon'
var_indx=var_indx+1
if 'drop_lat' in VarImportList:
Vars[indx,var_indx]=float(line[10])
Var_list[var_indx]='drop_lat'
var_indx=var_indx+1
if 'drop_lon' in VarImportList:
Vars[indx,var_indx]=float(line[9])
Var_list[var_indx]='drop_lon'
var_indx=var_indx+1
if 'psgger' in VarImportList:
Vars[indx,var_indx]=float(line[3])
Var_list[var_indx]='pssger'
var_indx=var_indx+1
if 'fare' in VarImportList:
Vars[indx,var_indx]=float(line[12])
Var_list[var_indx]='fare'
var_indx=var_indx+1
if 'tips' in VarImportList:
Vars[indx,var_indx]=float(line[15])
Var_list[var_indx]='tips'
var_indx=var_indx+1
if 'payment_type' in VarImportList:
Vars[indx,var_indx]=float(line[11])
Var_list[var_indx]='payment_type'
var_indx=var_indx+1
indx=indx+1
else:
zero_lines=zero_lines+1
# remove zero lines, which will be padded at end
if zero_lines>0:
Vars=Vars[0:Nlines-zero_lines,:]
Date=Date[0:Nlines-zero_lines]
return Vars,Var_list,Date
def datetime_string_to_time(dt_string, time_units):
    """Convert a datetime string to a time-of-day value in time_units.

    input:
        dt_string    datetime string, "yyyy-mm-dd hh:mm:ss",
                     e.g. "2016-04-18 18:31:43"
        time_units   one of 'hr', 'min' or 'sec'
    output:
        float time of day in the requested units
    raises:
        ValueError for an unrecognized time_units (previously this crashed
        with an UnboundLocalError).
    """
    t_string = dt_string.split()[1]    # drop the date, keep "hh:mm:ss"
    t_hms = t_string.split(':')        # split into hr, min, sec
    # unit conversion factors depending on time_units:
    if time_units == 'hr':
        a = [1.0, 1.0/60.0, 1.0/3600.0]
    elif time_units == 'min':
        a = [60.0, 1.0, 1.0/60.0]
    elif time_units == 'sec':
        a = [3600.0, 60.0, 1.0]
    else:
        raise ValueError(
            "time_units must be 'hr', 'min' or 'sec', got %r" % (time_units,))
    time_flt = float(t_hms[0])*a[0] + float(t_hms[1])*a[1] + float(t_hms[2])*a[2]
    return time_flt
def read_taxi_files(dir_base,Vars_To_Import):
    """ loops over all taxi files in a directory, stores them in memory

    input:
        dir_base         the directory to look for .csv taxi files
        Vars_to_Import   a list of strings identifying which data to read in and save

                         possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
                              'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
                              'drop_lat','elapsed_time_min'
    output:
        VarBig           a 2D array, each row is a single taxi pickup instance, each column
                         is a different variable. Data aggregated from all files in directory.
        Var_list         a list of strings where the index of each entry corresponds to the
                         column of Vars
        Dates            numpy array of datetime64[D] pickup dates, one per row of VarBig

    NOTE(review): Python 2 only (print statements, time.clock). If dir_base
    contains no files, VarBig/Var_list/Dates are never bound and the final
    return raises UnboundLocalError.
    """
    N_files=len(os.listdir(dir_base)) # number of files in directory
    ifile = 1 # file counter
    Elapsed_tot=0 # time counter
    #Dates=[]
    for fn in os.listdir(dir_base): # loop over directory contents
        if os.path.isfile(dir_base+fn): # is the current path obect a file?
            flnm=dir_base + fn # construct the file name
            print 'Reading File ', ifile,' of ', N_files
            start = time.clock() # start timer
            fle = open(flnm, 'r') # open the file for reading
            # distribute current file to lat/lon bins:
            VarChunk,Var_list,DateChunk=read_all_variables(fle,True,Vars_To_Import)
            if ifile == 1:
                # first file: start the aggregate arrays
                VarBig = VarChunk
                Dates=DateChunk#np.array([tuple(DateChunk)], dtype='datetime64[D]')
                print Dates.shape,DateChunk.shape,VarChunk.shape
                #Dates.extend(DateChunk)
            else:
                # subsequent files: append to the aggregate arrays
                VarBig = np.vstack((VarBig,VarChunk))
                #DateChunk=np.array([tuple(DateChunk)],dtype='datetime64[D]')
                print Dates.shape,DateChunk.shape,VarChunk.shape
                Dates = np.concatenate((Dates,DateChunk))
                #Dates.extend(DateChunk)
            elapsed=(time.clock()-start) # elapsed time
            Elapsed_tot=Elapsed_tot+elapsed # cumulative elapsed
            MeanElapsed=Elapsed_tot/ifile # mean time per file
            Fls_left=N_files-(ifile) # files remaining
            time_left=Fls_left*MeanElapsed/60 # estimated time left
            print '   aggregation took %.1f sec' % elapsed
            print '   estimated time remaning: %.1f min' % time_left
            fle.close() # close current file
            ifile = ifile+1 # increment file counter
    return VarBig,Var_list,Dates
def write_gridded_file(write_dir, Var, VarCount, x, y, Varname):
    """Write the spatially binned data (values, counts, axes) as csv files.

    Files are named <Varname>.txt, <Varname>_Count.txt, <Varname>_x.txt and
    <Varname>_y.txt inside write_dir, which is created if missing.
    """
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    prefix = write_dir + '/' + Varname
    outputs = (('', Var), ('_Count', VarCount), ('_x', x), ('_y', y))
    for suffix, data in outputs:
        np.savetxt(prefix + suffix + '.txt', data, delimiter=',')
def read_gridded_file(read_dir, Varname):
    """Read back the spatially binned data written by write_gridded_file.

    Returns (Var, VarCount, x, y) loaded from the four csv files.
    """
    prefix = read_dir + '/' + Varname

    def load(suffix):
        return np.loadtxt(prefix + suffix + '.txt', delimiter=',')

    return load(''), load('_Count'), load('_x'), load('_y')
def write_taxi_count_speed(write_dir, V1, V1name, V2, V2name, V3, V3name):
    """Write three named arrays to csv files under write_dir.

    Each array V is saved as <write_dir>/<Vname>.txt; write_dir is created
    if it does not exist.
    """
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    for name, values in ((V1name, V1), (V2name, V2), (V3name, V3)):
        np.savetxt(write_dir + '/' + name + '.txt', values, delimiter=',')
def read_taxi_count_speed(read_dir, Varname):
    """Load one spatially binned array from <read_dir>/<Varname>.txt."""
    return np.loadtxt('%s/%s.txt' % (read_dir, Varname), delimiter=',')
""" END OF FUNCTIONS """
if __name__ == '__main__':
    # A basic example of reading, processing and plotting some taxi files.

    # The directory with the raw csv data.
    dir_base = '../data_sub_sampled/'

    # Choose which variables to import.
    # possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
    #                     'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
    #                     'drop_lat','elapsed_time_min'
    Vars_To_Import = ['dist_mi', 'pickup_lon', 'pickup_lat']

    # Read in all the data. read_taxi_files returns three values (data array,
    # column list and pickup dates); the example previously unpacked only
    # two, which raised a ValueError at runtime.
    VarBig, Var_list, Dates = read_taxi_files(dir_base, Vars_To_Import)

    # Now bin the point data onto a lat/lon grid, save it and plot it.
    DistCount, DistMean, Distx, Disty = tpm.map_proc(
        VarBig, Var_list, 'dist_mi', 0.1, 60, 'True', 600, 700)
    write_gridded_file('../data_products/', DistMean, DistCount, Distx, Disty, 'dist_mi')
    tpm.plt_map(DistCount, 1, 1000, Distx, Disty, True)
| gpl-3.0 |
try:
    # Available in Python 3
    from tokenize import open as open_py_source

except ImportError:
    # Copied from python3 tokenize
    # NOTE(review): vendored fallback for Python 2; keep in sync with the
    # CPython tokenize module rather than modifying locally.
    from codecs import lookup, BOM_UTF8
    import re
    from io import TextIOWrapper, open
    cookie_re = re.compile("coding[:=]\s*([-\w.]+)")

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc

    def _detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that
        should be used to decode a Python source file. It requires one
        argment, readline, in the same way as the tokenize() generator.

        It will call readline a maximum of twice, and return the encoding used
        (as a string) and a list of any lines (left as bytes) it has read in.

        It detects the encoding from the presence of a utf-8 bom or an encoding
        cookie as specified in pep-0263. If both a bom and a cookie are
        present, but disagree, a SyntaxError will be raised. If the encoding
        cookie is an invalid charset, raise a SyntaxError. Note that if a
        utf-8 bom is found, 'utf-8-sig' is returned.

        If no encoding is specified, then the default of 'utf-8' will be
        returned.
        """
        bom_found = False
        encoding = None
        default = 'utf-8'

        def read_or_stop():
            # Tolerate exhausted readline callables.
            try:
                return readline()
            except StopIteration:
                return b''

        def find_cookie(line):
            try:
                line_string = line.decode('ascii')
            except UnicodeDecodeError:
                return None

            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                raise SyntaxError("unknown encoding: " + encoding)

            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    raise SyntaxError('encoding problem: utf-8')
                encoding += '-sig'
            return encoding

        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []

        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]

        second = read_or_stop()
        if not second:
            return default, [first]

        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]

        return default, [first, second]

    def open_py_source(filename):
        """Open a file in read only mode using the encoding detected by
        detect_encoding().
        """
        buffer = open(filename, 'rb')
        encoding, lines = _detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
buhe/judge | wbox/sandbox.py | 2 | 3154 | from _wbox import UserManager, ProcessManager, NetworkManager
from subprocess import list2cmdline, Popen
from winutils import execution_time
from uuid import uuid1
class WBoxPopen(object):
    """Popen-like wrapper that runs a command inside a WBox sandbox.

    The process is spawned immediately on construction under a dedicated
    sandbox user, with time/memory/process limits and, optionally, a
    per-executable network block.

    NOTE(review): Python 2 only -- relies on ``unicode``, ``dict.iteritems``
    and ``im_func``.
    """

    def __init__(self, argv, time, memory, nproc=1, executable=None, cwd=None, env=None, network_block=False):
        # argv: argument list, joined into a single Windows command line.
        # time: time limit passed to ProcessManager.time_limit (units defined
        #     by the _wbox extension -- presumably seconds; TODO confirm).
        # memory: memory limit; stored as memory * 1024 (presumably MB in,
        #     KB out -- confirm against the _wbox extension).
        # nproc: maximum number of processes.
        # network_block: if True (and executable given), block network access.
        self.user = UserManager()
        self.process = ProcessManager(self.user.username, self.user.password)
        argv = list2cmdline(argv)
        if not isinstance(argv, unicode):
            # _wbox expects unicode; 'mbcs' is the Windows ANSI codepage.
            argv = argv.decode('mbcs')
        self.process.command = argv
        if executable is not None:
            if not isinstance(executable, unicode):
                executable = executable.decode('mbcs')
            self.process.executable = executable
        if cwd is not None:
            if not isinstance(cwd, unicode):
                cwd = cwd.decode('mbcs')
            self.process.dir = cwd
        if env is not None:
            self.process.set_environment(self._encode_environment(env))
        self.process.time_limit = time
        self.process.memory_limit = memory * 1024
        self.process.process_limit = nproc
        self.returncode = None
        self.universal_newlines = False
        if executable is not None and network_block:
            # Unique firewall rule name per sandbox instance.
            self.network_block = NetworkManager('wbox_%s' % uuid1(), executable)
        else:
            self.network_block = None
        self.process.spawn()

    @staticmethod
    def _encode_environment(env):
        """Encode an environment dict into the Win32 double-NUL-terminated
        block of 'KEY=VALUE' unicode strings."""
        buf = []
        for key, value in env.iteritems():
            if not isinstance(key, unicode):
                key = key.decode('mbcs')
            if not isinstance(value, unicode):
                value = value.decode('mbcs')
            buf.append(u'%s=%s' % (key, value))
        return u'\0'.join(buf) + u'\0\0'

    def wait(self, timeout=None):
        """Wait up to timeout for the process, then return its exit code."""
        self.process.wait(timeout)
        return self.poll()

    def poll(self):
        """Return the exit code (None while still running); tears down the
        network block once the process has exited."""
        self.returncode = self.process.get_exit_code()
        if self.returncode is not None and self.network_block is not None:
            self.network_block.dispose()
        return self.returncode

    def kill(self, code=0xDEADBEEF):
        """Terminate the process with the given exit code."""
        self.process.terminate(code)

    # Pipes exposed by the sandboxed process.
    @property
    def stdin(self):
        return self.process.stdin

    @property
    def stdout(self):
        return self.process.stdout

    @property
    def stderr(self):
        return self.process.stderr

    @property
    def mle(self):
        # Whether the memory limit was exceeded.
        return self.process.mle

    @property
    def max_memory(self):
        # Peak memory, converted back from ProcessManager units.
        return self.process.memory / 1024.

    @property
    def max_memory_bytes(self):
        return self.process.memory

    @property
    def tle(self):
        # Whether the time limit was exceeded.
        return self.process.tle

    @property
    def execution_time(self):
        return self.process.execution_time

    @property
    def cpu_time(self):
        # CPU time from the raw process handle (see winutils.execution_time).
        return execution_time(self.process._handle)

    @property
    def r_execution_time(self):
        return self.process.execution_time

    def communicate(self, stdin=None):
        return self._communicate(stdin)

    # Borrow subprocess.Popen's implementations (py2 unbound-method
    # extraction via im_func).
    _communicate = Popen._communicate.im_func
    _readerthread = Popen._readerthread.im_func
| agpl-3.0 |
DisruptiveLabs/nacha | tests/test_records.py | 2 | 7022 | import datetime
import nacha
from . import TestCase
class TestRecord(TestCase):
    """Shared base class for the per-record-type NACHA test cases below."""
    pass
class TestFileHeader(TestRecord):
    """Tests for nacha.FileHeader parsing, serialization and construction."""

    def setUp(self):
        # Line 1 of the 'sample' fixture is the file header record.
        self.record = self.fixture_line(1, 'sample').strip('\n')
        self.assertEqual(len(self.record), nacha.FileHeader.length)

    def test_construction_of_file_record_from_string(self):
        """Every field of a parsed header matches the fixture."""
        fh = nacha.FileHeader.load(self.record)
        self.assertEqual(fh.record_type, '1')
        self.assertEqual(fh.priority_code, 1)
        self.assertEqual(fh.immediate_destination, 91000019)
        self.assertEqual(fh.immediate_origin, '1273720697')
        self.assertIsNotNone(fh.file_creation_date)
        self.assertIsNotNone(fh.file_creation_time)
        # NOTE(review): assertRegexpMatches is the py2-era spelling of
        # assertRegex.
        self.assertRegexpMatches(fh.file_id_modifier, r'[A-Z]')
        self.assertEqual(fh.record_size, 94)
        self.assertEqual(fh.blocking_factor, 10)
        self.assertEqual(fh.format_code, 1)
        self.assertEqual(fh.immediate_destination_name, 'WELLS FARGO')
        self.assertEqual(fh.immediate_origin_name, 'ALALALAD PAYMENTS')
        self.assertEqual(fh.reference_code, '')

    def test_serialization_of_record(self):
        """load() followed by dump() round-trips the raw record."""
        fh = nacha.FileHeader.load(self.record)
        self.assertEqual(len(fh.dump()), nacha.FileHeader.length)
        self.assertEqual(fh.dump(), self.record)

    def test_construction_of_file_record(self):
        """A header built field-by-field dumps to the same raw record."""
        fh_original = nacha.FileHeader.load(self.record)
        fh = nacha.FileHeader(
            immediate_destination='91000019',
            immediate_origin='1273720697',
            file_id_modifier='A',
            file_creation_date=fh_original.file_creation_date,
            file_creation_time=fh_original.file_creation_time,
            immediate_destination_name='WELLS FARGO',
            immediate_origin_name='ALALALAD PAYMENTS',
        )
        self.assertEqual(fh.dump(), self.record)
        self.assertEqual(len(fh.dump()), nacha.FileHeader.length)

    def test_construction_of_invalid_record_throws_exception(self):
        """Dumping a header with a missing required field fails loudly."""
        with self.assertRaises(LookupError) as exc:
            fh = nacha.FileHeader(immediate_destination='091000019')
            fh.dump()
        ex = exc.exception
        # NOTE(review): ex.message is Python 2 only.
        self.assertEqual(
            ex.message, 'FileHeader.immediate_origin value is missing',
        )
class TestCompanyBatchHeader(TestRecord):
    """Tests for nacha.CompanyBatchHeader parsing."""

    def setUp(self):
        # Line 2 of the 'sample' fixture is the batch header record.
        self.record = self.fixture_line(2, 'sample').strip('\n')
        self.assertEqual(len(self.record), nacha.CompanyBatchHeader.length)

    def test_construction_of_company_batch_header_record(self):
        """Every field of a parsed batch header matches the fixture."""
        cbh = nacha.CompanyBatchHeader.load(self.record)
        self.assertEqual(cbh.record_type, '5')
        self.assertEqual(cbh.service_class_code, 200)
        self.assertEqual(cbh.company_name, 'ALALALAD')
        self.assertEqual(cbh.company_discretionary_data, 'ACH SETTLEMENT')
        self.assertEqual(cbh.company_id, '2273720697')
        self.assertEqual(cbh.standard_entry_class, 'PPD')
        self.assertEqual(cbh.company_entry_description, 'PAYOUTS')
        self.assertEqual(cbh.company_descriptive_date, '')
        self.assertEqual(cbh.effective_entry_date, datetime.date(2013, 1, 16))
        self.assertEqual(cbh.settlement_date, '')
        self.assertEqual(cbh.originator_status, 1)
        self.assertEqual(cbh.originating_dfi_id, 12737206)
        self.assertEqual(cbh.batch_number, 1)
class TestEntryDetail(TestRecord):
    """Tests for nacha.EntryDetail parsing and serialization."""

    def setUp(self):
        # Line 3 of the 'sample' fixture is an entry detail record.
        fixture = self.fixture_line(3, 'sample')
        self.record = fixture.strip('\n')
        self.assertEqual(len(self.record), nacha.EntryDetail.length)

    def test_construction_of_entry_detail_record(self):
        """Every field of a parsed entry detail matches the fixture."""
        edr = nacha.EntryDetail.load(self.record)
        self.assertEqual(edr.record_type, '6')
        self.assertEqual(edr.transaction_code, 22)
        self.assertEqual(edr.receiving_dfi_trn, 11234567)
        self.assertEqual(edr.receiving_dfi_trn_check_digit, 8)
        self.assertEqual(edr.receiving_dfi_account_number, '1123456789')
        self.assertEqual(edr.amount, 12345)
        self.assertEqual(edr.individual_id, '98789789')
        self.assertEqual(edr.individual_name, 'TEST CREDIT 1')
        self.assertEqual(edr.discretionary_data, '')
        self.assertEqual(edr.addenda_record_indicator, 0)
        self.assertEqual(edr.trace_number, 127372060000001)

    def test_serialization_of_record(self):
        """load() followed by dump() round-trips the raw record."""
        fh = nacha.EntryDetail.load(self.record)
        self.assertEqual(fh.dump(), self.record)
        self.assertEqual(len(fh.dump()), nacha.EntryDetail.length)
class TestCompanyBatchControl(TestRecord):
    """Tests for nacha.CompanyBatchControl parsing."""

    def setUp(self):
        # Line 5 of the 'sample' fixture is the batch control record.
        fixture = self.fixture_line(5, 'sample')
        self.record = fixture.strip('\n')
        self.assertEqual(len(self.record), nacha.CompanyBatchControl.length)

    def test_construction_of_company_batch_control_record(self):
        """Every field of a parsed batch control record matches the fixture."""
        cbcr = nacha.CompanyBatchControl.load(self.record)
        self.assertEqual(cbcr.record_type, '8')
        self.assertEqual(cbcr.service_class_code, 200)
        self.assertEqual(cbcr.entry_addenda_count, 2)
        self.assertEqual(cbcr.entry_hash, 24388701)
        self.assertEqual(cbcr.total_batch_debit_entry_amount, 0)
        self.assertEqual(cbcr.total_batch_credit_entry_amount, 12490)
        self.assertEqual(cbcr.company_id, '2273720697')
        self.assertEqual(cbcr.message_authentication_code, '')
        self.assertEqual(cbcr.blank, '')
        self.assertEqual(cbcr.originating_dfi_id, 12737206)
        self.assertEqual(cbcr.batch_number, 1)
class TestFileControl(TestRecord):
    """Tests for nacha.FileControl parsing."""

    def setUp(self):
        # Line 6 of the 'sample' fixture is the file control record.
        fixture = self.fixture_line(6, 'sample')
        self.record = fixture.strip('\n')
        self.assertEqual(len(self.record), nacha.FileControl.length)

    def test_construction_of_file_control_record(self):
        """Every field of a parsed file control record matches the fixture."""
        fcr = nacha.FileControl.load(self.record)
        self.assertEqual(fcr.record_type, '9')
        self.assertEqual(fcr.batch_count, 1)
        self.assertEqual(fcr.block_count, 1)
        self.assertEqual(fcr.entry_addenda_record_count, 2)
        self.assertEqual(fcr.entry_hash_total, 24388701)
        self.assertEqual(fcr.total_file_debit_entry_amount, 0)
        self.assertEqual(fcr.total_file_credit_entry_amount, 12490)
        self.assertEqual(fcr.filler, '')
class TestEntryDetailAddendum(TestRecord):
    """Tests for nacha.EntryDetailAddendum parsing."""

    def setUp(self):
        # Line 5 of the 'sample_with_addenda' fixture is an addendum record.
        fixture = self.fixture_line(5, 'sample_with_addenda')
        self.record = fixture.strip('\n')
        self.assertEqual(len(self.record), nacha.EntryDetailAddendum.length)

    def test_construction_of_file_control_record(self):
        """Every field of a parsed addendum record matches the fixture."""
        record = nacha.EntryDetailAddendum.load(self.record)
        self.assertEqual(record.record_type, '7')
        self.assertEqual(record.addenda_type, 5)
        self.assertEqual(
            record.payment_related_information,
            '0*U*00307*000000183*0*P*:\GS*RA*9133131313*6126127272*20000888*0830*183*T*002010',
        )
        self.assertEqual(record.addenda_sequence_number, 1)
        self.assertEqual(record.entry_detail_sequence_number, 2)
| isc |
intel-analytics/BigDL | spark/dl/src/test/resources/tf/models/inception_resnet_v2.py | 9 | 1518 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from nets import inception_resnet_v2
from sys import argv
from util import run_model
def main():
    """
    Build an InceptionResnetV2 graph on random input and export it via
    ``run_model``.

    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    tf.set_random_seed(1)
    # Two random 299x299 RGB images as the graph input.
    batch_shape = (2, 299, 299, 3)
    image_batch = tf.Variable(tf.random_uniform(batch_shape), name='input')
    image_batch = tf.identity(image_batch, "input_node")
    _, end_points = inception_resnet_v2.inception_resnet_v2(
        image_batch, is_training=False)
    print("nodes in the graph")
    for node_name in end_points:
        print(node_name + " => " + str(end_points[node_name]))
    # argv[2] is a comma-separated list of output tensor names.
    graph = tf.get_default_graph()
    net_outputs = map(graph.get_tensor_by_name, argv[2].split(','))
    run_model(net_outputs, argv[1], 'InceptionResnetV2', argv[3] == 'True')


if __name__ == "__main__":
    main()
| apache-2.0 |
xiang12835/python_web | py2_web2py/web2py/applications/main/languages/es.py | 4 | 23678 | # -*- coding: utf-8 -*-
{
'!langcode!': 'es',
'!langname!': 'Español',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualice" es una expresión opcional como "campo1=\'nuevo_valor\'". No se puede actualizar o eliminar resultados de un JOIN',
'%(nrows)s records found': '%(nrows)s registros encontrados',
'%s %%{position}': '%s %%{posición}',
'%s %%{row} deleted': '%s %%{fila} %%{eliminada}',
'%s %%{row} updated': '%s %%{fila} %%{actualizada}',
'%s selected': '%s %%{seleccionado}',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'(something like "it-it")': '(algo como "it-it")',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**no disponible** (requiere la libreria [[guppy http://pypi.python.org/pypi/guppy/ popup]] de Python)',
'?': '?',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ha ocurrido un error, por favor [[recargar %s]] la página',
'@markmin\x01Number of entries: **%s**': 'Número de entradas: **%s**',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**no disponible**``:red (Necesita libreria de Python: [[guppy http://pypi.python.org/pypi/guppy/ popup]])',
'A new version of web2py is available': 'Hay una nueva versión de web2py disponible',
'A new version of web2py is available: %s': 'Hay una nueva versión de web2py disponible: %s',
'About': 'Acerca de',
'about': 'acerca de',
'About application': 'Acerca de la aplicación',
'Access Control': 'Control de Acceso',
'Add': 'Añadir',
'additional code for your application': 'código adicional para su aplicación',
'admin': 'administrar',
'admin disabled because no admin password': 'admin deshabilitado por falta de contraseña',
'admin disabled because not supported on google app engine': 'admin deshabilitado, no es soportado en GAE',
'admin disabled because unable to access password file': 'admin deshabilitado, imposible acceder al archivo con la contraseña',
'Admin is disabled because insecure channel': 'Admin deshabilitado, el canal no es seguro',
'Admin is disabled because unsecure channel': 'Admin deshabilitado, el canal no es seguro',
'Administrative interface': 'Interfaz administrativa',
'Administrative Interface': 'Interfaz Administrativa',
'Administrator Password:': 'Contraseña del Administrador:',
'Ajax Recipes': 'Recetas AJAX',
'An error occured, please %s the page': 'Ha ocurrido un error, por favor %s la página',
'An error occured, please [[reload %s]] the page': 'Ha ocurrido un error, por favor [[reload %s]] la pagina',
'And': 'Y',
'and rename it (required):': 'y renómbrela (requerido):',
'and rename it:': ' y renómbrelo:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin deshabilitado, el canal no es seguro',
'application "%s" uninstalled': 'aplicación "%s" desinstalada',
'application compiled': 'aplicación compilada',
'application is compiled and cannot be designed': 'la aplicación está compilada y no puede ser modificada',
'Apply changes': 'Aplicar cambios',
'Appointment': 'Nombramiento',
'Are you sure you want to delete file "%s"?': '¿Está seguro que desea eliminar el archivo "%s"?',
'Are you sure you want to delete this object?': '¿Está seguro que desea borrar este objeto?',
'Are you sure you want to uninstall application "%s"': '¿Está seguro que desea desinstalar la aplicación "%s"',
'Are you sure you want to uninstall application "%s"?': '¿Está seguro que desea desinstalar la aplicación "%s"?',
'at': 'en',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCION: Inicio de sesión requiere una conexión segura (HTTPS) o corriendo en localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.',
'ATTENTION: you cannot edit the running application!': 'ATENCION: no puede modificar la aplicación que está ejecutandose!',
'Authentication': 'Autenticación',
'Authentication failed at client DB!': '¡La autenticación ha fallado en la BDD cliente!',
'Authentication failed at main DB!': '¡La autenticación ha fallado en la BDD principal!',
'Available Databases and Tables': 'Bases de datos y tablas disponibles',
'Back': 'Atrás',
'Buy this book': 'Compra este libro',
"Buy web2py's book": 'Compra el libro de web2py',
'Cache': 'Caché',
'cache': 'caché',
'Cache Cleared': 'Cache Limpiada',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'La Cache contiene items con **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} de antiguedad.',
'Cache Keys': 'Llaves de la Caché',
'cache, errors and sessions cleaned': 'caché, errores y sesiones eliminados',
'Cannot be empty': 'No puede estar vacío',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se puede compilar: hay errores en su aplicación. Depure, corrija errores y vuelva a intentarlo.',
'cannot create file': 'no es posible crear archivo',
'cannot upload file "%(filename)s"': 'no es posible subir archivo "%(filename)s"',
'Change Password': 'Cambie la Contraseña',
'Change password': 'Cambie la contraseña',
'change password': 'cambie la contraseña',
'check all': 'marcar todos',
'Check to delete': 'Marque para eliminar',
'choose one': 'escoja uno',
'clean': 'limpiar',
'Clear': 'Limpiar',
'Clear CACHE?': '¿Limpiar CACHÉ?',
'Clear DISK': 'Limpiar DISCO',
'Clear RAM': 'Limpiar RAM',
'Click on the link %(link)s to reset your password': 'Pulse en el enlace %(link)s para reiniciar su contraseña',
'click to check for upgrades': 'haga clic para buscar actualizaciones',
'client': 'cliente',
'Client IP': 'IP del Cliente',
'Close': 'Cerrar',
'Community': 'Comunidad',
'compile': 'compilar',
'compiled application removed': 'aplicación compilada eliminada',
'Components and Plugins': 'Componentes y Plugins',
'Config.ini': 'Config.ini',
'contains': 'contiene',
'Controller': 'Controlador',
'Controllers': 'Controladores',
'controllers': 'controladores',
'Copyright': 'Copyright',
'create file with filename:': 'cree archivo con nombre:',
'Create new application': 'Cree una nueva aplicación',
'create new application:': 'cree una nueva aplicación:',
'Created By': 'Creado Por',
'Created On': 'Creado En',
'CSV (hidden cols)': 'CSV (columnas ocultas)',
'Current request': 'Solicitud en curso',
'Current response': 'Respuesta en curso',
'Current session': 'Sesión en curso',
'currently saved or': 'actualmente guardado o',
'customize me!': '¡Personalizame!',
'data uploaded': 'datos subidos',
'Database': 'Base de datos',
'Database %s select': 'selección en base de datos %s',
'database administration': 'administración de base de datos',
'Database Administration (appadmin)': 'Administración de Base de Datos (appadmin)',
'Date and Time': 'Fecha y Hora',
'DB': 'BDD',
'db': 'bdd',
'DB Model': 'Modelo BDD',
'defines tables': 'define tablas',
'Delete': 'Eliminar',
'delete': 'eliminar',
'delete all checked': 'eliminar marcados',
'Delete:': 'Eliminar:',
'Demo': 'Demostración',
'Deploy on Google App Engine': 'Despliegue en Google App Engine',
'Deployment Recipes': 'Recetas de despliegue',
'Description': 'Descripción',
'design': 'diseño',
'DESIGN': 'DISEÑO',
'Design': 'Diseño',
'Design for': 'Diseño por',
'detecting': 'detectando',
'DISK': 'DISCO',
'Disk Cache Keys': 'Llaves de Caché en Disco',
'Disk Cleared': 'Disco limpiado',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'El DISCO contiene items con **%(hours)02d** %%{hora(hours)} **%(min)02d** %%{minuto(min)} **%(sec)02d** %%{segundo(sec)} de antiguedad.',
'Documentation': 'Documentación',
"Don't know what to do?": '¿No sabe que hacer?',
'done!': '¡hecho!',
'Download': 'Descargas',
'E-mail': 'Correo electrónico',
'edit': 'editar',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'Edit application': 'Editar aplicación',
'edit controller': 'editar controlador',
'Edit current record': 'Edite el registro actual',
'Edit Profile': 'Editar Perfil',
'edit profile': 'editar perfil',
'Edit This App': 'Edite esta App',
'Editing file': 'Editando archivo',
'Editing file "%s"': 'Editando archivo "%s"',
'Email and SMS': 'Correo electrónico y SMS',
'Email sent': 'Correo electrónico enviado',
'End of impersonation': 'Fin de suplantación',
'enter a number between %(min)g and %(max)g': 'introduzca un numero entre %(min)g y %(max)g',
'enter a value': 'introduzca un valor',
'enter an integer between %(min)g and %(max)g': 'introduzca un número entero entre %(min)g y %(max)g',
'enter date and time as %(format)s': 'introduzca fecha y hora como %(format)s',
'Error logs for "%(app)s"': 'Bitácora de errores para "%(app)s"',
'errors': 'errores',
'Errors': 'Errores',
'Errors in form, please check it out.': 'Hay errores en el formulario, por favor compruébelo.',
'export as csv file': 'exportar como archivo CSV',
'Export:': 'Exportar:',
'exposes': 'expone',
'extends': 'extiende',
'failed to reload module': 'la recarga del módulo ha fallado',
'FAQ': 'Preguntas frecuentes',
'file "%(filename)s" created': 'archivo "%(filename)s" creado',
'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado',
'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido',
'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado',
'file "%s" of %s restored': 'archivo "%s" de %s restaurado',
'file changed on disk': 'archivo modificado en el disco',
'file does not exist': 'archivo no existe',
'file saved on %(time)s': 'archivo guardado %(time)s',
'file saved on %s': 'archivo guardado %s',
'First name': 'Nombre',
'Forgot username?': '¿Olvidó el nombre de usuario?',
'Forms and Validators': 'Formularios y validadores',
'Free Applications': 'Aplicaciones Libres',
'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].',
'Graph Model': 'Modelo en Grafo',
'Group %(group_id)s created': 'Grupo %(group_id)s creado',
'Group ID': 'ID de Grupo',
'Group uniquely assigned to user %(id)s': 'Grupo asignado únicamente al usuario %(id)s',
'Groups': 'Grupos',
'Hello World': 'Hola Mundo',
'help': 'ayuda',
'Helping web2py': 'Ayudando a web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Inicio',
'How did you get here?': '¿Cómo llegaste aquí?',
'htmledit': 'htmledit',
'Impersonate': 'Suplantar',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'in': 'en',
'includes': 'incluye',
'Index': 'Índice',
'insert new': 'inserte nuevo',
'insert new %s': 'inserte nuevo %s',
'Installed applications': 'Aplicaciones instaladas',
'Insufficient privileges': 'Privilegios insuficientes',
'internal error': 'error interno',
'Internal State': 'Estado Interno',
'Introduction': 'Introducción',
'Invalid action': 'Acción inválida',
'Invalid email': 'Correo electrónico inválido',
'invalid expression': 'expresión inválida',
'Invalid login': 'Inicio de sesión inválido',
'invalid password': 'contraseña inválida',
'Invalid Query': 'Consulta inválida',
'invalid request': 'Solicitud inválida',
'Invalid reset password': 'Reinicio de contraseña inválido',
'invalid ticket': 'Tiquete inválido',
'Is Active': 'Está Activo',
'Key': 'Llave',
'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado',
'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados',
'languages': 'lenguajes',
'Languages': 'Lenguajes',
'languages updated': 'lenguajes actualizados',
'Last name': 'Apellido',
'Last saved on:': 'Guardado en:',
'Layout': 'Diseño de página',
'Layout Plugins': 'Plugins de diseño',
'Layouts': 'Diseños de páginas',
'License for': 'Licencia para',
'Live Chat': 'Chat en vivo',
'loading...': 'cargando...',
'Log In': 'Iniciar sesion',
'Logged in': 'Sesión iniciada',
'Logged out': 'Sesión finalizada',
'Login': 'Inicio de sesión',
'login': 'inicio de sesión',
'Login disabled by administrator': 'Inicio de sesión deshabilitado por el administrador',
'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa',
'logout': 'fin de sesión',
'Logout': 'Fin de sesión',
'Lost Password': 'Contraseña perdida',
'Lost password?': '¿Olvidó la contraseña?',
'lost password?': '¿olvidó la contraseña?',
'Main Menu': 'Menú principal',
'Manage %(action)s': 'Gestionar %(action)s',
'Manage Access Control': 'Gestionar control de acceso',
'Manage Cache': 'Gestionar la Caché',
'Memberships': 'Membresias',
'Menu Model': 'Modelo "menu"',
'merge': 'Combinar',
'Models': 'Modelos',
'models': 'modelos',
'Modified By': 'Modificado Por',
'Modified On': 'Modificado En',
'Modules': 'Módulos',
'modules': 'módulos',
'must be YYYY-MM-DD HH:MM:SS!': '¡debe ser DD/MM/YYYY HH:MM:SS!',
'must be YYYY-MM-DD!': '¡debe ser DD/MM/YYYY!',
'My Sites': 'Mis Sitios',
'Name': 'Nombre',
'New': 'Nuevo',
'New %(entity)s': 'Nuevo %(entity)s',
'new application "%s" created': 'nueva aplicación "%s" creada',
'New password': 'Contraseña nueva',
'New Record': 'Registro nuevo',
'new record inserted': 'nuevo registro insertado',
'next %s rows': 'siguiente %s filas',
'next 100 rows': '100 filas siguientes',
'NO': 'NO',
'No databases in this application': 'No hay bases de datos en esta aplicación',
'No records found': 'No se han encontrado registros',
'Not authorized': 'No autorizado',
'not in': 'no en',
'Number of entries: **%s**': 'Numero de entradas: **%s**',
'Object or table name': 'Nombre del objeto o tabla',
'Old password': 'Contraseña vieja',
'Online book': 'Libro Online',
'Online examples': 'Ejemplos en línea',
'Or': 'O',
'or import from csv file': 'o importar desde archivo CSV',
'or provide application url:': 'o provea URL de la aplicación:',
'Origin': 'Origen',
'Original/Translation': 'Original/Traducción',
'Other Plugins': 'Otros Plugins',
'Other Recipes': 'Otras Recetas',
'Overview': 'Resumen',
'pack all': 'empaquetar todo',
'pack compiled': 'empaquetar compilados',
'Password': 'Contraseña',
'Password changed': 'Contraseña cambiada',
"Password fields don't match": 'Los campos de contraseña no coinciden',
'Password reset': 'Reinicio de contraseña',
'Peeking at file': 'Visualizando archivo',
'Permission': 'Permiso',
'Permissions': 'Permisos',
'Phone': 'Teléfono',
'please input your password again': 'por favor introduzca su contraseña otra vez',
'Plugins': 'Plugins',
'Powered by': 'Este sitio usa',
'Preface': 'Prefacio',
'previous %s rows': 'fila %s anterior',
'previous 100 rows': '100 filas anteriores',
'Profile': 'Perfil',
'Profile updated': 'Perfil actualizado',
'pygraphviz library not found': 'Libreria pygraphviz no encontrada',
'Python': 'Python',
'Query Not Supported: %s': 'Consulta No Soportada: %s',
'Query:': 'Consulta:',
'Quick Examples': 'Ejemplos Rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'Llaves de la Caché en RAM',
'Ram Cleared': 'Ram Limpiada',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'La RAM contiene items con **%(hours)02d** %%{hora(hours)} **%(min)02d** %%{minuto(min)} **%(sec)02d** %%{segundo(sec)} de antiguedad.',
'Recipes': 'Recetas',
'Record': 'Registro',
'Record %(id)s created': 'Registro %(id)s creado',
'Record Created': 'Registro Creado',
'record does not exist': 'el registro no existe',
'Record ID': 'ID de Registro',
'Record id': 'Id de registro',
'register': 'regístrese',
'Register': 'Regístrese',
'Registration identifier': 'Identificador de Registro',
'Registration key': 'Llave de registro',
'Registration successful': 'Registro con éxito',
'reload': 'recargar',
'Remember me (for 30 days)': 'Recuérdame (durante 30 días)',
'remove compiled': 'eliminar compiladas',
'Request reset password': 'Solicitar reinicio de contraseña',
'Reset password': 'Reiniciar contraseña',
'Reset Password key': 'Restaurar Llave de la Contraseña',
'Resolve Conflict file': 'archivo Resolución de Conflicto',
'restore': 'restaurar',
'Retrieve username': 'Recuperar nombre de usuario',
'revert': 'revertir',
'Role': 'Rol',
'Roles': 'Roles',
'Rows in Table': 'Filas en la tabla',
'Rows selected': 'Filas seleccionadas',
'save': 'guardar',
'Save model as...': 'Guardar modelo como...',
'Saved file hash:': 'Hash del archivo guardado:',
'Search': 'Buscar',
'Semantic': 'Semántica',
'Services': 'Servicios',
'session expired': 'sesión expirada',
'shell': 'terminal',
'Sign Up': 'Registrarse',
'site': 'sitio',
'Size of cache:': 'Tamaño de la Caché:',
'some files could not be removed': 'algunos archivos no pudieron ser removidos',
'start': 'inicio',
'starts with': 'comienza por',
'state': 'estado',
'static': 'estático',
'Static files': 'Archivos estáticos',
'Statistics': 'Estadísticas',
'Stylesheet': 'Hoja de estilo',
'Submit': 'Enviar',
'submit': 'enviar',
'Success!': '¡Correcto!',
'Support': 'Soporte',
'Sure you want to delete this object?': '¿Está seguro que desea eliminar este objeto?',
'Table': 'Tabla',
'Table name': 'Nombre de la tabla',
'test': 'probar',
'Testing application': 'Probando aplicación',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" es una condición como "db.tabla1.campo1==\'valor\'". Algo como "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'la lógica de la aplicación, cada ruta URL se mapea en una función expuesta en el controlador',
'The Core': 'El Núcleo',
'the data representation, define database tables and sets': 'la representación de datos, define tablas y conjuntos de base de datos',
'The output of the file is a dictionary that was rendered by the view %s': 'La salida de dicha función es un diccionario que es desplegado por la vista %s',
'the presentations layer, views are also known as templates': 'la capa de presentación, las vistas también son llamadas plantillas',
'The Views': 'Las Vistas',
'There are no controllers': 'No hay controladores',
'There are no models': 'No hay modelos',
'There are no modules': 'No hay módulos',
'There are no static files': 'No hay archivos estáticos',
'There are no translators, only default language is supported': 'No hay traductores, sólo el lenguaje por defecto es soportado',
'There are no views': 'No hay vistas',
'these files are served without processing, your images go here': 'estos archivos son servidos sin procesar, sus imágenes van aquí',
'This App': 'Esta Aplicación',
'This email already has an account': 'Este correo electrónico ya tiene una cuenta',
'This is a copy of the scaffolding application': 'Esta es una copia de la aplicación de andamiaje',
'This is the %(filename)s template': 'Esta es la plantilla %(filename)s',
'Ticket': 'Tiquete',
'Time in Cache (h:m:s)': 'Tiempo en Caché (h:m:s)',
'Timestamp': 'Marca de tiempo',
'to previous version.': 'a la versión previa.',
'To emulate a breakpoint programatically, write:': 'Emular un punto de ruptura programáticamente, escribir:',
'to use the debugger!': '¡usar el depurador!',
'toggle breakpoint': 'alternar punto de ruptura',
'Toggle comment': 'Alternar comentario',
'Toggle Fullscreen': 'Alternar pantalla completa',
'too short': 'demasiado corto',
'Traceback': 'Rastrear',
'translation strings for the application': 'cadenas de caracteres de traducción para la aplicación',
'try': 'intente',
'try something like': 'intente algo como',
'TSV (Excel compatible)': 'TSV (compatible con Excel)',
'TSV (Excel compatible, hidden cols)': 'TSV (compatible con Excel, columnas ocultas)',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'No es posible verificar la existencia de actualizaciones',
'unable to create application "%s"': 'no es posible crear la aplicación "%s"',
'unable to delete file "%(filename)s"': 'no es posible eliminar el archivo "%(filename)s"',
'Unable to download': 'No es posible la descarga',
'Unable to download app': 'No es posible descargar la aplicación',
'unable to parse csv file': 'no es posible analizar el archivo CSV',
'unable to uninstall "%s"': 'no es posible desinstalar "%s"',
'uncheck all': 'desmarcar todos',
'uninstall': 'desinstalar',
'unknown': 'desconocido',
'update': 'actualizar',
'update all languages': 'actualizar todos los lenguajes',
'Update:': 'Actualice:',
'upload application:': 'subir aplicación:',
'Upload existing application': 'Suba esta aplicación',
'upload file:': 'suba archivo:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) para NOT, para crear consultas más complejas.',
'User': 'Usuario',
'User %(id)s is impersonating %(other_id)s': 'El usuario %(id)s está suplantando %(other_id)s',
'User %(id)s Logged-in': 'El usuario %(id)s inició la sesión',
'User %(id)s Logged-out': 'El usuario %(id)s finalizó la sesión',
'User %(id)s Password changed': 'Contraseña del usuario %(id)s cambiada',
'User %(id)s Password reset': 'Contraseña del usuario %(id)s reiniciada',
'User %(id)s Profile updated': 'Actualizado el perfil del usuario %(id)s',
'User %(id)s Registered': 'Usuario %(id)s Registrado',
'User %(id)s Username retrieved': 'Se ha recuperado el nombre de usuario del usuario %(id)s',
'User %(username)s Logged-in': 'El usuario %(username)s inició la sesión',
"User '%(username)s' Logged-in": "El usuario '%(username)s' inició la sesión",
"User '%(username)s' Logged-out": "El usuario '%(username)s' finalizó la sesión",
'User Id': 'Id de Usuario',
'User ID': 'ID de Usuario',
'User Logged-out': 'El usuario finalizó la sesión',
'Username': 'Nombre de usuario',
'Username retrieve': 'Recuperar nombre de usuario',
'Users': 'Usuarios',
'value already in database or empty': 'el valor ya existe en la base de datos o está vacío',
'value not allowed': 'valor no permitido',
'value not in database': 'el valor no está en la base de datos',
'Verify Password': 'Verificar Contraseña',
'Version': 'Versión',
'versioning': 'versionado',
'Videos': 'Vídeos',
'View': 'Vista',
'view': 'vista',
'View %(entity)s': 'Ver %(entity)s',
'Views': 'Vistas',
'views': 'vistas',
'web2py is up to date': 'web2py está actualizado',
'web2py Recent Tweets': 'Tweets Recientes de web2py',
'Welcome': 'Bienvenido',
'Welcome %s': 'Bienvenido %s',
'Welcome to web2py': 'Bienvenido a web2py',
'Welcome to web2py!': '¡Bienvenido a web2py!',
'Which called the function %s located in the file %s': 'La cual llamó la función %s localizada en el archivo %s',
'Working...': 'Trabajando...',
'YES': 'SÍ',
'You are successfully running web2py': 'Usted está ejecutando web2py exitosamente',
'You can modify this application and adapt it to your needs': 'Usted puede modificar esta aplicación y adaptarla a sus necesidades',
'You visited the url %s': 'Usted visitó la url %s',
'Your username is: %(username)s': 'Su nombre de usuario es: %(username)s',
}
| apache-2.0 |
jkitzes/macroeco | macroeco/models/__init__.py | 1 | 1969 | """
===============================
Models (:mod:`macroeco.models`)
===============================
This module contains distributions and curves (i.e., standard mathematical
functions) commonly used in analysis of ecological patterns.
Distributions
=============
All of the distributions here are subclasses of either
`~scipy.stats.rv_continuous` and `~scipy.stats.rv_discrete` found in
`scipy.stats`. Several of the distributions here are similar to or based on
existing distributions found in `scipy.stats` but are updated to allow the use
of common ecological parameterizations.
In addition to all of the methods found in `scipy.stats`, methods for fitting
distributions and curves to data and for translating common distribution
arguments into formal parameters (i.e., deriving the ``p`` of the geometric
distribution from the distribution mean) are also provided in these classes.
The following discrete distributions are available.
.. autosummary::
:toctree: generated/
geom
geom_uptrunc
nbinom
nbinom_ztrunc
cnbinom
logser
logser_uptrunc
plnorm
plnorm_ztrunc
dgamma
The following continuous distributions are available.
.. autosummary::
:toctree: generated/
expon
expon_uptrunc
lognorm
Curves
======
Several common curves used in ecological analysis are included here.
.. autosummary::
:toctree: generated/
power_law
mete_sar
mete_sar_iterative
mete_ear
sampling_sar
sampling_sar_iterative
sampling_ear
"""
from _distributions import (geom, geom_uptrunc, nbinom, nbinom_ztrunc,
cnbinom, logser, logser_uptrunc, plnorm,
plnorm_ztrunc, expon, expon_uptrunc, lognorm,
dgamma)
from ._curves import (power_law,
mete_sar, mete_ear, mete_sar_iterative,
mete_upscale_iterative_alt, sampling_sar,
sampling_sar_iterative, sampling_ear)
| bsd-2-clause |
dianchen96/gym | envs/__init__.py | 1 | 13852 | from gym.envs.registration import registry, register, make, spec
# Algorithmic
# ----------------------------------------
# Each spec expands to exactly one ``register`` call, in the original order;
# ``register(**spec)`` is equivalent to writing the keyword arguments inline.
for _spec in (
    dict(id='Copy-v0', entry_point='gym.envs.algorithmic:CopyEnv',
         max_episode_steps=200, reward_threshold=25.0),
    dict(id='RepeatCopy-v0', entry_point='gym.envs.algorithmic:RepeatCopyEnv',
         max_episode_steps=200, reward_threshold=75.0),
    dict(id='ReversedAddition-v0',
         entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
         kwargs={'rows': 2}, max_episode_steps=200, reward_threshold=25.0),
    dict(id='ReversedAddition3-v0',
         entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
         kwargs={'rows': 3}, max_episode_steps=200, reward_threshold=25.0),
    dict(id='DuplicatedInput-v0',
         entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
         max_episode_steps=200, reward_threshold=9.0),
    dict(id='Reverse-v0', entry_point='gym.envs.algorithmic:ReverseEnv',
         max_episode_steps=200, reward_threshold=25.0),
):
    register(**_spec)
# Classic
# ----------------------------------------
# Classic-control environments; ``register(**spec)`` mirrors the original
# per-environment ``register`` calls one-to-one, in the same order.
for _spec in (
    dict(id='CartPole-v0', entry_point='gym.envs.classic_control:CartPoleEnv',
         max_episode_steps=200, reward_threshold=195.0),
    dict(id='CartPole-v1', entry_point='gym.envs.classic_control:CartPoleEnv',
         max_episode_steps=500, reward_threshold=475.0),
    dict(id='MountainCar-v0',
         entry_point='gym.envs.classic_control:MountainCarEnv',
         max_episode_steps=200, reward_threshold=-110.0),
    dict(id='MountainCarContinuous-v0',
         entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
         max_episode_steps=999, reward_threshold=90.0),
    dict(id='Pendulum-v0', entry_point='gym.envs.classic_control:PendulumEnv',
         max_episode_steps=200),
    dict(id='Acrobot-v1', entry_point='gym.envs.classic_control:AcrobotEnv',
         max_episode_steps=500),
):
    register(**_spec)
# Box2d
# ----------------------------------------
# Box2d environments, registered in the original order.
for _spec in (
    dict(id='LunarLander-v2', entry_point='gym.envs.box2d:LunarLander',
         max_episode_steps=1000, reward_threshold=200),
    dict(id='LunarLanderContinuous-v2',
         entry_point='gym.envs.box2d:LunarLanderContinuous',
         max_episode_steps=1000, reward_threshold=200),
    dict(id='BipedalWalker-v2', entry_point='gym.envs.box2d:BipedalWalker',
         max_episode_steps=1600, reward_threshold=300),
    dict(id='BipedalWalkerHardcore-v2',
         entry_point='gym.envs.box2d:BipedalWalkerHardcore',
         max_episode_steps=2000, reward_threshold=300),
    dict(id='CarRacing-v0', entry_point='gym.envs.box2d:CarRacing',
         max_episode_steps=1000, reward_threshold=900),
):
    register(**_spec)
# Toy Text
# ----------------------------------------
# Toy-text environments, registered in the original order.
for _spec in (
    dict(id='Blackjack-v0', entry_point='gym.envs.toy_text:BlackjackEnv'),
    dict(id='FrozenLake-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv',
         kwargs={'map_name': '4x4'}, max_episode_steps=100,
         reward_threshold=0.78),  # optimum = .8196
    dict(id='FrozenLake8x8-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv',
         kwargs={'map_name': '8x8'}, max_episode_steps=200,
         reward_threshold=0.99),  # optimum = 1
    dict(id='NChain-v0', entry_point='gym.envs.toy_text:NChainEnv',
         max_episode_steps=1000),
    dict(id='Roulette-v0', entry_point='gym.envs.toy_text:RouletteEnv',
         max_episode_steps=100),
    dict(id='Taxi-v2', entry_point='gym.envs.toy_text.taxi:TaxiEnv',
         reward_threshold=8,  # optimum = 8.46
         max_episode_steps=200),
    dict(id='GuessingGame-v0',
         entry_point='gym.envs.toy_text.guessing_game:GuessingGame',
         max_episode_steps=200),
    dict(id='HotterColder-v0',
         entry_point='gym.envs.toy_text.hotter_colder:HotterColder',
         max_episode_steps=200),
):
    register(**_spec)
# Mujoco
# ----------------------------------------
# 2D
register(
id='Reacher-v1',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='InvertedPendulum-v1',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v1',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v1',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v1',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v1',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v1',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Ant-v1',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v1',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v1',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
# Custom Mujoco
# ----------------------------------------
# Custom Box3d Mujoco environments, registered in the original order
# (version numbers are deliberately not sequential here).
for _spec in (
    dict(id='Box3dReachPixel-v0',
         entry_point='gym.envs.mujoco:Box3dFixedReachEnvPixelGrey',
         max_episode_steps=200),
    dict(id='Box3dReachPixel-v1',
         entry_point='gym.envs.mujoco:Box3dFixedReachEnvPixelRGB',
         max_episode_steps=200),
    dict(id='Box3dReach-v6',
         entry_point='gym.envs.mujoco:Box3dFixedReachMulObjEnv',
         max_episode_steps=1000),
    dict(id='Box3dReach-v4',
         entry_point='gym.envs.mujoco:Box3dFixedReachHarderEnv',
         max_episode_steps=200),
    dict(id='Box3dReach-v5',
         entry_point='gym.envs.mujoco:Box3dFixedReachHardestEnv',
         max_episode_steps=200),
    dict(id='Box3dReach-v3',
         entry_point='gym.envs.mujoco:Box3dContactReachEnv',
         max_episode_steps=200),
    dict(id='Box3dReach-v2',
         entry_point='gym.envs.mujoco:Box3dFixedReachEnv',
         max_episode_steps=200),
    dict(id='Box3dReach-v0',
         entry_point='gym.envs.mujoco:Box3dReachPosEnv',
         max_episode_steps=100),
    dict(id='Box3dReach-v1',
         entry_point='gym.envs.mujoco:Box3dReachEnv',
         max_episode_steps=100),
    dict(id='Box3dGrasp-v0',
         entry_point='gym.envs.mujoco:Box3dGraspEnv',
         max_episode_steps=1000),
    dict(id='Box3dNoReward-v0',
         entry_point='gym.envs.mujoco:Box3dNoRewardEnv',
         max_episode_steps=200),
):
    register(**_spec)
# Atari
# ----------------------------------------
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
#
# Each game is registered for both observation types ('image' = screen
# pixels, 'ram' = emulator RAM) in six flavours:
#   <Game>-v0 / -v3              : AtariEnv's default frame skipping
#   <Game>Deterministic-v0 / -v3 : fixed frame skip (3 for space_invaders,
#                                  4 for every other game)
#   <Game>NoFrameskip-v0 / -v3   : frameskip=1, i.e. every emulator frame
# The -v0 variants additionally pass repeat_action_probability=0.25
# ("sticky actions"); the -v3 variants omit that kwarg.
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
    'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
    'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
    'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
    'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
    'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
    'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
    'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
    'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
    for obs_type in ['image', 'ram']:
        # space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
        name = ''.join([g.capitalize() for g in game.split('_')])
        if obs_type == 'ram':
            name = '{}-ram'.format(name)
        nondeterministic = False
        if game == 'elevator_action' and obs_type == 'ram':
            # ElevatorAction-ram-v0 seems to yield slightly
            # non-deterministic observations about 10% of the time. We
            # should track this down eventually, but for now we just
            # mark it as nondeterministic.
            nondeterministic = True
        register(
            id='{}-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
            max_episode_steps=10000,
            nondeterministic=nondeterministic,
        )
        # Same game without sticky actions and with a longer step budget.
        register(
            id='{}-v3'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type},
            max_episode_steps=100000,
            nondeterministic=nondeterministic,
        )
        # Standard Deterministic (as in the original DeepMind paper)
        if game == 'space_invaders':
            frameskip = 3
        else:
            frameskip = 4
        # Use a deterministic frame skip.
        register(
            id='{}Deterministic-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
            max_episode_steps=100000,
            nondeterministic=nondeterministic,
        )
        register(
            id='{}Deterministic-v3'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
            max_episode_steps=100000,
            nondeterministic=nondeterministic,
        )
        register(
            id='{}NoFrameskip-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25}, # A frameskip of 1 means we get every frame
            # Scale the step budget by the skip so an episode covers roughly
            # the same amount of emulator time as the skipping variants.
            max_episode_steps=frameskip * 100000,
            nondeterministic=nondeterministic,
        )
        # No frameskip. (Atari has no entropy source, so these are
        # deterministic environments.)
        register(
            id='{}NoFrameskip-v3'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1}, # A frameskip of 1 means we get every frame
            max_episode_steps=frameskip * 100000,
            nondeterministic=nondeterministic,
        )
# Board games
# ----------------------------------------
register(
    id='Go9x9-v0',
    entry_point='gym.envs.board_game:GoEnv',
    kwargs={
        'player_color': 'black',
        'opponent': 'pachi:uct:_2400',
        'observation_type': 'image3c',
        'illegal_move_mode': 'lose',
        'board_size': 9,
    },
    # The pachi player seems not to be determistic given a fixed seed.
    # (Reproduce by running 'import gym; h = gym.make('Go9x9-v0'); h.seed(1); h.reset(); h.step(15); h.step(16); h.step(17)' a few times.)
    #
    # This is probably due to a computation time limit.
    nondeterministic=True,
)
# Same configuration on the full-size board.
register(
    id='Go19x19-v0',
    entry_point='gym.envs.board_game:GoEnv',
    kwargs={
        'player_color': 'black',
        'opponent': 'pachi:uct:_2400',
        'observation_type': 'image3c',
        'illegal_move_mode': 'lose',
        'board_size': 19,
    },
    nondeterministic=True,
)
register(
    id='Hex9x9-v0',
    entry_point='gym.envs.board_game:HexEnv',
    kwargs={
        'player_color': 'black',
        'opponent': 'random',
        'observation_type': 'numpy3c',
        'illegal_move_mode': 'lose',
        'board_size': 9,
    },
)
# Debugging
# ----------------------------------------
# Minimal reward-structure sanity environments.
# NOTE(review): local_only presumably restricts these to local use; the
# exact semantics are defined by register() — confirm there.
register(
    id='OneRoundDeterministicReward-v0',
    entry_point='gym.envs.debugging:OneRoundDeterministicRewardEnv',
    local_only=True
)
register(
    id='TwoRoundDeterministicReward-v0',
    entry_point='gym.envs.debugging:TwoRoundDeterministicRewardEnv',
    local_only=True
)
register(
    id='OneRoundNondeterministicReward-v0',
    entry_point='gym.envs.debugging:OneRoundNondeterministicRewardEnv',
    local_only=True
)
register(
    id='TwoRoundNondeterministicReward-v0',
    entry_point='gym.envs.debugging:TwoRoundNondeterministicRewardEnv',
    local_only=True,
)
# Parameter tuning
# ----------------------------------------
register(
    id='ConvergenceControl-v0',
    entry_point='gym.envs.parameter_tuning:ConvergenceControl',
)
register(
    id='CNNClassifierTraining-v0',
    entry_point='gym.envs.parameter_tuning:CNNClassifierTraining',
)
# Safety
# ----------------------------------------
# interpretability envs
register(
    id='PredictActionsCartpole-v0',
    entry_point='gym.envs.safety:PredictActionsCartpoleEnv',
    max_episode_steps=200,
)
register(
    id='PredictObsCartpole-v0',
    entry_point='gym.envs.safety:PredictObsCartpoleEnv',
    max_episode_steps=200,
)
# semi_supervised envs
    # probably the easiest:
register(
    id='SemisuperPendulumNoise-v0',
    entry_point='gym.envs.safety:SemisuperPendulumNoiseEnv',
    max_episode_steps=200,
)
    # somewhat harder because of higher variance:
register(
    id='SemisuperPendulumRandom-v0',
    entry_point='gym.envs.safety:SemisuperPendulumRandomEnv',
    max_episode_steps=200,
)
    # probably the hardest because you only get a constant number of rewards in total:
register(
    id='SemisuperPendulumDecay-v0',
    entry_point='gym.envs.safety:SemisuperPendulumDecayEnv',
    max_episode_steps=200,
)
# off_switch envs
register(
    id='OffSwitchCartpole-v0',
    entry_point='gym.envs.safety:OffSwitchCartpoleEnv',
    max_episode_steps=200,
)
register(
    id='OffSwitchCartpoleProb-v0',
    entry_point='gym.envs.safety:OffSwitchCartpoleProbEnv',
    max_episode_steps=200,
)
| mit |
GEHC-Surgery/ITK | Wrapping/Generators/SwigInterface/pygccxml-1.0.0/pygccxml/declarations/dependencies.py | 13 | 2149 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
this module contains class that keeps dependency information of some declaration
"""
import cpptypes
class dependency_info_t(object):
    """Describes a single dependency of a declaration on a type or on
    another declaration."""

    def __init__(self, declaration, depend_on_it, access_type=None, hint=None):
        object.__init__(self)
        # Imported lazily to prevent a recursive import with
        # class_declaration.
        import class_declaration
        assert isinstance(depend_on_it, (class_declaration.class_t, cpptypes.type_t))
        self._declaration = declaration
        self._depend_on_it = depend_on_it
        self._access_type = access_type
        self._hint = hint

    @property
    def declaration(self):
        """the declaration whose dependency is described"""
        return self._declaration

    # short name, kept as a backward-compatible alias
    decl = declaration

    @property
    def depend_on_it(self):
        """the type or declaration that `declaration` depends on"""
        return self._depend_on_it

    @property
    def access_type(self):
        """access type of the dependency; may be reassigned"""
        return self._access_type

    @access_type.setter
    def access_type(self, access_type):
        self._access_type = access_type

    def __str__(self):
        return 'declaration "%s" depends( %s ) on "%s" ' \
               % (self.declaration, self.access_type, self.depend_on_it)

    @property
    def hint(self):
        """additional information about the dependency, reported by the
        declaration itself; it can be used later"""
        return self._hint

    def find_out_depend_on_declaration(self):
        """Return the declaration this dependency refers to, or None when
        the dependency only refers to some type.
        """
        # prevent recursive import
        from pygccxml import declarations
        if isinstance(self.depend_on_it, declarations.declaration_t):
            return self.depend_on_it
        base_type = declarations.base_type(declarations.remove_alias(self.depend_on_it))
        if isinstance(base_type, cpptypes.declarated_t):
            return base_type.declaration
        return None
| apache-2.0 |
resmo/ansible | test/units/modules/network/netvisor/test_pn_port_config.py | 23 | 2782 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_port_config
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule
class TestPortConfigModule(TestNvosModule):
    """Unit tests for the CLI string built by pn_port_config."""

    module = pn_port_config

    def setUp(self):
        self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_port_config.run_cli')
        self.run_nvos_commands = self.mock_run_nvos_commands.start()

    def tearDown(self):
        self.mock_run_nvos_commands.stop()

    def run_cli_patch(self, module, cli, state_map):
        # Stand-in for run_cli(): instead of talking to a switch, report
        # back the CLI string that would have been executed.
        if state_map['update'] == 'port-config-modify':
            module.exit_json(changed=True, cli_cmd=cli)

    def load_fixtures(self, commands=None, state=None, transport='cli'):
        self.run_nvos_commands.side_effect = self.run_cli_patch

    def _check_update_cli(self, module_args, expected_cmd):
        """Run the module in 'update' state and verify the generated CLI."""
        set_module_args(module_args)
        result = self.execute_module(changed=True, state='update')
        self.assertEqual(result['cli_cmd'], expected_cmd)

    def test_pn_port_config_modify_t1(self):
        self._check_update_cli(
            {'pn_cliswitch': 'sw01', 'pn_port': '1,2', 'pn_speed': '10g',
             'pn_jumbo': True, 'state': 'update'},
            ' switch sw01 port-config-modify speed 10g port 1,2 jumbo ')

    def test_pn_port_config_modify_t2(self):
        self._check_update_cli(
            {'pn_cliswitch': 'sw01', 'pn_port': 'all', 'pn_host_enable': True,
             'state': 'update'},
            ' switch sw01 port-config-modify port all host-enable ')

    def test_pn_port_config_modify_t3(self):
        self._check_update_cli(
            {'pn_cliswitch': 'sw01', 'pn_port': '5', 'pn_crc_check_enable': True,
             'pn_vxlan_termination': False, 'state': 'update'},
            ' switch sw01 port-config-modify port 5 crc-check-enable no-vxlan-termination ')

    def test_pn_port_config_modify_t4(self):
        self._check_update_cli(
            {'pn_cliswitch': 'sw01', 'pn_port': '10,11,12', 'pn_pause': False,
             'pn_enable': True, 'state': 'update'},
            ' switch sw01 port-config-modify port 10,11,12 no-pause enable ')
| gpl-3.0 |
ihsanudin/odoo | openerp/addons/base/module/wizard/base_export_language.py | 269 | 3648 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2004-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import contextlib
import cStringIO
from openerp import tools
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.tools.misc import get_iso_codes
NEW_LANG_KEY = '__new__'

class base_language_export(osv.osv_memory):
    """Wizard that exports translations (or an empty template) as a
    CSV, PO or TGZ file."""
    _name = "base.language.export"

    def _get_languages(self, cr, uid, context):
        # Selection values: every translatable language, preceded by the
        # pseudo-entry used to export an empty translation template.
        lang_obj = self.pool.get('res.lang')
        lang_ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        selection = [(lang.code, lang.name)
                     for lang in lang_obj.browse(cr, uid, lang_ids)]
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + selection

    _columns = {
        'name': fields.char('File Name', readonly=True),
        'lang': fields.selection(_get_languages, 'Language', required=True),
        'format': fields.selection([('csv','CSV File'),
                                    ('po','PO File'),
                                    ('tgz', 'TGZ Archive')], 'File Format', required=True),
        'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Modules To Export', domain=[('state','=','installed')]),
        'data': fields.binary('File', readonly=True),
        'state': fields.selection([('choose', 'choose'),   # choose language
                                   ('get', 'get')])        # get the file
    }
    _defaults = {
        'state': 'choose',
        'lang': NEW_LANG_KEY,
        'format': 'csv',
    }

    def act_getfile(self, cr, uid, ids, context=None):
        """Generate the export file and flip the wizard into its
        download ('get') state."""
        this = self.browse(cr, uid, ids, context=context)[0]
        # False means "no specific language": export the empty template.
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = sorted(module.name for module in this.modules) or ['all']

        with contextlib.closing(cStringIO.StringIO()) as buf:
            tools.trans_export(lang, mods, buf, this.format, cr)
            out = base64.encodestring(buf.getvalue())

        if lang:
            filename = get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        else:
            filename = 'new'
        extension = this.format
        if not lang and extension == 'po':
            # Language-less exports are templates: use the .pot suffix.
            extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({'state': 'get', 'data': out, 'name': name})
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GiovanniConserva/TestDeploy | venv/Lib/site-packages/django/contrib/gis/geos/linestring.py | 76 | 5843 | from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.point import Point
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class LineString(ProjectInterpolateMixin, GEOSGeometry):
    "A GEOS-backed linestring geometry."
    _init_func = capi.create_linestring
    _minlength = 2          # a linestring needs at least two points
    has_cs = True           # backed by a GEOS coordinate sequence

    def __init__(self, *args, **kwargs):
        """
        Initializes on the given sequence -- may take lists, tuples, NumPy arrays
        of X,Y pairs, or Point objects. If Point objects are used, ownership is
        _not_ transferred to the LineString object.

        Examples:
         ls = LineString((1, 1), (2, 2))
         ls = LineString([(1, 1), (2, 2)])
         ls = LineString(array([(1, 1), (2, 2)]))
         ls = LineString(Point(1, 1), Point(2, 2))
        """
        # If only one argument provided, set the coords array appropriately
        if len(args) == 1:
            coords = args[0]
        else:
            coords = args

        if isinstance(coords, (tuple, list)):
            # Getting the number of coords and the number of dimensions -- which
            # must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
            ncoords = len(coords)
            if coords:
                ndim = len(coords[0])
            else:
                raise TypeError('Cannot initialize on empty sequence.')
            self._checkdim(ndim)
            # Incrementing through each of the coordinates and verifying
            # every element is a sequence of the same dimension.
            for i in range(1, ncoords):
                if not isinstance(coords[i], (tuple, list, Point)):
                    raise TypeError('each coordinate should be a sequence (list or tuple)')
                if len(coords[i]) != ndim:
                    raise TypeError('Dimension mismatch.')
            numpy_coords = False
        elif numpy and isinstance(coords, numpy.ndarray):
            shape = coords.shape  # Using numpy's shape.
            if len(shape) != 2:
                raise TypeError('Too many dimensions.')
            self._checkdim(shape[1])
            ncoords = shape[0]
            ndim = shape[1]
            numpy_coords = True
        else:
            raise TypeError('Invalid initialization input for LineStrings.')

        # Creating a coordinate sequence object because it is easier to
        # set the points using GEOSCoordSeq.__setitem__().
        cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))

        for i in range(ncoords):
            if numpy_coords:
                cs[i] = coords[i, :]
            elif isinstance(coords[i], Point):
                # Copy the point's coordinates; the Point itself keeps
                # ownership of its GEOS geometry.
                cs[i] = coords[i].tuple
            else:
                cs[i] = coords[i]

        # If SRID was passed in with the keyword arguments
        srid = kwargs.get('srid')

        # Calling the base geometry initialization with the returned pointer
        # from the function.
        super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)

    def __iter__(self):
        "Allows iteration over this LineString."
        for i in range(len(self)):
            yield self[i]

    def __len__(self):
        "Returns the number of points in this LineString."
        return len(self._cs)

    def _get_single_external(self, index):
        return self._cs[index]

    # Internal and external per-point access are identical for linestrings.
    _get_single_internal = _get_single_external

    def _set_list(self, length, items):
        "Replaces the full point list with `items` (used by list-style assignment)."
        ndim = self._cs.dims
        # NOTE(review): dims and hasz are read separately; whether dims == 3
        # always implies hasz here is not established by this code — confirm
        # before simplifying.
        hasz = self._cs.hasz

        # create a new coordinate sequence and populate accordingly
        cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
        for i, c in enumerate(items):
            cs[i] = c

        ptr = self._init_func(cs.ptr)
        if ptr:
            # Success: free the old GEOS geometry and adopt the new pointer.
            capi.destroy_geom(self.ptr)
            self.ptr = ptr
            self._post_init(self.srid)
        else:
            # can this happen?
            raise GEOSException('Geometry resulting from slice deletion was invalid.')

    def _set_single(self, index, value):
        "Sets the point at the given index."
        self._checkindex(index)
        self._cs[index] = value

    def _checkdim(self, dim):
        "Linestring coordinates must be 2D or 3D."
        if dim not in (2, 3):
            raise TypeError('Dimension mismatch.')

    # #### Sequence Properties ####
    @property
    def tuple(self):
        "Returns a tuple version of the geometry from the coordinate sequence."
        return self._cs.tuple
    coords = tuple  # alias: `coords` and `tuple` are the same property

    def _listarr(self, func):
        """
        Internal routine that returns a sequence (list) corresponding with
        the given function.  Will return a numpy array if possible.
        """
        lst = [func(i) for i in range(len(self))]
        if numpy:
            return numpy.array(lst) # ARRRR!
        else:
            return lst

    @property
    def array(self):
        "Returns a numpy array for the LineString."
        return self._listarr(self._cs.__getitem__)

    @property
    def merged(self):
        "Returns the line merge of this LineString."
        return self._topology(capi.geos_linemerge(self.ptr))

    @property
    def x(self):
        "Returns a list or numpy array of the X variable."
        return self._listarr(self._cs.getX)

    @property
    def y(self):
        "Returns a list or numpy array of the Y variable."
        return self._listarr(self._cs.getY)

    @property
    def z(self):
        "Returns a list or numpy array of the Z variable."
        if not self.hasz:
            return None
        else:
            return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
    "A closed LineString (GEOS linearring), e.g. a Polygon boundary."
    # A closed ring repeats its first point as its last, hence the
    # minimum of 4 points rather than LineString's 2.
    _minlength = 4
    _init_func = capi.create_linearring
| bsd-3-clause |
klnprj/testapp | django/contrib/flatpages/tests/templatetags.py | 228 | 5965 | import os
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
class FlatpageTemplateTagTests(TestCase):
    """Tests for the {% get_flatpages %} template tag."""
    fixtures = ['sample_flatpages']
    urls = 'django.contrib.flatpages.tests.urls'

    def setUp(self):
        # Ensure the flatpage fallback middleware is active and that
        # templates resolve to this app's test templates; the original
        # settings are restored in tearDown.
        self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
        fallback = 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
        if fallback not in settings.MIDDLEWARE_CLASSES:
            settings.MIDDLEWARE_CLASSES += (fallback,)
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )
        self.me = User.objects.create_user('testuser', 'test@example.com', 's3krit')

    def tearDown(self):
        settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS

    def _render(self, template_source, context=None):
        """Render *template_source* with an optional context dict."""
        return Template(template_source).render(Context(context or {}))

    def test_get_flatpages_tag(self):
        "Without a user argument, only the public flatpages are listed"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages as flatpages %}"
            "{% for page in flatpages %}"
            "{{ page.title }},"
            "{% endfor %}")
        self.assertEqual(out, "A Flatpage,A Nested Flatpage,")

    def test_get_flatpages_tag_for_anon_user(self):
        "An anonymous user sees only the public flatpages"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages for anonuser as flatpages %}"
            "{% for page in flatpages %}"
            "{{ page.title }},"
            "{% endfor %}",
            {'anonuser': AnonymousUser()})
        self.assertEqual(out, "A Flatpage,A Nested Flatpage,")

    def test_get_flatpages_tag_for_user(self):
        "An authenticated user sees all flatpages"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages for me as flatpages %}"
            "{% for page in flatpages %}"
            "{{ page.title }},"
            "{% endfor %}",
            {'me': self.me})
        self.assertEqual(out, "A Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,")

    def test_get_flatpages_with_prefix(self):
        "A URL prefix restricts the listing to the public pages beneath it"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages '/location/' as location_flatpages %}"
            "{% for page in location_flatpages %}"
            "{{ page.title }},"
            "{% endfor %}")
        self.assertEqual(out, "A Nested Flatpage,")

    def test_get_flatpages_with_prefix_for_anon_user(self):
        "A URL prefix with an anonymous user lists public pages beneath it"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages '/location/' for anonuser as location_flatpages %}"
            "{% for page in location_flatpages %}"
            "{{ page.title }},"
            "{% endfor %}",
            {'anonuser': AnonymousUser()})
        self.assertEqual(out, "A Nested Flatpage,")

    def test_get_flatpages_with_prefix_for_user(self):
        "A URL prefix with an authenticated user lists all pages beneath it"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages '/location/' for me as location_flatpages %}"
            "{% for page in location_flatpages %}"
            "{{ page.title }},"
            "{% endfor %}",
            {'me': self.me})
        self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,")

    def test_get_flatpages_with_variable_prefix(self):
        "The prefix for the flatpage template tag can be a template variable"
        out = self._render(
            "{% load flatpages %}"
            "{% get_flatpages location_prefix as location_flatpages %}"
            "{% for page in location_flatpages %}"
            "{{ page.title }},"
            "{% endfor %}",
            {'location_prefix': '/location/'})
        self.assertEqual(out, "A Nested Flatpage,")

    def test_parsing_errors(self):
        "Malformed get_flatpages invocations raise TemplateSyntaxError"
        bad_templates = [
            "{% load flatpages %}{% get_flatpages %}",
            "{% load flatpages %}{% get_flatpages as %}",
            "{% load flatpages %}{% get_flatpages cheesecake flatpages %}",
            "{% load flatpages %}{% get_flatpages as flatpages asdf%}",
            "{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}",
            "{% load flatpages %}{% get_flatpages for user as flatpages asdf%}",
            "{% load flatpages %}{% get_flatpages prefix for user as flatpages asdf%}",
        ]
        for bad_source in bad_templates:
            self.assertRaises(TemplateSyntaxError, self._render, bad_source)
| bsd-3-clause |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1_modules/rfc8494.py | 13 | 2363 | # This file is being contributed to pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Multicast Email (MULE) over Allied Communications Publication 142
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc8494.txt
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
# OID identifying MMHS compressed data types (ACP 142 CDT).
id_mmhs_CDT = univ.ObjectIdentifier('1.3.26.0.4406.0.4.2')


class AlgorithmID_ShortForm(univ.Integer):
    """Short-form identifier for the compression algorithm."""
    namedValues = namedval.NamedValues(
        ('zlibCompress', 0)
    )
class ContentType_ShortForm(univ.Integer):
    """Short-form code for the content type of the compressed payload."""
    namedValues = namedval.NamedValues(
        ('unidentified', 0),
        ('external', 1),
        ('p1', 2),
        ('p3', 3),
        ('p7', 4),
        ('mule', 25)
    )
class CompressedContentInfo(univ.Sequence):
    """Compressed payload paired with the content type it decodes to.

    The content type is a CHOICE of either a short-form integer code or
    a full object identifier.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('unnamed', univ.Choice(componentType=namedtype.NamedTypes(
            namedtype.NamedType('contentType-ShortForm',
                ContentType_ShortForm().subtype(explicitTag=tag.Tag(
                    tag.tagClassContext, tag.tagFormatSimple, 0))),
            namedtype.NamedType('contentType-OID',
                univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
                    tag.tagClassContext, tag.tagFormatSimple, 1)))
        ))),
        namedtype.NamedType('compressedContent',
            univ.OctetString().subtype(explicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class CompressionAlgorithmIdentifier(univ.Choice):
    """Compression algorithm given either as a short-form code or an OID."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithmID-ShortForm',
            AlgorithmID_ShortForm().subtype(explicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('algorithmID-OID',
            univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
                tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
class CompressedData(univ.Sequence):
    """Top-level structure: algorithm identifier plus the compressed content."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
        namedtype.NamedType('compressedContentInfo', CompressedContentInfo())
    )
| apache-2.0 |
PeterWangIntel/chromium-crosswalk | tools/perf/metrics/timeline.py | 20 | 12212 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from telemetry.util.statistics import DivideIfPossibleOrZero
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric
class LoadTimesTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Reports total/max/avg self-time per trace-event name, plus counter
  totals, for the renderer process during a single interaction record."""

  def __init__(self):
    super(LoadTimesTimelineMetric, self).__init__()
    # By default only slices from the renderer main thread are counted.
    self.report_main_thread_only = True

  def AddResults(self, model, renderer_thread, interaction_records, results):
    assert model
    assert len(interaction_records) == 1, (
        'LoadTimesTimelineMetric cannot compute metrics for more than 1 time '
        'range.')
    interaction_record = interaction_records[0]
    if self.report_main_thread_only:
      thread_filter = 'CrRendererMain'
    else:
      thread_filter = None

    # Group all slices inside the interaction range by event name,
    # across every thread that passes the filter.
    events_by_name = collections.defaultdict(list)
    renderer_process = renderer_thread.parent
    for thread in renderer_process.threads.itervalues():
      # Substring containment check: thread_filter is a string, so any
      # thread whose name appears inside it is accepted.
      if thread_filter and not thread.name in thread_filter:
        continue
      thread_name = thread.name.replace('/','_')
      for e in thread.IterAllSlicesInRange(interaction_record.start,
                                           interaction_record.end):
        events_by_name[e.name].append(e)

    for event_name, event_group in events_by_name.iteritems():
      times = [event.self_time for event in event_group]
      total = sum(times)
      biggest_jank = max(times)
      # Results objects cannot contain the '.' character, so remove that here.
      sanitized_event_name = event_name.replace('.', '_')
      # NOTE(review): thread_name here is whatever the *last* iteration of
      # the thread loop above left behind. With the default CrRendererMain
      # filter only one thread matches, but with report_main_thread_only
      # False, events from all threads get labeled with one thread's name —
      # confirm this is intended.
      full_name = thread_name + '|' + sanitized_event_name
      results.AddValue(scalar.ScalarValue(
          results.current_page, full_name, 'ms', total))
      results.AddValue(scalar.ScalarValue(
          results.current_page, full_name + '_max', 'ms', biggest_jank))
      results.AddValue(scalar.ScalarValue(
          results.current_page, full_name + '_avg', 'ms', total / len(times)))

    # Counters are reported process-wide: total and per-sample average.
    for counter_name, counter in renderer_process.counters.iteritems():
      total = sum(counter.totals)
      # Results objects cannot contain the '.' character, so remove that here.
      sanitized_counter_name = counter_name.replace('.', '_')
      results.AddValue(scalar.ScalarValue(
          results.current_page, sanitized_counter_name, 'count', total))
      results.AddValue(scalar.ScalarValue(
          results.current_page, sanitized_counter_name + '_avg', 'count',
          total / float(len(counter.totals))))
# We want to generate a consistant picture of our thread usage, despite
# having several process configurations (in-proc-gpu/single-proc).
# Since we can't isolate renderer threads in single-process mode, we
# always sum renderer-process threads' times. We also sum all io-threads
# for simplicity.
TimelineThreadCategories =  {
  "Chrome_InProcGpuThread": "GPU",
  "CrGpuMain"             : "GPU",
  "AsyncTransferThread"   : "GPU_transfer",
  "CrBrowserMain"         : "browser",
  "Browser Compositor"    : "browser",
  "CrRendererMain"        : "renderer_main",
  "Compositor"            : "renderer_compositor",
  "IOThread"              : "IO",
  "CompositorTileWorker"  : "raster",
  # NOTE(review): the "DummyThreadNameN" keys appear to exist only so that
  # the synthetic aggregate categories ("other", "total_fast_path",
  # "total_all") are declared in this map; no real thread should carry
  # these names — confirm.
  "DummyThreadName1"      : "other",
  "DummyThreadName2"      : "total_fast_path",
  "DummyThreadName3"      : "total_all"
}

# Keys above that are matched as thread-name substrings instead of
# exact names (see ThreadCategoryName below).
_MatchBySubString = ["IOThread", "CompositorTileWorker"]

AllThreads = TimelineThreadCategories.values()
NoThreads = []
FastPathThreads = ["GPU", "renderer_compositor", "browser", "IO"]

ReportMainThreadOnly = ["renderer_main"]
ReportSilkDetails = ["renderer_main"]

# TODO(epenner): Thread names above are likely fairly stable but trace names
# could change. We should formalize these traces to keep this robust.
OverheadTraceCategory = "trace_event_overhead"
OverheadTraceName = "overhead"
FrameTraceName = "::SwapBuffers"
FrameTraceThreadName = "renderer_compositor"
IntervalNames = ["frame", "second"]
def Rate(numerator, denominator):
  # Safe division: per its name, DivideIfPossibleOrZero yields 0 instead
  # of raising when the division cannot be performed (e.g. denominator 0).
  return DivideIfPossibleOrZero(numerator, denominator)
def ClockOverheadForEvent(event):
  """Wall-clock time charged to trace-event recording overhead.

  Returns the event's duration when the event is the dedicated overhead
  marker, and 0 for every other event.
  """
  is_overhead_marker = (event.category == OverheadTraceCategory and
                        event.name == OverheadTraceName)
  return event.duration if is_overhead_marker else 0
def CpuOverheadForEvent(event):
  """CPU (thread) time charged to trace-event recording overhead.

  Only overhead-category events that actually carry a thread duration
  contribute; everything else counts as 0.
  """
  if event.category == OverheadTraceCategory and event.thread_duration:
    return event.thread_duration
  return 0
def ThreadCategoryName(thread_name):
  """Map a raw thread name onto one of the TimelineThreadCategories buckets."""
  category = "other"
  # Substring-based matches first (only the names listed in
  # _MatchBySubString are matched this way)...
  for name_fragment, bucket in TimelineThreadCategories.iteritems():
    if name_fragment in _MatchBySubString and name_fragment in thread_name:
      category = bucket
  # ...but an exact name match always takes precedence.
  if thread_name in TimelineThreadCategories:
    category = TimelineThreadCategories[thread_name]
  return category
def ThreadCpuTimeResultName(thread_category, interval_name):
  # This isn't a good name, but it is kept for continuity of existing
  # dashboards/results.
  return "thread_%s_cpu_time_per_%s" % (thread_category, interval_name)
def ThreadTasksResultName(thread_category, interval_name):
  """Result name for the number of top-level tasks per interval."""
  return "tasks_per_%s_%s" % (interval_name, thread_category)
def ThreadMeanFrameTimeResultName(thread_category):
  """Result name for the mean frame time attributed to a thread category."""
  return "mean_frame_time_%s" % thread_category
def ThreadDetailResultName(thread_category, interval_name, detail):
  """Result name for per-category details, e.g. 'thread_GPU|cc_debug'.

  Dots in *detail* are replaced with underscores since results cannot
  contain '.'.
  """
  # Per-frame detail names carry no interval suffix, to preserve
  # continuity with historical result names.
  interval_suffix = "" if interval_name == "frame" else "_per_" + interval_name
  return "thread_%s%s|%s" % (
      thread_category, interval_suffix, detail.replace('.', '_'))
def ThreadCpuTimeUnits(interval_name):
  """CPU time is a percentage when measured per second, otherwise ms."""
  return "%" if interval_name == "second" else "ms"
def ThreadCpuTimeValue(ms_cpu_time_per_interval, interval_name):
  """Convert ms of CPU time per interval into the value to report."""
  if interval_name == "second":
    # ms of CPU per wall-clock second is reported as a percentage.
    return (ms_cpu_time_per_interval / 1000.0) * 100.0
  return ms_cpu_time_per_interval
class ResultsForThread(object):
  """Accumulates trace slices for one thread category and derives metrics.

  Slices are collected (via AppendThreadSlices) only when they fall inside
  one of the interaction record ranges supplied at construction time.
  """

  def __init__(self, model, record_ranges, name):
    # model: the timeline model the slices come from.
    # record_ranges: interval objects with a .bounds duration and a
    #   ContainsInterval(start, end) predicate -- presumably telemetry
    #   Bounds objects; confirm against the caller.
    # name: the thread-category name used in result names.
    self.model = model
    self.toplevel_slices = []
    self.all_slices = []
    self.name = name
    self.record_ranges = record_ranges
    # Total wall-clock time covered by all interaction records, in the
    # same units as record_range.bounds (ms elsewhere in this file).
    self.all_action_time = \
        sum([record_range.bounds for record_range in self.record_ranges])

  @property
  def clock_time(self):
    """Wall-clock duration of top-level slices, minus clock overhead."""
    clock_duration = sum([x.duration for x in self.toplevel_slices])
    clock_overhead = sum([ClockOverheadForEvent(x) for x in self.all_slices])
    return clock_duration - clock_overhead

  @property
  def cpu_time(self):
    """CPU (thread) time of top-level slices, minus CPU overhead.

    Returns 0 if any slice with a nonzero wall duration is missing its
    thread_duration, so partial CPU data is never reported as complete.
    """
    cpu_duration = 0
    cpu_overhead = sum([CpuOverheadForEvent(x) for x in self.all_slices])
    for x in self.toplevel_slices:
      # Only report thread-duration if we have it for all events.
      #
      # A thread_duration of 0 is valid, so this only returns 0 if it is None.
      if x.thread_duration == None:
        if not x.duration:
          # Zero-duration slices without thread time are harmless; skip them.
          continue
        else:
          return 0
      else:
        cpu_duration += x.thread_duration
    return cpu_duration - cpu_overhead

  def SlicesInActions(self, slices):
    """Filter slices down to those inside any interaction record range."""
    slices_in_actions = []
    for event in slices:
      for record_range in self.record_ranges:
        if record_range.ContainsInterval(event.start, event.end):
          slices_in_actions.append(event)
          break
    return slices_in_actions

  def AppendThreadSlices(self, thread):
    """Collect a thread's slices (all and top-level) that lie in-range."""
    self.all_slices.extend(self.SlicesInActions(thread.all_slices))
    self.toplevel_slices.extend(self.SlicesInActions(thread.toplevel_slices))

  # Reports cpu-time per interval and tasks per interval.
  def AddResults(self, num_intervals, interval_name, results):
    cpu_per_interval = Rate(self.cpu_time, num_intervals)
    tasks_per_interval = Rate(len(self.toplevel_slices), num_intervals)
    results.AddValue(scalar.ScalarValue(
        results.current_page,
        ThreadCpuTimeResultName(self.name, interval_name),
        ThreadCpuTimeUnits(interval_name),
        ThreadCpuTimeValue(cpu_per_interval, interval_name)))
    results.AddValue(scalar.ScalarValue(
        results.current_page,
        ThreadTasksResultName(self.name, interval_name),
        'tasks', tasks_per_interval))

  def AddDetailedResults(self, num_intervals, interval_name, results):
    """Report per-trace-category self time, plus remaining time as 'idle'."""
    slices_by_category = collections.defaultdict(list)
    for s in self.all_slices:
      slices_by_category[s.category].append(s)
    all_self_times = []
    for category, slices_in_category in slices_by_category.iteritems():
      self_time = sum([x.self_time for x in slices_in_category])
      all_self_times.append(self_time)
      self_time_per_interval = Rate(self_time, num_intervals)
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          ThreadDetailResultName(self.name, interval_name, category),
          ThreadCpuTimeUnits(interval_name),
          ThreadCpuTimeValue(self_time_per_interval, interval_name)))
    all_measured_time = sum(all_self_times)
    # Anything not attributed to a trace category counts as idle; clamp at 0
    # in case measured time exceeds the action time.
    idle_time = max(0, self.all_action_time - all_measured_time)
    idle_time_per_interval = Rate(idle_time, num_intervals)
    results.AddValue(scalar.ScalarValue(
        results.current_page,
        ThreadDetailResultName(self.name, interval_name, "idle"),
        ThreadCpuTimeUnits(interval_name),
        ThreadCpuTimeValue(idle_time_per_interval, interval_name)))

  def CountTracesWithName(self, substring):
    """Count collected slices whose event name contains |substring|."""
    count = 0
    for event in self.all_slices:
      if substring in event.name:
        count += 1
    return count
class ThreadTimesTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Timeline metric reporting per-thread-category CPU time and task counts.

  Results are reported twice: per frame and per second of interaction time.
  """

  def __init__(self):
    super(ThreadTimesTimelineMetric, self).__init__()
    # Minimal traces, for minimum noise in CPU-time measurements.
    self.results_to_report = AllThreads
    self.details_to_report = NoThreads

  def AddResults(self, model, _, interaction_records, results):
    """Aggregate slices per thread category and emit scalar results.

    The unused second argument is presumably the renderer thread required
    by the TimelineBasedMetric interface -- confirm against the base class.
    """
    # Set up each thread category for consistent results.
    thread_category_results = {}
    for name in TimelineThreadCategories.values():
      thread_category_results[name] = ResultsForThread(
          model, [r.GetBounds() for r in interaction_records], name)

    # Group the slices by their thread category.
    for thread in model.GetAllThreads():
      thread_category = ThreadCategoryName(thread.name)
      thread_category_results[thread_category].AppendThreadSlices(thread)

    # Group all threads.
    for thread in model.GetAllThreads():
      thread_category_results['total_all'].AppendThreadSlices(thread)

    # Also group fast-path threads.
    for thread in model.GetAllThreads():
      if ThreadCategoryName(thread.name) in FastPathThreads:
        thread_category_results['total_fast_path'].AppendThreadSlices(thread)

    # Calculate the interaction's number of frames.
    frame_rate_thread = thread_category_results[FrameTraceThreadName]
    num_frames = frame_rate_thread.CountTracesWithName(FrameTraceName)

    # Calculate the interaction's duration (all_action_time is in ms).
    all_threads = thread_category_results['total_all']
    num_seconds = all_threads.all_action_time / 1000.0

    # Report the desired results and details for each interval type.
    intervals = [('frame', num_frames), ('second', num_seconds)]
    for (interval_name, num_intervals) in intervals:
      for thread_results in thread_category_results.values():
        if thread_results.name in self.results_to_report:
          thread_results.AddResults(num_intervals, interval_name, results)
        # TODO(nduca): When generic results objects are done, this special case
        # can be replaced with a generic UI feature.
        if thread_results.name in self.details_to_report:
          thread_results.AddDetailedResults(
              num_intervals, interval_name, results)

    # Report mean frame time for the frame rate thread. We could report other
    # frame rates (eg. renderer_main) but this might get confusing.
    mean_frame_time = Rate(frame_rate_thread.all_action_time, num_frames)
    results.AddValue(scalar.ScalarValue(
        results.current_page,
        ThreadMeanFrameTimeResultName(FrameTraceThreadName),
        'ms', mean_frame_time))
| bsd-3-clause |
was4444/chromium.src | third_party/closure_linter/closure_linter/tokenutil_test.py | 109 | 7678 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the scopeutil module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
class FakeToken(object):
  """Bare stand-in for a token; tests attach attributes (next, string, ...) ad hoc."""
  pass
class TokenUtilTest(googletest.TestCase):
def testGetTokenRange(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.next = b
b.next = c
c.next = d
self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
# This is an error as e does not come after a in the token chain.
self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
def testTokensToString(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.string = 'aaa'
b.string = 'bbb'
c.string = 'ccc'
d.string = 'ddd'
e.string = 'eee'
a.line_number = 5
b.line_number = 6
c.line_number = 6
d.line_number = 10
e.line_number = 11
self.assertEquals(
'aaa\nbbbccc\n\n\n\nddd\neee',
tokenutil.TokensToString([a, b, c, d, e]))
self.assertEquals(
'ddd\neee\naaa\nbbbccc',
tokenutil.TokensToString([d, e, a, b, c]),
'Neighboring tokens not in line_number order should have a newline '
'between them.')
def testGetPreviousCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
None,
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start1.',
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
def testGetNextCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'end1',
tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
def testGetIdentifierStart(self):
tokens = testutil.TokenizeSource("""
start1 . // comment
prototype. /* another comment */
end1
['edge'][case].prototype.
end2 = function() {}
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
def testInsertTokenBefore(self):
self.AssertInsertTokenAfterBefore(False)
def testInsertTokenAfter(self):
self.AssertInsertTokenAfterBefore(True)
def AssertInsertTokenAfterBefore(self, after):
new_token = javascripttokens.JavaScriptToken(
'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
existing_token1 = javascripttokens.JavaScriptToken(
'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
existing_token1.start_index = 0
existing_token1.metadata = ecmametadatapass.EcmaMetaData()
existing_token2 = javascripttokens.JavaScriptToken(
' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
existing_token2.start_index = 3
existing_token2.metadata = ecmametadatapass.EcmaMetaData()
existing_token2.metadata.last_code = existing_token1
existing_token1.next = existing_token2
existing_token2.previous = existing_token1
if after:
tokenutil.InsertTokenAfter(new_token, existing_token1)
else:
tokenutil.InsertTokenBefore(new_token, existing_token2)
self.assertEquals(existing_token1, new_token.previous)
self.assertEquals(existing_token2, new_token.next)
self.assertEquals(new_token, existing_token1.next)
self.assertEquals(new_token, existing_token2.previous)
self.assertEquals(existing_token1, new_token.metadata.last_code)
self.assertEquals(new_token, existing_token2.metadata.last_code)
self.assertEquals(0, existing_token1.start_index)
self.assertEquals(3, new_token.start_index)
self.assertEquals(4, existing_token2.start_index)
def testGetIdentifierForToken(self):
tokens = testutil.TokenizeSource("""
start1.abc.def.prototype.
onContinuedLine
(start2.abc.def
.hij.klm
.nop)
start3.abc.def
.hij = function() {};
// An absurd multi-liner.
start4.abc.def.
hij.
klm = function() {};
start5 . aaa . bbb . ccc
shouldntBePartOfThePreviousSymbol
start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
var start7 = 42;
function start8() {
}
start9.abc. // why is there a comment here?
def /* another comment */
shouldntBePart
start10.abc // why is there a comment here?
.def /* another comment */
shouldntBePart
start11.abc. middle1.shouldNotBeIdentifier
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1.abc.def.prototype.onContinuedLine',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start2.abc.def.hij.klm.nop',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
self.assertEquals(
'start3.abc.def.hij',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
self.assertEquals(
'start4.abc.def.hij.klm',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
self.assertEquals(
'start5.aaa.bbb.ccc',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
self.assertEquals(
'start6.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
self.assertEquals(
'start7',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
self.assertEquals(
'start8',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
self.assertEquals(
'start9.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
self.assertEquals(
'start10.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
self.assertIsNone(
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
zymsys/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModelFrame.py | 22 | 2075 | import numpy as np
# Plots one analysis frame of the harmonic-plus-stochastic (HPS) model for a
# flute note: the harmonic spectrum, the residual after subtracting the
# harmonics, and the decimated stochastic approximation of the residual.
# NOTE(review): Python 2 script -- integer divisions like Ns/2 and index
# arithmetic rely on py2 semantics.
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris, resample
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM

(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs                       # analysis position: 0.8 s into the file, in samples
M = 601                           # analysis window length (odd)
hM1 = int(math.floor((M+1)/2))    # half window sizes used to slice around pos
hM2 = int(math.floor(M/2))
w = np.hamming(M)                 # analysis window
N = 1024                          # FFT size for DFT analysis
t = -100                          # peak-detection threshold (presumably dB; see UF.peakDetection)
nH = 40                           # number of harmonics passed to harmonicDetection
minf0 = 420                       # f0 search range for f0Twm (Hz) -- around A4
maxf0 = 460
f0et = 5                          # f0 error threshold for f0Twm
minSineDur = .1                   # unused in this single-frame script
harmDevSlope = 0.01               # allowed harmonic deviation slope
Ns = 512                          # synthesis FFT size
H = Ns/4                          # hop size (unused in this single-frame script)
stocf = .2                        # stochastic decimation factor
x1 = x[pos-hM1:pos+hM2]           # frame for harmonic analysis
x2 = x[pos-Ns/2-1:pos+Ns/2-1]     # Ns-sized frame for residual computation

# Harmonic analysis: spectrum, peaks, f0, and harmonic tracks for this frame.
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N               # convert interpolated bin locations to Hz
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)

# Synthesize the harmonic spectrum and its magnitude in dB.
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))

# Residual: subtract the harmonic spectrum from the (Blackman-Harris
# windowed, normalized) input spectrum.
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
mYst = resample(np.maximum(-200, mXr), mXr.size*stocf)  # decimate the mag spectrum

# Plot: top, input spectrum with detected harmonics; bottom, harmonic
# spectrum, residual, and stochastic approximation.
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(2,1,1)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-100,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', lw=2, markeredgewidth=1.5)
plt.title('mX + harmonics')

plt.subplot(2,1,2)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.6, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.0, label='mXr')
binFreq = (fs/2.0)*np.arange(mYst.size)/(mYst.size)
plt.plot(binFreq,mYst,'r', lw=1.5, label='mYst')
plt.axis([0,maxplotfreq,-100,max(mYh)+2])
plt.legend(prop={'size':15})
plt.title('mYh + mXr + mYst')

plt.tight_layout()
plt.savefig('hpsModelFrame.png')
plt.show()
ESSS/numpy | numpy/distutils/fcompiler/gnu.py | 28 | 14667 | from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
    """Return True when running on native 64-bit Windows."""
    if sys.platform != "win32":
        return False
    return platform.architecture()[0] == "64bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let disutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
filename = sc.get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case want to link F77 compiled code with MSVC
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
else:
opt = ['-O3']
opt.append('-funroll-loops')
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath="%s"' % dir
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
def get_flags_opt(self):
if is_win64():
return ['-O0']
else:
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
| bsd-3-clause |
ANDABO/plugin.video.aresiptv-1.9.4 | pyaes.py | 189 | 16661 | """Simple AES cipher implementation in pure Python following PEP-272 API
Homepage: https://bitbucket.org/intgr/pyaes/
The goal of this module is to be as fast as reasonable in Python while still
being Pythonic and readable/understandable. It is licensed under the permissive
MIT license.
Hopefully the code is readable and commented enough that it can serve as an
introduction to the AES cipher for Python coders. In fact, it should go along
well with the Stick Figure Guide to AES:
http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html
Contrary to intuition, this implementation numbers the 4x4 matrices from top to
bottom for efficiency reasons::
0 4 8 12
1 5 9 13
2 6 10 14
3 7 11 15
Effectively it's the transposition of what you'd expect. This actually makes
the code simpler -- except the ShiftRows step, but hopefully the explanation
there clears it up.
"""
####
# Copyright (c) 2010 Marti Raudsepp <marti@juffo.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
####
from array import array
# Globals mandated by PEP 272:
# http://www.python.org/dev/peps/pep-0272/
MODE_ECB = 1
MODE_CBC = 2
#MODE_CTR = 6
block_size = 16
key_size = None
def new(key, mode, IV=None):
    """Create a PEP-272 style cipher object wrapping AES in the given mode.

    Only ECB and CBC are implemented; CBC additionally requires an IV.
    """
    if mode == MODE_ECB:
        return ECBMode(AES(key))
    if mode == MODE_CBC:
        if IV is None:
            raise ValueError("CBC mode needs an IV value!")
        return CBCMode(AES(key), IV)
    raise NotImplementedError
#### AES cipher implementation
class AES(object):
block_size = 16
def __init__(self, key):
self.setkey(key)
def setkey(self, key):
"""Sets the key and performs key expansion."""
self.key = key
self.key_size = len(key)
if self.key_size == 16:
self.rounds = 10
elif self.key_size == 24:
self.rounds = 12
elif self.key_size == 32:
self.rounds = 14
else:
raise ValueError, "Key length must be 16, 24 or 32 bytes"
self.expand_key()
def expand_key(self):
"""Performs AES key expansion on self.key and stores in self.exkey"""
# The key schedule specifies how parts of the key are fed into the
# cipher's round functions. "Key expansion" means performing this
# schedule in advance. Almost all implementations do this.
#
# Here's a description of AES key schedule:
# http://en.wikipedia.org/wiki/Rijndael_key_schedule
# The expanded key starts with the actual key itself
exkey = array('B', self.key)
# extra key expansion steps
if self.key_size == 16:
extra_cnt = 0
elif self.key_size == 24:
extra_cnt = 2
else:
extra_cnt = 3
# 4-byte temporary variable for key expansion
word = exkey[-4:]
# Each expansion cycle uses 'i' once for Rcon table lookup
for i in xrange(1, 11):
#### key schedule core:
# left-rotate by 1 byte
word = word[1:4] + word[0:1]
# apply S-box to all bytes
for j in xrange(4):
word[j] = aes_sbox[word[j]]
# apply the Rcon table to the leftmost byte
word[0] = word[0] ^ aes_Rcon[i]
#### end key schedule core
for z in xrange(4):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
# Last key expansion cycle always finishes here
if len(exkey) >= (self.rounds+1) * self.block_size:
break
# Special substitution step for 256-bit key
if self.key_size == 32:
for j in xrange(4):
# mix in bytes from the last subkey XORed with S-box of
# current word bytes
word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
exkey.extend(word)
# Twice for 192-bit key, thrice for 256-bit key
for z in xrange(extra_cnt):
for j in xrange(4):
# mix in bytes from the last subkey
word[j] ^= exkey[-self.key_size + j]
exkey.extend(word)
self.exkey = exkey
def add_round_key(self, block, round):
"""AddRoundKey step in AES. This is where the key is mixed into plaintext"""
offset = round * 16
exkey = self.exkey
for i in xrange(16):
block[i] ^= exkey[offset + i]
#print 'AddRoundKey:', block
def sub_bytes(self, block, sbox):
"""SubBytes step, apply S-box to all bytes
Depending on whether encrypting or decrypting, a different sbox array
is passed in.
"""
for i in xrange(16):
block[i] = sbox[block[i]]
#print 'SubBytes :', block
def shift_rows(self, b):
"""ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
Since we're performing this on a transposed matrix, cells are numbered
from top to bottom::
0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change
1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around)
2 6 10 14 -> 10 14 2 6 -- shifted by 2
3 7 11 15 -> 15 3 7 11 -- shifted by 3
"""
b[1], b[5], b[ 9], b[13] = b[ 5], b[ 9], b[13], b[ 1]
b[2], b[6], b[10], b[14] = b[10], b[14], b[ 2], b[ 6]
b[3], b[7], b[11], b[15] = b[15], b[ 3], b[ 7], b[11]
#print 'ShiftRows :', b
def shift_rows_inv(self, b):
"""Similar to shift_rows above, but performed in inverse for decryption."""
b[ 5], b[ 9], b[13], b[ 1] = b[1], b[5], b[ 9], b[13]
b[10], b[14], b[ 2], b[ 6] = b[2], b[6], b[10], b[14]
b[15], b[ 3], b[ 7], b[11] = b[3], b[7], b[11], b[15]
#print 'ShiftRows :', b
def mix_columns(self, block):
"""MixColumns step. Mixes the values in each column"""
# Cache global multiplication tables (see below)
mul_by_2 = gf_mul_by_2
mul_by_3 = gf_mul_by_3
# Since we're dealing with a transposed matrix, columns are already
# sequential
for i in xrange(4):
col = i * 4
#v0, v1, v2, v3 = block[col : col+4]
v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
block[col + 3])
block[col ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
block[col+2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
block[col+3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
#print 'MixColumns :', block
    def mix_columns_inv(self, block):
        """Similar to mix_columns above, but performed in inverse for decryption.

        Uses the inverse MDS matrix
        [0e 0b 0d 09 / 09 0e 0b 0d / 0d 09 0e 0b / 0b 0d 09 0e] over
        GF(2^8). Mutates ``block`` in place.
        """
        # Cache global multiplication tables (see below)
        mul_9 = gf_mul_by_9
        mul_11 = gf_mul_by_11
        mul_13 = gf_mul_by_13
        mul_14 = gf_mul_by_14
        # Since we're dealing with a transposed matrix, columns are already
        # sequential
        for i in xrange(4):
            col = i * 4
            # out[j] = 0e*v[j] ^ 0b*v[j+1] ^ 0d*v[j+2] ^ 09*v[j+3]
            v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
                              block[col + 3])
            #v0, v1, v2, v3 = block[col:col+4]
            block[col ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
            block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
            block[col+2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
            block[col+3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
        #print 'MixColumns :', block
    def encrypt_block(self, block):
        """Encrypts a single block. This is the main AES function

        Standard FIPS-197 round structure: initial AddRoundKey, then
        (rounds - 1) full rounds, then a final round without MixColumns.
        Mutates ``block`` (16-cell state) in place.
        """
        # For efficiency reasons, the state between steps is transmitted via a
        # mutable array, not returned.
        self.add_round_key(block, 0)
        for round in xrange(1, self.rounds):
            self.sub_bytes(block, aes_sbox)
            self.shift_rows(block)
            self.mix_columns(block)
            self.add_round_key(block, round)
        # Final round: SubBytes + ShiftRows + AddRoundKey only.
        self.sub_bytes(block, aes_sbox)
        self.shift_rows(block)
        # no mix_columns step in the last round
        self.add_round_key(block, self.rounds)
    def decrypt_block(self, block):
        """Decrypts a single block. This is the main AES decryption function

        Inverse of encrypt_block: applies the inverse steps in reverse
        order with the round keys taken from last to first. Mutates
        ``block`` in place.
        """
        # For efficiency reasons, the state between steps is transmitted via a
        # mutable array, not returned.
        self.add_round_key(block, self.rounds)
        # count rounds down from (self.rounds - 1) ... 1
        for round in xrange(self.rounds-1, 0, -1):
            self.shift_rows_inv(block)
            self.sub_bytes(block, aes_inv_sbox)
            self.add_round_key(block, round)
            self.mix_columns_inv(block)
        # Final (round 0) step mirrors encryption's initial AddRoundKey.
        self.shift_rows_inv(block)
        self.sub_bytes(block, aes_inv_sbox)
        self.add_round_key(block, 0)
        # no mix_columns step in the last round
#### ECB mode implementation
class ECBMode(object):
    """Electronic CodeBook (ECB) mode encryption.

    Basically this mode applies the cipher function to each block individually;
    no feedback is done. NB! This is insecure for almost all purposes
    """
    # NOTE: this class is Python 2 only (``raise E, msg`` syntax and
    # array.tostring()).

    def __init__(self, cipher):
        # cipher: object exposing block_size, encrypt_block and decrypt_block.
        self.cipher = cipher
        self.block_size = cipher.block_size

    def ecb(self, data, block_func):
        """Perform ECB mode with the given function

        Applies ``block_func`` to each block of ``data`` independently and
        returns the transformed bytes. ``data`` length must be a multiple
        of the cipher block size.
        """
        if len(data) % self.block_size != 0:
            raise ValueError, "Plaintext length must be multiple of 16"
        block_size = self.block_size
        data = array('B', data)
        for offset in xrange(0, len(data), block_size):
            block = data[offset : offset+block_size]
            block_func(block)
            data[offset : offset+block_size] = block
        return data.tostring()

    def encrypt(self, data):
        """Encrypt data in ECB mode"""
        return self.ecb(data, self.cipher.encrypt_block)

    def decrypt(self, data):
        """Decrypt data in ECB mode"""
        return self.ecb(data, self.cipher.decrypt_block)
#### CBC mode
class CBCMode(object):
    """Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks.

    In CBC encryption, each plaintext block is XORed with the ciphertext block
    preceding it; decryption is simply the inverse.
    """
    # A better explanation of CBC can be found here:
    # http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29
    # NOTE: Python 2 only (``raise E, msg`` syntax, array.tostring()).
    # self.IV is advanced after each call, so successive encrypt()/decrypt()
    # calls continue the chain.

    def __init__(self, cipher, IV):
        # cipher: object exposing block_size, encrypt_block, decrypt_block.
        # IV: initialization vector, block_size bytes.
        self.cipher = cipher
        self.block_size = cipher.block_size
        self.IV = array('B', IV)

    def encrypt(self, data):
        """Encrypt data in CBC mode"""
        block_size = self.block_size
        if len(data) % block_size != 0:
            raise ValueError, "Plaintext length must be multiple of 16"
        data = array('B', data)
        IV = self.IV
        for offset in xrange(0, len(data), block_size):
            block = data[offset : offset+block_size]
            # Perform CBC chaining
            for i in xrange(block_size):
                block[i] ^= IV[i]
            self.cipher.encrypt_block(block)
            data[offset : offset+block_size] = block
            # Next block is chained against this ciphertext block.
            IV = block
        self.IV = IV
        return data.tostring()

    def decrypt(self, data):
        """Decrypt data in CBC mode"""
        block_size = self.block_size
        if len(data) % block_size != 0:
            raise ValueError, "Ciphertext length must be multiple of 16"
        data = array('B', data)
        IV = self.IV
        for offset in xrange(0, len(data), block_size):
            # Keep the ciphertext copy: it becomes the IV for the next block.
            ctext = data[offset : offset+block_size]
            block = ctext[:]
            self.cipher.decrypt_block(block)
            # Perform CBC chaining
            #for i in xrange(block_size):
            #    data[offset + i] ^= IV[i]
            for i in xrange(block_size):
                block[i] ^= IV[i]
            data[offset : offset+block_size] = block
            IV = ctext
            #data[offset : offset+block_size] = block
        self.IV = IV
        return data.tostring()
####
def galois_multiply(a, b):
    """Multiply two byte values in GF(2^8) with the AES reduction polynomial.

    Classic shift-and-add ("xtime") multiplication: for each bit of *b*,
    conditionally accumulate *a*, then double *a* in the field (left shift,
    reducing by 0x1b whenever the shift carries out of bit 7). Operands are
    expected in range 0..255.
    """
    product = 0
    for _ in range(8):
        if b & 1:
            product ^= a
        carried = a & 0x80
        a = (a << 1) & 0xff
        if carried:
            a ^= 0x1b
        b >>= 1
    return product
# Precompute the multiplication tables for encryption
# (MixColumns constants 02 and 03; table[x] == galois_multiply(x, c)).
gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)])
gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)])
# ... for decryption (inverse MixColumns constants 09, 0b, 0d, 0e).
gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)])
gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
####
# The S-box is a 256-element array, that maps a single byte value to another
# byte value. Since it's designed to be reversible, each value occurs only once
# in the S-box
#
# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
aes_sbox = array('B',
'637c777bf26b6fc53001672bfed7ab76'
'ca82c97dfa5947f0add4a2af9ca472c0'
'b7fd9326363ff7cc34a5e5f171d83115'
'04c723c31896059a071280e2eb27b275'
'09832c1a1b6e5aa0523bd6b329e32f84'
'53d100ed20fcb15b6acbbe394a4c58cf'
'd0efaafb434d338545f9027f503c9fa8'
'51a3408f929d38f5bcb6da2110fff3d2'
'cd0c13ec5f974417c4a77e3d645d1973'
'60814fdc222a908846eeb814de5e0bdb'
'e0323a0a4906245cc2d3ac629195e479'
'e7c8376d8dd54ea96c56f4ea657aae08'
'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
'703eb5664803f60e613557b986c11d9e'
'e1f8981169d98e949b1e87e9ce5528df'
'8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
)
# This is the inverse of the above. In other words:
# aes_inv_sbox[aes_sbox[val]] == val
aes_inv_sbox = array('B',
'52096ad53036a538bf40a39e81f3d7fb'
'7ce339829b2fff87348e4344c4dee9cb'
'547b9432a6c2233dee4c950b42fac34e'
'082ea16628d924b2765ba2496d8bd125'
'72f8f66486689816d4a45ccc5d65b692'
'6c704850fdedb9da5e154657a78d9d84'
'90d8ab008cbcd30af7e45805b8b34506'
'd02c1e8fca3f0f02c1afbd0301138a6b'
'3a9111414f67dcea97f2cfcef0b4e673'
'96ac7422e7ad3585e2f937e81c75df6e'
'47f11a711d29c5896fb7620eaa18be1b'
'fc563e4bc6d279209adbc0fe78cd5af4'
'1fdda8338807c731b11210592780ec5f'
'60517fa919b54a0d2de57a9f93c99cef'
'a0e03b4dae2af5b0c8ebbb3c83539961'
'172b047eba77d626e169146355210c7d'.decode('hex')
)
# The Rcon table is used in AES's key schedule (key expansion)
# It's a pre-computed table of exponentation of 2 in AES's finite field
#
# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
aes_Rcon = array('B',
'8d01020408102040801b366cd8ab4d9a'
'2f5ebc63c697356ad4b37dfaefc59139'
'72e4d3bd61c29f254a943366cc831d3a'
'74e8cb8d01020408102040801b366cd8'
'ab4d9a2f5ebc63c697356ad4b37dfaef'
'c5913972e4d3bd61c29f254a943366cc'
'831d3a74e8cb8d01020408102040801b'
'366cd8ab4d9a2f5ebc63c697356ad4b3'
'7dfaefc5913972e4d3bd61c29f254a94'
'3366cc831d3a74e8cb8d010204081020'
'40801b366cd8ab4d9a2f5ebc63c69735'
'6ad4b37dfaefc5913972e4d3bd61c29f'
'254a943366cc831d3a74e8cb8d010204'
'08102040801b366cd8ab4d9a2f5ebc63'
'c697356ad4b37dfaefc5913972e4d3bd'
'61c29f254a943366cc831d3a74e8cb'.decode('hex')
) | gpl-2.0 |
drcapulet/sentry | src/sentry/receivers/rules.py | 15 | 1174 | from __future__ import absolute_import, print_function
from django.db.models.signals import post_save
from sentry.models import Project, Rule
def create_default_rules(instance, created=True, RuleModel=Rule, **kwargs):
    """post_save receiver: attach the two default alert rules to a new Project.

    Runs only when the Project row was just created (not on updates). Both
    rules fire the standard notification action; they differ only in the
    triggering condition (first-seen event vs. regression).
    """
    if not created:
        return

    default_rules = (
        ('Send a notification for new events',
         'sentry.rules.conditions.first_seen_event.FirstSeenEventCondition'),
        ('Send a notification for regressions',
         'sentry.rules.conditions.regression_event.RegressionEventCondition'),
    )
    for label, condition_id in default_rules:
        RuleModel.objects.create(
            project=instance,
            label=label,
            data={
                'conditions': [
                    {'id': condition_id},
                ],
                'actions': [
                    {'id': 'sentry.rules.actions.notify_event.NotifyEventAction'},
                ],
            },
        )
# Register the receiver for Project creation. dispatch_uid guards against
# duplicate registration on re-import; weak=False keeps the module-level
# function from being garbage-collected out of the signal registry.
post_save.connect(
    create_default_rules,
    sender=Project,
    dispatch_uid="create_default_rules",
    weak=False,
)
| bsd-3-clause |
acreations/rockit-server | rockit/core/views/settings.py | 1 | 1235 |
from celery.execute import send_task
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.reverse import reverse_lazy
from rockit.core import holders
from rockit.core import models
class SettingViewSet(viewsets.ViewSet):
"""
View to list all settings in rockit server.
"""
def list(self, request):
"""
Return a list of all settings.
"""
result = list()
for association in models.Association.objects.all():
result.append({
'name': association.name,
'entry': association.entry,
'url': reverse_lazy("setting-detail", kwargs={ 'pk': association.id }, request=request)
})
return Response(result)
def retrieve(self, request, pk=None):
"""
Get settings for a specific association
"""
queryset = models.Association.objects.all()
association = get_object_or_404(queryset, pk=pk)
settings = send_task("%s.settings" % association.entry, args=[holders.SettingsHolder()])
result = settings.wait()
return Response(result.get_content()) | mit |
LyleMi/Trafficker | Trafficker/layer/layer.py | 1 | 2062 | import socket
class layer(object):
    """Base class for protocol layers.

    A layer wraps a raw ``packet`` payload and provides shared helpers for
    checksumming, hex-dumping and MAC-address formatting. Subclasses are
    expected to override pack() and json().
    """

    def __init__(self, packet=''):
        # Raw serialized payload for this layer.
        self.packet = packet

    def pack(self):
        """Return the serialized payload for this layer."""
        return self.packet

    def __str__(self):
        return str(self.pack())

    def __repr__(self):
        return '<%s>' % self.name

    def json(self):
        """JSON-serializable view of the layer; subclasses override."""
        return {}

    @property
    def name(self):
        # The subclass name doubles as the protocol name.
        return self.__class__.__name__

    @staticmethod
    def send(layers, port=0, device='eth0'):
        """Serialize *layers*, pad to the minimum frame size and send.

        NOTE(review): the early ``return '=== TEST ==='`` below is leftover
        debug scaffolding -- the raw-socket path after it is unreachable.
        It is preserved as-is so this change does not silently start
        transmitting packets; remove it deliberately when ready.
        """
        packet = ''.join([p.pack() for p in layers])
        if len(packet) < 60:
            # Pad to the 60-byte minimum Ethernet frame body.
            packet += '\x00' * (60 - len(packet))
        # Fixed: the original called self.hexdump inside a @staticmethod,
        # which raised NameError.
        layer.hexdump(packet)
        return '=== TEST ==='
        rawSocket = socket.socket(
            socket.PF_PACKET, socket.SOCK_RAW, socket.htons(port)
        )
        rawSocket.bind((device, socket.htons(port)))
        rawSocket.send(packet)
        return rawSocket

    @staticmethod
    def calChecksum(data):
        """Ones-complement 16-bit checksum over *data* (bytes).

        Sums the data as little-endian 16-bit words, folds carries back
        into the low 16 bits, and returns the bitwise complement.
        """
        total = 0
        tail = len(data) % 2
        for i in range(0, len(data) - tail, 2):
            total += data[i] + (data[i + 1] << 8)
        # Fixed: the original added data[i + 1] for an odd-length tail,
        # which used the wrong byte and raised NameError for 1-byte input.
        if tail:
            total += data[-1]
        while total >> 16:
            total = (total & 0xFFFF) + (total >> 16)
        return ~total & 0xFFFF

    @staticmethod
    def hexdump(src, length=16, show=True):
        """Format *src* as a classic offset / hex / ASCII dump.

        Accepts bytes (str input is UTF-8 encoded first). Prints the dump
        when *show* is true, otherwise returns it as a string. (Ported to
        Python 3: the original used ``unicode``/``xrange`` and mixed bytes
        with str, which cannot run on py3 alongside this file's
        ``bytes.fromhex`` usage.)
        """
        if isinstance(src, str):
            src = src.encode()
        rows = []
        for offset in range(0, len(src), length):
            chunk = src[offset:offset + length]
            hexa = ' '.join('%02X' % byte for byte in chunk)
            text = ''.join(chr(byte) if 0x20 <= byte < 0x7F else '.'
                           for byte in chunk)
            rows.append('%04X   %-*s   %s' % (offset, length * 3, hexa, text))
        dump = '\n'.join(rows)
        if show:
            print(dump)
        else:
            return dump

    @staticmethod
    def parseMac(s, encode=False):
        """Convert between raw MAC bytes and 'aa:bb:cc:dd:ee:ff' notation.

        With encode=True, *s* is raw bytes and the colon-separated hex
        string is returned; otherwise *s* is a colon string and the raw
        bytes are returned.
        """
        if encode:
            hexed = s.hex()
            return ':'.join(hexed[i:i + 2] for i in range(0, len(hexed), 2))
        return bytes.fromhex(s.replace(':', ''))
| mit |
Bitcoin-ABC/bitcoin-abc | test/functional/abc-magnetic-anomaly-mining.py | 1 | 4364 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that mining RPC continues to supply correct transaction metadata after
the Nov 2018 protocol upgrade which engages canonical transaction ordering
"""
import decimal
import random
import time
from test_framework.test_framework import BitcoinTestFramework
class CTORMiningTest(BitcoinTestFramework):
    """Exercise getblocktemplate metadata under canonical tx ordering (CTOR)."""

    def set_test_params(self):
        # Setup two nodes so we can getblocktemplate
        # it errors out if it is not connected to other nodes
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.block_heights = {}
        self.tip = None
        self.blocks = {}
        # Start mocktime well in the past so we can advance it per block.
        self.mocktime = int(time.time()) - 600 * 100
        extra_arg = ['-spendzeroconfchange=0', '-whitelist=noban@127.0.0.1']
        self.extra_args = [extra_arg, extra_arg]

    def skip_test_if_missing_module(self):
        # Needs wallet RPCs (getnewaddress, listunspent, signing).
        self.skip_if_no_wallet()

    def run_test(self):
        """Spend all coinbases, then verify template fee/sigops and txid order."""
        mining_node = self.nodes[0]

        # Helper for updating the times
        def update_time():
            mining_node.setmocktime(self.mocktime)
            self.mocktime = self.mocktime + 600

        mining_node.getnewaddress()
        # Generate some unspent utxos and also
        # activate magnetic anomaly
        for x in range(150):
            update_time()
            mining_node.generate(1)
        update_time()
        unspent = mining_node.listunspent()
        transactions = {}
        # Spend all our coinbases
        while len(unspent):
            inputs = []
            # Grab a random number of inputs
            for _ in range(random.randrange(1, 5)):
                txin = unspent.pop()
                inputs.append({
                    'txid': txin['txid'],
                    'vout': 0  # This is a coinbase
                })
                if len(unspent) == 0:
                    break
            outputs = {}
            # Calculate a unique fee for this transaction
            fee = decimal.Decimal(random.randint(
                1000, 2000)) / decimal.Decimal(1e2)
            # Spend to the same number of outputs as inputs, so we can leave
            # the amounts unchanged and avoid rounding errors. This also ensures
            # the number of sigops == number of sigchecks.
            #
            # NOTE: There will be 1 sigop per output (which equals the number
            # of inputs now). We need this randomization to ensure the
            # numbers are properly following the transactions in the block
            # template metadata
            addr = ""
            for _ in range(len(inputs)):
                addr = mining_node.getnewaddress()
                output = {
                    # 50 BCH per coinbase
                    addr: decimal.Decimal(50000000)
                }
                outputs.update(output)
            # Take the fee off the last output to avoid rounding errors we
            # need the exact fee later for assertions
            outputs[addr] -= fee
            rawtx = mining_node.createrawtransaction(inputs, outputs)
            signedtx = mining_node.signrawtransactionwithwallet(rawtx)
            txid = mining_node.sendrawtransaction(signedtx['hex'])
            # number of outputs is the same as the number of sigops in this
            # case
            transactions.update({txid: {'fee': fee, 'sigops': len(outputs)}})
        tmpl = mining_node.getblocktemplate()
        assert 'proposal' in tmpl['capabilities']
        # Check the template transaction metadata and ordering
        # (skip index 0, which is reserved for the coinbase slot).
        last_txid = 0
        for txn in tmpl['transactions'][1:]:
            txid = txn['txid']
            txnMetadata = transactions[txid]
            # Fees were chosen with 2 decimal places, so *100 is exact.
            expectedFeeSats = int(txnMetadata['fee'] * 10**2)
            expectedSigOps = txnMetadata['sigops']
            txid_decoded = int(txid, 16)
            # Assert we got the expected metadata
            assert expectedFeeSats == txn['fee']
            assert expectedSigOps == txn['sigops']
            # Assert transaction ids are in order
            assert last_txid == 0 or last_txid < txid_decoded
            last_txid = txid_decoded
# Standard functional-test entry point.
if __name__ == '__main__':
    CTORMiningTest().main()
| mit |
muntasirsyed/intellij-community | python/lib/Lib/encodings/cp1254.py | 593 | 13758 | """ Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-shot codec: both directions are single-pass charmap
    # conversions driven by the module-level cp1254 tables.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so incremental == one-shot;
        # [0] drops the consumed-length part of the (bytes, length) tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Stateless charmap decode; [0] drops the consumed-length part.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream behavior comes entirely from the two mixins; nothing to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream behavior comes entirely from the two mixins; nothing to add.
    pass
### encodings module API
def getregentry():
    # Registration hook called by the encodings-package machinery; bundles
    # all codec entry points for the 'cp1254' name.
    return codecs.CodecInfo(
        name='cp1254',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
# Inverted from the decoding table above (char -> byte mapping).
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
PulsePod/evepod | lib/python2.7/site-packages/simplejson/tests/test_namedtuple.py | 147 | 4004 | from __future__ import absolute_import
import unittest
import simplejson as json
from simplejson.compat import StringIO
try:
    from collections import namedtuple
except ImportError:
    # Pre-2.6 Python: provide minimal stand-ins that mimic namedtuple's
    # _asdict() interface, which is all these tests rely on.
    class Value(tuple):
        def __new__(cls, *args):
            return tuple.__new__(cls, args)

        def _asdict(self):
            return {'value': self[0]}

    class Point(tuple):
        def __new__(cls, *args):
            return tuple.__new__(cls, args)

        def _asdict(self):
            return {'x': self[0], 'y': self[1]}
else:
    Value = namedtuple('Value', ['value'])
    Point = namedtuple('Point', ['x', 'y'])
class DuckValue(object):
    # Not a namedtuple, but quacks like one: exposes a callable _asdict(),
    # which is how the encoder duck-types namedtuples.
    def __init__(self, *args):
        self.value = Value(*args)

    def _asdict(self):
        return self.value._asdict()
class DuckPoint(object):
    # Duck-typed namedtuple stand-in (see DuckValue) wrapping a Point.
    def __init__(self, *args):
        self.point = Point(*args)

    def _asdict(self):
        return self.point._asdict()
class DeadDuck(object):
    # _asdict exists but is NOT callable -- the encoder must reject it
    # with TypeError rather than treat it as a namedtuple.
    _asdict = None
class DeadDict(dict):
    # dict subclass with a non-callable _asdict: must be encoded as a
    # plain dict, not via the namedtuple path.
    _asdict = None
# Wrap a test value bare, inside a list, and nested in a dict, to exercise
# the encoder at several container depths.
CONSTRUCTORS = [
    lambda v: v,
    lambda v: [v],
    lambda v: [{'key': v}],
]
class TestNamedTuple(unittest.TestCase):
    """simplejson's namedtuple handling: the namedtuple_as_object and
    tuple_as_array kwargs of dumps()/dump(), plus duck-typed and broken
    _asdict variants."""

    def test_namedtuple_dumps(self):
        # Default and explicit namedtuple_as_object=True both serialize
        # namedtuples (and duck-typed lookalikes) as dicts.
        for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
            d = v._asdict()
            self.assertEqual(d, json.loads(json.dumps(v)))
            self.assertEqual(
                d,
                json.loads(json.dumps(v, namedtuple_as_object=True)))
            self.assertEqual(d, json.loads(json.dumps(v, tuple_as_array=False)))
            self.assertEqual(
                d,
                json.loads(json.dumps(v, namedtuple_as_object=True,
                                      tuple_as_array=False)))

    def test_namedtuple_dumps_false(self):
        # With namedtuple_as_object=False they fall back to plain tuples;
        # disabling both representations is an error.
        for v in [Value(1), Point(1, 2)]:
            l = list(v)
            self.assertEqual(
                l,
                json.loads(json.dumps(v, namedtuple_as_object=False)))
            self.assertRaises(TypeError, json.dumps, v,
                              tuple_as_array=False, namedtuple_as_object=False)

    def test_namedtuple_dump(self):
        # Same matrix as test_namedtuple_dumps, via the streaming dump() API.
        for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
            d = v._asdict()
            sio = StringIO()
            json.dump(v, sio)
            self.assertEqual(d, json.loads(sio.getvalue()))
            sio = StringIO()
            json.dump(v, sio, namedtuple_as_object=True)
            self.assertEqual(
                d,
                json.loads(sio.getvalue()))
            sio = StringIO()
            json.dump(v, sio, tuple_as_array=False)
            self.assertEqual(d, json.loads(sio.getvalue()))
            sio = StringIO()
            json.dump(v, sio, namedtuple_as_object=True,
                      tuple_as_array=False)
            self.assertEqual(
                d,
                json.loads(sio.getvalue()))

    def test_namedtuple_dump_false(self):
        # Same fallbacks as test_namedtuple_dumps_false, via dump().
        for v in [Value(1), Point(1, 2)]:
            l = list(v)
            sio = StringIO()
            json.dump(v, sio, namedtuple_as_object=False)
            self.assertEqual(
                l,
                json.loads(sio.getvalue()))
            self.assertRaises(TypeError, json.dump, v, StringIO(),
                              tuple_as_array=False, namedtuple_as_object=False)

    def test_asdict_not_callable_dump(self):
        # Non-callable _asdict: TypeError for plain objects, plain-dict
        # encoding for dict subclasses (dump() API).
        for f in CONSTRUCTORS:
            self.assertRaises(TypeError,
                json.dump, f(DeadDuck()), StringIO(), namedtuple_as_object=True)
            sio = StringIO()
            json.dump(f(DeadDict()), sio, namedtuple_as_object=True)
            self.assertEqual(
                json.dumps(f({})),
                sio.getvalue())

    def test_asdict_not_callable_dumps(self):
        # Same as above via dumps().
        for f in CONSTRUCTORS:
            self.assertRaises(TypeError,
                json.dumps, f(DeadDuck()), namedtuple_as_object=True)
            self.assertEqual(
                json.dumps(f({})),
                json.dumps(f(DeadDict()), namedtuple_as_object=True))
| apache-2.0 |
etherkit/OpenBeacon2 | client/linux-x86/venv/lib/python3.8/site-packages/setuptools/command/test.py | 5 | 9602 | import os
import operator
import sys
import contextlib
import itertools
import unittest
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages, evaluate_marker,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from .build_py import _unique_everseen
__metaclass__ = type
class ScanningLoader(TestLoader):
    """TestLoader that recurses into packages and honors additional_tests()."""

    def __init__(self):
        TestLoader.__init__(self)
        # Modules already scanned; guards against duplicate collection when
        # packages re-export their submodules.
        self._visited = set()

    def loadTestsFromModule(self, module, pattern=None):
        """Return a suite of all tests cases contained in the given module

        If the module is a package, load tests from all the modules in it.
        If the module has an ``additional_tests`` function, call it and add
        the return value to the tests.
        """
        if module in self._visited:
            return None
        self._visited.add(module)
        tests = []
        tests.append(TestLoader.loadTestsFromModule(self, module))
        if hasattr(module, "additional_tests"):
            tests.append(module.additional_tests())
        if hasattr(module, '__path__'):
            # Package: recurse into every module and subpackage inside it.
            for file in resource_listdir(module.__name__, ''):
                if file.endswith('.py') and file != '__init__.py':
                    submodule = module.__name__ + '.' + file[:-3]
                else:
                    if resource_exists(module.__name__, file + '/__init__.py'):
                        submodule = module.__name__ + '.' + file
                    else:
                        continue
                tests.append(self.loadTestsFromName(submodule))
        if len(tests) != 1:
            return self.suiteClass(tests)
        else:
            return tests[0]  # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
    """Read-only, non-data descriptor version of ``property``.

    Instance access invokes *fget*; class access returns the descriptor
    itself. Because it defines no __set__/__delete__, an instance
    attribute of the same name shadows it.
    """

    def __init__(self, fget):
        self.fget = fget

    def __get__(self, obj, objtype=None):
        # Class-level access (obj is None) yields the descriptor itself.
        return self if obj is None else self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build (deprecated)"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Run single test, case or suite (e.g. 'module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
    def initialize_options(self):
        # All options start unset; finalize_options() supplies defaults.
        self.test_suite = None
        self.test_module = None
        self.test_loader = None
        self.test_runner = None
    def finalize_options(self):
        # --test-suite and --test-module are mutually exclusive.
        if self.test_suite and self.test_module:
            msg = "You may specify a module or a suite, but not both"
            raise DistutilsOptionError(msg)
        # Derive the suite: explicit option > module's test_suite attr >
        # the distribution's declared test_suite.
        if self.test_suite is None:
            if self.test_module is None:
                self.test_suite = self.distribution.test_suite
            else:
                self.test_suite = self.test_module + ".test_suite"
        # Loader/runner fall back to distribution settings, then to the
        # package-scanning loader defined in this module.
        if self.test_loader is None:
            self.test_loader = getattr(self.distribution, 'test_loader', None)
        if self.test_loader is None:
            self.test_loader = "setuptools.command.test:ScanningLoader"
        if self.test_runner is None:
            self.test_runner = getattr(self.distribution, 'test_runner', None)
    @NonDataProperty
    def test_args(self):
        # Computed lazily (non-data property) so it reflects finalized
        # options; instances may still override it with a plain attribute.
        return list(self._test_args())
def _test_args(self):
if not self.test_suite and sys.version_info >= (2, 7):
yield 'discover'
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
    def with_project_on_sys_path(self, func):
        """
        Backward compatibility for project_on_sys_path context.
        """
        # Thin wrapper kept for callers written against the pre-context-
        # manager API: runs *func* with the project importable.
        with self.project_on_sys_path():
            func()
    @contextlib.contextmanager
    def project_on_sys_path(self, include_dists=[]):
        """Build the project (in place, or via 2to3) and make it importable.

        While the context is active, the built project is first on sys.path
        and activated in the pkg_resources working set; sys.path and
        sys.modules are fully restored on exit.

        NOTE(review): ``include_dists`` is a mutable default and is unused
        in this body -- presumably consumed by subclasses/callers; confirm
        before removing.
        """
        with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
        if with_2to3:
            # If we run 2to3 we can not do this inplace:
            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)
            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')
            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')
            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')
        ei_cmd = self.get_finalized_command("egg_info")
        # Snapshot interpreter state so the finally block can restore it.
        old_path = sys.path[:]
        old_modules = sys.modules.copy()
        try:
            project_path = normalize_path(ei_cmd.egg_base)
            sys.path.insert(0, project_path)
            # Rescan the working set so the just-built egg is visible,
            # then activate distributions as they are needed.
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            with self.paths_on_pythonpath([project_path]):
                yield
        finally:
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(_unique_everseen(paths))
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires)
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
er_d = dist.fetch_build_eggs(
v for k, v in dist.extras_require.items()
if k.startswith(':') and evaluate_marker(k[1:])
)
return itertools.chain(ir_d, tr_d, er_d)
def run(self):
self.announce(
"WARNING: Testing via this command is deprecated and will be "
"removed in a future version. Users looking for a generic test "
"entry point independent of test runner are encouraged to use "
"tox.",
log.WARN,
)
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
test = unittest.main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
exit=False,
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as a as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| gpl-3.0 |
oinopion/django | django/views/generic/base.py | 281 | 7690 | from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch, reverse
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.decorators import classonlymethod
logger = logging.getLogger('django.request')
class ContextMixin(object):
    """
    A default context mixin that passes the keyword arguments received by
    get_context_data as the template context.
    """
    def get_context_data(self, **kwargs):
        """Return the received kwargs as the context, adding ``view``
        (the view instance) unless the caller already supplied one."""
        kwargs.setdefault('view', self)
        return kwargs
class View(object):
    """
    Intentionally simple parent class for all views. Only implements
    dispatch-by-method and simple sanity checking.
    """
    # HTTP verbs a view may implement; dispatch() rejects anything else.
    http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']
    def __init__(self, **kwargs):
        """
        Constructor. Called in the URLconf; can contain helpful extra
        keyword arguments, and other things.
        """
        # Go through keyword arguments, and either save their values to our
        # instance, or raise an error.
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)
    @classonlymethod
    def as_view(cls, **initkwargs):
        """
        Main entry point for a request-response process.
        """
        # Sanity-check initkwargs: they may not collide with HTTP method
        # names and must correspond to existing class attributes.
        for key in initkwargs:
            if key in cls.http_method_names:
                raise TypeError("You tried to pass in the %s method name as a "
                                "keyword argument to %s(). Don't do that."
                                % (key, cls.__name__))
            if not hasattr(cls, key):
                raise TypeError("%s() received an invalid keyword %r. as_view "
                                "only accepts arguments that are already "
                                "attributes of the class." % (cls.__name__, key))
        def view(request, *args, **kwargs):
            # A fresh view instance per request keeps per-request state off
            # the class.
            self = cls(**initkwargs)
            if hasattr(self, 'get') and not hasattr(self, 'head'):
                self.head = self.get
            self.request = request
            self.args = args
            self.kwargs = kwargs
            return self.dispatch(request, *args, **kwargs)
        view.view_class = cls
        view.view_initkwargs = initkwargs
        # take name and docstring from class
        update_wrapper(view, cls, updated=())
        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        return view
    def dispatch(self, request, *args, **kwargs):
        """Route the request to the handler matching its HTTP method."""
        # Try to dispatch to the right method; if a method doesn't exist,
        # defer to the error handler. Also defer to the error handler if the
        # request method isn't on the approved list.
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        return handler(request, *args, **kwargs)
    def http_method_not_allowed(self, request, *args, **kwargs):
        """Log the rejected method and return a 405 response listing the
        methods this view does implement."""
        logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
            extra={
                'status_code': 405,
                'request': request
            }
        )
        return http.HttpResponseNotAllowed(self._allowed_methods())
    def options(self, request, *args, **kwargs):
        """
        Handles responding to requests for the OPTIONS HTTP verb.
        """
        response = http.HttpResponse()
        response['Allow'] = ', '.join(self._allowed_methods())
        response['Content-Length'] = '0'
        return response
    def _allowed_methods(self):
        # Upper-cased names of the HTTP verbs actually implemented here.
        return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
    """
    A mixin that can be used to render a template.
    """
    template_name = None
    template_engine = None
    response_class = TemplateResponse
    content_type = None
    def render_to_response(self, context, **response_kwargs):
        """
        Return a ``response_class`` instance rendering this view's template
        with the given context.
        Any extra keyword arguments are forwarded to the response class
        constructor.
        """
        response_kwargs.setdefault('content_type', self.content_type)
        templates = self.get_template_names()
        return self.response_class(
            request=self.request,
            template=templates,
            context=context,
            using=self.template_engine,
            **response_kwargs
        )
    def get_template_names(self):
        """
        Return the list of template names to try for this request. Must
        return a list. May not be called if render_to_response is
        overridden.
        """
        if self.template_name is not None:
            return [self.template_name]
        raise ImproperlyConfigured(
            "TemplateResponseMixin requires either a definition of "
            "'template_name' or an implementation of 'get_template_names()'")
class TemplateView(TemplateResponseMixin, ContextMixin, View):
    """
    Render a template on GET. Any keyword arguments captured from the
    URLconf are passed into the template context.
    """
    def get(self, request, *args, **kwargs):
        """Build the context from the URLconf kwargs and render it."""
        return self.render_to_response(self.get_context_data(**kwargs))
class RedirectView(View):
    """
    A view that provides a redirect on any GET request.
    """
    permanent = False  # if True, issue a 301 instead of a 302
    url = None  # literal URL (old-style %-formatted with the URL kwargs)
    pattern_name = None  # URL pattern name to reverse instead of a literal url
    query_string = False  # if True, append the incoming query string
    def get_redirect_url(self, *args, **kwargs):
        """
        Return the URL to redirect to. Keyword arguments from the
        URL pattern match generating the redirect request
        are provided as kwargs to this method.
        Returns None when no target can be determined.
        """
        if self.url:
            url = self.url % kwargs
        elif self.pattern_name:
            try:
                url = reverse(self.pattern_name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                return None
        else:
            return None
        # NOTE: deliberately rebinds ``args`` — the positional args are no
        # longer needed; it now holds the raw query string.
        args = self.request.META.get('QUERY_STRING', '')
        if args and self.query_string:
            url = "%s?%s" % (url, args)
        return url
    def get(self, request, *args, **kwargs):
        """Redirect (301/302 per ``permanent``) or 410 Gone when no URL."""
        url = self.get_redirect_url(*args, **kwargs)
        if url:
            if self.permanent:
                return http.HttpResponsePermanentRedirect(url)
            else:
                return http.HttpResponseRedirect(url)
        else:
            logger.warning('Gone: %s', request.path,
                extra={
                    'status_code': 410,
                    'request': request
                })
            return http.HttpResponseGone()
    # All other verbs redirect exactly like GET.
    def head(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def options(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def delete(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def put(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
    def patch(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
| bsd-3-clause |
thnee/ansible | lib/ansible/modules/network/cloudengine/ce_lldp_interface.py | 11 | 69893 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ce_lldp_interface
version_added: "2.10"
short_description: Manages INTERFACE LLDP configuration on HUAWEI CloudEngine switches.
description:
- Manages INTERFACE LLDP configuration on HUAWEI CloudEngine switches.
author: xuxiaowei0512 (@CloudEngine-Ansible)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
lldpenable:
description:
- Set global LLDP enable state.
type: str
choices: ['enabled', 'disabled']
function_lldp_interface_flag:
description:
- Used to distinguish between command line functions.
type: str
choices: ['disableINTERFACE','tlvdisableINTERFACE','tlvenableINTERFACE','intervalINTERFACE']
type_tlv_disable:
description:
- Used to distinguish between command line functions.
type: str
choices: ['basic_tlv', 'dot3_tlv']
type_tlv_enable:
description:
- Used to distinguish between command line functions.
type: str
choices: ['dot1_tlv','dcbx']
lldpadminstatus:
description:
- Set interface lldp enable state.
type: str
choices: ['txOnly', 'rxOnly', 'txAndRx', 'disabled']
ifname:
description:
- Interface name.
type: str
txinterval:
description:
- LLDP send message interval.
type: int
txprotocolvlanid:
description:
- Set tx protocol vlan id.
type: int
txvlannameid:
description:
- Set tx vlan name id.
type: int
vlannametxenable:
description:
- Set vlan name tx enable or not.
type: bool
manaddrtxenable:
description:
- Make it able to send management address TLV.
type: bool
portdesctxenable:
description:
- Enabling the ability to send a description of TLV.
type: bool
syscaptxenable:
description:
- Enable the ability to send system capabilities TLV.
type: bool
sysdesctxenable:
description:
- Enable the ability to send system description TLV.
type: bool
sysnametxenable:
description:
- Enable the ability to send system name TLV.
type: bool
portvlantxenable:
description:
- Enable port vlan tx.
type: bool
protovlantxenable:
description:
- Enable protocol vlan tx.
type: bool
protoidtxenable:
description:
- Enable the ability to send protocol identity TLV.
type: bool
macphytxenable:
description:
- Enable MAC/PHY configuration and state TLV to be sent.
type: bool
linkaggretxenable:
description:
- Enable the ability to send link aggregation TLV.
type: bool
maxframetxenable:
description:
- Enable the ability to send maximum frame length TLV.
type: bool
eee:
description:
- Enable the ability to send EEE TLV.
type: bool
dcbx:
description:
- Enable the ability to send DCBX TLV.
type: bool
state:
description:
- Manage the state of the resource.
type: str
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Configure global LLDP enable state"
  ce_lldp_interface:
lldpenable: enabled
- name: "Configure interface lldp enable state"
ce_lldp_interface:
function_lldp_interface_flag: disableINTERFACE
ifname: 10GE1/0/1
lldpadminstatus: rxOnly
- name: "Configure LLDP transmit interval and ensure global LLDP state is already enabled"
ce_lldp_interface:
function_lldp_interface_flag: intervalINTERFACE
ifname: 10GE1/0/1
txinterval: 4
- name: "Configure basic-tlv: management-address TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: basic_tlv
ifname: 10GE1/0/1
manaddrtxenable: true
- name: "Configure basic-tlv: prot description TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: basic_tlv
ifname: 10GE1/0/1
portdesctxenable: true
- name: "Configure basic-tlv: system capabilities TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: basic_tlv
ifname: 10GE1/0/1
syscaptxenable: true
- name: "Configure basic-tlv: system description TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: basic_tlv
ifname: 10GE1/0/1
sysdesctxenable: true
- name: "Configure basic-tlv: system name TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: basic_tlv
ifname: 10GE1/0/1
sysnametxenable: true
- name: "TLV types that are forbidden to be published on the configuration interface, link aggregation TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: dot3_tlv
ifname: 10GE1/0/1
linkAggreTxEnable: true
- name: "TLV types that are forbidden to be published on the configuration interface, MAC/PHY configuration/status TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: dot3_tlv
ifname: 10GE1/0/1
macPhyTxEnable: true
- name: "TLV types that are forbidden to be published on the configuration interface, maximum frame size TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: dot3_tlv
ifname: 10GE1/0/1
maxFrameTxEnable: true
- name: "TLV types that are forbidden to be published on the configuration interface, EEE TLV"
ce_lldp_interface:
function_lldp_interface_flag: tlvdisableINTERFACE
type_tlv_disable: dot3_tlv
ifname: 10GE1/0/1
eee: true
- name: "Configure the interface to publish an optional DCBX TLV type "
ce_lldp_interface:
function_lldp_interface_flag: tlvenableINTERFACE
ifname: 10GE1/0/1
type_tlv_enable: dcbx
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"lldpadminstatus": "rxOnly",
"function_lldp_interface_flag": "tlvenableINTERFACE",
"type_tlv_enable": "dot1_tlv",
"ifname": "10GE1/0/1",
"state": "present"
}
existing:
    description: k/v pairs of existing global LLDP configuration
returned: always
type: dict
sample: {
"lldpenable": "disabled",
"ifname": "10GE1/0/1",
"lldpadminstatus": "txAndRx"
}
end_state:
    description: k/v pairs of global LLDP configuration after module execution
returned: always
type: dict
sample: {
"lldpenable": "enabled",
"lldpadminstatus": "rxOnly",
"function_lldp_interface_flag": "tlvenableINTERFACE",
"type_tlv_enable": "dot1_tlv",
"ifname": "10GE1/0/1"
}
updates:
description: command sent to the device
returned: always
type: list
sample: [
"lldp enable",
"interface 10ge 1/0/1",
"undo lldp disable",
"lldp tlv-enable dot1-tlv vlan-name 4",
]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import copy
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import set_nc_config, get_nc_config
CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys>
<lldpEnable></lldpEnable>
</lldpSys>
</lldp>
</filter>
"""
CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys operation="merge">
<lldpEnable>%s</lldpEnable>
</lldpSys>
</lldp>
</config>
"""
CE_NC_GET_INTERFACE_LLDP_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<lldpAdminStatus></lldpAdminStatus>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
CE_NC_MERGE_INTERFACE_LLDP_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface operation="merge">
<ifName>%s</ifName>
<lldpAdminStatus>%s</lldpAdminStatus>
</lldpInterface>
</lldpInterfaces>
</lldp>
</config>
"""
CE_NC_GET_INTERFACE_INTERVAl_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<msgInterval>
<txInterval></txInterval>
</msgInterval>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
CE_NC_MERGE_INTERFACE_INTERVAl_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName>%s</ifName>
<msgInterval operation="merge">
<txInterval>%s</txInterval>
</msgInterval>
</lldpInterface>
</lldpInterfaces>
</lldp>
</config>
"""
CE_NC_GET_INTERFACE_TLV_ENABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<tlvTxEnable>
<dcbx></dcbx>
<protoIdTxEnable></protoIdTxEnable>
</tlvTxEnable>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
CE_NC_GET_INTERFACE_TLV_DISABLE_CONFIG = """
<filter type="subtree">
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName></ifName>
<tlvTxEnable>
<manAddrTxEnable></manAddrTxEnable>
<portDescTxEnable></portDescTxEnable>
<sysCapTxEnable></sysCapTxEnable>
<sysDescTxEnable></sysDescTxEnable>
<sysNameTxEnable></sysNameTxEnable>
<linkAggreTxEnable></linkAggreTxEnable>
<macPhyTxEnable></macPhyTxEnable>
<maxFrameTxEnable></maxFrameTxEnable>
<eee></eee>
</tlvTxEnable>
</lldpInterface>
</lldpInterfaces>
</lldp>
</filter>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpInterfaces>
<lldpInterface>
<ifName>%s</ifName>
<tlvTxEnable operation="merge">
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_PROTOIDTXENABLE = """
<protoIdTxEnable>%s</protoIdTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_DCBX = """
<dcbx>%s</dcbx>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MANADDRTXENABLE = """
<manAddrTxEnable>%s</manAddrTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_PORTDESCTXENABLE = """
<portDescTxEnable>%s</portDescTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSCAPTXENABLE = """
<sysCapTxEnable>%s</sysCapTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSDESCTXENABLE = """
<sysDescTxEnable>%s</sysDescTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSNAMETXENABLE = """
<sysNameTxEnable>%s</sysNameTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_LINKAGGRETXENABLE = """
<linkAggreTxEnable>%s</linkAggreTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MACPHYTXENABLE = """
<macPhyTxEnable>%s</macPhyTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MAXFRAMETXENABLE = """
<maxFrameTxEnable>%s</maxFrameTxEnable>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_EEE = """
<eee>%s</eee>
"""
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL = """
</tlvTxEnable>
</lldpInterface>
</lldpInterfaces>
</lldp>
</config>
"""
CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG = """
<config>
<lldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lldpSys operation="merge">
<lldpEnable>%s</lldpEnable>
</lldpSys>
</lldp>
</config>
"""
def get_interface_type(interface):
    """Return the lower-case interface family for *interface*, such as 10GE.

    Matching is case-insensitive on the interface name prefix. Returns
    None for an unrecognized prefix or a None input.
    """
    if interface is None:
        return None
    # Ordered (prefix, family) pairs — checked first match wins, mirroring
    # the device naming families this module supports.
    prefix_families = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('PORT-GROUP', 'stack-Port'),
        ('NULL', 'null'),
    )
    upper_name = interface.upper()
    for prefix, family in prefix_families:
        if upper_name.startswith(prefix):
            return family.lower()
    return None
class Lldp_interface(object):
"""Manage global lldp enable configuration"""
    def __init__(self, argument_spec):
        """Build the AnsibleModule and cache the parameters relevant to the
        selected sub-function (``function_lldp_interface_flag``)."""
        self.spec = argument_spec
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
        # ``or None`` collapses any falsy value (e.g. empty string) to None.
        self.lldpenable = self.module.params['lldpenable'] or None
        self.function_lldp_interface_flag = self.module.params['function_lldp_interface_flag']
        self.type_tlv_disable = self.module.params['type_tlv_disable']
        self.type_tlv_enable = self.module.params['type_tlv_enable']
        self.ifname = self.module.params['ifname']
        # Only the attributes needed by the selected sub-function are bound;
        # other code paths must not touch the unbound ones.
        if self.function_lldp_interface_flag == 'disableINTERFACE':
            self.ifname = self.module.params['ifname']
            self.lldpadminstatus = self.module.params['lldpadminstatus']
        elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
            if self.type_tlv_disable == 'basic_tlv':
                self.ifname = self.module.params['ifname']
                self.manaddrtxenable = self.module.params['manaddrtxenable']
                self.portdesctxenable = self.module.params['portdesctxenable']
                self.syscaptxenable = self.module.params['syscaptxenable']
                self.sysdesctxenable = self.module.params['sysdesctxenable']
                self.sysnametxenable = self.module.params['sysnametxenable']
            if self.type_tlv_disable == 'dot3_tlv':
                self.ifname = self.module.params['ifname']
                self.macphytxenable = self.module.params['macphytxenable']
                self.linkaggretxenable = self.module.params['linkaggretxenable']
                self.maxframetxenable = self.module.params['maxframetxenable']
                self.eee = self.module.params['eee']
        elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
            if self.type_tlv_enable == 'dot1_tlv':
                self.ifname = self.module.params['ifname']
                self.protoidtxenable = self.module.params['protoidtxenable']
            if self.type_tlv_enable == 'dcbx':
                self.ifname = self.module.params['ifname']
                self.dcbx = self.module.params['dcbx']
        elif self.function_lldp_interface_flag == 'intervalINTERFACE':
            self.ifname = self.module.params['ifname']
            self.txinterval = self.module.params['txinterval']
        self.state = self.module.params['state']
        self.lldp_conf = dict()
        # Flags tracking whether configuration differs from the device
        # ("exsit" spelling is used consistently across this module).
        self.conf_disable_exsit = False
        self.conf_interface_lldp_disable_exsit = False
        self.conf_interval_exsit = False
        self.conf_tlv_disable_exsit = False
        self.conf_tlv_enable_exsit = False
        # 1 once the device reports global LLDP as enabled.
        self.enable_flag = 0
        self.check_params()
        self.existing_state_value = dict()
        self.existing_end_state_value = dict()
        self.interface_lldp_info = list()
        # state
        self.changed = False
        self.proposed_changed = dict()
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
def check_params(self):
"""Check all input params"""
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(msg='Error: ifname name of %s is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(msg='Error: Ifname length is beetween 1 and 63.')
if self.function_lldp_interface_flag == 'intervalINTERFACE':
if self.txinterval:
if int(self.txinterval) < 1 or int(self.txinterval) > 32768:
self.module.fail_json(
msg='Error: The value of txinterval is out of [1 - 32768].')
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(
msg='Error: ifname name of %s '
'is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
msg='Error: Ifname length is beetween 1 and 63.')
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.type_tlv_disable == 'dot1_tlv':
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(
msg='Error: ifname name of %s '
'is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
msg='Error: Ifname length is beetween 1 and 63.')
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
intf_type = get_interface_type(self.ifname)
if not intf_type:
self.module.fail_json(
msg='Error: ifname name of %s '
'is error.' % self.ifname)
if (len(self.ifname) < 1) or (len(self.ifname) > 63):
self.module.fail_json(
msg='Error: Ifname length is beetween 1 and 63.')
def check_response(self, xml_str, xml_name):
"""Check if response message is already OK"""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def show_result(self):
"""Show result"""
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
    def get_lldp_enable_pre_config(self):
        """Fetch the global LLDP enable state from the device.

        Sets ``self.enable_flag`` to 1 when the device reports 'enabled' and
        returns a one-element list like ``[{'lldpenable': <state>}]``.
        """
        lldp_dict = dict()
        lldp_config = list()
        conf_enable_str = CE_NC_GET_GLOBAL_LLDPENABLE_CONFIG
        conf_enable_obj = get_nc_config(self.module, conf_enable_str)
        # Strip newlines and namespace declarations so plain tag paths match.
        xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        # get lldp enable config info
        root_enable = ElementTree.fromstring(xml_enable_str)
        ntpsite_enable = root_enable.findall("lldp/lldpSys")
        for nexthop_enable in ntpsite_enable:
            for ele_enable in nexthop_enable:
                if ele_enable.tag in ["lldpEnable"]:
                    lldp_dict[ele_enable.tag] = ele_enable.text
        # NOTE(review): raises KeyError if the reply contains no
        # lldpSys/lldpEnable node — presumably the device always returns
        # one; confirm.
        if lldp_dict['lldpEnable'] == 'enabled':
            self.enable_flag = 1
        lldp_config.append(dict(lldpenable=lldp_dict['lldpEnable']))
        return lldp_config
def get_interface_lldp_disable_pre_config(self):
"""Get interface undo lldp disable configure"""
lldp_dict = dict()
interface_lldp_disable_dict = dict()
if self.enable_flag == 1:
conf_enable_str = CE_NC_GET_INTERFACE_LLDP_CONFIG
conf_enable_obj = get_nc_config(self.module, conf_enable_str)
if "<data/>" in conf_enable_obj:
return
xml_enable_str = conf_enable_obj.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_enable_str)
lldp_disable_enable = root.findall("lldp/lldpInterfaces/lldpInterface")
for nexthop_enable in lldp_disable_enable:
name = nexthop_enable.find("ifName")
status = nexthop_enable.find("lldpAdminStatus")
if name is not None and status is not None:
interface_lldp_disable_dict[name.text] = status.text
return interface_lldp_disable_dict
    def get_interface_lldp_disable_config(self):
        """Decide whether the per-interface admin-status needs changing.

        Sets ``self.conf_interface_lldp_disable_exsit`` when the desired
        lldpadminstatus differs from (or is absent in) the device state, and
        returns a one-element list wrapping the fetched per-interface dict.
        """
        lldp_config = list()
        interface_lldp_disable_dict_tmp = dict()
        if self.state == "present":
            if self.ifname:
                # NOTE(review): if the helper returns None (bare return on a
                # <data/> reply), the .keys() call below raises
                # AttributeError — verify the helper's return paths.
                interface_lldp_disable_dict_tmp = self.get_interface_lldp_disable_pre_config()
                key_list = interface_lldp_disable_dict_tmp.keys()
                if len(key_list) != 0:
                    for key in key_list:
                        if key == self.ifname:
                            # Change needed only when the device status
                            # differs from the requested one.
                            if interface_lldp_disable_dict_tmp[key] != self.lldpadminstatus:
                                self.conf_interface_lldp_disable_exsit = True
                            else:
                                self.conf_interface_lldp_disable_exsit = False
                        elif self.ifname not in key_list:
                            self.conf_interface_lldp_disable_exsit = True
                elif (len(key_list) == 0) and self.ifname and self.lldpadminstatus:
                    self.conf_interface_lldp_disable_exsit = True
        lldp_config.append(interface_lldp_disable_dict_tmp)
        return lldp_config
    def get_interface_tlv_disable_config(self):
        """Diff requested 'lldp tlv-disable' settings against the device.

        Walks every lldpInterface element returned by the device, records
        the interface's current TLV tx-enable flags, and compares them
        with the module parameters for self.ifname.

        Side effects: sets self.conf_tlv_disable_exsit (and self.changed)
        as soon as the first mismatch or missing key is found, returning
        early with whatever has been appended to the result so far.

        Returns a list of single-key dicts describing the current values
        of the keys that were compared.
        """
        lldp_config = list()
        lldp_dict = dict()  # NOTE(review): never used in this method; kept as-is (doc-only change)
        cur_interface_mdn_cfg = dict()  # current values read from the device
        exp_interface_mdn_cfg = dict()  # expected values from module params
        if self.enable_flag == 1:
            conf_str = CE_NC_GET_INTERFACE_TLV_DISABLE_CONFIG
            conf_obj = get_nc_config(self.module, conf_str)
            if "<data/>" in conf_obj:
                # Device reports no TLV-disable configuration at all.
                return lldp_config
            # Strip namespaces so plain ElementTree paths can be used below.
            xml_str = conf_obj.replace('\r', '').replace('\n', '')
            xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "")
            xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
            root = ElementTree.fromstring(xml_str)
            lldp_tlvdisable_ifname = root.findall("lldp/lldpInterfaces/lldpInterface")
            for ele in lldp_tlvdisable_ifname:
                # Fetch every TLV leaf up front; any of them may be absent.
                ifname_tmp = ele.find("ifName")
                manaddrtxenable_tmp = ele.find("tlvTxEnable/manAddrTxEnable")
                portdesctxenable_tmp = ele.find("tlvTxEnable/portDescTxEnable")
                syscaptxenable_tmp = ele.find("tlvTxEnable/sysCapTxEnable")
                sysdesctxenable_tmp = ele.find("tlvTxEnable/sysDescTxEnable")
                sysnametxenable_tmp = ele.find("tlvTxEnable/sysNameTxEnable")
                linkaggretxenable_tmp = ele.find("tlvTxEnable/linkAggreTxEnable")
                macphytxenable_tmp = ele.find("tlvTxEnable/macPhyTxEnable")
                maxframetxenable_tmp = ele.find("tlvTxEnable/maxFrameTxEnable")
                eee_tmp = ele.find("tlvTxEnable/eee")
                # Record the current value of every leaf that is present.
                if ifname_tmp is not None:
                    if ifname_tmp.text is not None:
                        cur_interface_mdn_cfg["ifname"] = ifname_tmp.text
                if ifname_tmp is not None and manaddrtxenable_tmp is not None:
                    if manaddrtxenable_tmp.text is not None:
                        cur_interface_mdn_cfg["manaddrtxenable"] = manaddrtxenable_tmp.text
                if ifname_tmp is not None and portdesctxenable_tmp is not None:
                    if portdesctxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['portdesctxenable'] = portdesctxenable_tmp.text
                if ifname_tmp is not None and syscaptxenable_tmp is not None:
                    if syscaptxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['syscaptxenable'] = syscaptxenable_tmp.text
                if ifname_tmp is not None and sysdesctxenable_tmp is not None:
                    if sysdesctxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['sysdesctxenable'] = sysdesctxenable_tmp.text
                if ifname_tmp is not None and sysnametxenable_tmp is not None:
                    if sysnametxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['sysnametxenable'] = sysnametxenable_tmp.text
                if ifname_tmp is not None and linkaggretxenable_tmp is not None:
                    if linkaggretxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['linkaggretxenable'] = linkaggretxenable_tmp.text
                if ifname_tmp is not None and macphytxenable_tmp is not None:
                    if macphytxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['macphytxenable'] = macphytxenable_tmp.text
                if ifname_tmp is not None and maxframetxenable_tmp is not None:
                    if maxframetxenable_tmp.text is not None:
                        cur_interface_mdn_cfg['maxframetxenable'] = maxframetxenable_tmp.text
                if ifname_tmp is not None and eee_tmp is not None:
                    if eee_tmp.text is not None:
                        cur_interface_mdn_cfg['eee'] = eee_tmp.text
                if self.state == "present":
                    if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
                        if self.type_tlv_disable == 'basic_tlv':
                            # Collect the expected values the user supplied.
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                            if self.manaddrtxenable:
                                exp_interface_mdn_cfg['manaddrtxenable'] = self.manaddrtxenable
                            if self.portdesctxenable:
                                exp_interface_mdn_cfg['portdesctxenable'] = self.portdesctxenable
                            if self.syscaptxenable:
                                exp_interface_mdn_cfg['syscaptxenable'] = self.syscaptxenable
                            if self.sysdesctxenable:
                                exp_interface_mdn_cfg['sysdesctxenable'] = self.sysdesctxenable
                            if self.sysnametxenable:
                                exp_interface_mdn_cfg['sysnametxenable'] = self.sysnametxenable
                            if self.ifname == ifname_tmp.text:
                                key_list = exp_interface_mdn_cfg.keys()
                                key_list_cur = cur_interface_mdn_cfg.keys()
                                if len(key_list) != 0:
                                    for key in key_list:
                                        # Record the current value of each compared key.
                                        if key == "ifname" and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname']))
                                        if "manaddrtxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(manaddrtxenable=cur_interface_mdn_cfg['manaddrtxenable']))
                                        if "portdesctxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(portdesctxenable=cur_interface_mdn_cfg['portdesctxenable']))
                                        if "syscaptxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(syscaptxenable=cur_interface_mdn_cfg['syscaptxenable']))
                                        if "sysdesctxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(sysdesctxenable=cur_interface_mdn_cfg['sysdesctxenable']))
                                        if "sysnametxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(sysnametxenable=cur_interface_mdn_cfg['sysnametxenable']))
                                        if key in key_list_cur:
                                            if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                # Value differs: a push is needed; stop early.
                                                self.conf_tlv_disable_exsit = True
                                                self.changed = True
                                                return lldp_config
                                        else:
                                            # Key not configured on the device yet.
                                            self.conf_tlv_disable_exsit = True
                                            return lldp_config
                        if self.type_tlv_disable == 'dot3_tlv':
                            # Same comparison for the 802.3 organizational TLVs.
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                            if self.linkaggretxenable:
                                exp_interface_mdn_cfg['linkaggretxenable'] = self.linkaggretxenable
                            if self.macphytxenable:
                                exp_interface_mdn_cfg['macphytxenable'] = self.macphytxenable
                            if self.maxframetxenable:
                                exp_interface_mdn_cfg['maxframetxenable'] = self.maxframetxenable
                            if self.eee:
                                exp_interface_mdn_cfg['eee'] = self.eee
                            if self.ifname == ifname_tmp.text:
                                key_list = exp_interface_mdn_cfg.keys()
                                key_list_cur = cur_interface_mdn_cfg.keys()
                                if len(key_list) != 0:
                                    for key in key_list:
                                        if key == "ifname" and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname']))
                                        if "linkaggretxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(linkaggretxenable=cur_interface_mdn_cfg['linkaggretxenable']))
                                        if "macphytxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(macphytxenable=cur_interface_mdn_cfg['macphytxenable']))
                                        if "maxframetxenable" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(maxframetxenable=cur_interface_mdn_cfg['maxframetxenable']))
                                        if "eee" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(eee=cur_interface_mdn_cfg['eee']))
                                        if key in key_list_cur:
                                            if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                self.conf_tlv_disable_exsit = True
                                                self.changed = True
                                                return lldp_config
                                        else:
                                            self.conf_tlv_disable_exsit = True
                                            return lldp_config
        return lldp_config
    def get_interface_tlv_enable_config(self):
        """Diff requested 'lldp tlv-enable' settings against the device.

        Mirrors get_interface_tlv_disable_config() for the dot1 protocol
        identity TLV and the DCBX TLV.  Side effects: sets
        self.conf_tlv_enable_exsit (and self.changed) on the first
        mismatch or missing key, returning early.

        Returns a list of single-key dicts describing the compared
        current values.
        """
        lldp_config = list()
        lldp_dict = dict()  # NOTE(review): never used in this method; kept as-is (doc-only change)
        cur_interface_mdn_cfg = dict()  # current values read from the device
        exp_interface_mdn_cfg = dict()  # expected values from module params
        if self.enable_flag == 1:
            conf_str = CE_NC_GET_INTERFACE_TLV_ENABLE_CONFIG
            conf_obj = get_nc_config(self.module, conf_str)
            if "<data/>" in conf_obj:
                # Device reports no TLV-enable configuration at all.
                return lldp_config
            # Strip namespaces so plain ElementTree paths can be used below.
            xml_str = conf_obj.replace('\r', '').replace('\n', '')
            xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "")
            xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
            root = ElementTree.fromstring(xml_str)
            lldpenablesite = root.findall("lldp/lldpInterfaces/lldpInterface")
            for ele in lldpenablesite:
                ifname_tmp = ele.find("ifName")
                protoidtxenable_tmp = ele.find("tlvTxEnable/protoIdTxEnable")
                dcbx_tmp = ele.find("tlvTxEnable/dcbx")
                # Record the current value of every leaf that is present.
                if ifname_tmp is not None:
                    if ifname_tmp.text is not None:
                        cur_interface_mdn_cfg["ifname"] = ifname_tmp.text
                if ifname_tmp is not None and protoidtxenable_tmp is not None:
                    if protoidtxenable_tmp.text is not None:
                        cur_interface_mdn_cfg["protoidtxenable"] = protoidtxenable_tmp.text
                if ifname_tmp is not None and dcbx_tmp is not None:
                    if dcbx_tmp.text is not None:
                        cur_interface_mdn_cfg['dcbx'] = dcbx_tmp.text
                if self.state == "present":
                    if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
                        if self.type_tlv_enable == 'dot1_tlv':
                            # Collect the expected values the user supplied.
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                            if self.protoidtxenable:
                                exp_interface_mdn_cfg['protoidtxenable'] = self.protoidtxenable
                            if self.ifname == ifname_tmp.text:
                                key_list = exp_interface_mdn_cfg.keys()
                                key_list_cur = cur_interface_mdn_cfg.keys()
                                if len(key_list) != 0:
                                    for key in key_list:
                                        if "protoidtxenable" == str(key) and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(protoidtxenable=cur_interface_mdn_cfg['protoidtxenable']))
                                        if key in key_list_cur:
                                            if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                # Value differs: a push is needed; stop early.
                                                self.conf_tlv_enable_exsit = True
                                                self.changed = True
                                                return lldp_config
                                        else:
                                            # Key not configured on the device yet.
                                            self.conf_tlv_enable_exsit = True
                                            return lldp_config
                        if self.type_tlv_enable == 'dcbx':
                            if self.ifname:
                                exp_interface_mdn_cfg['ifname'] = self.ifname
                            if self.dcbx:
                                exp_interface_mdn_cfg['dcbx'] = self.dcbx
                            if self.ifname == ifname_tmp.text:
                                key_list = exp_interface_mdn_cfg.keys()
                                key_list_cur = cur_interface_mdn_cfg.keys()
                                if len(key_list) != 0:
                                    for key in key_list:
                                        if "dcbx" == key and self.ifname == cur_interface_mdn_cfg['ifname']:
                                            lldp_config.append(dict(dcbx=cur_interface_mdn_cfg['dcbx']))
                                        if key in key_list_cur:
                                            if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
                                                self.conf_tlv_enable_exsit = True
                                                self.changed = True
                                                return lldp_config
                                        else:
                                            self.conf_tlv_enable_exsit = True
                                            return lldp_config
        return lldp_config
def get_interface_interval_config(self):
lldp_config = list()
lldp_dict = dict()
cur_interface_mdn_cfg = dict()
exp_interface_mdn_cfg = dict()
interface_lldp_disable_dict_tmp2 = self.get_interface_lldp_disable_pre_config()
if self.enable_flag == 1:
if interface_lldp_disable_dict_tmp2[self.ifname] != 'disabled':
conf_str = CE_NC_GET_INTERFACE_INTERVAl_CONFIG
conf_obj = get_nc_config(self.module, conf_str)
if "<data/>" in conf_obj:
return lldp_config
xml_str = conf_obj.replace('\r', '').replace('\n', '')
xml_str = xml_str.replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "")
xml_str = xml_str.replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
txintervalsite = root.findall("lldp/lldpInterfaces/lldpInterface")
for ele in txintervalsite:
ifname_tmp = ele.find("ifName")
txinterval_tmp = ele.find("msgInterval/txInterval")
if ifname_tmp is not None:
if ifname_tmp.text is not None:
cur_interface_mdn_cfg["ifname"] = ifname_tmp.text
if txinterval_tmp is not None:
if txinterval_tmp.text is not None:
cur_interface_mdn_cfg["txinterval"] = txinterval_tmp.text
if self.state == "present":
if self.ifname:
exp_interface_mdn_cfg["ifname"] = self.ifname
if self.txinterval:
exp_interface_mdn_cfg["txinterval"] = self.txinterval
if self.ifname == ifname_tmp.text:
key_list = exp_interface_mdn_cfg.keys()
key_list_cur = cur_interface_mdn_cfg.keys()
if len(key_list) != 0:
for key in key_list:
if "txinterval" == str(key) and self.ifname == cur_interface_mdn_cfg['ifname']:
lldp_config.append(dict(ifname=cur_interface_mdn_cfg['ifname'], txinterval=exp_interface_mdn_cfg['txinterval']))
if key in key_list_cur:
if str(exp_interface_mdn_cfg[key]) != str(cur_interface_mdn_cfg[key]):
self.conf_interval_exsit = True
lldp_config.append(cur_interface_mdn_cfg)
return lldp_config
else:
self.conf_interval_exsit = True
return lldp_config
return lldp_config
def config_global_lldp_enable(self):
if self.state == 'present':
if self.enable_flag == 0 and self.lldpenable == 'enabled':
xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "LLDP_ENABLE_CONFIG")
self.changed = True
elif self.enable_flag == 1 and self.lldpenable == 'disabled':
xml_str = CE_NC_MERGE_GLOBA_LLDPENABLE_CONFIG % self.lldpenable
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "LLDP_ENABLE_CONFIG")
self.changed = True
def config_interface_lldp_disable_config(self):
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.enable_flag == 1 and self.conf_interface_lldp_disable_exsit:
if self.ifname:
xml_str = CE_NC_MERGE_INTERFACE_LLDP_CONFIG % (self.ifname, self.lldpadminstatus)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "INTERFACE_LLDP_DISABLE_CONFIG")
self.changed = True
def config_interface_tlv_disable_config(self):
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.enable_flag == 1 and self.conf_tlv_disable_exsit:
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
if self.portdesctxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_PORTDESCTXENABLE % self.portdesctxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_PORTDESCTXENABLE")
self.changed = True
if self.manaddrtxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MANADDRTXENABLE % self.manaddrtxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_MANADDRTXENABLE")
self.changed = True
if self.syscaptxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSCAPTXENABLE % self.syscaptxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_SYSCAPTXENABLE")
self.changed = True
if self.sysdesctxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSDESCTXENABLE % self.sysdesctxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_SYSDESCTXENABLE")
self.changed = True
if self.sysnametxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_SYSNAMETXENABLE % self.sysnametxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_SYSNAMETXENABLE")
self.changed = True
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
if self.linkaggretxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_LINKAGGRETXENABLE % self.linkaggretxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_LINKAGGRETXENABLE")
self.changed = True
if self.macphytxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MACPHYTXENABLE % self.macphytxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_MACPHYTXENABLE")
self.changed = True
if self.maxframetxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_MAXFRAMETXENABLE % self.maxframetxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_MAXFRAMETXENABLE")
self.changed = True
if self.eee:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_DISABLE_EEE % self.eee) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_DISABLE_EEE")
self.changed = True
def config_interface_tlv_enable_config(self):
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.enable_flag == 1 and self.conf_tlv_enable_exsit:
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
if self.protoidtxenable:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_PROTOIDTXENABLE % self.protoidtxenable) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_ENABLE_DOT1_PORT_VLAN")
self.changed = True
if self.type_tlv_enable == 'dcbx':
if self.ifname:
if self.dcbx:
xml_str = (CE_NC_MERGE_INTERFACE_TLV_CONFIG_HEADER % self.ifname) + \
(CE_NC_MERGE_INTERFACE_TLV_CONFIG_ENABLE_DCBX % self.dcbx) + \
CE_NC_MERGE_INTERFACE_TLV_CONFIG_TAIL
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "TLV_ENABLE_DCBX_VLAN")
self.changed = True
def config_interface_interval_config(self):
if self.function_lldp_interface_flag == 'intervalINTERFACE':
tmp = self.get_interface_lldp_disable_pre_config()
if self.enable_flag == 1 and self.conf_interval_exsit and tmp[self.ifname] != 'disabled':
if self.ifname:
if self.txinterval:
xml_str = CE_NC_MERGE_INTERFACE_INTERVAl_CONFIG % (self.ifname, self.txinterval)
ret_xml = set_nc_config(self.module, xml_str)
self.check_response(ret_xml, "INTERFACE_INTERVAL_CONFIG")
self.changed = True
def get_existing(self):
"""get existing information"""
self.get_lldp_enable_pre_config()
if self.lldpenable:
self.existing['globalLLDPENABLE'] = self.get_lldp_enable_pre_config()
if self.function_lldp_interface_flag == 'disableINTERFACE':
self.existing['disableINTERFACE'] = self.get_interface_lldp_disable_config()
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
self.existing['tlvdisableINTERFACE'] = self.get_interface_tlv_disable_config()
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
self.existing['tlvenableINTERFACE'] = self.get_interface_tlv_enable_config()
if self.function_lldp_interface_flag == 'intervalINTERFACE':
self.existing['intervalINTERFACE'] = self.get_interface_interval_config()
def get_proposed(self):
"""get proposed"""
if self.lldpenable:
self.proposed = dict(lldpenable=self.lldpenable)
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.enable_flag == 1:
self.proposed = dict(ifname=self.ifname, lldpadminstatus=self.lldpadminstatus)
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.enable_flag == 1:
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
if self.manaddrtxenable:
self.proposed = dict(ifname=self.ifname, manaddrtxenable=self.manaddrtxenable)
if self.portdesctxenable:
self.proposed = dict(ifname=self.ifname, portdesctxenable=self.portdesctxenable)
if self.syscaptxenable:
self.proposed = dict(ifname=self.ifname, syscaptxenable=self.syscaptxenable)
if self.sysdesctxenable:
self.proposed = dict(ifname=self.ifname, sysdesctxenable=self.sysdesctxenable)
if self.sysnametxenable:
self.proposed = dict(ifname=self.ifname, sysnametxenable=self.sysnametxenable)
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
if self.linkaggretxenable:
self.proposed = dict(ifname=self.ifname, linkaggretxenable=self.linkaggretxenable)
if self.macphytxenable:
self.proposed = dict(ifname=self.ifname, macphytxenable=self.macphytxenable)
if self.maxframetxenable:
self.proposed = dict(ifname=self.ifname, maxframetxenable=self.maxframetxenable)
if self.eee:
self.proposed = dict(ifname=self.ifname, eee=self.eee)
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.enable_flag == 1:
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
if self.protoidtxenable:
self.proposed = dict(ifname=self.ifname, protoidtxenable=self.protoidtxenable)
if self.type_tlv_enable == 'dcbx':
if self.ifname:
if self.dcbx:
self.proposed = dict(ifname=self.ifname, dcbx=self.dcbx)
if self.function_lldp_interface_flag == 'intervalINTERFACE':
tmp1 = self.get_interface_lldp_disable_pre_config()
if self.enable_flag == 1 and tmp1[self.ifname] != 'disabled':
self.proposed = dict(ifname=self.ifname, txinterval=self.txinterval)
def config_lldp_interface(self):
"""config lldp interface"""
if self.lldpenable:
self.config_global_lldp_enable()
if self.function_lldp_interface_flag == 'disableINTERFACE':
self.config_interface_lldp_disable_config()
elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
self.config_interface_tlv_disable_config()
elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
self.config_interface_tlv_enable_config()
elif self.function_lldp_interface_flag == 'intervalINTERFACE':
self.config_interface_interval_config()
def get_end_state(self):
"""get end_state information"""
self.get_lldp_enable_pre_config()
if self.lldpenable:
self.end_state['globalLLDPENABLE'] = self.get_lldp_enable_pre_config()
if self.function_lldp_interface_flag == 'disableINTERFACE':
self.end_state['disableINTERFACE'] = self.get_interface_lldp_disable_config()
if self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
self.end_state['tlvdisableINTERFACE'] = self.get_interface_tlv_disable_config()
if self.function_lldp_interface_flag == 'tlvenableINTERFACE':
self.end_state['tlvenableINTERFACE'] = self.get_interface_tlv_enable_config()
if self.function_lldp_interface_flag == 'intervalINTERFACE':
self.end_state['intervalINTERFACE'] = self.get_interface_interval_config()
def get_update_cmd(self):
"""Get updated commands"""
cmds = []
if self.state == "present":
if self.lldpenable == "enabled":
cmds.append("lldp enable")
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.lldpadminstatus == 'disabled':
cmds.append("lldp disable")
else:
cmds.append("undo lldp disable")
elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.manaddrtxenable:
if self.manaddrtxenable == "false":
cmds.append("lldp tlv-disable basic-tlv management-address")
if self.manaddrtxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv management-address")
if self.portdesctxenable:
if self.portdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv port-description")
if self.portdesctxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv port-description")
if self.syscaptxenable:
if self.syscaptxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-capability")
if self.syscaptxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-capability")
if self.sysdesctxenable:
if self.sysdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-description")
if self.sysdesctxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-description")
if self.sysnametxenable:
if self.sysnametxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-name")
if self.sysnametxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-name")
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.linkaggretxenable:
if self.linkaggretxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv link-aggregation")
if self.linkaggretxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv link-aggregation")
if self.macphytxenable:
if self.macphytxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv mac-physic")
if self.macphytxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv mac-physic")
if self.maxframetxenable:
if self.maxframetxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv max-frame-size")
if self.maxframetxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv max-frame-size")
if self.eee:
if self.eee == "false":
cmds.append("lldp tlv-disable dot3-tlv eee")
if self.eee == "true":
cmds.append("undo lldp tlv-disable dot3-tlv eee")
elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.protoidtxenable:
if self.protoidtxenable == "false":
cmds.append("undo lldp tlv-enable dot1-tlv protocol-identity")
if self.protoidtxenable == "true":
cmds.append("lldp tlv-enable dot1-tlv protocol-identity")
if self.type_tlv_enable == 'dcbx':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.dcbx:
if self.dcbx == "false":
cmds.append("undo lldp tlv-enable dcbx")
if self.dcbx == "true":
cmds.append("lldp tlv-enable dcbx")
elif self.function_lldp_interface_flag == 'intervalINTERFACE':
if self.ifname:
cmds.append("%s %s" % ("interface", self.ifname))
if self.txinterval:
cmds.append("lldp transmit fast-mode interval %s" % self.txinterval)
elif self.lldpenable == "disabled":
cmds.append("undo lldp enable")
else:
if self.enable_flag == 1:
if self.function_lldp_interface_flag == 'disableINTERFACE':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.lldpadminstatus == 'disabled':
cmds.append("lldp disable")
else:
cmds.append("undo lldp disable")
elif self.function_lldp_interface_flag == 'tlvdisableINTERFACE':
if self.type_tlv_disable == 'basic_tlv':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.manaddrtxenable:
if self.manaddrtxenable == "false":
cmds.append("lldp tlv-disable basic-tlv management-address")
if self.manaddrtxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv management-address")
if self.portdesctxenable:
if self.portdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv port-description")
if self.portdesctxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv port-description")
if self.syscaptxenable:
if self.syscaptxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-capability")
if self.syscaptxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-capability")
if self.sysdesctxenable:
if self.sysdesctxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-description")
if self.sysdesctxenable == "true":
cli_str = "%s %s\n" % (cli_str, "undo lldp tlv-disable basic-tlv system-description")
if self.sysnametxenable:
if self.sysnametxenable == "false":
cmds.append("lldp tlv-disable basic-tlv system-name")
if self.sysnametxenable == "true":
cmds.append("undo lldp tlv-disable basic-tlv system-name")
if self.type_tlv_disable == 'dot3_tlv':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.linkaggretxenable:
if self.linkaggretxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv link-aggregation")
if self.linkaggretxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv link-aggregation")
if self.macphytxenable:
if self.macphytxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv mac-physic")
if self.macphytxenable == "true":
cli_str = "%s %s\n" % (cli_str, "undo lldp tlv-disable dot3-tlv mac-physic")
if self.maxframetxenable:
if self.maxframetxenable == "false":
cmds.append("lldp tlv-disable dot3-tlv max-frame-size")
if self.maxframetxenable == "true":
cmds.append("undo lldp tlv-disable dot3-tlv max-frame-size")
if self.eee:
if self.eee == "false":
cmds.append("lldp tlv-disable dot3-tlv eee")
if self.eee == "true":
cmds.append("undo lldp tlv-disable dot3-tlv eee")
elif self.function_lldp_interface_flag == 'tlvenableINTERFACE':
if self.type_tlv_enable == 'dot1_tlv':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.protoidtxenable:
if self.protoidtxenable == "false":
cmds.append("undo lldp tlv-enable dot1-tlv protocol-identity")
if self.protoidtxenable == "true":
cmds.append("lldp tlv-enable dot1-tlv protocol-identity")
if self.type_tlv_enable == 'dcbx':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.dcbx:
if self.dcbx == "false":
cmds.append("undo lldp tlv-enable dcbx")
if self.dcbx == "true":
cmds.append("lldp tlv-enable dcbx")
elif self.function_lldp_interface_flag == 'intervalINTERFACE':
if self.ifname:
cmds.append("interface %s" % self.ifname)
if self.txinterval:
cmds.append("lldp transmit fast-mode interval %s" % self.txinterval)
self.updates_cmd = cmds
def work(self):
"""Execute task"""
self.check_params()
self.get_existing()
self.get_proposed()
self.config_lldp_interface()
self.get_update_cmd()
self.get_end_state()
self.show_result()
def main():
    """Main function"""
    # Module parameter schema for the CloudEngine LLDP interface module.
    # NOTE(review): the TLV tx-enable options are declared type='bool' here,
    # while Lldp_interface.get_update_cmd compares them against the strings
    # "true"/"false" -- confirm the intended parameter type upstream.
    argument_spec = dict(
        lldpenable=dict(choices=['enabled', 'disabled']),
        function_lldp_interface_flag=dict(choices=['disableINTERFACE', 'tlvdisableINTERFACE', 'tlvenableINTERFACE', 'intervalINTERFACE'], type='str'),
        type_tlv_disable=dict(choices=['basic_tlv', 'dot3_tlv'], type='str'),
        type_tlv_enable=dict(choices=['dot1_tlv', 'dcbx'], type='str'),
        ifname=dict(type='str'),
        lldpadminstatus=dict(choices=['txOnly', 'rxOnly', 'txAndRx', 'disabled'], type='str'),
        manaddrtxenable=dict(type='bool'),
        portdesctxenable=dict(type='bool'),
        syscaptxenable=dict(type='bool'),
        sysdesctxenable=dict(type='bool'),
        sysnametxenable=dict(type='bool'),
        portvlantxenable=dict(type='bool'),
        protovlantxenable=dict(type='bool'),
        txprotocolvlanid=dict(type='int'),
        vlannametxenable=dict(type='bool'),
        txvlannameid=dict(type='int'),
        txinterval=dict(type='int'),
        protoidtxenable=dict(type='bool'),
        macphytxenable=dict(type='bool'),
        linkaggretxenable=dict(type='bool'),
        maxframetxenable=dict(type='bool'),
        eee=dict(type='bool'),
        dcbx=dict(type='bool'),
        state=dict(type='str', choices=['absent', 'present'], default='present'),
    )
    # Instantiate the implementation class and run the full workflow
    # (validate -> read -> diff -> apply -> report).
    lldp_interface_obj = Lldp_interface(argument_spec)
    lldp_interface_obj.work()
# Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ag-wood/ansible-modules-extras | cloud/vmware/vmware_portgroup.py | 47 | 4163 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_portgroup
short_description: Create a VMware portgroup
description:
- Create a VMware portgroup
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
hostname:
description:
- The hostname or IP address of the ESXi server
required: True
username:
description:
- The username of the ESXi server
required: True
aliases: ['user', 'admin']
password:
description:
- The password of the ESXi server
required: True
aliases: ['pass', 'pwd']
switch_name:
description:
- vSwitch to modify
required: True
portgroup_name:
description:
- Portgroup name to add
required: True
vlan_id:
description:
- VLAN ID to assign to portgroup
required: True
'''
EXAMPLES = '''
Example from Ansible playbook
- name: Add Management Network VM Portgroup
local_action:
module: vmware_portgroup
hostname: esxi_hostname
username: esxi_username
password: esxi_password
switch_name: vswitch_name
portgroup_name: portgroup_name
vlan_id: vlan_id
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def create_port_group(host_system, portgroup_name, vlan_id, vswitch_name):
    """Add a standard-vSwitch port group to the host network configuration.

    :param host_system: vim.HostSystem object to configure
    :param portgroup_name: name of the port group to create
    :param vlan_id: VLAN ID to assign to the port group
    :param vswitch_name: name of the existing vSwitch to attach to
    :return: True (UpdateNetworkConfig raises a fault on failure)
    """
    spec = vim.host.PortGroup.Specification()
    spec.name = portgroup_name
    spec.vlanId = vlan_id
    spec.vswitchName = vswitch_name
    spec.policy = vim.host.NetworkPolicy()

    portgroup_config = vim.host.PortGroup.Config()
    portgroup_config.changeOperation = "add"
    portgroup_config.spec = spec

    config = vim.host.NetworkConfig()
    config.portgroup = [portgroup_config]
    # The call raises vmodl faults on error; its return value is not needed
    # (the original bound it to an unused variable).
    host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
    return True
def main():
    """Module entry point: connect to the ESXi host and create the port group."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
                              switch_name=dict(required=True, type='str'),
                              vlan_id=dict(required=True, type='int')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    portgroup_name = module.params['portgroup_name']
    switch_name = module.params['switch_name']
    vlan_id = module.params['vlan_id']

    try:
        content = connect_to_api(module)
        hosts = get_all_objs(content, [vim.HostSystem])
        if not hosts:
            # Report through Ansible instead of raising SystemExit, so the
            # error reaches the user as a proper module failure.
            module.fail_json(msg="Unable to locate Physical Host.")
        # next(iter(...)) works on both Python 2 and 3; dict.keys() is not
        # subscriptable on Python 3, so the original host.keys()[0] crashed.
        host_system = next(iter(hosts))
        changed = create_port_group(host_system, portgroup_name, vlan_id, switch_name)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
loveyoupeng/rt | modules/web/src/main/native/Tools/Scripts/webkitpy/replay/main.py | 3 | 5231 | # Copyright (C) 2011 Google Inc. All rights reserved.
# Copyright (C) 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import shutil
import sys
import tempfile
from webkitpy.common.checkout.scm.detection import detect_scm_system
from webkitpy.common.system.executive import ScriptError
class InputGeneratorTests:
def __init__(self, reset_results, executive):
self.reset_results = reset_results
self.executive = executive
def generate_from_json(self, json_file, output_directory):
cmd = ['python',
'JavaScriptCore/replay/scripts/CodeGeneratorReplayInputs.py',
'--outputDir', output_directory,
'--force',
'--framework', 'Test',
'--test',
json_file]
exit_code = 0
try:
stderr_output = self.executive.run_command(cmd)
if stderr_output:
self.write_error_file(json_file, output_directory, stderr_output)
except ScriptError, e:
print e.output
exit_code = e.exit_code
return exit_code
def write_error_file(self, input_filepath, output_directory, error_output):
output_filepath = os.path.join(output_directory, os.path.basename(input_filepath) + '-error')
with open(output_filepath, "w") as output_file:
output_file.write(error_output)
def detect_changes(self, work_directory, reference_directory):
changes_found = False
for output_file in os.listdir(work_directory):
cmd = ['diff',
'-u',
'-N',
os.path.join(reference_directory, output_file),
os.path.join(work_directory, output_file)]
exit_code = 0
try:
output = self.executive.run_command(cmd)
except ScriptError, e:
output = e.output
exit_code = e.exit_code
if exit_code or output:
print 'FAIL: %s' % output_file
print output
changes_found = True
else:
print 'PASS: %s' % output_file
return changes_found
def run_tests(self, input_directory, reference_directory):
work_directory = reference_directory
passed = True
for input_file in os.listdir(input_directory):
(name, extension) = os.path.splitext(input_file)
if extension != '.json':
continue
# Generate output into the work directory (either the given one or a
# temp one if not reset_results is performed)
if not self.reset_results:
work_directory = tempfile.mkdtemp()
if self.generate_from_json(os.path.join(input_directory, input_file), work_directory):
passed = False
if self.reset_results:
print "Reset results for test: %s" % (input_file)
continue
# Detect changes
if self.detect_changes(work_directory, reference_directory):
passed = False
shutil.rmtree(work_directory)
return passed
def main(self):
current_scm = detect_scm_system(os.curdir)
os.chdir(os.path.join(current_scm.checkout_root, 'Source'))
all_tests_passed = True
input_directory = os.path.join('JavaScriptCore', 'replay', 'scripts', 'tests')
reference_directory = os.path.join('JavaScriptCore', 'replay', 'scripts', 'tests', 'expected')
if not self.run_tests(input_directory, reference_directory):
all_tests_passed = False
print ''
if all_tests_passed:
print 'All tests PASS!'
return 0
else:
print 'Some tests FAIL! (To update the reference files, execute "run-input-generator-tests --reset-results")'
return -1
| gpl-2.0 |
caiocsalvador/whats_the_craic | lib/python3.4/site-packages/django/contrib/auth/hashers.py | 3 | 17456 | from __future__ import unicode_literals
import base64
import binascii
import hashlib
import importlib
from collections import OrderedDict
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache
from django.utils.crypto import (
constant_time_compare, get_random_string, pbkdf2,
)
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
    """Return True if `encoded` looks like a hash produced by a known hasher."""
    if encoded is None:
        return False
    if encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
        return False
    try:
        identify_hasher(encoded)
    except ValueError:
        # Unknown algorithm prefix or unconfigured hasher.
        return False
    else:
        return True
def check_password(password, encoded, setter=None, preferred='default'):
    """
    Return True if the raw ``password`` matches the stored ``encoded`` hash.

    When ``setter`` is given and the stored hash is outdated (a different
    algorithm than ``preferred``, or a stale work factor), it is called with
    the plain password so the caller can re-hash and persist it.
    """
    if password is None or not is_password_usable(encoded):
        return False

    preferred = get_hasher(preferred)
    hasher = identify_hasher(encoded)

    # `or` short-circuits exactly like the original two-step check: only
    # consult must_update() when the algorithms already match.
    needs_rehash = (hasher.algorithm != preferred.algorithm or
                    preferred.must_update(encoded))
    is_correct = hasher.verify(password, encoded)
    if setter and is_correct and needs_rehash:
        setter(password)
    return is_correct
def make_password(password, salt=None, hasher='default'):
    """
    Turn a plain-text password into a hash for database storage.

    Same as encode() but generates a new random salt when none is given.
    A ``password`` of None yields UNUSABLE_PASSWORD_PREFIX plus a random
    suffix, which can never verify successfully (disallows logins); the
    random suffix reduces the chance of timing/equality shortcuts against
    staff or superuser accounts (see ticket #20079).
    """
    if password is None:
        return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
    hasher = get_hasher(hasher)
    salt = salt or hasher.salt()
    return hasher.encode(password, salt)
@lru_cache.lru_cache()
def get_hashers():
    """Instantiate (once, cached) every hasher in settings.PASSWORD_HASHERS."""
    loaded = []
    for backend_path in settings.PASSWORD_HASHERS:
        hasher = import_string(backend_path)()
        if not getattr(hasher, 'algorithm'):
            raise ImproperlyConfigured("hasher doesn't specify an "
                                       "algorithm name: %s" % backend_path)
        loaded.append(hasher)
    return loaded
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
    """Map each configured hasher's algorithm name to its instance (cached)."""
    return dict((hasher.algorithm, hasher) for hasher in get_hashers())
@receiver(setting_changed)
def reset_hashers(**kwargs):
    """Invalidate cached hashers whenever PASSWORD_HASHERS changes (tests)."""
    if kwargs['setting'] != 'PASSWORD_HASHERS':
        return
    get_hashers.cache_clear()
    get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
    """
    Return a loaded password hasher instance.

    Accepts an already-instantiated hasher (returned unchanged), the string
    'default' for the first configured hasher, or an algorithm name looked
    up among the hashers configured in settings.
    """
    if hasattr(algorithm, 'algorithm'):
        # Already a hasher instance; pass it through.
        return algorithm
    if algorithm == 'default':
        return get_hashers()[0]
    hashers = get_hashers_by_algorithm()
    try:
        return hashers[algorithm]
    except KeyError:
        raise ValueError("Unknown password hashing algorithm '%s'. "
                         "Did you specify it in the PASSWORD_HASHERS "
                         "setting?" % algorithm)
def identify_hasher(encoded):
    """
    Return the hasher instance that produced the given encoded hash.

    Recognizes legacy hash layouts from ancient Django versions and maps
    them to the "unsalted" hashers; otherwise the algorithm is the text
    before the first '$'. Raises ValueError for unknown algorithms.
    """
    # Ancient versions of Django created plain MD5 passwords and accepted
    # MD5 passwords with an empty salt.
    if len(encoded) == 32 and '$' not in encoded:
        algorithm = 'unsalted_md5'
    elif len(encoded) == 37 and encoded.startswith('md5$$'):
        algorithm = 'unsalted_md5'
    # Ancient versions of Django accepted SHA1 passwords with an empty salt.
    elif len(encoded) == 46 and encoded.startswith('sha1$$'):
        algorithm = 'unsalted_sha1'
    else:
        algorithm = encoded.split('$', 1)[0]
    return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
    """
    Return ``hash`` with only its first ``show`` characters visible; the
    remainder is replaced by ``char`` for safe display.
    """
    visible = hash[:show]
    hidden_count = max(len(hash) - show, 0)
    return visible + char * hidden_count
class BasePasswordHasher(object):
    """
    Abstract base class for password hashers

    When creating your own hasher, you need to override algorithm,
    verify(), encode() and safe_summary().

    PasswordHasher objects are immutable.
    """
    # Identifier stored as the first '$'-separated field of encoded hashes.
    algorithm = None
    # Optional backing library: a module path string, or a
    # (name, module_path) pair.
    library = None

    def _open_library_note(self):
        pass

    def _load_library(self):
        # Import the optional third-party library lazily so hashers that
        # are configured but never used don't require their dependency.
        if self.library is not None:
            if isinstance(self.library, (tuple, list)):
                name, mod_path = self.library
            else:
                mod_path = self.library
            try:
                module = importlib.import_module(mod_path)
            except ImportError as e:
                raise ValueError("Couldn't load %r algorithm library: %s" %
                                 (self.__class__.__name__, e))
            return module
        raise ValueError("Hasher %r doesn't specify a library attribute" %
                         self.__class__.__name__)

    def salt(self):
        """
        Generates a cryptographically secure nonce salt in ASCII
        """
        return get_random_string()

    def verify(self, password, encoded):
        """
        Checks if the given password is correct
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')

    def encode(self, password, salt):
        """
        Creates an encoded database value

        The result is normally formatted as "algorithm$salt$hash" and
        must be fewer than 128 characters.
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')

    def safe_summary(self, encoded):
        """
        Returns a summary of safe values

        The result is a dictionary and will be used where the password field
        must be displayed to construct a safe representation of the password.
        """
        raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')

    def must_update(self, encoded):
        # Subclasses return True when a stored hash should be re-encoded,
        # e.g. because its work factor is stale; base class never forces it.
        return False
class PBKDF2PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the PBKDF2 algorithm (recommended)

    Configured to use PBKDF2 + HMAC + SHA256.
    Iterations may be changed safely but you must rename the algorithm
    if you change SHA256.
    """
    algorithm = "pbkdf2_sha256"
    # Work factor; raising it makes must_update() flag old hashes for
    # transparent upgrade on next successful login.
    iterations = 24000
    digest = hashlib.sha256

    def encode(self, password, salt, iterations=None):
        # Encoded layout: "<algorithm>$<iterations>$<salt>$<base64 hash>".
        assert password is not None
        assert salt and '$' not in salt
        if not iterations:
            iterations = self.iterations
        hash = pbkdf2(password, salt, iterations, digest=self.digest)
        hash = base64.b64encode(hash).decode('ascii').strip()
        return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)

    def verify(self, password, encoded):
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        # Re-encode using the *stored* iteration count, then compare in
        # constant time to avoid timing side channels.
        encoded_2 = self.encode(password, salt, int(iterations))
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        # Masked representation safe for display (e.g. in the admin).
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('iterations'), iterations),
            (_('salt'), mask_hash(salt)),
            (_('hash'), mask_hash(hash)),
        ])

    def must_update(self, encoded):
        # Re-hash when the stored iteration count differs from the current
        # class setting.
        algorithm, iterations, salt, hash = encoded.split('$', 3)
        return int(iterations) != self.iterations
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
    """
    Alternate PBKDF2 hasher which uses SHA1, the default PRF
    recommended by PKCS #5. This is compatible with other
    implementations of PBKDF2, such as openssl's
    PKCS5_PBKDF2_HMAC_SHA1().
    """
    # Only the algorithm label and PRF differ; encode/verify/etc. are
    # inherited from PBKDF2PasswordHasher.
    algorithm = "pbkdf2_sha1"
    digest = hashlib.sha1
class BCryptSHA256PasswordHasher(BasePasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm (recommended)

    This is considered by many to be the most secure algorithm but you
    must first install the bcrypt library. Please be warned that
    this library depends on native C code and might cause portability
    issues.
    """
    algorithm = "bcrypt_sha256"
    digest = hashlib.sha256
    # (import name, module path) pair consumed by _load_library().
    library = ("bcrypt", "bcrypt")
    rounds = 12

    def salt(self):
        # bcrypt generates its own salt format embedding the work factor.
        bcrypt = self._load_library()
        return bcrypt.gensalt(self.rounds)

    def encode(self, password, salt):
        bcrypt = self._load_library()
        # Need to reevaluate the force_bytes call once bcrypt is supported on
        # Python 3

        # Hash the password prior to using bcrypt to prevent password truncation
        #   See: https://code.djangoproject.com/ticket/20138
        if self.digest is not None:
            # We use binascii.hexlify here because Python3 decided that a hex encoded
            #   bytestring is somehow a unicode.
            password = binascii.hexlify(self.digest(force_bytes(password)).digest())
        else:
            password = force_bytes(password)

        data = bcrypt.hashpw(password, salt)
        # bcrypt embeds the salt and work factor in `data`; only the
        # algorithm label is prepended.
        return "%s$%s" % (self.algorithm, force_text(data))

    def verify(self, password, encoded):
        algorithm, data = encoded.split('$', 1)
        assert algorithm == self.algorithm
        bcrypt = self._load_library()

        # Hash the password prior to using bcrypt to prevent password truncation
        #   See: https://code.djangoproject.com/ticket/20138
        if self.digest is not None:
            # We use binascii.hexlify here because Python3 decided that a hex encoded
            #   bytestring is somehow a unicode.
            password = binascii.hexlify(self.digest(force_bytes(password)).digest())
        else:
            password = force_bytes(password)

        # Ensure that our data is a bytestring
        data = force_bytes(data)
        # force_bytes() necessary for py-bcrypt compatibility
        hashpw = force_bytes(bcrypt.hashpw(password, data))

        # Constant-time comparison against the stored hash.
        return constant_time_compare(data, hashpw)

    def safe_summary(self, encoded):
        # bcrypt layout: "$2x$<rounds>$<22-char salt><31-char checksum>".
        algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
        assert algorithm == self.algorithm
        salt, checksum = data[:22], data[22:]
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('work factor'), work_factor),
            (_('salt'), mask_hash(salt)),
            (_('checksum'), mask_hash(checksum)),
        ])

    def must_update(self, encoded):
        # Re-hash when the embedded work factor is stale.
        algorithm, empty, algostr, rounds, data = encoded.split('$', 4)
        return int(rounds) != self.rounds
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
    """
    Secure password hashing using the bcrypt algorithm

    This is considered by many to be the most secure algorithm but you
    must first install the bcrypt library. Please be warned that
    this library depends on native C code and might cause portability
    issues.

    This hasher does not first hash the password which means it is subject to
    the 72 character bcrypt password truncation, most use cases should prefer
    the BCryptSHA256PasswordHasher.

    See: https://code.djangoproject.com/ticket/20138
    """
    algorithm = "bcrypt"
    # digest=None disables the SHA256 pre-hash in the inherited
    # encode()/verify(), feeding the raw password to bcrypt.
    digest = None
class SHA1PasswordHasher(BasePasswordHasher):
    """
    The SHA1 password hashing algorithm (not recommended)
    """
    algorithm = "sha1"

    def encode(self, password, salt):
        # Encoded layout: "sha1$<salt>$<hex sha1(salt + password)>".
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        # Re-encode with the stored salt; constant-time compare.
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        # Masked representation safe for display.
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])
class MD5PasswordHasher(BasePasswordHasher):
    """
    The Salted MD5 password hashing algorithm (not recommended)
    """
    algorithm = "md5"

    def encode(self, password, salt):
        # Encoded layout: "md5$<salt>$<hex md5(salt + password)>".
        assert password is not None
        assert salt and '$' not in salt
        hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
        return "%s$%s$%s" % (self.algorithm, salt, hash)

    def verify(self, password, encoded):
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        # Re-encode with the stored salt; constant-time compare.
        encoded_2 = self.encode(password, salt)
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        # Masked representation safe for display.
        algorithm, salt, hash = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), mask_hash(salt, show=2)),
            (_('hash'), mask_hash(hash)),
        ])
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
    """
    Very insecure algorithm that you should *never* use; stores SHA1 hashes
    with an empty salt.

    This class is implemented because Django used to accept such password
    hashes. Some older Django installs still have these values lingering
    around so we need to handle and upgrade them properly.
    """
    algorithm = "unsalted_sha1"

    def salt(self):
        # Legacy format: no salt at all.
        return ''

    def encode(self, password, salt):
        assert salt == ''
        # Encoded layout keeps the double '$' of the legacy "sha1$$<hash>".
        hash = hashlib.sha1(force_bytes(password)).hexdigest()
        return 'sha1$$%s' % hash

    def verify(self, password, encoded):
        # Re-encode with the (empty) salt; constant-time compare.
        encoded_2 = self.encode(password, '')
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        assert encoded.startswith('sha1$$')
        # Strip the "sha1$$" prefix; mask the remaining hex digest.
        hash = encoded[6:]
        return OrderedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(hash)),
        ])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
    """
    Incredibly insecure algorithm that you should *never* use; stores unsalted
    MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
    empty salt.

    This class is implemented because Django used to store passwords this way
    and to accept such password hashes. Some older Django installs still have
    these values lingering around so we need to handle and upgrade them
    properly.
    """
    algorithm = "unsalted_md5"

    def salt(self):
        # Legacy format: no salt at all.
        return ''

    def encode(self, password, salt):
        assert salt == ''
        # Bare 32-char hex digest with no algorithm prefix.
        return hashlib.md5(force_bytes(password)).hexdigest()

    def verify(self, password, encoded):
        # Also accept the "md5$$<hash>" variant by stripping its prefix
        # before comparing.
        if len(encoded) == 37 and encoded.startswith('md5$$'):
            encoded = encoded[5:]
        encoded_2 = self.encode(password, '')
        return constant_time_compare(encoded, encoded_2)

    def safe_summary(self, encoded):
        return OrderedDict([
            (_('algorithm'), self.algorithm),
            (_('hash'), mask_hash(encoded, show=3)),
        ])
class CryptPasswordHasher(BasePasswordHasher):
    """
    Password hashing using UNIX crypt (not recommended)

    The crypt module is not supported on all platforms.
    """
    algorithm = "crypt"
    library = "crypt"

    def salt(self):
        # crypt(3) uses a traditional two-character salt.
        return get_random_string(2)

    def encode(self, password, salt):
        crypt = self._load_library()
        assert len(salt) == 2
        data = crypt.crypt(force_str(password), salt)
        # we don't need to store the salt, but Django used to do this
        # (crypt embeds the salt in `data`, hence the empty middle field).
        return "%s$%s$%s" % (self.algorithm, '', data)

    def verify(self, password, encoded):
        crypt = self._load_library()
        algorithm, salt, data = encoded.split('$', 2)
        assert algorithm == self.algorithm
        # Passing `data` as the salt makes crypt reuse the embedded salt;
        # compare in constant time.
        return constant_time_compare(data, crypt.crypt(force_str(password), data))

    def safe_summary(self, encoded):
        algorithm, salt, data = encoded.split('$', 2)
        assert algorithm == self.algorithm
        return OrderedDict([
            (_('algorithm'), algorithm),
            (_('salt'), salt),
            (_('hash'), mask_hash(data, show=3)),
        ])
| mit |
royalharsh/grpc | tools/buildgen/bunch.py | 44 | 2469 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Allows dot-accessible dictionaries."""
class Bunch(dict):
    """Dictionary subclass whose entries are also readable as attributes."""

    def __init__(self, d):
        super(Bunch, self).__init__(d)
        # Mirror the mapping into the instance dict so `bunch.key` works.
        self.__dict__.update(d)
def to_bunch(var):
    """Recursively convert dicts to Bunch objects; lists element-wise."""
    if isinstance(var, list):
        return [to_bunch(item) for item in var]
    if not isinstance(var, dict):
        # Scalars (and anything else) pass through unchanged.
        return var
    converted = {}
    for key, value in var.items():
        if isinstance(value, (list, dict)):
            value = to_bunch(value)
        converted[key] = value
    return Bunch(converted)
def merge_json(dst, add):
    """Recursively merge the JSON-like structure ``add`` into ``dst`` in place.

    Dicts merge key-by-key (keys named '#' are comments and are never
    recursed into); lists concatenate; mismatched types raise.
    """
    if isinstance(dst, dict) and isinstance(add, dict):
        for key, value in add.items():
            if key not in dst:
                dst[key] = value
            elif key != '#':
                merge_json(dst[key], value)
    elif isinstance(dst, list) and isinstance(add, list):
        dst.extend(add)
    else:
        raise Exception('Tried to merge incompatible objects %s %s\n\n%r\n\n%r' % (type(dst).__name__, type(add).__name__, dst, add))
| bsd-3-clause |
ataylor32/django | django/forms/utils.py | 169 | 5975 | from __future__ import unicode_literals
import json
import sys
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.

    The returned string will contain a leading space followed by key="value",
    XML-style pairs. In the case of a boolean value, the key will appear
    without a value. It is assumed that the keys do not need to be
    XML-escaped. If the passed dictionary is empty, then return an empty
    string.

    The result is passed through 'mark_safe' (by way of 'format_html_join').
    """
    key_value_attrs = []
    boolean_attrs = []
    for attr, value in attrs.items():
        if not isinstance(value, bool):
            key_value_attrs.append((attr, value))
        elif value:
            # True booleans render as a bare attribute; False ones vanish.
            boolean_attrs.append((attr,))
    return (
        format_html_join('', ' {}="{}"', sorted(key_value_attrs)) +
        format_html_join('', ' {}', sorted(boolean_attrs))
    )
@html_safe
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.

    The dictionary keys are the field names, and the values are the errors.
    """
    def as_data(self):
        # Expand each value into its underlying ValidationError objects.
        return {f: e.as_data() for f, e in self.items()}

    def as_json(self, escape_html=False):
        # JSON object mapping field name -> list of {message, code} dicts.
        return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})

    def as_ul(self):
        # HTML rendering as <ul class="errorlist">; empty dict renders ''.
        if not self:
            return ''
        return format_html(
            '<ul class="errorlist">{}</ul>',
            format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
        )

    def as_text(self):
        # Plain text: "* field" lines followed by indented error messages.
        output = []
        for field, errors in self.items():
            output.append('* %s' % field)
            output.append('\n'.join('  * %s' % e for e in errors))
        return '\n'.join(output)

    def __str__(self):
        # Default string form is the HTML <ul> rendering.
        return self.as_ul()
@html_safe
@python_2_unicode_compatible
class ErrorList(UserList, list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def __init__(self, initlist=None, error_class=None):
        super(ErrorList, self).__init__(initlist)

        # Extra CSS classes are appended after the default "errorlist".
        if error_class is None:
            self.error_class = 'errorlist'
        else:
            self.error_class = 'errorlist {}'.format(error_class)

    def as_data(self):
        # Underlying ValidationError instances rather than rendered strings.
        return ValidationError(self.data).error_list

    def get_json_data(self, escape_html=False):
        # List of {"message", "code"} dicts; optionally HTML-escaped.
        errors = []
        for error in self.as_data():
            message = list(error)[0]
            errors.append({
                'message': escape(message) if escape_html else message,
                'code': error.code or '',
            })
        return errors

    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))

    def as_ul(self):
        # HTML rendering as <ul>; an empty list renders as ''.
        if not self.data:
            return ''

        return format_html(
            '<ul class="{}">{}</ul>',
            self.error_class,
            format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
        )

    def as_text(self):
        return '\n'.join('* %s' % e for e in self)

    def __str__(self):
        return self.as_ul()

    def __repr__(self):
        return repr(list(self))

    def __contains__(self, item):
        return item in list(self)

    def __eq__(self, other):
        return list(self) == other

    def __ne__(self, other):
        return list(self) != other

    def __getitem__(self, i):
        # ValidationError entries render to their first message string.
        error = self.data[i]
        if isinstance(error, ValidationError):
            return list(error)[0]
        return force_text(error)

    def __reduce_ex__(self, *args, **kwargs):
        # The `list` reduce function returns an iterator as the fourth element
        # that is normally used for repopulating. Since we only inherit from
        # `list` for `isinstance` backward compatibility (Refs #17413) we
        # nullify this iterator as it would otherwise result in duplicate
        # entries. (Refs #23594)
        info = super(UserList, self).__reduce_ex__(*args, **kwargs)
        return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            # make_aware can fail for nonexistent or ambiguous local times
            # (DST transitions); report that as a field validation error.
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            # six.reraise keeps the original traceback attached to the new
            # ValidationError (Python 2/3 compatible re-raise).
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    return value
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.
    """
    if settings.USE_TZ and value is not None and timezone.is_aware(value):
        return timezone.make_naive(value, timezone.get_current_timezone())
    return value
| bsd-3-clause |
gavioto/fiware-orion | test/acceptance/lettuce/integration/steps_lib/background.py | 8 | 1483 | # -*- coding: utf-8 -*-
"""
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
#
# This file is part of Orion Context Broker.
#
# Orion Context Broker is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Orion Context Broker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Orion Context Broker. If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by this license please contact with
# iot_support at tid dot es
"""
__author__ = 'Jon Calderin Goñi (jon.caldering@gmail.com)'
from lettuce import step, world
from integration.tools.general_utils import start_cb
@step('the Context Broker started with multitenancy')
def the_context_broker_started_with_multitenancy(step):
    """
    Get the info of the properties and start the context broker
    :param step:
    :return:
    """
    # Only rebuild the CLI parameters when the broker is not already
    # configured for multitenancy.
    # NOTE(review): when cb_config_to_start is already 'multitenancy',
    # world.bin_parms is assumed to hold the right flags from a previous
    # start -- confirm it is always initialized before this step runs.
    if world.cb_config_to_start != 'multitenancy':
        world.bin_parms = '-multiservice -t 0-255 -db acceptance'
    # (Re)start the broker and remember the active configuration so later
    # invocations can skip rebuilding the parameters.
    start_cb(world.bin_parms)
    world.cb_config_to_start = 'multitenancy'
F-Secure/resource-api | src/tests/http_test.py | 2 | 8199 | """
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
import json
import mock
from werkzeug.test import Client as BaseClient
from werkzeug.wrappers import BaseResponse
from resource_api_http.http import Application
from resource_api import errors
from .base_test import BaseTest
class Client(BaseClient):
    """Werkzeug test client that sends every request body as JSON."""

    def open(self, *args, **kw):
        # Force the JSON content type and serialize the payload so tests
        # can pass plain Python structures via `data=`.
        kw["content_type"] = "application/json"
        if "data" in kw:
            kw["data"] = json.dumps(kw["data"])
        return BaseClient.open(self, *args, **kw)
class BaseHttpTest(BaseTest):
    """Base test case wiring the sample service into a JSON WSGI test client."""

    def setUp(self):
        super(BaseHttpTest, self).setUp()
        # Wrap the resource-api service in the WSGI application under test.
        self.client = Client(Application(self.srv), BaseResponse)

    def assertResponse(self, response, data=None, status_code=200):
        # Every endpoint must answer JSON; body comparison is optional.
        self.assertEqual(response.status_code, status_code)
        self.assertEqual(response.headers["Content-Type"], "application/json")
        if data is not None:
            self.assertEqual(json.loads(response.data), data)
class HttpTest(BaseHttpTest):
    """Happy-path tests for the HTTP API.

    Exercises, via the werkzeug test client from ``BaseHttpTest``:
    the schema endpoint, resource collections (listing, counting,
    filtering, item CRUD) and both to-many and to-one link endpoints.
    Fixtures ``self.srv`` (service), ``self.src`` (Source collection) and
    ``self.target`` (Target collection) are set up by the base class.
    """

    def test_get_schema(self):
        # OPTIONS on the root must return the service schema.
        self.assertResponse(
            self.client.options("/"),
            self.srv.get_schema())

    def test_get_resource_collection(self):
        self.assertResponse(
            self.client.get("/foo.Source"),
            self.src.serialize())

    def test_get_resource_collection_with_filtering(self):
        # Only declared query params reach storage; unknown ones
        # (foo, wang.wong) are dropped before GET_KEYS is issued.
        self.client.get("/foo.Source?foo=bar&foo=bar2&wang.wong=3&query_param=Foo")
        self.assertEqual(self.srv.storage.call_log[-1],
                         ("GET_KEYS", "tests.sample_app.resources.Source", {"query_param": u"Foo"}))

    def test_get_resource_collection_count(self):
        self.assertResponse(
            self.client.get("/foo.Source:count"),
            len(self.src))

    def test_get_resource_collection_count_with_filtering(self):
        self.client.get("/foo.Source:count?foo=bar&foo=bar2&wang.wong=3&query_param=Foo")
        self.assertEqual(self.srv.storage.call_log[-1],
                         ("GET_KEYS", "tests.sample_app.resources.Source", {"query_param": u"Foo"}))

    def test_get_resource_item(self):
        self.assertResponse(
            self.client.get("/foo.Source/1"),
            self.src.get(1).serialize())

    def test_delete_resource_item(self):
        # DELETE returns 204 and the item is gone from the collection.
        self.assertResponse(
            self.client.delete("/foo.Source/1"),
            status_code=204)
        self.assertRaises(errors.DoesNotExist, self.src.get, 1)

    def test_update_resource_item(self):
        self.assertResponse(
            self.client.patch("/foo.Source/1", data={"extra": "wuga wuga"}),
            status_code=204)
        self.assertEqual(self.src.get(1).data["extra"], "wuga wuga")

    def test_create_resource_item(self):
        # POST may create the item together with nested links in one call;
        # the response body is the new primary key.
        self.assertResponse(
            self.client.post("/foo.Source", data={"pk": 3, "extra": "foo", "@links": {"targets": [{
                "@target": 1, "extra": "woof", "more_data": "bar"}]}}),
            data=3, status_code=201)
        self.assertEqual(self.src.get(3).data["extra"], "foo")
        self.assertEqual(self.src.get(3).links.targets.get(1).data["extra"], "woof")

    def test_get_link_to_many_collection(self):
        self.assertResponse(
            self.client.get("/foo.Source/1/targets"),
            self.src.get(1).links.targets.serialize())

    def test_get_link_to_many_collection_with_filtering(self):
        self.client.get("/foo.Source/1/targets?foo=bar&foo=bar2&wang.wong=3&query_param=Foo")
        self.assertEqual(self.srv.storage.call_log[-1],
                         ("GET_KEYS", (1, "tests.sample_app.resources.Source:targets"), {"query_param": "Foo"}))

    def test_get_link_to_many_collection_count(self):
        self.assertResponse(
            self.client.get("/foo.Source/1/targets:count"),
            len(self.src.get(1).links.targets))

    def test_get_link_to_many_collection_count_with_filtering(self):
        self.client.get("/foo.Source/1/targets:count?foo=bar&foo=bar2&wang.wong=3&query_param=Foo")
        self.assertEqual(self.srv.storage.call_log[-1],
                         ("GET_KEYS", (1, "tests.sample_app.resources.Source:targets"), {"query_param": "Foo"}))

    def test_update_link_to_many_item(self):
        self.assertResponse(
            self.client.patch("/foo.Source/1/targets/1", data={"extra": "cadavr"}),
            status_code=204)
        self.assertEqual(self.src.get(1).links.targets.get(1).data["extra"], "cadavr")

    def test_delete_link_to_many_item(self):
        self.assertResponse(
            self.client.delete("/foo.Source/1/targets/1"),
            status_code=204)
        self.assertRaises(errors.DoesNotExist, self.src.get(1).links.targets.get, 1)

    def test_get_link_to_many_item_data(self):
        # The ``:data`` suffix returns the link payload itself.
        self.assertResponse(
            self.client.get("/foo.Source/1/targets/1:data"),
            self.src.get(1).links.targets.get(1).serialize())

    def test_get_reverse_link_to_many_item_data(self):
        self.assertResponse(
            self.client.get("/foo.Target/1/sources/1:data"),
            self.target.get(1).links.sources.get(1).serialize())

    def test_create_link_to_many_item(self):
        self.assertResponse(
            self.client.post("/foo.Source/1/targets", data={"@target": 2, "extra": "uff"}),
            status_code=201)
        self.assertEqual(self.src.get(1).links.targets.get(2).data["extra"], "uff")

    def test_set_link_to_one(self):
        # PUT replaces the to-one link target wholesale.
        self.assertResponse(
            self.client.put("/foo.Source/1/the_target", data={"@target": 2, "extra": "uff"}),
            status_code=201)
        self.assertEqual(self.src.get(1).links.the_target.item.data, {"extra": "uff"})

    def test_update_link_to_one(self):
        self.assertResponse(
            self.client.patch("/foo.Source/1/the_target/item", data={"extra": "cadavr"}),
            status_code=204)
        self.assertEqual(self.src.get(1).links.the_target.item.data["extra"], "cadavr")

    def test_delete_link_to_one(self):
        self.assertResponse(
            self.client.delete("/foo.Source/1/the_target/item"),
            status_code=204)
        self.assertResponse(self.client.get("/foo.Source/1/the_target/item"), status_code=404)

    def test_get_link_to_one_data(self):
        self.assertResponse(
            self.client.get("/foo.Source/1/the_target/item:data"),
            self.src.get(1).links.the_target.item.serialize())

    def test_get_link_to_one(self):
        # Without ``:data`` the endpoint returns only the target key.
        self.assertResponse(self.client.get("/foo.Source/1/the_target/item"), data=2, status_code=200)
class HttpErrorTest(BaseHttpTest):
    """Verifies the application's exception -> HTTP status mapping.

    Routing is forced to fail by injecting a mock URL map whose
    ``bind_to_environ`` raises the exception under test; each domain
    exception must surface as its designated status code.
    """

    def setUp(self):
        super(HttpErrorTest, self).setUp()
        self._url_map = url_map = mock.Mock()

        class CustomApp(Application):
            # Deliberately does NOT call super().__init__: the mock url_map
            # replaces whatever the real Application would build.
            def __init__(self, service, debug=False):
                self._url_map = url_map
                self._debug = debug

        self.client = Client(CustomApp(self.srv), BaseResponse)

    def _check_exception(self, exception_class, data, error_code):
        # Make URL binding raise, then assert the resulting status/body.
        self._url_map.bind_to_environ.side_effect = exception_class(data)
        self.assertResponse(
            self.client.get("/URL"),
            data=data,
            status_code=error_code)

    def test_does_not_exist(self):
        self._check_exception(errors.DoesNotExist, "Foo bar", 404)

    def test_multiple_found(self):
        self._check_exception(errors.MultipleFound, "Foo bar", 500)

    def test_validation_error(self):
        self._check_exception(errors.ValidationError, "Foo bar", 400)

    def test_data_conflict_error(self):
        self._check_exception(errors.DataConflictError, "Foo bar", 409)

    def test_forbidden(self):
        self._check_exception(errors.Forbidden, "Foo bar", 405)

    def test_authorization_error(self):
        self._check_exception(errors.AuthorizationError, "Foo bar", 403)

    def test_not_implemented_error(self):
        self._check_exception(NotImplementedError, "Foo bar", 501)

    def test_server_exception(self):
        # Unknown exceptions are masked: generic message, 500.
        self._url_map.bind_to_environ.side_effect = Exception("Foo bar")
        self.assertResponse(
            self.client.get("/URL"),
            data="Server error",
            status_code=500)
| apache-2.0 |
icdishb/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 4 | 8628 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
    """
    Dummy classifier to test recursive feature elimination
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # RFE requires a fitted ``coef_``; expose a flat all-ones vector so
        # every feature ranks equally.
        assert_true(len(X) == len(Y))
        self.coef_ = np.ones(X.shape[1], dtype=np.float64)
        return self

    def predict(self, T):
        return T.shape[0]
    # All prediction-like APIs share the same trivial implementation.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Score depends only on the hyper-parameter, never on the data:
        # lets tests drive grid scores deterministically via foo_param.
        if self.foo_param > 1:
            score = 1.
        else:
            score = 0.
        return score

    def get_params(self, deep=True):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        return self
def test_rfe_set_params():
    """Passing estimator params via ``estimator_params`` must be equivalent
    to configuring the estimator directly (same predictions)."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 noise columns so RFE has something to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    y_pred = rfe.fit(X, y).predict(X)

    clf = SVC()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
              estimator_params={'kernel': 'linear'})
    y_pred2 = rfe.fit(X, y).predict(X)
    assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
    """RFE driven by ``feature_importances_`` (RandomForest) must select the
    same feature subset as RFE driven by ``coef_`` (linear SVC)."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    clf = RandomForestClassifier(n_estimators=10, n_jobs=1)
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    clf_svc = SVC(kernel="linear")
    rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
    rfe_svc.fit(X, y)

    # Check if the supports are equal
    diff_support = rfe.get_support() == rfe_svc.get_support()
    assert_true(sum(diff_support) == len(diff_support))
def test_rfe_deprecation_estimator_params():
    """Using ``estimator_params`` must emit the 0.16 deprecation warning for
    both RFE and RFECV."""
    deprecation_message = ("The parameter 'estimator_params' is deprecated as "
                           "of version 0.16 and will be removed in 0.18. The "
                           "parameter is no longer necessary because the "
                           "value is set via the estimator initialisation or "
                           "set_params method.")
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                             estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)

    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFECV(estimator=SVC(), step=1, cv=5,
                               estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
def test_rfe():
    """RFE on iris + noise columns must recover the 4 original features, and
    dense vs sparse input must give identical results."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target

    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)

    # The noise columns were eliminated, leaving exactly the iris data.
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])

    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
    """RFE must run with any estimator exposing ``coef_`` after fit; with
    MockClassifier's uniform coefs no feature can actually be removed."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target

    # dense model
    clf = MockClassifier()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
    """RFECV on iris + noise must filter out all noise columns, for dense
    and sparse input, custom losses, scorer objects, and step > 1."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)

    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)

    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)

    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test fix on grid_scores
    def test_scorer(estimator, X, y):
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))

    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
    """RFECV must also work with the minimal MockClassifier interface."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
    """Whatever the ``step`` value, RFE must stop at the default target of
    half the features: a fractional step whose floor is <= 0 is clamped to
    removing at least one feature per iteration."""
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")

    # Cover the three step regimes: floor(step * n_features) <= 0,
    # fractional step in (0, 1) with a positive floor, and an integer step.
    for step in (0.01, 0.20, 5):
        fitted = RFE(estimator, step=step).fit(X, y)
        assert_equal(fitted.support_.sum(), n_features // 2)
| bsd-3-clause |
ZombieNinjaPirate/HonSSH-utilities | bifrozt/find/networkobj.py | 1 | 3561 | """Searches for network related objects. """
"""
Copyright (c) 2015, Are Hansen - Honeypot Development
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND AN
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import geoip2.database
import sys
import re
def ipv4geo(ipv4_obj,
            db_path='/home/odin/Documents/PYTHON/BZ009/geoipdb/GeoLite2-City.mmdb'):
    """Resolve GeoLite2-City geo data for a collection of IPv4 addresses.

    :param ipv4_obj: iterable of IPv4 address strings
    :param db_path: path to the GeoLite2-City.mmdb database. Previously a
                    hard coded user-specific path; parameterised (with the
                    old path as default) so other hosts can use it.
    :return: list, sorted by IP, of ``{ipv4: {ISO, CN, RGN, CITY, ZIP,
             LAT, LONG}}`` dicts - one record per address.
    """
    # Python 2 compatibility hack kept from the original: geoip2 returns
    # unicode strings and implicit str conversions would otherwise fail.
    reload(sys)
    sys.setdefaultencoding("utf-8")

    readipdb = geoip2.database.Reader(db_path)
    try:
        gelocl = []
        for ipv4 in sorted(ipv4_obj):
            response = readipdb.city(ipv4)
            # BUG FIX: the original appended the same accumulator dict on
            # every iteration, producing a list of N references to one dict
            # holding all addresses. Build one record per address instead.
            gelocl.append({ipv4: {
                'ISO': response.country.iso_code,
                'CN': response.country.name,
                'RGN': response.subdivisions.most_specific.name,
                'CITY': response.city.name,
                'ZIP': response.postal.code,
                'LAT': response.location.latitude,
                'LONG': response.location.longitude
            }})
        return gelocl
    finally:
        # BUG FIX: the reader was never closed, leaking the mmap handle.
        readipdb.close()
def ipv4match(ipv4_obj):
    """Extract unique IPv4-looking addresses from a string or token list.

    A token matches when it *starts with* a dotted quad whose first octet
    has two or three digits beginning with 1-9 (so e.g. "8.8.8.8" does NOT
    match - kept from the original regex; TODO confirm that single-digit
    first octets are intentionally excluded).

    :param ipv4_obj: whitespace-separated string, or iterable of tokens
    :return: list of the unique matched addresses, in string sort order
    """
    ipv4_list = []
    if isinstance(ipv4_obj, str):
        ipv4_obj = ipv4_obj.split()
    for obj in ipv4_obj:
        matches = re.match(r"[1-9]\d{1,2}\.\d{1,3}\.\d{1,3}\.\d{1,3}", obj)
        if matches:
            # BUG FIX: the original stored and deduplicated the match
            # *object* (and `matches[0]` raises on Python 2 match objects),
            # so duplicates were never filtered. Use the matched text, and
            # drop the leftover debug print.
            ipv4 = matches.group(0)
            if ipv4 not in ipv4_list:
                ipv4_list.append(ipv4)
    return sorted(ipv4_list)
| gpl-2.0 |
pshen/ansible | lib/ansible/modules/network/avi/avi_cloudproperties.py | 49 | 3631 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudproperties
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of CloudProperties Avi RESTful Object
description:
- This module is used to configure CloudProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cc_props:
description:
- Cloudconnector properties.
cc_vtypes:
description:
- Cloud types supported by cloudconnector.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
hyp_props:
description:
- Hypervisor properties.
info:
description:
- Properties specific to a cloud type.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create CloudProperties object
avi_cloudproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cloudproperties
"""
RETURN = '''
obj:
description: CloudProperties (api/cloudproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: declare the cloudproperties argument spec, build the
    AnsibleModule and delegate the actual API work to avi_ansible_api."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        cc_props=dict(type='dict',),
        cc_vtypes=dict(type='list',),
        hyp_props=dict(type='list',),
        info=dict(type='list',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Merge in the shared Avi connection arguments (controller, username...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # The SDK import is attempted at module load; fail cleanly if absent.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'cloudproperties',
                           set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
tareqalayan/ansible | lib/ansible/plugins/filter/json_query.py | 197 | 1857 | # (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleFilterError
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
    '''Query data using jmespath query language ( http://jmespath.org ). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
    '''
    if not HAS_LIB:
        raise AnsibleError('You need to install "jmespath" prior to running '
                           'json_query filter')

    try:
        return jmespath.search(expr, data)
    except jmespath.exceptions.JMESPathError as e:
        raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
    except Exception as e:
        # For older jmespath, we can get ValueError and TypeError without much info.
        raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
class FilterModule(object):
    ''' Query filter '''

    def filters(self):
        # Ansible plugin hook: maps the Jinja2 filter name to its callable.
        return {
            'json_query': json_query
        }
| gpl-3.0 |
streed/antZoo | antZoo/clusterView/app.py | 1 | 2109 | import random
import uuid
from flask import Flask, render_template
from flask.ext.wtf import Form
from wtforms import TextField
from wtforms.validators import Required
from ..gossipService.gossiping.ttypes import GossipData
from ..gossip import make_client
app = Flask( __name__ )
nodes = [
{ "address": "localhost", "port": 33000 },
]
class KeyValueForm( Form ):
    # Simple key/value input form used by the index view to gossip new data;
    # both fields are mandatory.
    key = TextField( "Key", validators=[Required()] )
    value = TextField( "Value", validators=[Required()] )
@app.route("/", methods=["GET", "POST"])
def index():
    """Cluster overview page.

    On POST, disseminates the submitted key/value pair through a randomly
    chosen node. Then polls every known node for its data and view,
    discovering new nodes from the views as it goes, and renders the result.
    Unreachable nodes are silently skipped (best effort).
    """
    form = KeyValueForm()
    if form.validate_on_submit():
        key = form.key.data
        value = form.value.data
        n = random.choice(nodes)
        c = make_client(n["address"], n["port"])
        c.disseminate(GossipData(uuid=str(uuid.uuid4()), key=key, value=value))

    rows = []
    # NOTE: nodes is deliberately appended to while being iterated - newly
    # discovered peers are visited in the same pass.
    for n in nodes:
        try:
            c = make_client(n["address"], n["port"])
            data = c.getData()
            view = c.get_view()
            for v in view:
                i = {"address": v.address, "port": v.port}
                if i not in nodes:
                    nodes.append(i)
            rows.append(("%s:%d" % (n["address"], n["port"]), data[:], view))
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Unreachable node -> skip it.
            pass
    return render_template("index.html", form=form, nodes=rows)
@app.route("/nodes")
def nodesValues():
    """Render each reachable node with a copy of its current data.

    Unreachable nodes are skipped (best effort), matching the index view.
    """
    ret = []
    for n in nodes:
        try:
            c = make_client(n["address"], n["port"])
            data = c.getData()
            ret.append(("%s:%d" % (n["address"], n["port"]), data[:]))
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Unreachable node -> skip it.
            pass
    return render_template("nodes.html", nodes=ret)
@app.route("/views")
def nodesView():
    """Render each node together with its peers' "host:port" addresses."""
    rows = []
    for node in nodes:
        client = make_client(node["address"], node["port"])
        peers = ["%s:%d" % (peer.address, peer.port)
                 for peer in client.get_view()]
        rows.append(("%s:%d" % (node["address"], node["port"]), peers))
    return render_template("views.html", nodes=rows)
| mit |
CompSciCabal/SMRTYPRTY | experiments/scott/immutable_strings/test_strings.py | 1 | 4272 | import unittest
from itertools import product
from .str_default import String
from .str_list import ListString
from .str_bst import BSTString
from .str_ll import LinkedListString
from .str_rope import RopeString
from .str_mway import MWayString
class TestDefault(unittest.TestCase):
    """Behavioural test suite for the immutable string implementations.

    ``setUp`` binds ``self.String`` to one implementation; subclasses swap
    in the alternative implementations and inherit every test.
    """

    def setUp(self):
        self.String = String

    def test_indexing(self):
        # Each character is reachable by its index; negative and
        # out-of-range indices raise IndexError.
        s = "some string"
        my_s = self.String(s)
        for i, c in enumerate(s):
            assert c == my_s[i]
        with self.assertRaises(IndexError):
            my_s[-1]
        with self.assertRaises(IndexError):
            my_s[32]

    def run_concat_test(self, desc, a, b, expected):
        # Concatenation returns a new String and leaves operands untouched.
        a = self.String(a)
        b = self.String(b)
        expected = self.String(expected)
        a_before = a
        b_before = b
        result = a + b
        assert isinstance(result, self.String), desc
        assert result == expected, desc
        assert a == a_before, desc
        assert b == b_before, desc

    def test_concat_simple(self):
        cases = [
            ("empty and empty", "", "", ""),
            ("empty and normal", "", "some string", "some string"),
            ("normal and empty", "some string", "", "some string"),
            ("normal and normal", "some", " string", "some string")]
        for desc, a, b, expected in cases:
            self.run_concat_test(desc, a, b, expected)

    def test_concat_small_lengths(self):
        # Exhaustive cross product of operand lengths 0..16.
        n = 17
        cases = [("len = {} and len = {}".format(i, j),
                  "a" * i,
                  "b" * j,
                  "a" * i + "b" * j)
                 for i, j in product(range(n), range(n))]
        for desc, a, b, expected in cases:
            self.run_concat_test(desc, a, b, expected)

    def run_split_test(self, desc, a, n):
        # split(n) yields chunks of size n, leaving the original untouched.
        my_a = self.String(a)
        a_before = my_a
        expected = [self.String(a[i:i+n]) for i in range(0, len(a), n)]
        result = my_a.split(n)
        assert isinstance(result, list), desc
        assert result == expected, desc
        assert a_before == my_a, desc

    def test_split_simple(self):
        cases = [("empty and 1", "", 1),
                 ("empty and -1", "", -1),
                 ("normal and 1", "some string", 1),
                 ("normal and 2", "some string", 2),
                 ("normal and 3", "some string", 3),
                 ("normal and longer than string", "some string", 32)]
        for desc, a, n in cases:
            self.run_split_test(desc, a, n)

    def test_split_error(self):
        # Chunk size 0 is invalid.
        cases = [("empty and 0", "", 0),
                 ("normal and 0", "some string", 0)]
        for desc, a, n in cases:
            with self.assertRaises(ValueError):
                self.String(a).split(n)

    def run_split_join_test(self, desc, a, n):
        # Re-joining the chunks of split(n) reconstructs the original.
        my_a = self.String(a)
        split = my_a.split(n)
        joined = self.String()
        for s in split:
            joined = joined + s
        assert joined == my_a, desc

    def run_sjsj_test(self, desc, a, n):
        # Split/join round trip twice: the second round operates on the
        # string rebuilt by the first.
        my_a = self.String(a)
        split = my_a.split(n)
        joined = self.String()
        for s in split:
            joined = joined + s
        assert joined == my_a, desc

        split_2 = joined.split(n)
        joined_2 = self.String()
        # BUG FIX: the original looped over `split` here, so `split_2`
        # (splitting the rebuilt string) was computed but never exercised.
        for s in split_2:
            joined_2 = joined_2 + s
        assert joined_2 == my_a, desc

    def test_split_join(self):
        cases = [("normal and 1", "some string", 1),
                 ("normal and 2", "some string", 2),
                 ("normal and 3", "some string", 3),
                 ("normal and longer than string", "some string", 32)]
        for desc, a, n in cases:
            self.run_split_join_test(desc, a, n)
        for desc, a, n in cases:
            self.run_sjsj_test(desc, a, n)
class TestListString(TestDefault):
    # Runs the full TestDefault suite against the list-backed implementation.
    def setUp(self):
        self.String = ListString
class TestBSTString(TestDefault):
    # Runs the full TestDefault suite against the BST-backed implementation.
    def setUp(self):
        self.String = BSTString
class TestLinkedListString(TestDefault):
    # Runs the full TestDefault suite against the linked-list implementation.
    def setUp(self):
        self.String = LinkedListString
class TestRopeString(TestDefault):
    # Runs the full TestDefault suite against the rope implementation.
    def setUp(self):
        self.String = RopeString
class TestMWayString(TestDefault):
    # Runs the full TestDefault suite against the m-way tree implementation.
    def setUp(self):
        self.String = MWayString
| unlicense |
wenboyu2/yahoo-earnings-calendar | yahoo_earnings_calendar/scraper.py | 1 | 6689 | '''
Yahoo! Earnings Calendar scraper
'''
import datetime
import json
import logging
import requests
import time
BASE_URL = 'https://finance.yahoo.com/calendar/earnings'
BASE_STOCK_URL = 'https://finance.yahoo.com/quote'
RATE_LIMIT = 2000.0
SLEEP_BETWEEN_REQUESTS_S = 60 * 60 / RATE_LIMIT
OFFSET_STEP = 100
# Logging config
# NOTE(review): this configures the *root* logger at import time, which
# affects every consumer of this module - confirm that is intended.
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class YahooEarningsCalendar(object):
    """
    This is the class for fetching earnings data from Yahoo! Finance

    Scrapes the embedded ``root.App.main`` JSON blob from Yahoo pages;
    requests are throttled by ``delay`` seconds between calls.
    """

    def __init__(self, delay=SLEEP_BETWEEN_REQUESTS_S):
        self.delay = delay

    def _get_data_dict(self, url):
        """Fetch url and return the page's ``root.App.main`` JSON as a dict."""
        time.sleep(self.delay)  # rate limiting
        page = requests.get(url)
        page_content = page.content.decode(encoding='utf-8', errors='strict')
        # The data line ends with ';' - strip it before JSON parsing.
        page_data_string = [row for row in page_content.split(
            '\n') if row.startswith('root.App.main = ')][0][:-1]
        page_data_string = page_data_string.split('root.App.main = ', 1)[1]
        return json.loads(page_data_string)

    def get_next_earnings_date(self, symbol):
        """Gets the next earnings date of symbol

        Args:
            symbol: A ticker symbol

        Returns:
            Unix timestamp of the next earnings date

        Raises:
            Exception: When symbol is invalid or earnings date is not available
        """
        url = '{0}/{1}'.format(BASE_STOCK_URL, symbol)
        try:
            page_data_dict = self._get_data_dict(url)
            return page_data_dict['context']['dispatcher']['stores']['QuoteSummaryStore']['calendarEvents']['earnings']['earningsDate'][0]['raw']
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise Exception('Invalid Symbol or Unavailable Earnings Date')

    def earnings_on(self, date, offset=0, count=1):
        """Gets earnings calendar data from Yahoo! on a specific date.

        Args:
            date: A datetime.date instance representing the date of earnings data to be fetched.
            offset: Position to fetch earnings data from.
            count: Total count of earnings on date.

        Returns:
            An array of earnings calendar data on date given. E.g.,
            [
                {
                    "ticker": "AMS.S",
                    "companyshortname": "Ams AG",
                    "startdatetime": "2017-04-23T20:00:00.000-04:00",
                    "startdatetimetype": "TAS",
                    "epsestimate": null,
                    "epsactual": null,
                    "epssurprisepct": null,
                    "gmtOffsetMilliSeconds": 72000000
                },
                ...
            ]

        Raises:
            TypeError: When date is not a datetime.date object.
        """
        # Recursion terminator: we have paged past the reported total.
        if offset >= count:
            return []
        if not isinstance(date, datetime.date):
            raise TypeError(
                'Date should be a datetime.date object')
        date_str = date.strftime('%Y-%m-%d')
        logger.debug('Fetching earnings data for %s', date_str)
        dated_url = '{0}?day={1}&offset={2}&size={3}'.format(
            BASE_URL, date_str, offset, OFFSET_STEP)
        page_data_dict = self._get_data_dict(dated_url)
        stores_dict = page_data_dict['context']['dispatcher']['stores']
        earnings_count = stores_dict['ScreenerCriteriaStore']['meta']['total']
        # Recursively fetch more earnings on this date
        new_offset = offset + OFFSET_STEP
        more_earnings = self.earnings_on(date, new_offset, earnings_count)
        curr_offset_earnings = stores_dict['ScreenerResultsStore']['results']['rows']
        return curr_offset_earnings + more_earnings

    def earnings_between(self, from_date, to_date):
        """Gets earnings calendar data from Yahoo! in a date range.

        Args:
            from_date: A datetime.date instance representing the from-date (inclusive).
            to_date: A datetime.date instance representing the to-date (inclusive).

        Returns:
            An array of earnigs calendar data of date range. E.g.,
            [
                {
                    "ticker": "AMS.S",
                    "companyshortname": "Ams AG",
                    "startdatetime": "2017-04-23T20:00:00.000-04:00",
                    "startdatetimetype": "TAS",
                    "epsestimate": null,
                    "epsactual": null,
                    "epssurprisepct": null,
                    "gmtOffsetMilliSeconds": 72000000
                },
                ...
            ]

        Raises:
            ValueError: When from_date is after to_date.
            TypeError: When either from_date or to_date is not a datetime.date object.
        """
        if from_date > to_date:
            raise ValueError(
                'From-date should not be after to-date')
        if not (isinstance(from_date, datetime.date) and
                isinstance(to_date, datetime.date)):
            raise TypeError(
                'From-date and to-date should be datetime.date objects')
        earnings_data = []
        current_date = from_date
        delta = datetime.timedelta(days=1)
        while current_date <= to_date:
            earnings_data += self.earnings_on(current_date)
            current_date += delta
        return earnings_data

    def get_earnings_of(self, symbol):
        """Returns all the earnings dates of a symbol

        Args:
            symbol: A ticker symbol

        Returns:
            Array of all earnings dates with supplemental information

        Raises:
            Exception: When symbol is invalid or earnings date is not available
        """
        url = 'https://finance.yahoo.com/calendar/earnings?symbol={0}'.format(symbol)
        try:
            page_data_dict = self._get_data_dict(url)
            return page_data_dict["context"]["dispatcher"]["stores"]["ScreenerResultsStore"]["results"]["rows"]
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise Exception('Invalid Symbol or Unavailable Earnings Date')
if __name__ == '__main__':  # pragma: no cover
    # Manual smoke test / usage demo - hits the live Yahoo! endpoints.
    # datetime instances are accepted because datetime subclasses date.
    date_from = datetime.datetime.strptime(
        'Feb 1 2018  10:00AM', '%b %d %Y %I:%M%p')
    date_to = datetime.datetime.strptime(
        'Feb 4 2018  1:00PM', '%b %d %Y %I:%M%p')
    yec = YahooEarningsCalendar()
    print(yec.earnings_on(date_from))
    print(yec.earnings_between(date_from, date_to))
    # Returns the next earnings date of BOX in Unix timestamp
    print(yec.get_next_earnings_date('box'))
    # Returns a list of all available earnings of BOX
    print(yec.get_earnings_of('box'))
| mit |
TyberiusPrime/nikola | nikola/data/themes/base/messages/messages_fil.py | 28 | 1044 | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
# Filipino (fil) message catalog for the base theme. Every value is still
# empty, so each string falls back to its English source at render time.
MESSAGES = {
    "%d min remaining to read": "",
    "(active)": "",
    "Also available in:": "",
    "Archive": "",
    "Authors": "",
    "Categories": "",
    "Comments": "",
    "LANGUAGE": "",
    "Languages:": "",
    "More posts about %s": "",
    "Newer posts": "",
    "Next post": "",
    "No posts found.": "",
    "Nothing found.": "",
    "Older posts": "",
    "Original site": "",
    "Posted:": "",
    "Posts about %s": "",
    "Posts by %s": "",
    "Posts for year %s": "",
    "Posts for {month} {day}, {year}": "",
    "Posts for {month} {year}": "",
    "Previous post": "",
    "Publication date": "",
    "RSS feed": "",
    "Read in English": "",
    "Read more": "",
    "Skip to main content": "",
    "Source": "",
    "Subcategories:": "",
    "Tags and Categories": "",
    "Tags": "",
    "Uncategorized": "",
    "Updates": "",
    "Write your page here.": "",
    "Write your post here.": "",
    "old posts, page %d": "",
    "page %d": "",
}
| mit |
aakash-cr7/zulip | zerver/lib/response.py | 13 | 2084 | from __future__ import absolute_import
from django.http import HttpResponse, HttpResponseNotAllowed
import ujson
from typing import Optional, Any, Dict, List, Text
from zerver.lib.str_utils import force_bytes
class HttpResponseUnauthorized(HttpResponse):
    """401 response carrying a WWW-Authenticate challenge.

    ``www_authenticate`` selects the challenge scheme: None -> Basic,
    "session" -> Session; anything else is a programming error.
    """
    status_code = 401

    def __init__(self, realm, www_authenticate=None):
        # type: (Text, Optional[Text]) -> None
        HttpResponse.__init__(self)
        if www_authenticate is None:
            self["WWW-Authenticate"] = 'Basic realm="%s"' % (realm,)
        elif www_authenticate == "session":
            self["WWW-Authenticate"] = 'Session realm="%s"' % (realm,)
        else:
            raise AssertionError("Invalid www_authenticate value!")
def json_unauthorized(message, www_authenticate=None):
    # type: (Text, Optional[Text]) -> HttpResponse
    """Build a 401 response whose body is a standard JSON error payload."""
    response = HttpResponseUnauthorized("zulip", www_authenticate=www_authenticate)
    payload = ujson.dumps({"result": "error",
                           "msg": message}) + "\n"
    response.content = force_bytes(payload)
    return response
def json_method_not_allowed(methods):
    # type: (List[Text]) -> HttpResponse
    """Return a 405 Method Not Allowed response listing the allowed methods.

    Fix: the type comment previously claimed the function returns Text,
    but it returns the HttpResponseNotAllowed object (an HttpResponse).
    """
    resp = HttpResponseNotAllowed(methods)
    resp.content = force_bytes(ujson.dumps({"result": "error",
                                            "msg": "Method Not Allowed",
                                            "allowed_methods": methods}))
    return resp
def json_response(res_type="success", msg="", data=None, status=200):
    # type: (Text, Text, Optional[Dict[str, Any]], int) -> HttpResponse
    """Serialize the standard Zulip API payload shape into an HttpResponse.

    ``data`` entries are merged on top of the base ``result``/``msg`` keys.
    """
    payload = {"result": res_type, "msg": msg}
    if data is not None:
        payload.update(data)
    body = ujson.dumps(payload) + "\n"
    return HttpResponse(content=body,
                        content_type='application/json', status=status)
def json_success(data=None):
    # type: (Optional[Dict[str, Any]]) -> HttpResponse
    """Return a 200 JSON response with result "success" and optional extra data."""
    return json_response(data=data)

def json_error(msg, data=None, status=400):
    # type: (str, Optional[Dict[str, Any]], int) -> HttpResponse
    """Return a JSON error response (HTTP 400 by default) with the given message."""
    return json_response(res_type="error", msg=msg, data=data, status=status)
| apache-2.0 |
Krozark/Harpe-v1.0 | Harpe-server/Harpe-website/Kraggne/contrib/flatblocks/admin.py | 3 | 2508 | from django.contrib import admin
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from Kraggne.contrib.flatblocks.models import GenericFlatblock, GenericFlatblockList, TemplateBlock
from Kraggne.contrib.flatblocks.forms import GenericFlatblockListForm, GenericFlatblockForm, TempateBlockForm
class GenericFlatblockAdmin(admin.ModelAdmin):
    """Admin for GenericFlatblock with a changelist link to the related object."""
    list_display = (
        'related_object_changelink',
        'slug'
    )
    list_display_links = ('slug',)
    form = GenericFlatblockForm

    def related_object_changelink(self, obj):
        """Render an HTML link to the admin change view of the related object."""
        return '<a href="%s">%s - %s</a>' % (
            self.generate_related_object_admin_link(obj.content_object),
            obj.slug,
            obj.content_object.__unicode__(),
        )
    # allow_tags lets the raw <a> markup through Django's admin escaping.
    related_object_changelink.allow_tags = True
    related_object_changelink.short_description = _('change related object')

    def generate_related_object_admin_link(self, related_object):
        """Build a relative admin URL (app/model/pk) for ``related_object``."""
        return '../../%s/%s/%s/' % (
            related_object._meta.app_label,
            related_object._meta.module_name,
            related_object.pk
        )

    def change_view(self, request, object_id, extra_context=None):
        """
        Haven't figured out how to edit the related object as an inline.
        This template adds a link to the change view of the related
        object..
        """
        related_object = self.model.objects.get(pk=object_id).content_object
        c = {
            'admin_url': self.generate_related_object_admin_link(related_object),
            'related_object': related_object,
            'related_app_label': related_object._meta.app_label,
            'related_module_name': related_object._meta.module_name,
        }
        c.update(extra_context or {})
        # Custom template renders the forward link built from ``c``.
        self.change_form_template = 'admin/flatblocks/change_form_forward.html'
        return super(GenericFlatblockAdmin, self).change_view(request, object_id, extra_context=c)

admin.site.register(GenericFlatblock, GenericFlatblockAdmin)
class GenericFlatblockListAdmin(admin.ModelAdmin):
    """Admin for GenericFlatblockList; shows the target model next to the slug."""
    list_display = ('modelname', 'slug')
    form = GenericFlatblockListForm

    def modelname(self, obj):
        """Display the content type the list points at."""
        return "%s" % obj.content_type

admin.site.register(GenericFlatblockList, GenericFlatblockListAdmin)
class TempateBlockAdmin(admin.ModelAdmin):
    """Admin for TemplateBlock; the slug is pre-filled from the name field."""
    list_display = ('slug', 'template_path')
    prepopulated_fields = {'slug': ('name',)}
    form = TempateBlockForm

admin.site.register(TemplateBlock, TempateBlockAdmin)
| bsd-2-clause |
marc-sensenich/ansible | lib/ansible/modules/network/f5/bigip_cli_script.py | 14 | 13966 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_cli_script
short_description: Manage CLI scripts on a BIG-IP
description:
- Manages CLI scripts on a BIG-IP. CLI scripts, otherwise known as tmshell scripts
or TMSH scripts allow you to create custom scripts that can run to manage objects
within a BIG-IP.
version_added: 2.7
options:
name:
description:
- Specifies the name of the script.
required: True
content:
description:
- The content of the script.
- This parameter is typically used in conjunction with Ansible's C(file), or
template lookup plugins. If this sounds foreign to you, see the examples
in this documentation.
description:
description:
- Description of the cli script.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the script exists.
- When C(absent), ensures the script is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a cli script from an existing file
bigip_cli_script:
name: foo
content: "{{ lookup('file', '/absolute/path/to/cli/script.tcl') }}"
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a cli script from a jinja template representing a cli script
bigip_cli_script:
name: foo
content: "{{ lookup('template', '/absolute/path/to/cli/script.tcl') }}"
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: str
sample: Foo is bar
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the bigip_cli_script module."""

    # Maps BIG-IP REST field names to this module's parameter names.
    api_map = {
        'apiAnonymous': 'content',
        'scriptChecksum': 'checksum',
    }

    # Attributes sent to the device on create/update.
    api_attributes = [
        'apiAnonymous',
        'description',
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'content',
    ]

    # Values compared between desired and current state to detect changes.
    updatables = [
        'description',
        'content',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def ignore_verification(self):
        """TMSH script syntax verification is always skipped."""
        return "true"

    @property
    def content(self):
        """Return the on-device script body with surrounding whitespace removed.

        Fix: guard against a missing/empty apiAnonymous field, which would
        otherwise raise AttributeError on ``None.strip()``; this mirrors the
        None check already present in ModuleParameters.content.
        """
        if self._values['content'] is None:
            return None
        return self._values['content'].strip()
class ModuleParameters(Parameters):
    """Parameters supplied by the Ansible task (the desired state)."""

    @property
    def ignore_verification(self):
        # Verification is always skipped when uploading the script.
        return "true"

    @property
    def content(self):
        """Return the desired script body, stripped, or None when unset."""
        raw = self._values['content']
        return None if raw is None else raw.strip()

    @property
    def description(self):
        """Normalize the description: None passes through; 'none'/'' mean clear."""
        desc = self._values['description']
        if desc is None:
            return None
        if desc in ['none', '']:
            return ''
        return desc
class Changes(Parameters):
    def to_return(self):
        """Return a filtered dict of the returnable parameters.

        The broad except is deliberate: reporting changes is best-effort
        and must never fail the module run.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    # Changes shaped for sending to the BIG-IP REST API.
    pass

class ReportableChanges(Changes):
    # Changes shaped for reporting back to the Ansible user.
    pass
class Difference(object):
    """Compares desired (want) and current (have) parameters, reporting the
    value that needs to change per attribute, or None when nothing changes.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None when unchanged.

        A property on this class (special-case logic) wins; otherwise a
        plain inequality comparison is used.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic rule: report the desired value when the current state
        # lacks the attribute or holds a different value.
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            return desired
        if desired != current:
            return desired
        return None

    @property
    def content(self):
        # A missing desired content means "leave as-is".
        if self.want.content is None:
            return None
        if self.have.content is None or self.want.content != self.have.content:
            return self.want.content
        return None

    @property
    def description(self):
        if self.want.description is None:
            return None
        if self.have.description is None:
            # Clearing an already-absent description is a no-op.
            return None if self.want.description == '' else self.want.description
        if self.want.description != self.have.description:
            return self.want.description
        return None
class ModuleManager(object):
    """Drives the create/update/delete lifecycle of a cli script on a BIG-IP."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        """Stage every non-None desired value as a change (create path)."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff want vs have; stage differences. Returns True when any differ."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when the device state differs from the desired state."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Main entry point: apply the desired state and build the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        """Forward any queued deprecation warnings to Ansible."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the script exists with the desired content/description."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True when the script is present on the device."""
        uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def update(self):
        """PATCH the device when needed. Returns True when a change was made."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        # Update any missing params
        #
        # The cli/script API is kinda weird in that it wont let us individually
        # PATCH the description. We appear to need to include the content otherwise
        # we get errors about us trying to replace procs that are needed by other
        # scripts, ie, the script we're trying to update.
        params = self.changes.api_params()
        if 'description' in params and 'content' not in params:
            self.changes.update({'content': self.have.content})
        if 'content' in params and 'description' not in params:
            self.changes.update({'description': self.have.description})
        self.update_on_device()
        return True

    def remove(self):
        """Delete the script, verifying it is actually gone afterwards."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Create the script on the device. Returns True (a change occurred)."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """POST the new script; raise F5ModuleError on a 400/403 reply."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/cli/script/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH the staged changes; raise F5ModuleError on a 400 reply."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def absent(self):
        """Ensure the script is removed. Returns True when a delete happened."""
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """DELETE the script; returns True on HTTP 200, None otherwise."""
        uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True

    def read_current_from_device(self):  # lgtm [py/similar-function]
        """GET the current script state and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/cli/script/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""

    def __init__(self):
        self.supports_check_mode = True
        # Module-specific options; merged over the shared F5 argument spec.
        argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            content=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the module, run the manager, report results."""
    spec = ArgumentSpec()

    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )

    client = F5RestClient(**module.params)

    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        # Always discard the REST auth token, on success and on failure.
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        cleanup_tokens(client)
        fail_json(module, ex, client)

if __name__ == '__main__':
    main()
| gpl-3.0 |
tamland/actors | actors/utils/ask.py | 2 | 1341 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Thomas Amland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from actors.future import Promise
from actors.ref import ActorRef
class PromiseActorRef(ActorRef):
    """Actor reference that fulfils a Promise with the first message it receives.

    Used as the reply-to sink for :func:`ask`.
    """
    def __init__(self):
        super(PromiseActorRef, self).__init__(None)
        self.promise = Promise()

    def tell(self, message, sender=None):
        # Resolve the promise with the reply; the sender is irrelevant here.
        self.promise.complete(message)
def ask(actor, message):
    """
    Send a message to `actor` and return a :class:`Future` holding a possible
    reply.

    To receive a result, the actor MUST send a reply to `sender`.

    :param actor: The actor to send the message to.
    :type actor: :class:`ActorRef`.
    :param message: The message to deliver.
    :type message: Any
    :return: A future holding the result.
    """
    sender = PromiseActorRef()
    actor.tell(message, sender)
    return sender.promise.future
| gpl-3.0 |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/lib_openshift/library/oc_label.py | 6 | 59262 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/label -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_label
short_description: Create, modify, and idempotently manage openshift labels.
description:
- Modify openshift labels programmatically.
options:
state:
description:
- State controls the action that will be taken with resource
- 'present' will create or update and object to the desired state
- 'absent' will ensure certain labels are removed
- 'list' will read the labels
- 'add' will insert labels to the already existing labels
default: present
choices: ["present", "absent", "list", "add"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
kind:
description:
- The kind of object that can be managed.
default: node
choices:
- node
- pod
- namespace
aliases: []
labels:
description:
- A list of labels for the resource.
- Each list consists of a key and a value.
- eg, {'key': 'foo', 'value': 'bar'}
required: false
default: None
aliases: []
selector:
description:
- The selector to apply to the resource query
required: false
default: None
aliases: []
author:
- "Joel Diaz <jdiaz@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: Add a single label to a node's existing labels
oc_label:
name: ip-172-31-5-23.ec2.internal
state: add
kind: node
labels:
- key: logging-infra-fluentd
value: 'true'
- name: remove a label from a node
oc_label:
name: ip-172-31-5-23.ec2.internal
state: absent
kind: node
labels:
- key: color
value: blue
- name: Ensure node has these exact labels
oc_label:
name: ip-172-31-5-23.ec2.internal
state: present
kind: node
labels:
- key: color
value: green
- key: type
value: master
- key: environment
value: production
'''
# -*- -*- -*- End included fragment: doc/label -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    ''' Exception class for Yedit.

    Raised for unusable keys, unparsable YAML/JSON content, or write failures.
    '''
    pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        """Initialize from a filename and/or in-memory content.

        ``load()`` is invoked immediately, so ``yaml_dict`` is populated (or
        defaulted to {}) before the constructor returns.
        """
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' Remove the data stored at location ``key``.

        An empty key clears the whole document. Returns True on removal,
        None when the key is invalid or the path does not resolve.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        # Walk every path component except the last one.
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Add/replace ``item`` at dotted-path ``key`` inside ``data``.

        Intermediate dicts are created as needed; an empty key replaces the
        whole document. Returns the value that was stored, or None when the
        key is invalid. Raises YeditException when the path cannot be
        created (e.g. indexing into a non-existent array).
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        # Walk (and create where possible) every component except the last.
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk.  This helps with mocking. '''
        # Write to a sidecar file first, then rename: readers never observe
        # a half-written file.
        tmp_filename = filename + '.yedit'

        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)

        os.rename(tmp_filename, filename)

    def write(self):
        ''' Serialize yaml_dict back to self.filename.

        Optionally copies the old file to ``<filename>.orig`` when backup
        is enabled. Returns (True, yaml_dict).
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')

        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')

        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass

        # Try to use RoundTripDumper if supported (ruamel); fall back to pyyaml.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

        return (True, self.yaml_dict)

    def read(self):
        ''' Return the raw text of self.filename, or None when unreadable. '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None

        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()

        return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
    def load(self, content_type='yaml'):
        ''' Parse file/string content into yaml_dict and return it.

        In-memory ``self.content`` (dict or str) takes precedence over the
        file's contents. Raises YeditException on unparsable YAML.
        '''
        contents = self.read()

        if not contents and not self.content:
            return None

        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content

        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

                # Try to use RoundTripLoader if supported (ruamel); else pyyaml.
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)

                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass

            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))

        return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
    def pop(self, path, key_or_item):
        ''' Remove a key from a dict or an item from a list at ``path``.

        Returns a (changed, document) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)

            entry.pop(ind)
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    def delete(self, path):
        ''' Remove the entry at ``path``. Returns a (changed, document) tuple. '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            return (False, self.yaml_dict)

        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)

        return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' Check whether ``value`` exists at ``path``.

        Lists test membership; dicts compare a dict value key-by-key or
        test key membership; anything else compares for equality.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, list):
            if value in entry:
                return True
            return False

        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # NOTE(review): entry[key] raises KeyError when a key of
                # ``value`` is absent from ``entry`` — TODO confirm intended.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval

            return value in entry

        return entry == value

    def append(self, path, value):
        ''' Append ``value`` to the list at ``path``, creating the list when
        absent. Returns a (changed, document) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)

        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' Update the entry at ``path``: merge into a dict, or replace/append
        within a list (located by ``curr_value`` or ``index``).

        Returns a (changed, document) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))

            entry.update(value)
            return (True, self.yaml_dict)

        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)

            elif index is not None:
                ind = index

            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)

            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)

            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' Set ``value`` at ``path``, working on a copy so a failed add
        leaves the document untouched. Returns a (changed, document) tuple.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None

        if entry == value:
            return (False, self.yaml_dict)

        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)

        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)

            return (False, self.yaml_dict)

        self.yaml_dict = tmp_copy

        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' Set ``value`` at ``path`` only when the backing file does not yet
        exist. Returns a (changed, document) tuple.
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)

            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass

            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)

        return (False, self.yaml_dict)
    @staticmethod
    def get_curr_value(invalue, val_type):
        ''' Deserialize ``invalue`` according to ``val_type`` (yaml/json).

        Any other val_type returns the input unchanged.
        '''
        if invalue is None:
            return None

        curr_value = invalue
        if val_type == 'yaml':
            # NOTE(review): bare yaml.load without an explicit Loader is
            # unsafe on untrusted input — flagged, behavior unchanged.
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)

        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        ''' Coerce a string value according to the requested ``vtype``.

        'bool' validates against the accepted true/false spellings; 'str'
        suppresses YAML coercion; otherwise non-empty strings are YAML-loaded
        so numbers/booleans come back typed.
        '''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']

        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)

        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

        return inc_value
@staticmethod
def process_edits(edits, yamlfile):
    '''run through a list of edits and process them one-by-one

    Each edit is a dict with at least 'key' and 'value'; optional
    'action' ('update'/'append'), 'value_type', 'curr_value',
    'curr_value_format' and 'index' keys refine how the edit is
    applied.  Returns {'changed': bool, 'results': [...]} listing only
    the edits that actually modified the document.
    '''
    results = []
    for edit in edits:
        value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
        if edit.get('action') == 'update':
            # 'update' replaces an existing value (optionally located by
            # current value or index) rather than setting/appending.
            # pylint: disable=line-too-long
            curr_value = Yedit.get_curr_value(
                Yedit.parse_value(edit.get('curr_value')),
                edit.get('curr_value_format'))

            rval = yamlfile.update(edit['key'],
                                   value,
                                   edit.get('index'),
                                   curr_value)

        elif edit.get('action') == 'append':
            rval = yamlfile.append(edit['key'], value)

        else:
            rval = yamlfile.put(edit['key'], value)

        # rval is a (changed, document) tuple; record only real changes.
        if rval[0]:
            results.append({'key': edit['key'], 'edit': rval[1]})

    return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
    '''perform the idempotent crud operations

    Implements the module states:
      list    - return the document (or the value at params['key'])
      absent  - remove a key, or a value from a list when 'update' is set
      present - set/update keys from key/value or from an 'edits' list
    Returns an ansible-style result dict.
    '''
    yamlfile = Yedit(filename=params['src'],
                     backup=params['backup'],
                     separator=params['separator'])

    state = params['state']

    if params['src']:
        rval = yamlfile.load()

        # A missing or unparsable file is only acceptable when we are
        # about to create content (state == 'present').
        if yamlfile.yaml_dict is None and state != 'present':
            return {'failed': True,
                    'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it is has correct permissions, and is valid yaml.'}

    if state == 'list':
        if params['content']:
            # Explicit content overrides whatever was loaded from 'src'.
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        if params['key']:
            rval = yamlfile.get(params['key'])

        return {'changed': False, 'result': rval, 'state': state}

    elif state == 'absent':
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        if params['update']:
            # Remove one matching value from the list at 'key'.
            rval = yamlfile.pop(params['key'], params['value'])
        else:
            rval = yamlfile.delete(params['key'])

        if rval[0] and params['src']:
            yamlfile.write()

        return {'changed': rval[0], 'result': rval[1], 'state': state}

    elif state == 'present':
        # check if content is different than what is in the file
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])

            # We had no edits to make and the contents are the same
            if yamlfile.yaml_dict == content and \
               params['value'] is None:
                return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

            yamlfile.yaml_dict = content

        # If we were passed a key, value then
        # we enapsulate it in a list and process it
        # Key, Value passed to the module : Converted to Edits list #
        edits = []
        _edit = {}
        if params['value'] is not None:
            _edit['value'] = params['value']
            _edit['value_type'] = params['value_type']
            _edit['key'] = params['key']

            if params['update']:
                _edit['action'] = 'update'
                _edit['curr_value'] = params['curr_value']
                _edit['curr_value_format'] = params['curr_value_format']
                _edit['index'] = params['index']

            elif params['append']:
                _edit['action'] = 'append'

            edits.append(_edit)

        elif params['edits'] is not None:
            edits = params['edits']

        if edits:
            results = Yedit.process_edits(edits, yamlfile)

            # if there were changes and a src provided to us we need to write
            if results['changed'] and params['src']:
                yamlfile.write()

            return {'changed': results['changed'], 'result': results['results'], 'state': state}

        # no edits to make
        if params['src']:
            # pylint: disable=redefined-variable-type
            rval = yamlfile.write()
            return {'changed': rval[0],
                    'result': rval[1],
                    'state': state}

        # We were passed content but no src, key or value, or edits. Return contents in memory
        return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
    # NOTE: 'Unkown' typo is preserved; it is a user-visible string.
    return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    """Raised when an ``oc`` invocation or its wrapper logic fails."""
# Directories searched in addition to $PATH.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep)
    search_dirs = search_dirs + ADDITIONAL_PATH_LOOKUPS

    found = 'oc'
    try:
        # Python 3: let shutil.which do the scan.
        resolved = shutil.which(found, path=os.pathsep.join(search_dirs))
    except AttributeError:
        # Python 2 has no shutil.which; walk the directories by hand.
        for directory in search_dirs:
            candidate = os.path.join(directory, found)
            if os.path.exists(candidate):
                found = candidate
                break
    else:
        if resolved is not None:
            found = resolved

    # Fall back to the bare name and let the OS resolve it at exec time.
    return found
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

    Thin wrapper around the ``oc`` / ``oc adm`` binaries.  All helpers
    funnel through :meth:`openshift_cmd`, which builds the argv, runs the
    subprocess and normalises the result into a dict.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # Work on a private copy of the kubeconfig so parallel runs do
        # not interfere with each other.
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the live object, applies the key/value pairs from
        *content* via Yedit, and calls ``oc replace`` only if something
        actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()

        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

        A selector takes precedence over a name; one of the two is
        mandatory.
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # '-f -' reads the template from stdin (input_data below).
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            # Render KEY=VALUE pairs; single quotes are normalised to
            # double quotes for the CLI.
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Builds the argv, runs it, and returns a dict with at least
        'returncode', 'cmd' and (when output is requested) 'results'.
        stderr/stdout are attached on error.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE: 'emtpy' typo is preserved from upstream; 'none'/'emtpy'
        # namespace strings are treated as "no namespace".
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    # NOTE(review): this message matches Python 2's json
                    # error; on Python 3 the text differs, so 'err' may
                    # not be set for decode failures — confirm.
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules

    Stateless helpers: temp-file management (with atexit cleanup),
    result-list searching, version-string munging and a recursive
    definition comparison used for idempotency checks.
    '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''

        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            # Unknown type: write the raw data as-is.
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])

        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False so the name remains usable after the handle closes.
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output

        Parses `oc version` output lines like "oc v3.6.0" into
        {'oc': 'v3.6.0', ...}.
        '''

        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        # NOTE(review): assumes an 'oc' line is always present — confirm.
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings

        Derives '<tech>_numeric' (e.g. '3.3.0.33') and '<tech>_short'
        (e.g. '3.3') entries from each raw version string.
        '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                version = version[1:]  # Remove the 'v' prefix
                versions_dict[tech + '_numeric'] = version.split('+')[0]
                # "3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm

        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Returns True when every non-skipped key in *result_def* matches
        *user_def* (recursing into nested dicts and zipped lists).
        '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                # Compare element-wise; dict elements recurse.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config

    Holds a resource name/namespace/kubeconfig plus an options hash and
    renders the options as ``--key=value`` CLI parameters.
    '''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for opt_name in sorted(self.config_options.keys()):
            opt = self.config_options[opt_name]
            # Skip options that are excluded or carry no value.
            if not opt['include']:
                continue
            if opt['value'] is None and not isinstance(opt['value'], int):
                continue
            if opt_name == ascommalist:
                # Render a dict value as sorted comma-separated k=v pairs.
                rendered = ','.join('{}={}'.format(kk, vv)
                                    for kk, vv in sorted(opt['value'].items()))
            else:
                rendered = opt['value']
            params.append('--{}={}'.format(opt_name.replace('_', '-'), rendered))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_label.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCLabel(OpenShiftCLI):
    ''' Class to wrap the oc command line tools

    Manages labels on a single named resource or on a selector-matched
    set of resources; self.labels is a list of {'key': .., 'value': ..}
    dicts supplied by the caller.
    '''

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 name,
                 namespace,
                 kind,
                 kubeconfig,
                 labels=None,
                 selector=None,
                 verbose=False):
        ''' Constructor for OCLabel '''
        super(OCLabel, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = name
        self.kind = kind
        self.labels = labels
        self._curr_labels = None
        self.selector = selector

    @property
    def current_labels(self):
        '''property for the current labels

        Lazily fetched via get(); a list with one label-dict per
        matched object.
        '''
        if self._curr_labels is None:
            results = self.get()
            self._curr_labels = results['labels']

        return self._curr_labels

    @current_labels.setter
    def current_labels(self, data):
        '''property setter for current labels'''
        self._curr_labels = data

    def compare_labels(self, host_labels):
        ''' compare incoming labels against current labels

        True only when every user label exists in *host_labels* with the
        same value.
        '''
        for label in self.labels:
            if label['key'] not in host_labels or \
               label['value'] != host_labels[label['key']]:
                return False
        return True

    def all_user_labels_exist(self):
        ''' return whether all the labels already exist '''
        for current_host_labels in self.current_labels:
            rbool = self.compare_labels(current_host_labels)
            if not rbool:
                return False
        return True

    def any_label_exists(self):
        ''' return whether any single label already exists

        Matches on key only; the value is not compared.
        '''
        for current_host_labels in self.current_labels:
            for label in self.labels:
                if label['key'] in current_host_labels:
                    return True
        return False

    def get_user_keys(self):
        ''' go through list of user key:values and return all keys '''
        user_keys = []
        for label in self.labels:
            user_keys.append(label['key'])

        return user_keys

    def get_current_label_keys(self):
        ''' collect all the current label keys '''
        current_label_keys = []
        for current_host_labels in self.current_labels:
            for key in current_host_labels.keys():
                current_label_keys.append(key)

        # de-duplicate across the matched objects
        return list(set(current_label_keys))

    def get_extra_current_labels(self):
        ''' return list of labels that are currently stored, but aren't
            in user-provided list '''
        extra_labels = []
        user_label_keys = self.get_user_keys()
        current_label_keys = self.get_current_label_keys()

        for current_key in current_label_keys:
            if current_key not in user_label_keys:
                extra_labels.append(current_key)

        return extra_labels

    def extra_current_labels(self):
        ''' return whether there are labels currently stored, that user
            hasn't directly provided '''
        extra_labels = self.get_extra_current_labels()
        if len(extra_labels) > 0:
            return True

        return False

    def replace(self):
        ''' replace currently stored labels with user provided labels

        Issues a single `oc label` call that removes extra labels
        ("key-") and adds/overwrites the user-provided ones.
        '''
        cmd = self.cmd_template()

        # First delete any extra labels
        extra_labels = self.get_extra_current_labels()
        if len(extra_labels) > 0:
            for label in extra_labels:
                cmd.append("{}-".format(label))

        # Now add/modify the user-provided label list
        if len(self.labels) > 0:
            for label in self.labels:
                cmd.append("{}={}".format(label['key'], label['value']))

        # --overwrite for the case where we are updating existing labels
        cmd.append("--overwrite")
        return self.openshift_cmd(cmd)

    def get(self):
        '''return label information

        Replaces 'results' in the oc response with
        {'labels': [...], 'item_count': N}.
        '''

        result_dict = {}
        label_list = []

        if self.name:
            result = self._get(resource=self.kind, name=self.name, selector=self.selector)

            if result['results'][0] and 'labels' in result['results'][0]['metadata']:
                label_list.append(result['results'][0]['metadata']['labels'])
            else:
                label_list.append({})

        else:
            # No name: collect labels from every item matching the selector.
            result = self._get(resource=self.kind, selector=self.selector)

            for item in result['results'][0]['items']:
                if 'labels' in item['metadata']:
                    label_list.append(item['metadata']['labels'])
                else:
                    label_list.append({})

        self.current_labels = label_list
        result_dict['labels'] = self.current_labels
        result_dict['item_count'] = len(self.current_labels)
        result['results'] = result_dict

        return result

    def cmd_template(self):
        ''' boilerplate oc command for modifying lables on this object '''
        # let's build the cmd with what we have passed in
        cmd = ["label", self.kind]

        if self.selector:
            cmd.extend(["--selector", self.selector])
        elif self.name:
            cmd.extend([self.name])

        return cmd

    def add(self):
        ''' add labels '''
        cmd = self.cmd_template()

        for label in self.labels:
            cmd.append("{}={}".format(label['key'], label['value']))

        cmd.append("--overwrite")

        return self.openshift_cmd(cmd)

    def delete(self):
        '''delete the labels'''
        cmd = self.cmd_template()
        for label in self.labels:
            # trailing '-' tells oc to remove the label
            cmd.append("{}-".format(label['key']))

        return self.openshift_cmd(cmd)

    # pylint: disable=too-many-branches,too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode=False):
        ''' run the idempotent ansible code

            prams comes from the ansible portion of this module
            check_mode: does the module support check mode. (module.check_mode)
        '''
        oc_label = OCLabel(params['name'],
                           params['namespace'],
                           params['kind'],
                           params['kubeconfig'],
                           params['labels'],
                           params['selector'],
                           verbose=params['debug'])

        state = params['state']
        name = params['name']
        selector = params['selector']

        api_rval = oc_label.get()

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': "list"}

        #######
        # Add
        #######
        if state == 'add':
            if not (name or selector):
                return {'failed': True,
                        'msg': "Param 'name' or 'selector' is required if state == 'add'"}
            # only touch the API when something is actually missing
            if not oc_label.all_user_labels_exist():
                if check_mode:
                    return {'changed': False, 'msg': 'Would have performed an addition.'}
                api_rval = oc_label.add()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': "add"}

            return {'changed': False, 'state': "add"}

        ########
        # Delete
        ########
        if state == 'absent':
            if not (name or selector):
                return {'failed': True,
                        'msg': "Param 'name' or 'selector' is required if state == 'absent'"}

            if oc_label.any_label_exists():
                if check_mode:
                    return {'changed': False, 'msg': 'Would have performed a delete.'}

                api_rval = oc_label.delete()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': "absent"}

            return {'changed': False, 'state': "absent"}

        if state == 'present':
            ########
            # Update
            ########
            if not (name or selector):
                return {'failed': True,
                        'msg': "Param 'name' or 'selector' is required if state == 'present'"}
            # if all the labels passed in don't already exist
            # or if there are currently stored labels that haven't
            # been passed in
            if not oc_label.all_user_labels_exist() or \
               oc_label.extra_current_labels():
                if check_mode:
                    return {'changed': False, 'msg': 'Would have made changes.'}

                api_rval = oc_label.replace()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_label.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': "present"}

            return {'changed': False, 'results': api_rval, 'state': "present"}

        return {'failed': True,
                'changed': False,
                'results': 'Unknown state passed. %s' % state,
                'state': "unknown"}
# -*- -*- -*- End included fragment: class/oc_label.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_label.py -*- -*- -*-
def main():
    ''' ansible oc module for labels

    Declares the module's argument spec, delegates to
    OCLabel.run_ansible and translates the result dict into
    exit_json/fail_json.
    '''

    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list', 'add']),
            debug=dict(default=False, type='bool'),
            kind=dict(default='node', type='str',
                      choices=['node', 'pod', 'namespace']),
            name=dict(default=None, type='str'),
            namespace=dict(default=None, type='str'),
            labels=dict(default=None, type='list'),
            selector=dict(default=None, type='str'),
        ),
        supports_check_mode=True,
        # NOTE(review): parentheses here do not make a tuple — this
        # passes a single ['name', 'selector'] list; confirm Ansible
        # accepts this shape for mutually_exclusive.
        mutually_exclusive=(['name', 'selector']),
    )

    results = OCLabel.run_ansible(module.params, module.check_mode)

    if 'failed' in results:
        module.fail_json(**results)

    module.exit_json(**results)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_label.py -*- -*- -*-
| apache-2.0 |
tornadozou/tensorflow | tensorflow/contrib/saved_model/__init__.py | 109 | 1411 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Re-export the public signature_def_utils helpers at package level.
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.saved_model.python.saved_model.signature_def_utils import *
# pylint: enable=unused-import,wildcard-import,line-too-long

from tensorflow.python.util.all_util import remove_undocumented

# Symbols kept visible after remove_undocumented() prunes this module's
# namespace.
_allowed_symbols = ["get_signature_def_by_key"]

remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
tafaRU/l10n-switzerland | __unported__/l10n_ch_sepa/base_sepa/msg_sepa.py | 4 | 3006 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2011 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from StringIO import StringIO
from openerp.osv import orm
from openerp.tools.translate import _
# ISO 8601 date-time format used in SEPA ISO 20022 XML messages.
SEPA_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
class MsgSEPA(object):
    '''Base class for SEPA ISO 20022 payment messages.

    Holds the generated XML payload and the path of the XSD schema used
    to validate it.  NOTE: this file is Python 2 only (old-style
    ``except Exc, e`` syntax below).
    '''

    # Path of the XSD schema file used for validation.
    _xsd_path = None
    # Generated XML payload (unicode string).
    _xml_data = None

    def __init__(self, xsd_path=None):
        '''If no xsd is defined, we consider that we will use the basic
        iso20022 XSD file
        '''
        self._xsd_path = xsd_path

    def _is_xsd_valid(self):
        '''Check the validity of XML data with an XML Schema

        Return True if it is valid
        Raise an error if no XML data have been defined
        Raise an error if XSD file specified is not found'''
        if not self._xml_data:
            raise orm.except_orm(_('Error'), _('No XML data found'))

        try:
            f_xsd = open(self._xsd_path)
        except:
            # NOTE(review): bare except also hides e.g. KeyboardInterrupt;
            # the intent is "XSD file could not be opened".
            raise orm.except_orm(_('Error'), _('No XSD file found'))

        parser = etree.XMLParser()
        xmlschema_doc = etree.parse(f_xsd)
        xmlschema = etree.XMLSchema(xmlschema_doc)
        # Re-parse the generated payload so it can be validated.
        xml_data = etree.parse(StringIO(self._xml_data.encode('utf-8')),
                               parser=parser)
        try:
            xmlschema.assertValid(xml_data)
        except etree.DocumentInvalid, e:
            raise orm.except_orm(
                _('XML is not Valid !'),
                _('The document validation has raised following errors: \n%s')
                % e.message)

        return True
class MsgSEPAFactory(object):
    """This class is a factory that creates SEPA message in order to allow
    redefinition of those message to match with different country
    implementations
    """
    # Maps a registry key to a ``(message_class, default_kwargs)`` pair.
    _register = {}

    @classmethod
    def register_class(cls, key, class_name, **args):
        """Register ``class_name`` under ``key`` with default kwargs ``args``."""
        cls._register[key] = (class_name, args)

    @classmethod
    def get_instance(cls, key, **args):
        """Instantiate the class registered under ``key``.

        Keyword arguments given here override the ones stored at
        registration time.
        """
        class_name, cargs = cls._register[key]
        # NOTE: the original ``dict(cargs.items() + args.items())`` breaks on
        # Python 3, where dict views do not support ``+``. Copy-then-update
        # keeps the same precedence (call-time args win) on both versions.
        merged = dict(cargs)
        merged.update(args)
        return class_name(**merged)

    @classmethod
    def has_instance(cls, key):
        """Return True if a class has been registered under ``key``."""
        return key in cls._register
| agpl-3.0 |
linsalrob/EdwardsLab | bin/resample.py | 1 | 1678 | """
Resample 80% of the data and plot a graph of how many new things we see. This is to answer an argument with Geni
"""
import os
import sys
import argparse
import matplotlib.pyplot as plt
from random import shuffle
def resample(size, percent, tries):
    """Resample ``percent`` of ``size`` items ``tries`` times and plot the
    cumulative count of previously-unseen items after each iteration.

    :param size: size of the dataset to draw from
    :param percent: fraction to resample each iteration; values > 1 are
        interpreted as percentages and divided by 100
    :param tries: number of resampling iterations
    """
    if percent > 1:
        percent /= 100
    counts = _cumulative_new_counts(size, percent, tries)
    # Plot the number of new things seen as a cumulative total.
    plt.plot(counts)
    plt.ylabel('New numbers seen')
    plt.xlabel('Iteration')
    plt.show()


def _cumulative_new_counts(size, percent, tries):
    """Return the cumulative number of never-before-seen items after each
    of ``tries`` random draws of ``int(size * percent)`` items."""
    data = list(range(size))
    seen = set()
    iterations = []
    # The draw size is loop-invariant, so compute it once.
    resampsize = int(size * percent)
    for _ in range(tries):
        # Randomize the array, then treat its first `resampsize` entries
        # as this iteration's sample.
        shuffle(data)
        new = 0
        for value in data[:resampsize]:
            if value not in seen:
                seen.add(value)
                new += 1
        iterations.append(new if not iterations else new + iterations[-1])
    return iterations
plt.show()
if __name__ == '__main__':
    # Command-line interface: -s dataset size, -p resample fraction,
    # -i number of resampling iterations.
    parser = argparse.ArgumentParser(description="Resample a list of numbers to see the new things seen")
    parser.add_argument('-s', help='Size of array to resample from (size of dataset)', type=int, required=True)
    parser.add_argument('-p', help='Percent to resample at each iteration (float)', type=float, required=True)
    parser.add_argument('-i', help='Number of iterations to run', type=int, required=True)
    args = parser.parse_args()
    resample(args.s, args.p, args.i)
kyuupichan/electrum | lib/qrscanner.py | 1 | 3449 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import ctypes
# Pick the platform-specific shared-library name for zbar.
if sys.platform == 'darwin':
    name = 'libzbar.dylib'
elif sys.platform in ('windows', 'win32'):
    name = 'libzbar-0.dll'
else:
    name = 'libzbar.so.0'
try:
    libzbar = ctypes.cdll.LoadLibrary(name)
except BaseException:
    # zbar is optional: leave a sentinel and report the problem later,
    # when scan_barcode() is actually called.
    libzbar = None
def scan_barcode(device='', timeout=-1, display=True, threaded=False, try_again=True):
    """Scan a single QR/barcode with zbar and return its payload as text.

    :param device: camera device path ('' lets zbar pick a default)
    :param timeout: how long zbar waits for a symbol (-1 waits forever)
    :param display: whether zbar shows its preview window
    :param threaded: passed through to zbar_processor_create
    :param try_again: retry once when initialization fails (see below)
    :return: the decoded symbol data, or None if nothing was scanned
    """
    if libzbar is None:
        raise RuntimeError("Cannot start QR scanner; zbar not available.")
    # Declare return types up front: ctypes defaults to int, which would
    # truncate pointer return values on 64-bit platforms.
    libzbar.zbar_symbol_get_data.restype = ctypes.c_char_p
    libzbar.zbar_processor_create.restype = ctypes.POINTER(ctypes.c_int)
    libzbar.zbar_processor_get_results.restype = ctypes.POINTER(ctypes.c_int)
    libzbar.zbar_symbol_set_first_symbol.restype = ctypes.POINTER(ctypes.c_int)
    proc = libzbar.zbar_processor_create(threaded)
    libzbar.zbar_processor_request_size(proc, 640, 480)
    if libzbar.zbar_processor_init(proc, device.encode('utf-8'), display) != 0:
        if try_again:
            # workaround for a bug in "ZBar for Windows"
            # libzbar.zbar_processor_init always seem to fail the first time around
            return scan_barcode(device, timeout, display, threaded, try_again=False)
        raise RuntimeError("Can not start QR scanner; initialization failed.")
    libzbar.zbar_processor_set_visible(proc)
    if libzbar.zbar_process_one(proc, timeout):
        symbols = libzbar.zbar_processor_get_results(proc)
    else:
        symbols = None
    # Destroy the processor before inspecting results so the preview
    # window is closed in every code path.
    libzbar.zbar_processor_destroy(proc)
    if symbols is None:
        return
    if not libzbar.zbar_symbol_set_get_size(symbols):
        return
    symbol = libzbar.zbar_symbol_set_first_symbol(symbols)
    data = libzbar.zbar_symbol_get_data(symbol)
    return data.decode('utf8')
def _find_system_cameras():
    """Map camera names to device nodes by scanning the v4l sysfs tree."""
    sysfs_root = "/sys/class/video4linux"
    cameras = {}  # Name -> device
    if not os.path.exists(sysfs_root):
        # Non-Linux platform (or no v4l support): nothing to report.
        return cameras
    for entry in os.listdir(sysfs_root):
        name_file = os.path.join(sysfs_root, entry, 'name')
        try:
            with open(name_file) as handle:
                label = handle.read()
        except IOError:
            continue
        cameras[label.strip('\n')] = os.path.join("/dev", entry)
    return cameras
if __name__ == "__main__":
    # Manual smoke test: scan one code and print the decoded payload.
    print(scan_barcode())
| mit |
grupoprog3/proyecto_final | Entrega Final/flask/Lib/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 360 | 18615 | from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
from socket import timeout as SocketTimeout
from socket import error as SocketError
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
class DeflateDecoder(object):
    """Decoder for ``deflate``-encoded bodies.

    Servers disagree about whether "deflate" means a zlib-wrapped stream
    (RFC 1950) or a raw deflate stream (RFC 1951). The first chunk is tried
    as zlib-wrapped; on failure the buffered bytes are replayed through a
    headerless decompressor.
    """

    def __init__(self):
        self._pending_retry = True
        self._buffered = b''
        self._decompressor = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to zlib.
        return getattr(self._decompressor, name)

    def decompress(self, data):
        if not data:
            return data
        if not self._pending_retry:
            return self._decompressor.decompress(data)
        # First attempt: assume zlib framing, but remember the bytes so we
        # can replay them headerless if that guess turns out to be wrong.
        self._buffered += data
        try:
            return self._decompressor.decompress(data)
        except zlib.error:
            self._pending_retry = False
            self._decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._buffered)
            finally:
                self._buffered = None
class GzipDecoder(object):
    """Thin wrapper around zlib configured to understand gzip framing."""

    def __init__(self):
        # wbits of 16 + MAX_WBITS tells zlib to expect a gzip header.
        self._inflater = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Delegate unknown attributes (flush, unused_data, ...) to zlib.
        return getattr(self._inflater, name)

    def decompress(self, data):
        # Empty input passes straight through, matching zlib semantics.
        return self._inflater.decompress(data) if data else data
def _get_decoder(mode):
    # 'gzip' gets the gzip-aware decoder; any other supported mode
    # ('deflate') falls back to the tolerant deflate decoder.
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.
    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.
    Extra parameters for behaviour not present in httplib.HTTPResponse:
    :param preload_content:
        If True, the response's body will be preloaded during construction.
    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.
    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        # Content decoder is created lazily, once the encoding is known.
        self._decoder = None
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body
        self._pool = pool
        self._connection = connection
        if hasattr(body, 'read'):
            self._fp = body
        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True
        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
    def get_redirect_location(self):
        """
        Should we redirect and where to?
        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')
        return False
    def release_conn(self):
        """Return the underlying connection to its pool, if any."""
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None
    @property
    def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)
    @property
    def connection(self):
        # The low-level connection this response is being read from.
        return self._connection
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
    def _init_decoder(self):
        """
        Set up the _decoder attribute if necessary.
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
            self._decoder = _get_decoder(content_encoding)
    def _decode(self, data, decode_content, flush_decoder):
        """
        Decode the data passed in and potentially flush the decoder.
        """
        try:
            if decode_content and self._decoder:
                data = self._decoder.decompress(data)
        except (IOError, zlib.error) as e:
            content_encoding = self.headers.get('content-encoding', '').lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding, e)
        if flush_decoder and decode_content:
            data += self._flush_decoder()
        return data
    def _flush_decoder(self):
        """
        Flushes the decoder. Should only be called if the decoder is actually
        being used.
        """
        if self._decoder:
            buf = self._decoder.decompress(b'')
            return buf + self._decoder.flush()
        return b''
    @contextmanager
    def _error_catcher(self):
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.
        On exit, release the connection back to the pool.
        """
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except (HTTPException, SocketError) as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)
            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()
                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()
            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.
        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return
        flush_decoder = False
        data = None
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
        if data:
            self._fp_bytes_read += len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        return data
    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.
        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked:
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.
        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            if PY3:  # Python 3
                headers = HTTPHeaderDict(headers.items())
            else:  # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers
    def getheader(self, name, default=None):
        return self.headers.get(name, default)
    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()
        if self._connection:
            self._connection.close()
    @property
    def closed(self):
        # Mirror the various "closed" spellings of possible body objects.
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True
    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")
    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()
    def readable(self):
        # This method is required for `io` module compatibility.
        return True
    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)
    def _update_chunk_length(self):
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return
        line = self._fp.fp.readline()
        # Chunk size may carry extensions after ';' -- ignore them.
        line = line.split(b';', 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise httplib.IncompleteRead(line)
    def _handle_chunk(self, amt):
        # Read up to `amt` bytes of the current chunk (all of it when
        # amt is None), consuming the trailing CRLF at chunk boundaries.
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk
    def read_chunked(self, amt=None, decode_content=None):
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing.")
        # Don't bother reading the body of a HEAD request.
        if self._original_response and is_response_to_head(self._original_response):
            self._original_response.close()
            return
        with self._error_catcher():
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(chunk, decode_content=decode_content,
                                       flush_decoder=False)
                if decoded:
                    yield decoded
            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded
            # Chunk content ends with \r\n: discard it.
            while True:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b'\r\n':
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
| apache-2.0 |
ericpre/hyperspy | hyperspy/tests/io/test_edax.py | 1 | 19259 | import gc
import hashlib
import os
import os.path
import tempfile
import zipfile
import numpy as np
import pytest
import requests
from hyperspy import signals
from hyperspy.io import load
# Resolve the test-data archive location relative to this test module.
MY_PATH = os.path.dirname(__file__)
ZIPF = os.path.join(MY_PATH, "edax_files.zip")
TMP_DIR = tempfile.TemporaryDirectory()
TEST_FILES_OK = os.path.isfile(ZIPF)
REASON = ""
SHA256SUM = "e217c71efbd208da4b52e9cf483443f9da2175f2924a96447ed393086fe32008"

# The test files are not included in HyperSpy v1.4 because their file size is 36.5MB
# taking the HyperSpy source distribution file size above PyPI's 60MB limit.
# As a temporary solution, we attempt to download the test files from GitHub
# and skip the tests if the download fails.
if not TEST_FILES_OK:
    try:
        r = requests.get(
            "https://github.com/hyperspy/hyperspy/blob/e7a323a3bb9b237c24bd9267d2cc4fcb31bb99f3/hyperspy/tests/io/edax_files.zip?raw=true")
        SHA256SUM_GOT = hashlib.sha256(r.content).hexdigest()
        if SHA256SUM_GOT == SHA256SUM:
            with open(ZIPF, 'wb') as f:
                f.write(r.content)
            TEST_FILES_OK = True
        else:
            # BUG FIX: the original ``"..." % SHA256SUM, SHA256SUM_GOT``
            # interpolated only SHA256SUM (``%`` binds tighter than the
            # comma), raising a TypeError that the broad except below then
            # masked, producing a wrong skip reason.
            REASON = ("wrong sha256sum of downloaded file. "
                      "Expected: %s, got: %s" % (SHA256SUM, SHA256SUM_GOT))
    except Exception as e:
        # Best-effort download: record why it failed so pytestmark can
        # report it. Exception (not BaseException) keeps Ctrl-C working.
        REASON = "download of EDAX test files failed: %s" % e
def setup_module():
    # Unpack the test archive into the temporary directory once per module.
    if TEST_FILES_OK:
        with zipfile.ZipFile(ZIPF, 'r') as zipped:
            zipped.extractall(TMP_DIR.name)
# Skip every test in this module when the archive is missing and could not
# be downloaded; REASON explains why.
pytestmark = pytest.mark.skipif(not TEST_FILES_OK,
                                reason=REASON)
def teardown_module():
    # Remove the extracted files once all tests in this module have run.
    TMP_DIR.cleanup()
class TestSpcSpectrum_v061_xrf:
    """Reader tests for a v0.61 EDAX .spc XRF point spectrum."""
    @classmethod
    def setup_class(cls):
        # Load the same file twice: once normally and once with the raw
        # spc header attached to original_metadata (load_all_spc=True).
        cls.spc = load(os.path.join(TMP_DIR.name, "spc0_61-ipr333_xrf.spc"))
        cls.spc_loadAll = load(os.path.join(TMP_DIR.name,
                                            "spc0_61-ipr333_xrf.spc"),
                               load_all_spc=True)
    @classmethod
    def teardown_class(cls):
        # Drop the signals and force collection so file handles are freed.
        del cls.spc, cls.spc_loadAll
        gc.collect()
    def test_data(self):
        # test datatype
        assert np.uint32 == TestSpcSpectrum_v061_xrf.spc.data.dtype
        # test data shape
        assert (4000,) == TestSpcSpectrum_v061_xrf.spc.data.shape
        # test 40 datapoints
        assert (
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319, 504, 639, 924,
             1081, 1326, 1470, 1727, 1983, 2123, 2278, 2509, 2586, 2639,
             2681, 2833, 2696, 2704, 2812, 2745, 2709, 2647, 2608, 2620,
             2571, 2669] == TestSpcSpectrum_v061_xrf.spc.data[:40].tolist())
    def test_parameters(self):
        # Verify the metadata mapped from the spc header.
        elements = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
            'Sample']['elements']
        sem_dict = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
            'Acquisition_instrument']['SEM']  # this will eventually need to
        # be changed when XRF-specific
        # features are added
        eds_dict = sem_dict['Detector']['EDS']
        signal_dict = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
            'Signal']
        # Testing SEM parameters
        np.testing.assert_allclose(30, sem_dict['beam_energy'])
        np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
        # Testing EDS parameters
        np.testing.assert_allclose(45, eds_dict['azimuth_angle'])
        np.testing.assert_allclose(35, eds_dict['elevation_angle'])
        np.testing.assert_allclose(137.92946, eds_dict['energy_resolution_MnKa'],
                                   atol=1E-5)
        np.testing.assert_allclose(2561.0, eds_dict['live_time'], atol=1E-6)
        # Testing elements
        assert ({'Al', 'Ca', 'Cl', 'Cr', 'Fe', 'K', 'Mg', 'Mn', 'Si', 'Y'} ==
                set(elements))
        # Testing HyperSpy parameters
        assert 'EDS_SEM' == signal_dict['signal_type']
        assert isinstance(TestSpcSpectrum_v061_xrf.spc, signals.EDSSEMSpectrum)
    def test_axes(self):
        # The single energy axis must match the header's eV-per-channel.
        spc_ax_manager = {'axis-0': {'name': 'Energy',
                                     'navigate': False,
                                     'is_binned': True,
                                     'offset': 0.0,
                                     'scale': 0.01,
                                     'size': 4000,
                                     'units': 'keV'}}
        assert (spc_ax_manager ==
                TestSpcSpectrum_v061_xrf.spc.axes_manager.as_dictionary())
    def test_load_all_spc(self):
        # With load_all_spc=True the raw header fields must be preserved.
        spc_header = TestSpcSpectrum_v061_xrf.spc_loadAll.original_metadata[
            'spc_header']
        np.testing.assert_allclose(4, spc_header['analysisType'])
        np.testing.assert_allclose(4, spc_header['analyzerType'])
        np.testing.assert_allclose(2013, spc_header['collectDateYear'])
        np.testing.assert_allclose(9, spc_header['collectDateMon'])
        np.testing.assert_allclose(26, spc_header['collectDateDay'])
        np.testing.assert_equal(b'Garnet1.', spc_header['fileName'].view('|S8')[0])
        np.testing.assert_allclose(45, spc_header['xRayTubeZ'])
class TestSpcSpectrum_v070_eds:
    """Reader tests for a v0.70 EDAX .spc EDS point spectrum."""
    @classmethod
    def setup_class(cls):
        # Load once normally and once with the raw spc header retained.
        cls.spc = load(os.path.join(TMP_DIR.name, "single_spect.spc"))
        cls.spc_loadAll = load(os.path.join(TMP_DIR.name,
                                            "single_spect.spc"),
                               load_all_spc=True)
    @classmethod
    def teardown_class(cls):
        del cls.spc, cls.spc_loadAll
        gc.collect()
    def test_data(self):
        # test datatype
        assert np.uint32 == TestSpcSpectrum_v070_eds.spc.data.dtype
        # test data shape
        assert (4096,) == TestSpcSpectrum_v070_eds.spc.data.shape
        # test 1st 20 datapoints
        assert (
            [0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 10, 4, 10, 10, 45, 87, 146, 236,
             312, 342] == TestSpcSpectrum_v070_eds.spc.data[:20].tolist())
    def test_parameters(self):
        # Verify metadata mapped from the spc header for an SEM-EDS file.
        elements = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
            'Sample']['elements']
        sem_dict = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
            'Acquisition_instrument']['SEM']
        eds_dict = sem_dict['Detector']['EDS']
        signal_dict = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
            'Signal']
        # Testing SEM parameters
        np.testing.assert_allclose(22, sem_dict['beam_energy'])
        np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
        # Testing EDS parameters
        np.testing.assert_allclose(0, eds_dict['azimuth_angle'])
        np.testing.assert_allclose(34, eds_dict['elevation_angle'])
        np.testing.assert_allclose(129.31299, eds_dict['energy_resolution_MnKa'],
                                   atol=1E-5)
        np.testing.assert_allclose(50.000004, eds_dict['live_time'], atol=1E-6)
        # Testing elements
        assert ({'Al', 'C', 'Ce', 'Cu', 'F', 'Ho', 'Mg', 'O'} ==
                set(elements))
        # Testing HyperSpy parameters
        assert 'EDS_SEM' == signal_dict['signal_type']
        assert isinstance(TestSpcSpectrum_v070_eds.spc, signals.EDSSEMSpectrum)
    def test_axes(self):
        # Single energy axis, 4096 channels at 10 eV/channel.
        spc_ax_manager = {'axis-0': {'name': 'Energy',
                                     'navigate': False,
                                     'is_binned': True,
                                     'offset': 0.0,
                                     'scale': 0.01,
                                     'size': 4096,
                                     'units': 'keV'}}
        assert (spc_ax_manager ==
                TestSpcSpectrum_v070_eds.spc.axes_manager.as_dictionary())
    def test_load_all_spc(self):
        # Raw header fields, including the long Windows file path.
        spc_header = TestSpcSpectrum_v070_eds.spc_loadAll.original_metadata[
            'spc_header']
        np.testing.assert_allclose(4, spc_header['analysisType'])
        np.testing.assert_allclose(5, spc_header['analyzerType'])
        np.testing.assert_allclose(2016, spc_header['collectDateYear'])
        np.testing.assert_allclose(4, spc_header['collectDateMon'])
        np.testing.assert_allclose(19, spc_header['collectDateDay'])
        np.testing.assert_equal(b'C:\\ProgramData\\EDAX\\jtaillon\\Cole\\Mapping\\Lsm\\'
                                b'GFdCr\\950\\Area 1\\spectrum20160419153851427_0.spc',
                                spc_header['longFileName'].view('|S256')[0])
        np.testing.assert_allclose(0, spc_header['xRayTubeZ'])
class TestSpdMap_070_eds:
    """Reader tests for a v0.70 EDAX .spd EDS spectrum map (with .ipr/.spc)."""
    @classmethod
    def setup_class(cls):
        # convert_units=True scales the navigation axes to nm via the ipr.
        cls.spd = load(os.path.join(TMP_DIR.name, "spd_map.spd"),
                       convert_units=True)
    @classmethod
    def teardown_class(cls):
        del cls.spd
        gc.collect()
    def test_data(self):
        # test d_type
        assert np.uint16 == TestSpdMap_070_eds.spd.data.dtype
        # test d_shape
        assert (200, 256, 2500) == TestSpdMap_070_eds.spd.data.shape
        assert ([[[0, 0, 0, 0, 0],  # test random data
                  [0, 0, 1, 0, 1],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1],
                  [0, 1, 1, 0, 0],
                  [0, 0, 0, 0, 0]],
                 [[0, 1, 0, 0, 0],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 1, 0, 0],
                  [0, 0, 0, 1, 0]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 1, 0, 1],
                  [0, 0, 0, 1, 0],
                  [0, 0, 0, 0, 0]]] ==
                TestSpdMap_070_eds.spd.data[15:20, 15:20, 15:20].tolist())
    def test_parameters(self):
        # Verify metadata mapped from the accompanying spc header.
        elements = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
            'Sample']['elements']
        sem_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
            'Acquisition_instrument']['SEM']
        eds_dict = sem_dict['Detector']['EDS']
        signal_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()['Signal']
        # Testing SEM parameters
        np.testing.assert_allclose(22, sem_dict['beam_energy'])
        np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
        # Testing EDS parameters
        np.testing.assert_allclose(0, eds_dict['azimuth_angle'])
        np.testing.assert_allclose(34, eds_dict['elevation_angle'])
        np.testing.assert_allclose(126.60252, eds_dict['energy_resolution_MnKa'],
                                   atol=1E-5)
        np.testing.assert_allclose(2621.4399, eds_dict['live_time'], atol=1E-4)
        # Testing elements
        assert {'Ce', 'Co', 'Cr', 'Fe', 'Gd', 'La', 'Mg', 'O',
                'Sr'} == set(elements)
        # Testing HyperSpy parameters
        assert 'EDS_SEM' == signal_dict['signal_type']
        assert isinstance(TestSpdMap_070_eds.spd, signals.EDSSEMSpectrum)
    def test_axes(self):
        # Two navigation axes (nm, scaled from the ipr) plus the energy axis.
        spd_ax_manager = {'axis-0': {'name': 'y',
                                     'navigate': True,
                                     'is_binned': False,
                                     'offset': 0.0,
                                     'scale': 14.227345585823057,
                                     'size': 200,
                                     'units': 'nm'},
                          'axis-1': {'name': 'x',
                                     'navigate': True,
                                     'is_binned': False,
                                     'offset': 0.0,
                                     'scale': 14.235896058380602,
                                     'size': 256,
                                     'units': 'nm'},
                          'axis-2': {'name': 'Energy',
                                     'navigate': False,
                                     'is_binned': True,
                                     'offset': 0.0,
                                     'scale': 0.0050000000000000001,
                                     'size': 2500,
                                     'units': 'keV'}}
        assert (spd_ax_manager ==
                TestSpdMap_070_eds.spd.axes_manager.as_dictionary())
    def test_ipr_reading(self):
        # Microns-per-pixel values come straight from the ipr file.
        ipr_header = TestSpdMap_070_eds.spd.original_metadata['ipr_header']
        np.testing.assert_allclose(0.014235896, ipr_header['mppX'])
        np.testing.assert_allclose(0.014227346, ipr_header['mppY'])
    def test_spc_reading(self):
        # Test to make sure that spc metadata matches spd metadata
        spc_header = TestSpdMap_070_eds.spd.original_metadata['spc_header']
        elements = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
            'Sample']['elements']
        sem_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
            'Acquisition_instrument']['SEM']
        eds_dict = sem_dict['Detector']['EDS']
        np.testing.assert_allclose(spc_header.azimuth,
                                   eds_dict['azimuth_angle'])
        np.testing.assert_allclose(spc_header.detReso,
                                   eds_dict['energy_resolution_MnKa'])
        np.testing.assert_allclose(spc_header.elevation,
                                   eds_dict['elevation_angle'])
        np.testing.assert_allclose(spc_header.liveTime,
                                   eds_dict['live_time'])
        np.testing.assert_allclose(spc_header.evPerChan,
                                   TestSpdMap_070_eds.spd.axes_manager[2].scale * 1000)
        np.testing.assert_allclose(spc_header.kV,
                                   sem_dict['beam_energy'])
        np.testing.assert_allclose(spc_header.numElem,
                                   len(elements))
class TestSpdMap_061_xrf:
    """Reader tests for a v0.61 EDAX .spd XRF spectrum map (with .ipr/.spc)."""
    @classmethod
    def setup_class(cls):
        # convert_units=True scales the navigation axes to mm via the ipr.
        cls.spd = load(os.path.join(TMP_DIR.name, "spc0_61-ipr333_xrf.spd"),
                       convert_units=True)
    @classmethod
    def teardown_class(cls):
        del cls.spd
        gc.collect()
    def test_data(self):
        # test d_type
        assert np.uint16 == TestSpdMap_061_xrf.spd.data.dtype
        # test d_shape
        assert (200, 256, 2000) == TestSpdMap_061_xrf.spd.data.shape
        assert ([[[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 1]],
                 [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0]]] ==
                TestSpdMap_061_xrf.spd.data[15:20, 15:20, 15:20].tolist())
    def test_parameters(self):
        # Verify metadata mapped from the accompanying spc header.
        elements = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Sample'][
            'elements']
        sem_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()[
            'Acquisition_instrument']['SEM']
        eds_dict = sem_dict['Detector']['EDS']
        signal_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Signal']
        # Testing SEM parameters
        np.testing.assert_allclose(30, sem_dict['beam_energy'])
        np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
        # Testing EDS parameters
        np.testing.assert_allclose(45, eds_dict['azimuth_angle'])
        np.testing.assert_allclose(35, eds_dict['elevation_angle'])
        np.testing.assert_allclose(137.92946, eds_dict['energy_resolution_MnKa'],
                                   atol=1E-5)
        np.testing.assert_allclose(2561.0, eds_dict['live_time'], atol=1E-4)
        # Testing elements
        assert {'Al', 'Ca', 'Cl', 'Cr', 'Fe', 'K', 'Mg', 'Mn', 'Si',
                'Y'} == set(elements)
        # Testing HyperSpy parameters
        assert 'EDS_SEM' == signal_dict['signal_type']
        assert isinstance(TestSpdMap_061_xrf.spd, signals.EDSSEMSpectrum)
    def test_axes(self):
        # Two navigation axes (mm, scaled from the ipr) plus the energy axis.
        spd_ax_manager = {'axis-0': {'name': 'y',
                                     'navigate': True,
                                     'is_binned': False,
                                     'offset': 0.0,
                                     'scale': 0.5651920166015625,
                                     'size': 200,
                                     'units': 'mm'},
                          'axis-1': {'name': 'x',
                                     'navigate': True,
                                     'is_binned': False,
                                     'offset': 0.0,
                                     'scale': 0.5651920166015625,
                                     'size': 256,
                                     'units': 'mm'},
                          'axis-2': {'name': 'Energy',
                                     'navigate': False,
                                     'is_binned': True,
                                     'offset': 0.0,
                                     'scale': 0.01,
                                     'size': 2000,
                                     'units': 'keV'}}
        assert (spd_ax_manager ==
                TestSpdMap_061_xrf.spd.axes_manager.as_dictionary())
    def test_ipr_reading(self):
        # Microns-per-pixel values come straight from the ipr file.
        ipr_header = TestSpdMap_061_xrf.spd.original_metadata['ipr_header']
        np.testing.assert_allclose(565.1920166015625, ipr_header['mppX'])
        np.testing.assert_allclose(565.1920166015625, ipr_header['mppY'])
    def test_spc_reading(self):
        # Test to make sure that spc metadata matches spd_061_xrf metadata
        spc_header = TestSpdMap_061_xrf.spd.original_metadata['spc_header']
        elements = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Sample'][
            'elements']
        sem_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()[
            'Acquisition_instrument']['SEM']
        eds_dict = sem_dict['Detector']['EDS']
        np.testing.assert_allclose(spc_header.azimuth,
                                   eds_dict['azimuth_angle'])
        np.testing.assert_allclose(spc_header.detReso,
                                   eds_dict['energy_resolution_MnKa'])
        np.testing.assert_allclose(spc_header.elevation,
                                   eds_dict['elevation_angle'])
        np.testing.assert_allclose(spc_header.liveTime,
                                   eds_dict['live_time'])
        np.testing.assert_allclose(spc_header.evPerChan,
                                   TestSpdMap_061_xrf.spd.axes_manager[2].scale * 1000)
        np.testing.assert_allclose(spc_header.kV,
                                   sem_dict['beam_energy'])
        np.testing.assert_allclose(spc_header.numElem,
                                   len(elements))
| gpl-3.0 |
nansencenter/DAPPER | dapper/da_methods/__init__.py | 1 | 5619 | """Contains the data assimilation methods included with DAPPER.
See the README section on
[DA Methods](https://github.com/nansencenter/DAPPER#DA-Methods)
for an overview of the methods included with DAPPER.
## Defining your own method
Follow the example of one of the methods within one of the
sub-directories/packages.
The simplest example is perhaps
`dapper.da_methods.ensemble.EnKF`.
## General advice for programming/debugging scientific experiments
- Start with something simple.
This helps make sure the basics of the experiment are reasonable.
For example, start with
- a pre-existing example,
- something you are able to reproduce,
- a small/simple model.
- Set the observation error to be small.
- Observe everything.
- Don't include model error and/or noise to begin with.
- Additionally, test a simple/baseline method to begin with.
When including an ensemble method, start with using a large ensemble,
and introduce localisation later.
- Take incremental steps towards your ultimate experiment setup.
Validate each incremental setup with prints/plots.
If results change, make sure you understand why.
- Use short experiment duration.
You probably don't need statistical significance while debugging.
"""
import dataclasses
import functools
import time
from dataclasses import dataclass
import dapper.stats
def da_method(*default_dataclasses):
    """Turn a dataclass-style class into a DA method for DAPPER (`xp`).
    This decorator applies to classes that define DA methods.
    An instances of the resulting class is referred to (in DAPPER)
    as an `xp` (short for experiment).
    The decorated classes are defined like a `dataclass`,
    but are decorated by `@da_method()` instead of `@dataclass`.
    .. note::
        The classes must define a method called `assimilate`.
        This method gets slightly enhanced by this wrapper which provides:
        - Initialisation of the `Stats` object, accessible by `self.stats`.
        - `fail_gently` functionality.
        - Duration timing
        - Progressbar naming magic.
    Example:
    >>> @da_method()
    ... class Sleeper():
    ...     "Do nothing."
    ...     seconds : int = 10
    ...     success : bool = True
    ...     def assimilate(self, *args, **kwargs):
    ...         for k in range(self.seconds):
    ...             time.sleep(1)
    ...         if not self.success:
    ...             raise RuntimeError("Sleep over. Failing as intended.")
    Internally, `da_method` is just like `dataclass`,
    except that adds an outer layer
    (hence the empty parantheses in the above)
    which enables defining default parameters which can be inherited,
    similar to subclassing.
    Example:
    >>> class ens_defaults:
    ...     infl : float = 1.0
    ...     rot : bool = False
    >>> @da_method(ens_defaults)
    ... class EnKF:
    ...     N : int
    ...     upd_a : str = "Sqrt"
    ...
    ...     def assimilate(self, HMM, xx, yy):
    ...         ...
    """
    def dataclass_with_defaults(cls):
        """Like `dataclass`, but add some DAPPER-specific things.
        This adds `__init__`, `__repr__`, `__eq__`, ...,
        but also includes inherited defaults,
        ref https://stackoverflow.com/a/58130805,
        and enhances the `assimilate` method.
        """
        def set_field(name, type_, val):
            """Set the inherited (i.e. default, i.e. has value) field."""
            # Ensure annotations
            cls.__annotations__ = getattr(cls, '__annotations__', {})
            # Set annotation
            cls.__annotations__[name] = type_
            # Set value. Note: `val` is the dataclasses.Field object itself,
            # which `dataclass()` below interprets, preserving its
            # default/default_factory metadata.
            setattr(cls, name, val)
        # APPend default fields without overwriting.
        # NB: Don't implement (by PREpending?) non-default args -- to messy!
        for default_params in default_dataclasses:
            # NB: Calling dataclass twice always makes repr=True
            for field in dataclasses.fields(dataclass(default_params)):
                if field.name not in cls.__annotations__:
                    set_field(field.name, field.type, field)
        # Create new class (NB: old/new classes have same id)
        cls = dataclass(cls)
        # The new assimilate method: wraps the user-defined one with stats
        # initialisation and duration timing.
        def assimilate(self, HMM, xx, yy, desc=None, **stat_kwargs):
            # Progressbar name. Appears unused, but is presumably read via
            # frame inspection by the progressbar machinery ("naming magic"
            # mentioned in the module docstring) -- hence the noqa.
            pb_name_hook = self.da_method if desc is None else desc # noqa
            # Init stats
            self.stats = dapper.stats.Stats(self, HMM, xx, yy, **stat_kwargs)
            # Assimilate, recording wall-clock duration as a stat
            time_start = time.time()
            _assimilate(self, HMM, xx, yy)
            dapper.stats.register_stat(
                self.stats, "duration", time.time()-time_start)
        # Overwrite the assimilate method with the new one
        try:
            _assimilate = cls.assimilate
        except AttributeError as error:
            raise AttributeError(
                "Classes decorated by da_method()"
                " must define a method called 'assimilate'.") from error
        # functools.wraps preserves the original method's name/docstring.
        cls.assimilate = functools.wraps(_assimilate)(assimilate)
        # Make self.__class__.__name__ an attrib.
        # Used by xpList.split_attrs().
        cls.da_method = cls.__name__
        return cls
    return dataclass_with_defaults
from .baseline import Climatology, OptInterp, Var3D
from .ensemble import LETKF, SL_EAKF, EnKF, EnKF_N, EnKS, EnRTS
from .extended import ExtKF, ExtRTS
from .other import LNETF, RHF
from .particle import OptPF, PartFilt, PFa, PFxN, PFxN_EnKF
from .variational import Var4D, iEnKS
| mit |
carsonmcdonald/electron | script/upload-index-json.py | 105 | 1152 | #!/usr/bin/env python
import os
import sys
from lib.config import PLATFORM, s3_config
from lib.util import atom_gyp, execute, s3put, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
PROJECT_NAME = atom_gyp()['project_name%']
PRODUCT_NAME = atom_gyp()['product_name%']
def main():
  """Dump build version info into index.json and push it to the S3 bucket."""
  with scoped_cwd(SOURCE_ROOT):
    # Path of the built binary differs per platform.
    if sys.platform == 'darwin':
      binary = os.path.join(OUT_DIR, '{0}.app'.format(PRODUCT_NAME),
                            'Contents', 'MacOS', PRODUCT_NAME)
    elif sys.platform == 'win32':
      binary = os.path.join(OUT_DIR, '{0}.exe'.format(PROJECT_NAME))
    else:
      binary = os.path.join(OUT_DIR, PROJECT_NAME)
    # Have the binary itself generate index.json via the helper script.
    index_json = os.path.relpath(os.path.join(OUT_DIR, 'index.json'))
    execute([binary,
             os.path.join('tools', 'dump-version-info.js'),
             index_json])
    # Upload only index.json out of OUT_DIR.
    bucket, access_key, secret_key = s3_config()
    s3put(bucket, access_key, secret_key, OUT_DIR, 'atom-shell/dist',
          [index_json])
if __name__ == '__main__':
sys.exit(main())
| mit |
inspirehep/invenio | modules/bibformat/lib/bibformat_engine.py | 2 | 93734 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2013, 2016, 2020 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Formats a single XML Marc record using specified format.
There is no API for the engine. Instead use module L{bibformat}.
You can have a look at the various escaping modes available in
X{BibFormatObject} in function L{escape_field}
Still it is useful sometimes for debugging purpose to use the
L{BibFormatObject} class directly. For eg:
>>> from invenio.bibformat_engine import BibFormatObject
>>> bfo = BibFormatObject(102)
>>> bfo.field('245__a')
The order Rodentia in South America
>>> from invenio.bibformat_elements import bfe_title
>>> bfe_title.format_element(bfo)
The order Rodentia in South America
@see: bibformat.py, bibformat_utils.py
"""
__revision__ = "$Id$"
import re
import sys
import os
import inspect
import traceback
import cgi
from functools import wraps
from invenio.errorlib import register_exception
from invenio.config import \
CFG_SITE_LANG, \
CFG_BIBFORMAT_CACHED_FORMATS, \
CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS, \
CFG_BIBFORMAT_HIDDEN_TAGS
from invenio.bibrecord import \
create_record, \
record_get_field_instances, \
record_get_field_value, \
record_get_field_values, \
record_xml_output, \
record_empty
from invenio import bibformat_xslt_engine
from invenio.messages import \
language_list_long, \
wash_language, \
gettext_set_language
from invenio import bibformat_dblayer
from invenio.bibformat_config import \
CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION, \
CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION, \
CFG_BIBFORMAT_TEMPLATES_PATH, \
CFG_BIBFORMAT_ELEMENTS_PATH, \
CFG_BIBFORMAT_OUTPUTS_PATH, \
CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH, \
InvenioBibFormatError
from invenio.bibformat_utils import \
record_get_xml, \
parse_tag
from invenio.htmlutils import \
HTMLWasher, \
CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST, \
CFG_HTML_BUFFER_ALLOWED_ATTRIBUTE_WHITELIST
from invenio.textutils import escape_latex
from invenio.webuser import collect_user_info
from invenio.bibknowledge import get_kbr_values
from HTMLParser import HTMLParseError
from invenio.access_control_engine import acc_authorize_action
# Cache for data we have already read and parsed
# (presumably keyed by template/element/output-format name and populated
# lazily by the corresponding get_* accessors -- confirm there)
format_templates_cache = {}
format_elements_cache = {}
format_outputs_cache = {}
html_field = '<!--HTML-->' # String indicating that field should be
                           # treated as HTML (and therefore no escaping of
                           # HTML tags should occur.
                           # Appears in some field values.
washer = HTMLWasher()      # Used to remove dangerous tags from HTML
                           # sources
# Regular expression for finding <lang>...</lang> tag in format templates
pattern_lang = re.compile(r'''
    <lang #<lang tag (no matter case)
    \s* #any number of white spaces
    > #closing <lang> start tag
    (?P<langs>.*?) #anything but the next group (greedy)
    (</lang\s*>) #end tag
    ''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Builds regular expression for finding each known language in <lang> tags
ln_pattern_text = r"<("
for lang in language_list_long(enabled_langs_only=False):
    ln_pattern_text += lang[0] +r"|"
ln_pattern_text = ln_pattern_text.rstrip(r"|")
ln_pattern_text += r")>(.*?)</\1>"
ln_pattern = re.compile(ln_pattern_text, re.IGNORECASE | re.DOTALL)
# Regular expression for finding text to be translated,
# i.e. _(...)_ markers in templates
TRANSLATION_PATTERN = re.compile(r'_\((?P<word>.*?)\)_',
                                 re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <name> tag in format templates
pattern_format_template_name = re.compile(r'''
    <name #<name tag (no matter case)
    \s* #any number of white spaces
    > #closing <name> start tag
    (?P<name>.*?) #name value. any char that is not end tag
    (</name\s*>)(\n)? #end tag
    ''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <description> tag in format templates
pattern_format_template_desc = re.compile(r'''
    <description #<decription tag (no matter case)
    \s* #any number of white spaces
    > #closing <description> start tag
    (?P<desc>.*?) #description value. any char that is not end tag
    </description\s*>(\n)? #end tag
    ''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <BFE_ > tags in format templates
pattern_tag = re.compile(r'''
    <BFE_ #every special tag starts with <BFE_ (no matter case)
    (?P<function_name>[^/\s]+) #any char but a space or slash
    \s* #any number of spaces
    (?P<params>(\s* #params here
    (?P<param>([^=\s])*)\s* #param name: any chars that is not a white space or equality. Followed by space(s)
    =\s* #equality: = followed by any number of spaces
    (?P<sep>[\'"]) #one of the separators
    (?P<value>.*?) #param value: any chars that is not a separator like previous one
    (?P=sep) #same separator as starting one
    )*) #many params
    \s* #any number of spaces
    (/)?> #end of the tag
    ''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding params inside <BFE_ > tags in format templates
pattern_function_params = re.compile(r'''
    (?P<param>([^=\s])*)\s* # Param name: any chars that is not a white space or equality. Followed by space(s)
    =\s* # Equality: = followed by any number of spaces
    (?P<sep>[\'"]) # One of the separators
    (?P<value>.*?) # Param value: any chars that is not a separator like previous one
    (?P=sep) # Same separator as starting one
    ''', re.VERBOSE | re.DOTALL)
# Regular expression for finding format elements "params" attributes
# (defined by @param)
pattern_format_element_params = re.compile(r'''
    @param\s* # Begins with AT param keyword followed by space(s)
    (?P<name>[^\s=]*):\s* # A single keyword and comma, then space(s)
    #(=\s*(?P<sep>[\'"]) # Equality, space(s) and then one of the separators
    #(?P<default>.*?) # Default value: any chars that is not a separator like previous one
    #(?P=sep) # Same separator as starting one
    #)?\s* # Default value for param is optional. Followed by space(s)
    (?P<desc>.*) # Any text that is not end of line (thanks to MULTILINE parameter)
    ''', re.VERBOSE | re.MULTILINE)
# Regular expression for finding format elements "see also" attribute
# (defined by @see)
pattern_format_element_seealso = re.compile(r'''@see:\s*(?P<see>.*)''',
                                            re.VERBOSE | re.MULTILINE)
#Regular expression for finding 2 expressions in quotes, separated by
#comma (as in template("1st","2nd") )
#Used when parsing output formats
## pattern_parse_tuple_in_quotes = re.compile('''
##      (?P<sep1>[\'"])
##      (?P<val1>.*)
##      (?P=sep1)
##      \s*,\s*
##      (?P<sep2>[\'"])
##      (?P<val2>.*)
##      (?P=sep2)
##      ''', re.VERBOSE | re.MULTILINE)
def format_record(recID, of, ln=CFG_SITE_LANG, verbose=0,
                  search_pattern=None, xml_record=None, user_info=None):
    """
    Formats a record given output format. Main entry function of
    bibformat engine.
    Returns a formatted version of the record in the specified
    language, search pattern, and with the specified output format.
    The function will define which format template must be applied.
    You can either specify an record ID to format, or give its xml
    representation. if 'xml_record' is not None, then use it instead
    of recID.
    'user_info' allows to grant access to some functionalities on a
    page depending on the user's priviledges. 'user_info' is the same
    object as the one returned by 'webuser.collect_user_info(req)'
    @param recID: the ID of record to format
    @param of: an output format code (or short identifier for the output format)
    @param ln: the language to use to format the record
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                        5: errors,
                                                        7: errors and warnings, stop if error in format elements
                                                        9: errors and warnings, stop if error (debug mode ))
    @param search_pattern: list of strings representing the user request in web interface
    @param xml_record: an xml string representing the record to format
    @param user_info: the information of the user who will view the formatted page
    @return: tuple (formatted record, flag telling whether a second
             formatting pass is needed to evaluate deferred elements)
    """
    if search_pattern is None:
        search_pattern = []
    out = ""
    ln = wash_language(ln)
    _ = gettext_set_language(ln)
    # Temporary workflow (during migration of formats):
    # Call new BibFormat
    # But if format not found for new BibFormat, then call old BibFormat
    #Create a BibFormat Object to pass that contain record and context
    bfo = BibFormatObject(recID, ln, search_pattern, xml_record, user_info, of)
    if of.lower() != 'xm' and (not bfo.get_record()
                               or record_empty(bfo.get_record())):
        # Record only has recid: do not format, excepted
        # for xm format
        return "", False
    #Find out which format template to use based on record and output format.
    template = decide_format_template(bfo, of)
    if verbose == 9 and template is not None:
        out += """\n<br/><span class="quicknote">
        Using %s template for record %i.
        </span>""" % (template, recID)
    path = "%s%s%s" % (CFG_BIBFORMAT_TEMPLATES_PATH, os.sep, template)
    if template is None or not os.access(path, os.R_OK):
        # template not found in new BibFormat. Call old one
        if verbose == 9:
            if template is None:
                out += """\n<br/><span class="quicknote">
                No template found for output format %s and record %i.
                (Check invenio.err log file for more details)
                </span>""" % (of, recID)
            else:
                out += """\n<br/><span class="quicknote">
                Template %s could not be read.
                </span>""" % (template)
        try:
            raise InvenioBibFormatError(_('No template could be found for output format %s.') % of)
        except InvenioBibFormatError, exc:
            register_exception(req=bfo.req)
            if verbose > 5:
                out += """\n<br/><span class="quicknote">
                %s
                </span>""" % str(exc)
            return out, False
    # Format with template. needs_2nd_pass becomes True when the template
    # contains elements flagged no_cache (see eval_format_template_elements).
    out_, needs_2nd_pass = format_with_format_template(template, bfo, verbose)
    out += out_
    return out, needs_2nd_pass
def format_record_1st_pass(recID, of, ln=CFG_SITE_LANG, verbose=0,
search_pattern=None, xml_record=None,
user_info=None, on_the_fly=False,
save_missing=True):
"""
Format a record in given output format.
Return a formatted version of the record in the specified
language, search pattern, and with the specified output format.
The function will define which format template must be applied.
The record to be formatted can be specified with its ID (with
'recID' parameter) or given as XML representation (with
'xml_record' parameter). If 'xml_record' is specified 'recID' is
ignored (but should still be given for reference. A dummy recid 0
or -1 could be used).
'user_info' allows to grant access to some functionalities on a
page depending on the user's priviledges. The 'user_info' object
makes sense only in the case of on-the-fly formatting. 'user_info'
is the same object as the one returned by
'webuser.collect_user_info(req)'
@param recID: the ID of record to format.
@type recID: int
@param of: an output format code (or short identifier for the output format)
@type of: string
@param ln: the language to use to format the record
@type ln: string
@param verbose: the level of verbosity from 0 to 9 (O: silent,
5: errors,
7: errors and warnings, stop if error in format elements
9: errors and warnings, stop if error (debug mode ))
@type verbose: int
@param search_pattern: list of strings representing the user request in web interface
@type search_pattern: list(string)
@param xml_record: an xml string represention of the record to format
@type xml_record: string or None
@param user_info: the information of the user who will view the formatted page (if applicable)
@param on_the_fly: if False, try to return an already preformatted version of the record in the database
@type on_the_fly: boolean
@return: formatted record
@rtype: string
"""
from invenio.search_engine import record_exists
if search_pattern is None:
search_pattern = []
out = ""
if verbose == 9:
out += """\n<span class="quicknote">
Formatting record %i with output format %s.
</span>""" % (recID, of)
if not on_the_fly and \
(ln == CFG_SITE_LANG or
of.lower() == 'xm' or
(of.lower() in CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS)) and \
record_exists(recID) != -1:
# Try to fetch preformatted record. Only possible for records
# formatted in CFG_SITE_LANG language (other are never
# stored), or of='xm' which does not depend on language.
# Exceptions are made for output formats defined in
# CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS, which are
# always served from the same cache for any language. Also,
# do not fetch from DB when record has been deleted: we want
# to return an "empty" record in that case
res, needs_2nd_pass = bibformat_dblayer.get_preformatted_record(recID, of)
if res is not None:
# record 'recID' is formatted in 'of', so return it
if verbose == 9:
last_updated = bibformat_dblayer.get_preformatted_record_date(recID, of)
out += """\n<br/><span class="quicknote">
Found preformatted output for record %i (cache updated on %s).
</span><br/>""" % (recID, last_updated)
if of.lower() == 'xm':
res = filter_hidden_fields(res, user_info)
# try to replace language links in pre-cached res, if applicable:
if ln != CFG_SITE_LANG and of.lower() in CFG_BIBFORMAT_DISABLE_I18N_FOR_CACHED_FORMATS:
# The following statements try to quickly replace any
# language arguments in URL links. Not an exact
# science, but should work most of the time for most
# of the formats, with not too many false positives.
# We don't have time to parse output much here.
res = res.replace('?ln=' + CFG_SITE_LANG, '?ln=' + ln)
res = res.replace('&ln=' + CFG_SITE_LANG, '&ln=' + ln)
res = res.replace('&ln=' + CFG_SITE_LANG, '&ln=' + ln)
out += res
return out, needs_2nd_pass
else:
if verbose == 9:
out += """\n<br/><span class="quicknote">
No preformatted output found for record %s.
</span>"""% recID
# Live formatting of records in all other cases
if verbose == 9:
out += """\n<br/><span class="quicknote">
Formatting record %i on-the-fly.
</span>""" % recID
try:
out_, needs_2nd_pass = format_record(recID=recID,
of=of,
ln=ln,
verbose=verbose,
search_pattern=search_pattern,
xml_record=xml_record,
user_info=user_info)
out += out_
if of.lower() in ('xm', 'xoaimarc'):
out = filter_hidden_fields(out, user_info, force_filtering=of.lower()=='xoaimarc')
# We have spent time computing this format
# We want to save this effort if the format is cached
if save_missing and recID and ln == CFG_SITE_LANG \
and of.lower() in CFG_BIBFORMAT_CACHED_FORMATS and verbose == 0:
bibformat_dblayer.save_preformatted_record(recID,
of,
out,
needs_2nd_pass)
return out, needs_2nd_pass
except Exception, e:
register_exception(prefix="An error occured while formatting record %s in %s" %
(recID, of),
alert_admin=True)
#Failsafe execution mode
import invenio.template
websearch_templates = invenio.template.load('websearch')
if verbose == 9:
out += """\n<br/><span class="quicknote">
An error occured while formatting record %s. (%s)
</span>""" % (recID, str(e))
if of.lower() == 'hd':
if verbose == 9:
out += """\n<br/><span class="quicknote">
Formatting record %i with websearch_templates.tmpl_print_record_detailed.
</span><br/>""" % recID
return out + websearch_templates.tmpl_print_record_detailed(
ln=ln,
recID=recID,
)
if verbose == 9:
out += """\n<br/><span class="quicknote">
Formatting record %i with websearch_templates.tmpl_print_record_brief.
</span><br/>""" % recID
return out + websearch_templates.tmpl_print_record_brief(ln=ln,
recID=recID,
), False
def format_record_2nd_pass(recID, template, ln=CFG_SITE_LANG,
                           search_pattern=None, xml_record=None,
                           user_info=None, of=None, verbose=0):
    """
    Run the second formatting pass: evaluate the format elements that were
    deferred (not cached) during the first pass of an already partially
    formatted template, localized to language 'ln'.
    """
    # Lightweight context object carrying the record and request parameters
    bfo = BibFormatObject(recID, ln, search_pattern, xml_record, user_info, of)
    # Apply <lang> filtering and _()_ translations for the requested language
    localized_template = translate_template(template, ln)
    # Evaluate the remaining elements; the needs-2nd-pass flag is irrelevant
    # at this point and is discarded.
    out, _dummy = format_with_format_template(format_template_filename=None,
                                              format_template_code=localized_template,
                                              bfo=bfo,
                                              verbose=verbose)
    return out
def decide_format_template(bfo, of):
    """
    Returns the format template name that should be used for formatting
    given output format and L{BibFormatObject}.
    Look at of rules, and take the first matching one.
    If no rule matches, returns None
    To match we ignore lettercase and spaces before and after value of
    rule and value of record
    @param bfo: a L{BibFormatObject}
    @param of: the code of the output format to use
    @return: name of a format template
    """
    output_format = get_output_format(of)
    for rule in output_format['rules']:
        tag = rule['field']
        if tag.startswith('00'):
            # Rule uses a controlfield: single value, stripped of spaces
            candidates = [bfo.control_field(tag).strip()]
        else:
            # Rule uses a datafield: possibly multiple occurrences
            candidates = bfo.fields(tag)
        pattern = rule['value'].strip()
        # Take the first occurrence that fully matches the rule's pattern
        for candidate in candidates:
            candidate = candidate.strip()
            match_obj = re.match(pattern, candidate, re.IGNORECASE)
            if match_obj is not None and match_obj.end() == len(candidate):
                return rule['template']
    # No rule matched: fall back on the output format's default template
    default_template = output_format['default']
    if default_template != '':
        return default_template
    return None
def translate_template(template, ln=CFG_SITE_LANG):
    """
    Localize 'template' to language 'ln': keep only the sections for that
    language and substitute every _(...)_ marker with its translation.
    """
    _ = gettext_set_language(ln)
    # Keep only the <lang> sections matching 'ln'
    localized = filter_languages(template, ln)
    # Replace each _(word)_ marker with the translation of 'word'
    return TRANSLATION_PATTERN.sub(lambda match: _(match.group("word")),
                                   localized)
def format_with_format_template(format_template_filename, bfo,
                                verbose=0, format_template_code=None):
    """ Format a record given a
    format template.
    Returns a formatted version of the record represented by bfo,
    in the language specified in bfo, and with the specified format template.
    If format_template_code is provided, the template will not be loaded from
    format_template_filename (but format_template_filename will still be used to
    determine if bft or xsl transformation applies). This allows to preview format
    code without having to save file on disk.
    @param format_template_filename: the dilename of a format template
    @param bfo: the object containing parameters for the current formatting
    @param format_template_code: if not empty, use code as template instead of reading format_template_filename (used for previews)
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                        5: errors,
                                                        7: errors and warnings,
                                                        9: errors and warnings, stop if error (debug mode ))
    @return: formatted text
    """
    # Either use the code given directly (preview mode), or load it from disk
    if format_template_code is not None:
        format_content = str(format_template_code)
    else:
        format_content = get_format_template(format_template_filename)['code']
    is_bft = (format_template_filename is None or
              format_template_filename.endswith(
                  "." + CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION))
    if is_bft:
        # BibFormat's own template language (.bft): localize, then evaluate
        # the <BFE_...> elements.
        localized_content = translate_template(format_content, bfo.lang)
        return eval_format_template_elements(localized_content, bfo, verbose)
    # XSL stylesheet (.xsl): transform the record's MARCXML instead.
    xml_prolog = '<?xml version="1.0" encoding="UTF-8"?>\n'
    if bfo.xml_record:
        # bfo was initialized with a custom MARCXML
        xml_record = xml_prolog + record_xml_output(bfo.record)
    else:
        # Fetch MARCXML. On-the-fly xm if we are now formatting in xm
        xml_record = xml_prolog + record_get_xml(bfo.recID, 'xm',
                                                 on_the_fly=False)
    evaluated_format = bibformat_xslt_engine.format(
        xml_record, template_source=format_content)
    # XSL templates never defer elements to a second pass
    return evaluated_format, False
def eval_format_template_elements(format_template, bfo, verbose=0):
    """
    Evalutes the format elements of the given template and replace each element with its value.
    Prepare the format template content so that we can directly replace the marc code by their value.
    This implies:
    1. Look for special tags
    2. replace special tags by their evaluation
    @param format_template: the format template code
    @param bfo: the object containing parameters for the current formatting
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                        5: errors, 7: errors and warnings,
                                                        9: errors and warnings, stop if error (debug mode ))
    @return: tuple (evaluated template, flag telling whether a second
             formatting pass is needed for no_cache elements)
    """
    _ = gettext_set_language(bfo.lang)
    # Mutable holder so the nested substitution callback can flag that at
    # least one element was deferred to the second pass.
    status = {'no_cache': False}
    # First define insert_element_code(match), used in re.sub() function
    def insert_element_code(match):
        """
        Analyses 'match', interpret the corresponding code, and return the result of the evaluation.
        Called by substitution in 'eval_format_template_elements(...)'
        @param match: a match object corresponding to the special tag that must be interpreted
        """
        function_name = match.group("function_name")
        # Ignore lang tags the processing is done outside
        if function_name == 'lang':
            return match.group(0)
        try:
            format_element = get_format_element(function_name, verbose)
        except Exception, e:
            register_exception(req=bfo.req)
            format_element = None
            if verbose >= 5:
                return '<b><span style="color: rgb(255, 0, 0);">' + \
                       cgi.escape(str(e)).replace('\n', '<br/>') + \
                       '</span>'
        if format_element is None:
            try:
                raise InvenioBibFormatError(_('Could not find format element named %s.') % function_name)
            except InvenioBibFormatError, exc:
                register_exception(req=bfo.req)
                if verbose >= 5:
                    return '<b><span style="color: rgb(255, 0, 0);">' + \
                           str(exc.message)+'</span></b>'
        else:
            params = {}
            # Look for function parameters given in format template code
            all_params = match.group('params')
            if all_params is not None:
                function_params_iterator = pattern_function_params.finditer(all_params)
                for param_match in function_params_iterator:
                    name = param_match.group('param')
                    value = param_match.group('value')
                    params[name] = value
            if params.get('no_cache') == '1':
                # Element flagged no_cache="1": do not evaluate it now.
                # Re-emit the tag (minus the no_cache attribute) so the
                # second formatting pass can evaluate it, and record that
                # such a pass is needed.
                result = match.group("function_name")
                del params['no_cache']
                if params:
                    params_str = ' '.join('%s="%s"' % (k, v) for k, v in params.iteritems())
                    result = "<bfe_%s %s />" % (result, params_str)
                else:
                    result = "<bfe_%s />" % result
                status['no_cache'] = True
            else:
                # Evaluate element with params and return (Do not return errors)
                result, dummy = eval_format_element(format_element,
                                                    bfo,
                                                    params,
                                                    verbose)
            return result
    # Substitute special tags in the format by our own text.
    # Special tags have the form <BFE_format_element_name [param="value"]* />
    fmt = pattern_tag.sub(insert_element_code, format_template)
    return fmt, status['no_cache']
def eval_format_element(format_element, bfo, parameters=None, verbose=0):
"""
Returns the result of the evaluation of the given format element
name, with given L{BibFormatObject} and parameters. Also returns
the errors of the evaluation.
@param format_element: a format element structure as returned by get_format_element
@param bfo: a L{BibFormatObject} used for formatting
@param parameters: a dict of parameters to be used for formatting. Key is parameter and value is value of parameter
@param verbose: the level of verbosity from 0 to 9 (O: silent,
5: errors,
7: errors and warnings,
9: errors and warnings, stop if error (debug mode ))
@return: tuple (result, errors)
"""
if parameters is None:
parameters = {}
errors = []
#Load special values given as parameters
prefix = parameters.get('prefix', "")
suffix = parameters.get('suffix', "")
default_value = parameters.get('default', "")
escape = parameters.get('escape', "")
output_text = ''
# 3 possible cases:
# a) format element file is found: we execute it
# b) format element file is not found, but exist in tag table (e.g. bfe_isbn)
# c) format element is totally unknown. Do nothing or report error
if format_element is not None and format_element['type'] == "python":
# a) We found an element with the tag name, of type "python"
# Prepare a dict 'params' to pass as parameter to 'format'
# function of element
params = {}
# Look for parameters defined in format element
# Fill them with specified default values and values
# given as parameters.
# Also remember if the element overrides the 'escape'
# parameter
format_element_overrides_escape = False
for param in format_element['attrs']['params']:
name = param['name']
default = param['default']
params[name] = parameters.get(name, default)
if name == 'escape':
format_element_overrides_escape = True
# Add BibFormatObject
params['bfo'] = bfo
# Execute function with given parameters and return result.
function = format_element['code']
_ = gettext_set_language(bfo.lang)
try:
output_text = function(**params)
except Exception, e:
register_exception(req=bfo.req)
name = format_element['attrs']['name']
try:
raise InvenioBibFormatError(_('Error when evaluating format element %s with parameters %s.') % (name, str(params)))
except InvenioBibFormatError, exc:
errors.append(exc.message)
if verbose >= 5:
tb = sys.exc_info()[2]
stack = traceback.format_exception(Exception, e, tb, limit=None)
output_text = '<b><span style="color: rgb(255, 0, 0);">'+ \
str(exc.message) + "".join(stack) +'</span></b> '
# None can be returned when evaluating function
if output_text is None:
output_text = ""
else:
output_text = str(output_text)
# Escaping:
# (1) By default, everything is escaped in mode 1
# (2) If evaluated element has 'escape_values()' function, use
# its returned value as escape mode, and override (1)
# (3) If template has a defined parameter 'escape' (in allowed
# values), use it, and override (1) and (2). If this
# 'escape' parameter is overriden by the format element
# (defined in the 'format' function of the element), leave
# the escaping job to this element
# (1)
escape_mode = 1
# (2)
escape_function = format_element['escape_function']
if escape_function is not None:
try:
escape_mode = escape_function(bfo=bfo)
except Exception, e:
try:
raise InvenioBibFormatError(_('Escape mode for format element %s could not be retrieved. Using default mode instead.') % name)
except InvenioBibFormatError, exc:
register_exception(req=bfo.req)
errors.append(exc.message)
if verbose >= 5:
tb = sys.exc_info()[2]
output_text += '<b><span style="color: rgb(255, 0, 0);">'+ \
str(exc.message) +'</span></b> '
# (3)
if escape in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']:
escape_mode = int(escape)
# If escape is equal to 1, then escape all
# HTML reserved chars.
if escape_mode > 0 and not format_element_overrides_escape:
output_text = escape_field(output_text, mode=escape_mode)
# Add prefix and suffix if they have been given as parameters and if
# the evaluation of element is not empty
if output_text.strip() != "":
output_text = prefix + output_text + suffix
# Add the default value if output_text is empty
if output_text == "":
output_text = default_value
return output_text, errors
elif format_element is not None and format_element['type'] == "field":
# b) We have not found an element in files that has the tag
# name. Then look for it in the table "tag"
#
# <BFE_LABEL_IN_TAG prefix = "" suffix = "" separator = ""
# nbMax="" escape="0"/>
#
# Load special values given as parameters
separator = parameters.get('separator ', "")
nbMax = parameters.get('nbMax', "")
escape = parameters.get('escape', "1") # By default, escape here
# Get the fields tags that have to be printed
tags = format_element['attrs']['tags']
output_text = []
# Get values corresponding to tags
for tag in tags:
p_tag = parse_tag(tag)
values = record_get_field_values(bfo.get_record(),
p_tag[0],
p_tag[1],
p_tag[2],
p_tag[3])
if len(values)>0 and isinstance(values[0], dict):
#flatten dict to its values only
values_list = [x.values() for x in values]
#output_text.extend(values)
for values in values_list:
output_text.extend(values)
else:
output_text.extend(values)
if nbMax != "":
try:
nbMax = int(nbMax)
except ValueError:
name = format_element['attrs']['name']
try:
raise InvenioBibFormatError(_('"nbMax" parameter for %s must be an "int".') % name)
except InvenioBibFormatError, exc:
register_exception(req=bfo.req)
errors.append(exc.message)
if verbose >= 5:
output_text = output_text.append(exc.message)
else:
output_text = output_text[:nbMax]
# Add prefix and suffix if they have been given as parameters and if
# the evaluation of element is not empty.
# If evaluation is empty string, return default value if it exists.
# Else return empty string
if ("".join(output_text)).strip() != "":
# If escape is equal to 1, then escape all
# HTML reserved chars.
if escape == '1':
output_text = cgi.escape(separator.join(output_text))
else:
output_text = separator.join(output_text)
output_text = prefix + output_text + suffix
else:
#Return default value
output_text = default_value
return (output_text, errors)
else:
# c) Element is unknown
try:
raise InvenioBibFormatError(_('Could not find format element named %s.') % format_element)
except InvenioBibFormatError, exc:
register_exception(req=bfo.req)
errors.append(exc.message)
if verbose < 5:
return ("", errors)
elif verbose >= 5:
if verbose >= 9:
sys.exit(exc.message)
return ('<b><span style="color: rgb(255, 0, 0);">' +
str(exc.message)+'</span></b>', errors)
def filter_languages(format_template, ln=CFG_SITE_LANG):
    """
    Filters the language tags that do not correspond to the specified language.

    Each <lang>...</lang> group in the template is replaced by the text of
    its inner localization tag (e.g. <en>...</en>) matching 'ln'; when 'ln'
    has no localization inside a given group, CFG_SITE_LANG is used instead
    for that group.

    @param format_template: the format template code
    @param ln: the language that is NOT filtered out from the template
    @return: the format template with unnecessary languages filtered out
    """
    # First define search_lang_tag(match) and clean_language_tag(match), used
    # in re.sub() function
    def search_lang_tag(match):
        """
        Searches for the <lang>...</lang> tag and remove inner localized tags
        such as <en>, <fr>, that are not current_lang.

        If current_lang cannot be found inside <lang> ... </lang>, try to use 'CFG_SITE_LANG'

        @param match: a match object corresponding to the special tag that must be interpreted
        """
        current_lang = ln
        def clean_language_tag(match):
            """
            Return tag text content if tag language of match is output language.

            Called by substitution in 'filter_languages(...)'

            @param match: a match object corresponding to the special tag that must be interpreted
            """
            # Closures read 'current_lang' lazily, so the fallback
            # reassignment below (after this def) is visible here.
            if match.group(1) == current_lang:
                return match.group(2)
            else:
                return ""
            # End of clean_language_tag

        lang_tag_content = match.group("langs")
        # Try to find tag with current lang. If it does not exists,
        # then current_lang becomes CFG_SITE_LANG until the end of this
        # replace
        pattern_current_lang = re.compile(r"<(" + current_lang +
                    r")\s*>(.*)(</" + current_lang + r"\s*>)", re.IGNORECASE | re.DOTALL)
        if re.search(pattern_current_lang, lang_tag_content) is None:
            current_lang = CFG_SITE_LANG

        cleaned_lang_tag = ln_pattern.sub(clean_language_tag, lang_tag_content)
        return cleaned_lang_tag.strip()
        # End of search_lang_tag

    filtered_format_template = pattern_lang.sub(search_lang_tag, format_template)
    return filtered_format_template
def get_format_template(filename, with_attributes=False):
    """
    Returns the structured content of the given format template.

    If 'with_attributes' is true, returns the name and description. Else 'attrs' is not
    returned as key in dictionary (it might, if it has already been loaded previously)::
        {'code':"<b>Some template code</b>"
         'attrs': {'name': "a name", 'description': "a description"}
        }

    Returns None when 'filename' has neither the format template extension
    nor the '.xsl' extension.

    @param filename: the filename of an format template
    @param with_attributes: if True, fetch the attributes (names and description) for format'
    @return: strucured content of format template
    """
    _ = gettext_set_language(CFG_SITE_LANG)

    if not filename.endswith("."+CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION) and \
           not filename.endswith(".xsl"):
        return None

    # Get from cache whenever possible.
    # Only reload (below) when attributes were requested but are missing
    # from the cached copy; previously a cache hit without attributes
    # requested still fell through and re-read the file on every call.
    if filename in format_templates_cache:
        if not with_attributes or \
               'attrs' in format_templates_cache[filename]:
            return format_templates_cache[filename]

    format_template = {'code': ""}
    try:
        path = "%s%s%s" % (CFG_BIBFORMAT_TEMPLATES_PATH, os.sep, filename)

        format_file = open(path)
        format_content = format_file.read()
        format_file.close()

        # Load format template code
        # Remove name and description
        if filename.endswith("."+CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION):
            code_and_description = pattern_format_template_name.sub("",
                                                                    format_content, 1)
            code = pattern_format_template_desc.sub("", code_and_description, 1)
        else:
            code = format_content

        format_template['code'] = code

    except:
        register_exception()

    # Save attributes if necessary
    if with_attributes:
        format_template['attrs'] = get_format_template_attrs(filename)

    # Cache and return
    format_templates_cache[filename] = format_template
    return format_template
def get_format_templates(with_attributes=False):
    """
    Returns all format templates as a dictionary keyed by filename.

    If 'with_attributes' is true, each entry also carries the name and
    description under 'attrs'. Else 'attrs' is not guaranteed to be a key
    in each dictionary (it might be, if already loaded previously)::

        [{'code':"<b>Some template code</b>"
          'attrs': {'name': "a name", 'description': "a description"}
         },
        ...
        }

    @param with_attributes: if True, fetch the attributes (names and description) for formats
    @return: the list of format templates (with code and info)
    """
    template_extension = "." + CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION
    templates = {}
    for template_file in os.listdir(CFG_BIBFORMAT_TEMPLATES_PATH):
        # Only regular format templates (.bft-like) and XSL stylesheets count
        if not (template_file.endswith(template_extension) or
                template_file.endswith(".xsl")):
            continue
        templates[template_file] = get_format_template(template_file,
                                                       with_attributes)
    return templates
def get_format_template_attrs(filename):
"""
Returns the attributes of the format template with given filename
The attributes are {'name', 'description'}
Caution: the function does not check that path exists or
that the format element is valid.
@param filename: the name of a format template
@return: a structure with detailed information about given format template
"""
_ = gettext_set_language(CFG_SITE_LANG)
attrs = {}
attrs['name'] = ""
attrs['description'] = ""
try:
template_file = open("%s%s%s" % (CFG_BIBFORMAT_TEMPLATES_PATH,
os.sep,
filename))
code = template_file.read()
template_file.close()
match = None
if filename.endswith(".xsl"):
# .xsl
attrs['name'] = filename[:-4]
else:
# .bft
match = pattern_format_template_name.search(code)
if match is not None:
attrs['name'] = match.group('name')
else:
attrs['name'] = filename
match = pattern_format_template_desc.search(code)
if match is not None:
attrs['description'] = match.group('desc').rstrip('.')
except Exception, e:
try:
raise InvenioBibFormatError(_('Could not read format template named %s. %s.') % (filename, str(e)))
except InvenioBibFormatError:
register_exception()
attrs['name'] = filename
return attrs
def get_format_element(element_name, verbose=0, with_built_in_params=False,
                       soft_fail=False):
    """
    Returns the format element structured content.

    Return None if element cannot be loaded (file not found, not readable or
    invalid)

    The returned structure is::
        {'attrs': {some attributes in dict. See get_format_element_attrs_from_*}
         'code': the_function_code,
         'type':"field" or "python" depending if element is defined in file or table,
         'escape_function': the function to call to know if element output must be escaped}

    @param element_name: the name of the format element to load
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                       5: errors,
                                                       7: errors and warnings,
                                                       9: errors and warnings, stop if error (debug mode ))
    @param with_built_in_params: if True, load the parameters built in all elements
    @param soft_fail: if True, register failures and return None instead of raising
    @return: a dictionary with format element attributes
    """
    _ = gettext_set_language(CFG_SITE_LANG)

    # Resolve filename and prepare 'name' as key for the cache
    filename = resolve_format_element_filename(element_name)
    if filename is not None:
        name = filename.upper()
    else:
        name = element_name.upper()

    # Serve from cache unless built-in params were requested but are
    # missing from the cached entry (then rebuild it below).
    if name in format_elements_cache:
        element = format_elements_cache[name]
        if not with_built_in_params or \
               (with_built_in_params and
                'builtin_params' in element['attrs']):
            return element

    if filename is None:
        # Element is maybe in tag table
        if bibformat_dblayer.tag_exists_for_name(element_name):
            format_element = {'attrs': get_format_element_attrs_from_table(
                element_name,
                with_built_in_params),
                              'code': None,
                              'escape_function': None,
                              'type': "field"}
            # Cache and returns
            format_elements_cache[name] = format_element
            return format_element

        elif soft_fail:
            register_exception()
            return None
        else:
            raise InvenioBibFormatError(_('Format element %s could not be found.') % element_name)

    else:
        format_element = {}

        module_name = filename
        if module_name.endswith(".py"):
            module_name = module_name[:-3]

        # Load element
        try:
            module = __import__(CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH +
                                "." + module_name)

            # Load last module in import path
            # For eg. load bfe_name in
            # invenio.bibformat_elements.bfe_name
            # Used to keep flexibility regarding where elements
            # directory is (for eg. test cases)
            components = CFG_BIBFORMAT_ELEMENTS_IMPORT_PATH.split(".")
            for comp in components[1:]:
                module = getattr(module, comp)
        except:
            # Broad catch on purpose: any import-time failure of the
            # element module is either softened or propagated.
            if soft_fail:
                register_exception()
                return None
            else:
                raise

        # Load function 'format_element()' inside element
        try:
            function_format = module.__dict__[module_name].format_element
            format_element['code'] = function_format
        except AttributeError, e:
            # Try to load 'format()' function (legacy element API)
            try:
                function_format = module.__dict__[module_name].format
                format_element['code'] = function_format
            except AttributeError, e:
                if soft_fail:
                    try:
                        raise InvenioBibFormatError(_('Format element %s has no function named "format".') % element_name)
                    except InvenioBibFormatError, exc:
                        register_exception()
                    return None
                else:
                    raise

        # Load function 'escape_values()' inside element
        # (optional: None means the default escaping policy applies)
        function_escape = getattr(module.__dict__[module_name],
                                  'escape_values',
                                  None)
        format_element['escape_function'] = function_escape

        # Prepare, cache and return
        format_element['attrs'] = get_format_element_attrs_from_function(
            function_format,
            element_name,
            with_built_in_params)
        format_element['type'] = "python"
        format_elements_cache[name] = format_element
        return format_element
def get_format_elements(with_built_in_params=False):
    """
    Returns the list of format elements attributes as dictionary structure.

    Elements declared in files have priority over element declared in 'tag' table.

    The returned object has this format::
        {element_name1: {'attrs': {'description':..., 'seealso':...
                                   'params':[{'name':..., 'default':..., 'description':...}, ...]
                                   'builtin_params':[{'name':..., 'default':..., 'description':...}, ...]
                                  },
                         'code': code_of_the_element
                        },
         element_name2: {...},
         ...}

    Returns only elements that could be loaded (not error in code)

    @return: a dict of format elements with name as key, and a dict as attributes
    @param with_built_in_params: if True, load the parameters built in all elements
    """
    elements = {}

    # Table-declared elements first; file-based elements loaded below
    # overwrite them on name collision (files have priority).
    for tag_name in bibformat_dblayer.get_all_name_tag_mappings():
        key = tag_name.upper().replace(" ", "_").strip()
        elements[key] = get_format_element(tag_name,
                                           with_built_in_params=with_built_in_params)

    for entry in os.listdir(CFG_BIBFORMAT_ELEMENTS_PATH):
        normalized = entry.upper().replace(" ", "_")
        if not normalized.endswith(".PY") or entry.upper() == "__INIT__.PY":
            continue
        # Drop the conventional "bfe_" prefix and the ".py" extension
        if normalized.startswith("BFE_"):
            normalized = normalized[4:]
        element_name = normalized[:-3]

        element = get_format_element(element_name,
                                     with_built_in_params=with_built_in_params,
                                     soft_fail=True)
        if element is not None:
            elements[element_name] = element

    return elements
def get_format_element_attrs_from_function(function, element_name,
                                           with_built_in_params=False):
    """
    Returns the attributes of the function given as parameter.

    It looks for standard parameters of the function, default
    values and comments in the docstring.

    The attributes are::
                        {'name' : "name of element" #basically the name of 'name' parameter
                        'description': "a string description of the element",
                        'seealso' : ["element_1.py", "element_2.py", ...] #a list of related elements
                        'params': [{'name':"param_name", #a list of parameters for this element (except 'bfo')
                                    'default':"default value",
                                    'description': "a description"}, ...],
                        'builtin_params': {name: {'name':"param_name",#the parameters builtin for all elem of this kind
                                                  'default':"default value",
                                                  'description': "a description"}, ...},
                        }
    @param function: the formatting function of a format element
    @param element_name: the name of the element
    @param with_built_in_params: if True, load the parameters built in all elements
    @return: a structure with detailed information of a function
    """

    attrs = {}
    attrs['description'] = ""
    attrs['name'] = element_name.replace(" ", "_").upper()
    attrs['seealso'] = []

    docstring = function.__doc__
    if isinstance(docstring, str):
        # Look for function description in docstring
        #match = pattern_format_element_desc.search(docstring)
        # Description is everything before the first @param / @see: marker
        description = docstring.split("@param")[0]
        description = description.split("@see:")[0]
        attrs['description'] = description.strip().rstrip('.')

        # Look for @see: in docstring
        match = pattern_format_element_seealso.search(docstring)
        if match is not None:
            elements = match.group('see').rstrip('.').split(",")
            for element in elements:
                attrs['seealso'].append(element.strip())

    params = {}
    # Look for parameters in function definition
    args, dummy_varargs, dummy_varkw, defaults = inspect.getargspec(function)

    # Prepare args and defaults_list such that we can have a mapping
    # from args to defaults
    args.reverse()
    if defaults is not None:
        defaults_list = list(defaults)
        defaults_list.reverse()
    else:
        defaults_list = []

    # map(None, a, b) is the Python 2 zip-longest idiom: args without a
    # declared default get paired with None.
    for arg, default in map(None, args, defaults_list):
        if arg == "bfo":
            #Don't keep this as parameter. It is hidden to users, and
            #exists in all elements of this kind
            continue
        param = {}
        param['name'] = arg
        if default is None:
            #In case no check is made inside element, we prefer to
            #print "" (nothing) than None in output
            param['default'] = ""
        else:
            param['default'] = default
        param['description'] = "(no description provided)"

        params[arg] = param

    if isinstance(docstring, str):
        # Look for AT param descriptions in docstring.
        # Add description to existing parameters in params dict
        params_iterator = pattern_format_element_params.finditer(docstring)
        for match in params_iterator:
            name = match.group('name')
            if name in params:
                params[name]['description'] = match.group('desc').rstrip('.')

    attrs['params'] = params.values()

    # Load built-in parameters if necessary
    if with_built_in_params:

        builtin_params = []
        # Add 'prefix' parameter
        param_prefix = {}
        param_prefix['name'] = "prefix"
        param_prefix['default'] = ""
        param_prefix['description'] = """A prefix printed only if the
                                      record has a value for this element"""
        builtin_params.append(param_prefix)

        # Add 'suffix' parameter
        param_suffix = {}
        param_suffix['name'] = "suffix"
        param_suffix['default'] = ""
        param_suffix['description'] = """A suffix printed only if the
                                      record has a value for this element"""
        builtin_params.append(param_suffix)

        # Add 'default' parameter
        param_default = {}
        param_default['name'] = "default"
        param_default['default'] = ""
        param_default['description'] = """A default value printed if the
                                       record has no value for this element"""
        builtin_params.append(param_default)

        # Add 'escape' parameter
        param_escape = {}
        param_escape['name'] = "escape"
        param_escape['default'] = ""
        param_escape['description'] = """0 keeps value as it is. Refer to main
                                      documentation for escaping modes
                                      1 to 7"""
        builtin_params.append(param_escape)

        attrs['builtin_params'] = builtin_params

    return attrs
def get_format_element_attrs_from_table(element_name,
                                        with_built_in_params=False):
    """
    Returns the attributes of the format element with given name in 'tag' table.

    Returns None if element_name does not exist in tag table.

    The attributes are::
                       {'name' : "name of element" #basically the name of 'element_name' parameter
                       'description': "a string description of the element",
                       'seealso' : [] #a list of related elements. Always empty in this case
                       'params': [], #a list of parameters for this element. Always empty in this case
                       'builtin_params': [{'name':"param_name", #the parameters builtin for all elem of this kind
                                          'default':"default value",
                                          'description': "a description"}, ...],
                       'tags':["950.1", 203.a] #the list of tags printed by this element
                       }

    @param element_name: an element name in database
    @param with_built_in_params: if True, load the parameters built in all elements
    @return: a structure with detailed information of an element found in DB
    """

    attrs = {}
    tags = bibformat_dblayer.get_tags_from_name(element_name)
    # Pluralize the auto-generated description when several tags are printed
    field_label = "field"
    if len(tags)>1:
        field_label = "fields"

    attrs['description'] = "Prints %s %s of the record" % (field_label,
                                                           ", ".join(tags))
    attrs['name'] = element_name.replace(" ", "_").upper()
    attrs['seealso'] = []
    attrs['params'] = []
    attrs['tags'] = tags

    # Load built-in parameters if necessary
    if with_built_in_params:
        builtin_params = []

        # Add 'prefix' parameter
        param_prefix = {}
        param_prefix['name'] = "prefix"
        param_prefix['default'] = ""
        param_prefix['description'] = """A prefix printed only if the
                                      record has a value for this element"""
        builtin_params.append(param_prefix)

        # Add 'suffix' parameter
        param_suffix = {}
        param_suffix['name'] = "suffix"
        param_suffix['default'] = ""
        param_suffix['description'] = """A suffix printed only if the
                                      record has a value for this element"""
        builtin_params.append(param_suffix)

        # Add 'separator' parameter
        param_separator = {}
        param_separator['name'] = "separator"
        param_separator['default'] = " "
        param_separator['description'] = """A separator between elements of
                                         the field"""
        builtin_params.append(param_separator)

        # Add 'nbMax' parameter
        param_nbMax = {}
        param_nbMax['name'] = "nbMax"
        param_nbMax['default'] = ""
        param_nbMax['description'] = """The maximum number of values to
                                     print for this element. No limit if not
                                     specified"""
        builtin_params.append(param_nbMax)

        # Add 'default' parameter
        param_default = {}
        param_default['name'] = "default"
        param_default['default'] = ""
        param_default['description'] = """A default value printed if the
                                       record has no value for this element"""
        builtin_params.append(param_default)

        # Add 'escape' parameter
        param_escape = {}
        param_escape['name'] = "escape"
        param_escape['default'] = ""
        param_escape['description'] = """If set to 1, replaces special
                                      characters '&', '<' and '>' of this
                                      element by SGML entities"""
        builtin_params.append(param_escape)

        attrs['builtin_params'] = builtin_params

    return attrs
def get_output_format(code, with_attributes=False, verbose=0):
    """
    Returns the structured content of the given output format.

    If 'with_attributes' is true, also returns the names and description of the output formats,
    else 'attrs' is not returned in dict (it might, if it has already been loaded previously).

    if output format corresponding to 'code' is not found return an empty structure.

    See get_output_format_attrs() to learn more about the attributes::

        {'rules': [ {'field': "980__a",
                     'value': "PREPRINT",
                     'template': "filename_a.bft",
                    },
                    {...}
                  ],
         'attrs': {'names': {'generic':"a name", 'sn':{'en': "a name", 'fr':"un nom"}, 'ln':{'en':"a long name"}}
                   'description': "a description"
                   'code': "fnm1",
                   'content_type': "application/ms-excel",
                   'visibility': 1
                  }
         'default':"filename_b.bft"
        }

    @param code: the code of an output_format
    @param with_attributes: if True, fetch the attributes (names and description) for format
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                       5: errors,
                                                       7: errors and warnings,
                                                       9: errors and warnings, stop if error (debug mode ))
    @return: strucured content of output format
    """
    _ = gettext_set_language(CFG_SITE_LANG)
    output_format = {'rules': [], 'default': ""}
    filename = resolve_output_format_filename(code, verbose)

    if filename is None:
        # Unknown code: log it and return the empty structure
        try:
            raise InvenioBibFormatError(_('Output format with code %s could not be found.') % code)
        except InvenioBibFormatError:
            register_exception()
        if with_attributes: #Create empty attrs if asked for attributes
            output_format['attrs'] = get_output_format_attrs(code, verbose)
        return output_format

    # Get from cache whenever possible
    if filename in format_outputs_cache:
        # If was must return with attributes but cache has not
        # attributes, then load attributes
        if with_attributes and not \
               'attrs' in format_outputs_cache[filename]:
            format_outputs_cache[filename]['attrs'] = get_output_format_attrs(code, verbose)

        return format_outputs_cache[filename]
    try:
        if with_attributes:
            output_format['attrs'] = get_output_format_attrs(code, verbose)

        path = "%s%s%s" % (CFG_BIBFORMAT_OUTPUTS_PATH, os.sep, filename)
        format_file = open(path)
        # NOTE(review): format_file is never closed explicitly here —
        # relies on interpreter cleanup; confirm before changing.

        current_tag = ''
        for line in format_file:
            line = line.strip()
            if line == "":
                # Ignore blank lines
                continue
            if line.endswith(":"):
                # Retrieve tag
                # Remove : spaces and eol at the end of line
                clean_line = line.rstrip(": \n\r")
                # The tag starts at second position
                current_tag = "".join(clean_line.split()[1:]).strip()
            elif line.find('---') != -1:
                # Rule line: "<condition> --- <template filename>";
                # the rule applies to the most recently seen tag
                words = line.split('---')
                template = words[-1].strip()
                condition = ''.join(words[:-1])

                output_format['rules'].append({'field': current_tag,
                                               'value': condition,
                                               'template': template,
                                               })

            elif line.find(':') != -1:
                # Default case: template used when no rule matches
                default = line.split(':')[1].strip()
                output_format['default'] = default
    except:
        register_exception()

    # Cache and return
    format_outputs_cache[filename] = output_format
    return output_format
def get_output_format_attrs(code, verbose=0):
    """
    Returns the attributes of an output format.

    The attributes contain 'code', which is the short identifier of the output format
    (to be given as parameter in format_record function to specify the output format),
    'description', a description of the output format, 'visibility' the visibility of
    the format in the output format list on public pages and 'names', the localized names
    of the output format. If 'content_type' is specified then the search_engine will
    send a file with this content type and with result of formatting as content to the user.
    The 'names' dict always contais 'generic', 'ln' (for long name) and 'sn' (for short names)
    keys. 'generic' is the default name for output format. 'ln' and 'sn' contain long and short
    localized names of the output format. Only the languages for which a localization exist
    are used::

        {'names': {'generic':"a name", 'sn':{'en': "a name", 'fr':"un nom"}, 'ln':{'en':"a long name"}}
         'description': "a description"
         'code': "fnm1",
         'content_type': "application/ms-excel",
         'visibility': 1
        }

    @param code: the short identifier of the format
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                       5: errors,
                                                       7: errors and warnings,
                                                       9: errors and warnings, stop if error (debug mode ))
    @return: strucured content of output format attributes
    """
    # Normalize: strip a possible ".<output extension>" suffix from the code
    extension = "." + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION
    if code.endswith(extension):
        code = code[:-len(extension)]

    attrs = {'names': {'generic': "",
                       'ln': {},
                       'sn': {}},
             'description': '',
             'code': code.upper(),
             'content_type': "",
             'visibility': 1}

    # Unknown code: return the default (empty) attributes
    if resolve_output_format_filename(code, verbose) is None:
        return attrs

    attrs['names'] = bibformat_dblayer.get_output_format_names(code)
    attrs['description'] = bibformat_dblayer.get_output_format_description(code)
    attrs['content_type'] = bibformat_dblayer.get_output_format_content_type(code)
    attrs['visibility'] = bibformat_dblayer.get_output_format_visibility(code)

    return attrs
def get_output_formats(with_attributes=False):
    """
    Returns all output formats as a dictionary keyed by filename.

    If 'with_attributes' is true, also returns the names and description of the output formats,
    else 'attrs' is not returned in dicts (it might, if it has already been loaded previously).

    See get_output_format_attrs() to learn more on the attributes::

        {'filename_1.bfo': {'rules': [ {'field': "980__a",
                                        'value': "PREPRINT",
                                        'template': "filename_a.bft",
                                       },
                                       {...}
                                     ],
                            'attrs': {'names': {'generic':"a name", 'sn':{'en': "a name", 'fr':"un nom"}, 'ln':{'en':"a long name"}}
                                      'description': "a description"
                                      'code': "fnm1"
                                     }
                            'default':"filename_b.bft"
                           },
         'filename_2.bfo': {...},
         ...
        }
    @param with_attributes: if returned output formats contain detailed info, or not
    @type with_attributes: boolean
    @return: the list of output formats
    """
    extension = "." + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION
    formats = {}
    for entry in os.listdir(CFG_BIBFORMAT_OUTPUTS_PATH):
        if not entry.endswith(extension):
            continue
        # The output format code is the filename without its extension
        format_code = "".join(entry.split(".")[:-1])
        formats[entry] = get_output_format(format_code, with_attributes)
    return formats
def memoize(obj):
    """
    Decorator caching the results of 'obj' keyed on the string
    representation of its positional and keyword arguments.

    The cache dict is also exposed as the 'cache' attribute of the
    decorated function.
    """
    cache = obj.cache = {}

    @wraps(obj)
    def cached_call(*args, **kwargs):
        cache_key = str(args) + str(kwargs)
        try:
            return cache[cache_key]
        except KeyError:
            result = obj(*args, **kwargs)
            cache[cache_key] = result
            return result
    return cached_call
@memoize
def resolve_format_element_filename(element_name):
    """
    Returns the filename of the element corresponding to 'element_name'.

    This is necessary since format templates code call
    elements by ignoring case, for eg. <BFE_AUTHOR> is the
    same as <BFE_author>.

    It is also recommended that format elements filenames are
    prefixed with bfe_ . We need to look for these too.

    @param element_name: a name for a format element
    @return: the corresponding filename, with right case, or None
    """
    # Canonical uppercase target; append ".PY" unless the caller already
    # gave a ".py" extension
    wanted = element_name.replace(" ", "_").upper()
    if not element_name.endswith(".py"):
        wanted += ".PY"

    for candidate in os.listdir(CFG_BIBFORMAT_ELEMENTS_PATH):
        normalized = candidate.replace(" ", "_").upper()
        # Match with or without the "bfe_" prefix, on either side
        if normalized == wanted or \
               normalized == "BFE_" + wanted or \
               "BFE_" + normalized == wanted:
            return candidate

    # No element with that name found.
    # Do not log an error: the element may legitimately live in the
    # database instead of on disk.
    return None
@memoize
def resolve_output_format_filename(code, verbose=0):
    """
    Returns the filename of output corresponding to code.

    This is necessary since output formats names are not case sensitive
    but most file systems are.

    @param code: the code for an output format
    @param verbose: the level of verbosity from 0 to 9 (O: silent,
                                                       5: errors,
                                                       7: errors and warnings,
                                                       9: errors and warnings, stop if error (debug mode ))
    @return: the corresponding filename, with right case, or None if not found
    """
    _ = gettext_set_language(CFG_SITE_LANG)
    #Remove non alphanumeric chars (except . and _)
    code = re.sub(r"[^.0-9a-zA-Z_]", "", code)
    if not code.endswith("."+CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION):
        code = re.sub(r"\W", "", code)
        code += "."+CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION

    files = os.listdir(CFG_BIBFORMAT_OUTPUTS_PATH)
    for filename in files:
        if filename.upper() == code.upper():
            return filename

    # No output format with that name found: log the error and return
    # None, as documented and as callers (get_output_format,
    # get_output_format_attrs) expect. An unconditional raise here left
    # the code below unreachable with an undefined 'exc'.
    exc = InvenioBibFormatError(_('Could not find output format named %s.') % code)
    try:
        raise exc
    except InvenioBibFormatError:
        register_exception()
    if verbose >= 5:
        sys.stderr.write(exc.message)
        if verbose >= 9:
            sys.exit(exc.message)
    return None
def get_fresh_format_template_filename(name):
    """
    Returns a new filename and name for template with given name.

    Used when writing a new template to a file, so that the name
    has no space, is unique in template directory.

    Returns (unique_filename, modified_name)

    @param name: name for a format template
    @return: the corresponding filename, and modified name if necessary
    """
    #name = re.sub(r"\W", "", name) #Remove non alphanumeric chars
    name = name.replace(" ", "_")
    # Remove non alphanumeric chars (except .)
    filename = re.sub(r"[^.0-9a-zA-Z]", "", name)
    extension = "." + CFG_BIBFORMAT_FORMAT_TEMPLATE_EXTENSION
    path = CFG_BIBFORMAT_TEMPLATES_PATH + os.sep + filename + extension

    # Append an increasing numeric suffix until the path is free.
    # NOTE: collision candidates are derived from the (unsanitized)
    # underscored name, matching the historical behaviour.
    index = 1
    while os.path.exists(path):
        index += 1
        filename = name + str(index)
        path = CFG_BIBFORMAT_TEMPLATES_PATH + os.sep + filename + extension

    suffix = str(index) if index > 1 else ""
    returned_name = (name + suffix).replace("_", " ")

    return (filename + extension,
            returned_name) #filename.replace("_", " "))
def get_fresh_output_format_filename(code):
    """
    Returns a new filename for output format with given code.

    Used when writing a new output format to a file, so that the code
    has no space, is unique in output format directory. The filename
    also need to be at most 6 chars long, as the convention is that
    filename == output format code (+ .extension)
    We return an uppercase code

    Returns (unique_filename, modified_code)

    @param code: the code of an output format
    @return: the corresponding filename, and modified code if necessary
    """
    _ = gettext_set_language(CFG_SITE_LANG)

    #code = re.sub(r"\W", "", code) #Remove non alphanumeric chars
    code = code.upper().replace(" ", "_")
    # Remove non alphanumeric chars (except . and _)
    code = re.sub(r"[^.0-9a-zA-Z_]", "", code)
    if len(code) > 6:
        code = code[:6]

    filename = code
    path = CFG_BIBFORMAT_OUTPUTS_PATH + os.sep + filename \
           + "." + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION
    index = 2
    while os.path.exists(path):
        # Trim the code so code + numeric suffix stays within 6 chars
        filename = code + str(index)
        if len(filename) > 6:
            filename = code[:-(len(str(index)))]+str(index)
        index += 1
        path = CFG_BIBFORMAT_OUTPUTS_PATH + os.sep + filename \
               + "." + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION
        # We should not try more than 99999... Well I don't see how we
        # could get there.. Sanity check.
        if index >= 99999:
            try:
                raise InvenioBibFormatError(_('Could not find a fresh name for output format %s.') % code)
            except InvenioBibFormatError:
                register_exception()

            sys.exit("Output format cannot be named as %s" % code)

    return (filename + "." + CFG_BIBFORMAT_FORMAT_OUTPUT_EXTENSION, filename)
def clear_caches():
    """
    Clear the caches (Output Format, Format Templates and Format Elements)

    @return: None
    """
    global format_templates_cache, format_elements_cache, format_outputs_cache
    # Rebind all three module-level caches to fresh empty dicts in one go.
    format_templates_cache, format_elements_cache, format_outputs_cache = \
        {}, {}, {}
class BibFormatObject(object):
    """
    An object that encapsulates a record and associated methods, and that is given
    as parameter to all format elements 'format' function.
    The object is made specifically for a given formatting, i.e. it includes
    for example the language for the formatting.

    The object provides basic accessors to the record. For full access, one can get
    the record with get_record() and then use BibRecord methods on the returned object.
    """
    # The record
    record = None
    # The language in which the formatting has to be done
    lang = CFG_SITE_LANG
    # A list of string describing the context in which the record has
    # to be formatted.
    # It represents the words of the user request in web interface search
    search_pattern = []
    # The id of the record
    recID = 0
    # The information about the user, as returned by
    # 'webuser.collect_user_info(req)'
    user_info = None
    # The format in which the record is being formatted
    output_format = ''

    req = None # DEPRECATED: use bfo.user_info instead. Used by WebJournal.

    def __init__(self, recID, ln=CFG_SITE_LANG, search_pattern=None,
                 xml_record=None, user_info=None, output_format=''):
        """
        Creates a new bibformat object, with given record.

        You can either specify an record ID to format, or give its xml representation.
        if 'xml_record' is not None, use 'xml_record' instead of recID for the record.

        'user_info' allows to grant access to some functionalities on
        a page depending on the user's priviledges. It is a dictionary
        in the following form::

            user_info = {
                'remote_ip' : '',
                'remote_host' : '',
                'referer' : '',
                'uri' : '',
                'agent' : '',
                'uid' : -1,
                'nickname' : '',
                'email' : '',
                'group' : [],
                'guest' : '1'
                }

        @param recID: the id of a record
        @param ln: the language in which the record has to be formatted
        @param search_pattern: list of string representing the request used by the user in web interface
        @param xml_record: a xml string of the record to format
        @param user_info: the information of the user who will view the formatted page
        @param output_format: the output_format used for formatting this record
        """
        self.xml_record = None # *Must* remain empty if recid is given
        if xml_record is not None:
            # If record is given as parameter
            self.xml_record = xml_record
            self.record = create_record(xml_record)[0]
            # If the XML carries a 001 controlfield, trust it as the recID.
            if '001' in self.record:
                recID = int(record_get_field_value(self.record, "001"))

        self.lang = wash_language(ln)

        # Avoid a shared mutable default: fall back to a fresh list.
        if search_pattern is None:
            search_pattern = []
        self.search_pattern = search_pattern

        try:
            assert isinstance(recID, (int, long, type(None))), 'Argument of wrong type!'
        except AssertionError:
            # Wrong type: report to the admin with a stack trace, then
            # coerce so the rest of the code can rely on an int.
            from StringIO import StringIO
            stream = StringIO()
            traceback.print_stack(file=stream)
            prefix = stream.getvalue()
            prefix += "\nrecid needs to be an integer in BibFormatObject"
            register_exception(prefix=prefix,
                               alert_admin=True)
            recID = int(recID)
        self.recID = recID
        self.output_format = output_format
        self.user_info = user_info
        if self.user_info is None:
            self.user_info = collect_user_info(None)

    def get_record(self):
        """
        Returns the record structure of this L{BibFormatObject} instance

        @return: the record structure as defined by BibRecord library
        """
        from invenio.search_engine import get_record

        # Create record if necessary
        if self.record is None:
            # on-the-fly creation if current output is xm
            self.record = get_record(self.recID)

        return self.record

    def control_field(self, tag, escape=0):
        """
        Returns the value of control field given by tag in record

        @param tag: the marc code of a field
        @param escape: 1 if returned value should be escaped. Else 0.
        @return: value of field tag in record
        """
        if self.get_record() is None:
            #Case where BibRecord could not parse object
            return ''

        p_tag = parse_tag(tag)

        field_value = record_get_field_value(self.get_record(),
                                             p_tag[0],
                                             p_tag[1],
                                             p_tag[2],
                                             p_tag[3])
        if escape == 0:
            return field_value
        else:
            return escape_field(field_value, escape)

    def field(self, tag, escape=0):
        """
        Returns the value of the field corresponding to tag in the
        current record.

        If the value does not exist, return empty string.  Else
        returns the same as bfo.fields(..)[0] (see docstring below).

        'escape' parameter allows to escape special characters
        of the field. The value of escape can be:
        0. no escaping
        1. escape all HTML characters
        2. remove unsafe HTML tags (Eg. keep <br />)
        3. Mix of mode 1 and 2. If value of field starts with
        <!-- HTML -->, then use mode 2. Else use mode 1.
        4. Remove all HTML tags
        5. Same as 2, with more tags allowed (like <img>)
        6. Same as 3, with more tags allowed (like <img>)
        7. Mix of mode 0 and mode 1. If field_value
        starts with <!--HTML-->, then use mode 0.
        Else use mode 1.
        8. Same as mode 1, but also escape double-quotes
        9. Same as mode 4, but also escape double-quotes

        @param tag: the marc code of a field
        @param escape: 1 if returned value should be escaped. Else 0. (see above for other modes)
        @return: value of field tag in record
        """
        list_of_fields = self.fields(tag)
        if len(list_of_fields) > 0:
            # Escaping below
            if escape == 0:
                return list_of_fields[0]
            else:
                return escape_field(list_of_fields[0], escape)
        else:
            return ""

    def fields(self, tag, escape=0, repeatable_subfields_p=False):
        """
        Returns the list of values corresonding to "tag".

        If tag has an undefined subcode (such as 999C5),
        the function returns a list of dictionaries, whoose keys
        are the subcodes and the values are the values of tag.subcode.
        If the tag has a subcode, simply returns list of values
        corresponding to tag.
        Eg. for given MARC::
            999C5 $a value_1a $b value_1b
            999C5 $b value_2b
            999C5 $b value_3b $b value_3b_bis

            >>> bfo.fields('999C5b')
            >>> ['value_1b', 'value_2b', 'value_3b', 'value_3b_bis']
            >>> bfo.fields('999C5')
            >>> [{'a':'value_1a', 'b':'value_1b'},
                {'b':'value_2b'},
                {'b':'value_3b'}]

        By default the function returns only one value for each
        subfield (that is it considers that repeatable subfields are
        not allowed). It is why in the above example 'value3b_bis' is
        not shown for bfo.fields('999C5').  (Note that it is not
        defined which of value_3b or value_3b_bis is returned).  This
        is to simplify the use of the function, as most of the time
        subfields are not repeatable (in that way we get a string
        instead of a list).  You can allow repeatable subfields by
        setting 'repeatable_subfields_p' parameter to True. In
        this mode, the above example would return:
            >>> bfo.fields('999C5b', repeatable_subfields_p=True)
            >>> ['value_1b', 'value_2b', 'value_3b']
            >>> bfo.fields('999C5', repeatable_subfields_p=True)
            >>> [{'a':['value_1a'], 'b':['value_1b']},
                {'b':['value_2b']},
                {'b':['value_3b', 'value3b_bis']}]

        NOTICE THAT THE RETURNED STRUCTURE IS DIFFERENT.  Also note
        that whatever the value of 'repeatable_subfields_p' is,
        bfo.fields('999C5b') always show all fields, even repeatable
        ones. This is because the parameter has no impact on the
        returned structure (it is always a list).

        'escape' parameter allows to escape special characters
        of the fields. The value of escape can be:
        0. No escaping
        1. Escape all HTML characters
        2. Remove unsafe HTML tags (Eg. keep <br />)
        3. Mix of mode 1 and 2. If value of field starts with
        <!-- HTML -->, then use mode 2. Else use mode 1.
        4. Remove all HTML tags
        5. Same as 2, with more tags allowed (like <img>)
        6. Same as 3, with more tags allowed (like <img>)
        7. Mix of mode 0 and mode 1. If field_value
        starts with <!--HTML-->, then use mode 0.
        Else use mode 1.
        8. Same as mode 1, but also escape double-quotes
        9. Same as mode 4, but also escape double-quotes

        @param tag: the marc code of a field
        @param escape: 1 if returned values should be escaped. Else 0.
        @param repeatable_subfields_p: if True, returns the list of subfields in the dictionary
        @return: values of field tag in record
        """
        if self.get_record() is None:
            # Case where BibRecord could not parse object
            return []

        p_tag = parse_tag(tag)
        if p_tag[3] != "":
            # Subcode has been defined. Simply returns list of values
            values = record_get_field_values(self.get_record(),
                                             p_tag[0],
                                             p_tag[1],
                                             p_tag[2],
                                             p_tag[3])
            if escape == 0:
                return values
            else:
                return [escape_field(value, escape) for value in values]

        else:
            # Subcode is undefined. Returns list of dicts.
            # However it might be the case of a control field.

            instances = record_get_field_instances(self.get_record(),
                                                   p_tag[0],
                                                   p_tag[1],
                                                   p_tag[2])
            if repeatable_subfields_p:
                # One list of values per subfield code.
                list_of_instances = []
                for instance in instances:
                    instance_dict = {}
                    for subfield in instance[0]:
                        if subfield[0] not in instance_dict:
                            instance_dict[subfield[0]] = []
                        if escape == 0:
                            instance_dict[subfield[0]].append(subfield[1])
                        else:
                            instance_dict[subfield[0]].append(escape_field(subfield[1], escape))
                    list_of_instances.append(instance_dict)
                return list_of_instances
            else:
                # dict() keeps only one value per subfield code.
                if escape == 0:
                    return [dict(instance[0]) for instance in instances]
                else:
                    return [dict([(subfield[0], escape_field(subfield[1], escape))
                                   for subfield in instance[0]])
                            for instance in instances]

    def kb(self, kb, string, default=""):
        """
        Returns the value of the "string" in the knowledge base "kb".

        If kb does not exist or string does not exist in kb,
        returns 'default' string or empty string if not specified.

        @param kb: a knowledge base name
        @param string: the string we want to translate
        @param default: a default value returned if 'string' not found in 'kb'
        @return: a string value corresponding to translated input with given kb
        """
        if not string:
            return default

        val = get_kbr_values(kb, searchkey=string, searchtype='e')

        try:
            return val[0][0]
        except IndexError:
            # No mapping found for 'string' in 'kb'
            return default

    def __repr__(self):
        """
        Representation of the BibFormatObject. Useful for debugging.
        """
        return "<BibFormatObject(recid=%r,lang=%r,search_pattern=%r,output_format=%r," \
               "user_info=%r, record=%r)" % (self.recID,
                                             self.lang,
                                             self.search_pattern,
                                             self.output_format,
                                             self.user_info,
                                             self.record)
# Utility functions
##
def escape_field(value, mode=0):
    """
    Utility function used to escape the value of a field in given mode.

      - mode 0: no escaping
      - mode 1: escaping all HTML/XML characters (escaped chars are shown as escaped)
      - mode 2: escaping unsafe HTML tags to avoid XSS, but
        keep basic one (such as <br />)
        Escaped tags are removed.
      - mode 3: mix of mode 1 and mode 2. If field_value starts with <!--HTML-->,
        then use mode 2. Else use mode 1.
      - mode 4: escaping all HTML/XML tags (escaped tags are removed)
      - mode 5: same as 2, but allows more tags, like <img>
      - mode 6: same as 3, but allows more tags, like <img>
      - mode 7: mix of mode 0 and mode 1. If field_value starts with <!--HTML-->,
        then use mode 0. Else use mode 1.
      - mode 8: same as mode 1, but also escape double-quotes
      - mode 9: same as mode 4, but also escape double-quotes
      - mode 10: escape for inclusion into TeX

    @param value: value to escape
    @param mode: escaping mode to use
    @return: an escaped version of X{value} according to chosen X{mode}
    """
    if mode == 1:
        return cgi.escape(value)
    elif mode == 8:
        # quote=True also turns '"' into '&quot;'
        return cgi.escape(value, True)
    elif mode in [2, 5]:
        return _wash_html(value, extended=(mode == 5))
    elif mode in [3, 6]:
        # Only wash when the value declares itself as HTML; otherwise escape.
        if value.lstrip(' \n').startswith(html_field):
            return _wash_html(value, extended=(mode == 6))
        else:
            return cgi.escape(value)
    elif mode in [4, 9]:
        try:
            out = washer.wash(value,
                              allowed_attribute_whitelist=[],
                              allowed_tag_whitelist=[]
                              )
            if mode == 9:
                # Bug fix: previously this replaced '"' with itself (a
                # no-op), so mode 9 never escaped double-quotes as its
                # contract promises.
                out = out.replace('"', '&quot;')
            return out
        except HTMLParseError:
            # Parsing failed: fall back to plain escaping
            if mode == 4:
                return cgi.escape(value)
            else:
                return cgi.escape(value, True)
    elif mode == 7:
        if value.lstrip(' \n').startswith(html_field):
            return value
        else:
            return cgi.escape(value)
    elif mode == 10:
        return escape_latex(value)
    else:
        return value


def _wash_html(value, extended=False):
    """
    Wash HTML in 'value', keeping only whitelisted tags/attributes.

    @param value: HTML string to wash
    @param extended: if True, additionally allow img/table/etc. tags and
        their attributes (modes 5 and 6); else only the basic whitelist
        (modes 2 and 3)
    @return: washed HTML, or fully escaped value if parsing failed
    """
    allowed_attribute_whitelist = CFG_HTML_BUFFER_ALLOWED_ATTRIBUTE_WHITELIST
    allowed_tag_whitelist = CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST + \
                            ('class',)
    if extended:
        allowed_attribute_whitelist += ('src', 'alt',
                                        'width', 'height',
                                        'style', 'summary',
                                        'border', 'cellspacing',
                                        'cellpadding')
        allowed_tag_whitelist += ('img', 'table', 'td',
                                  'tr', 'th', 'span', 'caption')
    try:
        return washer.wash(value,
                           allowed_attribute_whitelist=
                           allowed_attribute_whitelist,
                           allowed_tag_whitelist=
                           allowed_tag_whitelist
                           )
    except HTMLParseError:
        # Parsing failed
        return cgi.escape(value)
def filter_hidden_fields(recxml, user_info=None, filter_tags=CFG_BIBFORMAT_HIDDEN_TAGS,
                         force_filtering=False):
    """
    Filter out tags specified by filter_tags from MARCXML. If the user
    is allowed to run bibedit, then filter nothing, unless
    force_filtering is set to True.

    @param recxml: marcxml presentation of the record
    @param user_info: user information; if None, then assume invoked via CLI with all rights
    @param filter_tags: list of MARC tags to be filtered
    @param force_filtering: do we force filtering regardless of user rights?
    @return: recxml without the hidden fields
    """
    if not force_filtering:
        if user_info is None:
            # CLI invocation: grant full rights, nothing to filter
            return recxml
        if (acc_authorize_action(user_info, 'runbibedit')[0] == 0):
            # User may run bibedit: no need to filter
            return recxml
    # Drop every datafield block whose tag is in filter_tags.
    kept_lines = []
    omit = False
    for line in recxml.splitlines(True):
        # Check if this line opens a block that must be omitted.
        if any('datafield tag="' + str(htag) + '"' in line
               for htag in filter_tags):
            omit = True
        if not omit:
            kept_lines.append(line)
        if omit and ('</datafield>' in line or '</marc:datafield>' in line):
            omit = False
    return "".join(kept_lines)
def bf_profile():
    """
    Runs a benchmark

    @return: None
    """
    # Format the first 50 records with the HD output format, max verbosity.
    for recid in range(1, 51):
        format_record(recid, "HD", ln=CFG_SITE_LANG, verbose=9,
                      search_pattern=[])
if __name__ == "__main__":
    import profile
    import pstats
    #bf_profile()
    # Profile the benchmark run, dump the raw stats to a file, then print
    # them sorted by cumulative time with directory prefixes stripped.
    profile.run('bf_profile()', "bibformat_profile")
    p = pstats.Stats("bibformat_profile")
    p.strip_dirs().sort_stats("cumulative").print_stats()
| gpl-2.0 |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/genericpath.py | 246 | 3015 | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links"""
    try:
        os.stat(path)
    except os.error:
        return False
    else:
        return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path ono systems that support symlinks
def isfile(path):
    """Test whether a path is a regular file"""
    try:
        mode = os.stat(path).st_mode
    except os.error:
        return False
    return stat.S_ISREG(mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        mode = os.stat(s).st_mode
    except os.error:
        return False
    return stat.S_ISDIR(mode)
def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_size
def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_mtime
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_atime
def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    st = os.stat(filename)
    return st.st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic min and max disagree earliest among all elements,
    # so comparing just these two suffices.
    lo, hi = min(m), max(m)
    for i, (a, b) in enumerate(zip(lo, hi)):
        if a != b:
            return lo[:i]
    return lo
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, ''
| gpl-2.0 |
0k/OpenUpgrade | addons/pos_discount/__init__.py | 315 | 1072 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import discount
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yaoxiaoyong/CocosBuilder | CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 48 | 73263 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSVersion
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
# Mapping from gyp variable names to the (possibly deferred) values the
# ninja generator substitutes for them.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_PREFIX': 'lib',

  # Gyp expects the following variables to be expandable by the build
  # system to the appropriate locations.  Ninja prefers paths to be
  # known at gyp time.  To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows it
  # should be treated specially, but is otherwise an invalid
  # ninja/shell variable) that are passed to gyp here but expanded
  # before writing out into the target .ninja files; see
  # ExpandSpecial.
  # $! is used for variables that represent a path and that can only appear at
  # the start of a string, while $| is used for variables that can appear
  # anywhere in a string.
  'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
  'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
  'PRODUCT_DIR': '$!PRODUCT_DIR',
  'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',

  # Special variables that may be used by gyp 'rule' targets.
  # We generate definitions for these variables on the fly when processing a
  # rule.
  'RULE_INPUT_ROOT': '${root}',
  'RULE_INPUT_DIRNAME': '${dirname}',
  'RULE_INPUT_PATH': '${source}',
  'RULE_INPUT_EXT': '${ext}',
  'RULE_INPUT_NAME': '${name}',
}

# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []

# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
  os.environ.get('GYP_CROSSCOMPILE') or
  os.environ.get('AR_host') or
  os.environ.get('CC_host') or
  os.environ.get('CXX_host') or
  os.environ.get('AR_target') or
  os.environ.get('CC_target') or
  os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
  """Return arg with prefix removed from its front, if present."""
  return arg[len(prefix):] if arg.startswith(prefix) else arg
def QuoteShellArgument(arg, flavor):
  """Quote a string such that it will be interpreted as a single argument
  by the shell."""
  # Whitelist of characters that never need quoting; anything else is quoted.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    return arg
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  # POSIX shells: wrap in single quotes; an embedded single quote becomes
  # '"'"' (close quote, double-quoted quote, reopen quote).
  return "'%s'" % arg.replace("'", "'\"'\"'")
def Define(d, flavor):
  """Takes a preprocessor define and returns a -D parameter that's ninja- and
  shell-escaped."""
  if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocesor definitions
    # for some reason.  Octal-encode to work around that.
    d = d.replace('#', '\\%03o' % ord('#'))
  escaped = ninja_syntax.escape('-D' + d)
  return QuoteShellArgument(escaped, flavor)
def InvertRelativePath(path):
  """Given a relative path like foo/bar, return the inverse relative path:
  the path from the relative path back to the origin dir.

  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string."""
  if not path:
    return path
  # Only need to handle relative paths into subdirectories for now.
  assert '..' not in path, path
  # One '..' per path component.
  depth = path.count(os.path.sep) + 1
  return os.path.sep.join(['..'] * depth)
class Target:
  """Target represents the paths used within a single gyp target.

  Conceptually, building a single target A is a series of steps:

  1) actions/rules/copies  generates source/resources/etc.
  2) compiles              generates .o files
  3) link                  generates a binary (library/executable)
  4) bundle                merges the above in a mac bundle

  (Any of these steps can be optional.)

  From a build ordering perspective, a dependent target B could just
  depend on the last output of this series of steps.

  But some dependent commands sometimes need to reach inside the box.
  For example, when linking B it needs to get the path to the static
  library generated by A.

  This object stores those paths.  To keep things simple, member
  variables only store concrete paths to single files, while methods
  compute derived values like "the last output of the target".
  """
  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # File representing whether any input dependencies necessary for
    # dependent actions have completed.
    self.preaction_stamp = None
    # File representing whether any input dependencies necessary for
    # dependent compiles have completed.
    self.precompile_stamp = None
    # File representing the completion of actions/rules/copies, if any.
    self.actions_stamp = None
    # Path to the output of the link step, if any.
    self.binary = None
    # Path to the file representing the completion of building the bundle,
    # if any.
    self.bundle = None
    # On Windows, incremental linking requires linking against all the .objs
    # that compose a .lib (rather than the .lib itself). That list is stored
    # here.
    self.component_objs = None
    # Windows only. The import .lib is the output of a build step, but
    # because dependents only link against the lib (not both the lib and the
    # dll) we keep track of the import library here.
    self.import_lib = None

  def Linkable(self):
    """Return true if this is a target that can be linked against."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """Return true if the target should produce a restat rule based on a TOC
    file."""
    # For bundles, the .TOC should be produced for the binary, not for
    # FinalOutput(). But the naive approach would put the TOC file into the
    # bundle, so don't do this for bundles for now.
    if flavor == 'win' or self.bundle:
      return False
    return self.type in ('shared_library', 'loadable_module')

  def PreActionInput(self, flavor):
    """Return the path, if any, that should be used as a dependency of
    any dependent action step."""
    if self.UsesToc(flavor):
      return self.FinalOutput() + '.TOC'
    return self.FinalOutput() or self.preaction_stamp

  def PreCompileInput(self):
    """Return the path, if any, that should be used as a dependency of
    any dependent compile step."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """Return the last output of the target, which depends on all prior
    steps."""
    return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter:
  def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
               output_file, flavor, abs_build_dir=None):
    """
    base_dir: path from source root to directory containing this gyp file,
              by gyp semantics, all input paths are relative to this
    build_dir: path from source root to build output
    abs_build_dir: absolute path to the build directory
    """

    self.qualified_target = qualified_target
    self.target_outputs = target_outputs
    self.base_dir = base_dir
    self.build_dir = build_dir
    # All rules for this target are emitted through this ninja file writer.
    self.ninja = ninja_syntax.Writer(output_file)
    self.flavor = flavor
    self.abs_build_dir = abs_build_dir
    self.obj_ext = '.obj' if flavor == 'win' else '.o'
    if flavor == 'win':
      # See docstring of msvs_emulation.GenerateEnvironmentFiles().
      # Maps target architecture to the name of its environment file.
      self.win_env = {}
      for arch in ('x86', 'x64'):
        self.win_env[arch] = 'environment.' + arch

    # Relative path from build output dir to base dir.
    self.build_to_base = os.path.join(InvertRelativePath(build_dir), base_dir)
    # Relative path from base dir to build dir.
    self.base_to_build = os.path.join(InvertRelativePath(base_dir), build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
  def WriteCollapsedDependencies(self, name, targets):
    """Given a list of targets, return a path for a single file
    representing the result of building all the targets or None.

    Uses a stamp file if necessary."""

    # Python 2: filter() returns a list, so this asserts there are no
    # falsy (None/empty) entries in |targets|.
    assert targets == filter(None, targets), targets
    if len(targets) == 0:
      return None
    if len(targets) > 1:
      # Collapse multiple dependencies into one stamp-file build edge.
      stamp = self.GypPathToUniqueOutput(name + '.stamp')
      targets = self.ninja.build(stamp, 'stamp', targets)
      self.ninja.newline()
    return targets[0]
  def WriteSpec(self, spec, config_name, generator_flags,
                case_sensitive_filesystem):
    """The main entry point for NinjaWriter: write the build rules for a spec.

    Orchestrates, in order: per-flavor settings objects, predependency
    stamps, actions/rules/copies, source compilation, the link step, and
    (on mac) final bundling.

    Returns a Target object, which represents the output paths for this spec.
    Returns None if there are no outputs (e.g. a settings-only 'none' type
    target)."""
    self.config_name = config_name
    self.name = spec['target_name']
    self.toolset = spec['toolset']
    config = spec['configurations'][config_name]
    self.target = Target(spec['type'])
    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    # Exactly one of these is populated, depending on flavor.
    self.xcode_settings = self.msvs_settings = None
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    if self.flavor == 'win':
      self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
                                                           generator_flags)
      target_platform = self.msvs_settings.GetTargetPlatform(config_name)
      self.ninja.variable('arch', self.win_env[target_platform])
    # Compute predepends for all rules.
    # actions_depends is the dependencies this target depends on before running
    # any of its action/rule/copy steps.
    # compile_depends is the dependencies this target depends on before running
    # any of its compile steps.
    actions_depends = []
    compile_depends = []
    # TODO(evan): it is rather confusing which things are lists and which
    # are strings. Fix these.
    if 'dependencies' in spec:
      for dep in spec['dependencies']:
        if dep in self.target_outputs:
          target = self.target_outputs[dep]
          actions_depends.append(target.PreActionInput(self.flavor))
          compile_depends.append(target.PreCompileInput())
      # Drop None entries before collapsing into a single stamp each.
      actions_depends = filter(None, actions_depends)
      compile_depends = filter(None, compile_depends)
      actions_depends = self.WriteCollapsedDependencies('actions_depends',
                                                        actions_depends)
      compile_depends = self.WriteCollapsedDependencies('compile_depends',
                                                        compile_depends)
      self.target.preaction_stamp = actions_depends
      self.target.precompile_stamp = compile_depends
    # Write out actions, rules, and copies. These must happen before we
    # compile any sources, so compute a list of predependencies for sources
    # while we do it.
    extra_sources = []
    mac_bundle_depends = []
    self.target.actions_stamp = self.WriteActionsRulesCopies(
        spec, extra_sources, actions_depends, mac_bundle_depends)
    # If we have actions/rules/copies, we depend directly on those, but
    # otherwise we depend on dependent target's actions/rules/copies etc.
    # We never need to explicitly depend on previous target's link steps,
    # because no compile ever depends on them.
    compile_depends_stamp = (self.target.actions_stamp or compile_depends)
    # Write out the compilation steps, if any.
    link_deps = []
    sources = spec.get('sources', []) + extra_sources
    if sources:
      pch = None
      if self.flavor == 'win':
        gyp.msvs_emulation.VerifyMissingSources(
            sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
        pch = gyp.msvs_emulation.PrecompiledHeader(
            self.msvs_settings, config_name, self.GypPathToNinja)
      else:
        pch = gyp.xcode_emulation.MacPrefixHeader(
            self.xcode_settings, self.GypPathToNinja,
            lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
      link_deps = self.WriteSources(
          config_name, config, sources, compile_depends_stamp, pch,
          case_sensitive_filesystem)
      # Some actions/rules output 'sources' that are already object files.
      link_deps += [self.GypPathToNinja(f)
                    for f in sources if f.endswith(self.obj_ext)]
    if self.flavor == 'win' and self.target.type == 'static_library':
      self.target.component_objs = link_deps
    # Write out a link step, if needed.
    output = None
    if link_deps or self.target.actions_stamp or actions_depends:
      output = self.WriteTarget(spec, config_name, config, link_deps,
                                self.target.actions_stamp or actions_depends)
      if self.is_mac_bundle:
        mac_bundle_depends.append(output)
    # Bundle all of the above together, if needed.
    if self.is_mac_bundle:
      output = self.WriteMacBundle(spec, mac_bundle_depends)
    if not output:
      return None
    assert self.target.FinalOutput(), output
    return self.target
  def _WinIdlRule(self, source, prebuild, outputs):
    """Handle the implicit VS .idl rule for one source file. Fills |outputs|
    with files that are generated."""
    # NOTE(review): the locals 'vars' and 'input' shadow Python builtins;
    # kept as-is to preserve the code byte-for-byte.
    outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
        source, self.config_name)
    outdir = self.GypPathToNinja(outdir)
    def fix_path(path, rel=None):
      # Anchor |path| under the idl output dir, then expand the MSVS rule
      # variables ($(InputName) etc.) relative to the enclosing |source|.
      path = os.path.join(outdir, path)
      dirname, basename = os.path.split(source)
      root, ext = os.path.splitext(basename)
      path = self.ExpandRuleVariables(
          path, root, dirname, source, ext, basename)
      if rel:
        path = os.path.relpath(path, rel)
      return path
    # Variable values are stored relative to outdir; outputs stay absolute
    # to the build dir.
    vars = [(name, fix_path(value, outdir)) for name, value in vars]
    output = [fix_path(p) for p in output]
    vars.append(('outdir', outdir))
    vars.append(('idlflags', flags))
    input = self.GypPathToNinja(source)
    self.ninja.build(output, 'idl', input,
                     variables=vars, order_only=prebuild)
    outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
  def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
                              mac_bundle_depends):
    """Write out the Actions, Rules, and Copies steps.  Return a path
    representing the outputs of these steps.

    |extra_sources| and |mac_bundle_depends| are output parameters: they are
    appended to in place so the caller sees generated sources and bundle
    dependencies.  |prebuild| is the order-only predependency for each step.
    """
    outputs = []
    extra_mac_bundle_resources = []
    if 'actions' in spec:
      outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
                                   extra_mac_bundle_resources)
    if 'rules' in spec:
      outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
                                 extra_mac_bundle_resources)
    if 'copies' in spec:
      outputs += self.WriteCopies(spec['copies'], prebuild)
    if 'sources' in spec and self.flavor == 'win':
      # MSVS implicitly compiles .idl files; emulate that here.
      outputs += self.WriteWinIdlFiles(spec, prebuild)
    # Collapse everything into one stamp downstream edges can depend on.
    stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
    if self.is_mac_bundle:
      mac_bundle_resources = spec.get('mac_bundle_resources', []) + \
                             extra_mac_bundle_resources
      self.WriteMacBundleResources(mac_bundle_resources, mac_bundle_depends)
      self.WriteMacInfoPlist(mac_bundle_depends)
    return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
  def WriteActions(self, actions, extra_sources, prebuild,
                   extra_mac_bundle_resources):
    """Write one ninja rule + build edge per gyp action.

    Appends generated sources/resources to |extra_sources| /
    |extra_mac_bundle_resources| in place and returns all output paths.
    """
    # Actions cd into the base directory.
    env = self.GetSortedXcodeEnv()
    if self.flavor == 'win':
      env = self.msvs_settings.GetVSMacroEnv(
          '$!PRODUCT_DIR', config=self.config_name)
    all_outputs = []
    for action in actions:
      # First write out a rule for the action.
      # The md5 of the qualified target disambiguates same-named actions
      # in different targets (the ninja rule namespace is global).
      name = '%s_%s' % (action['action_name'],
                        hashlib.md5(self.qualified_target).hexdigest())
      description = self.GenerateDescription('ACTION',
                                             action.get('message', None),
                                             name)
      is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
                   if self.flavor == 'win' else False)
      args = action['action']
      rule_name, _ = self.WriteNewNinjaRule(name, args, description,
                                            is_cygwin, env=env)
      inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += action['outputs']
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += action['outputs']
      outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
      # Then write out an edge using the rule.
      self.ninja.build(outputs, rule_name, inputs,
                       order_only=prebuild)
      all_outputs += outputs
      self.ninja.newline()
    return all_outputs
  def WriteRules(self, rules, extra_sources, prebuild,
                 extra_mac_bundle_resources):
    """Write one ninja rule per gyp rule, and one build edge per rule source.

    Appends generated sources/resources to |extra_sources| /
    |extra_mac_bundle_resources| in place and returns all output paths.
    """
    env = self.GetSortedXcodeEnv()
    all_outputs = []
    for rule in rules:
      # First write out a rule for the rule action.
      name = '%s_%s' % (rule['rule_name'],
                        hashlib.md5(self.qualified_target).hexdigest())
      # Skip a rule with no action and no inputs.
      if 'action' not in rule and not rule.get('rule_sources', []):
        continue
      args = rule['action']
      description = self.GenerateDescription(
          'RULE',
          rule.get('message', None),
          ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
      is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
                   if self.flavor == 'win' else False)
      rule_name, args = self.WriteNewNinjaRule(
          name, args, description, is_cygwin, env=env)
      # TODO: if the command references the outputs directly, we should
      # simplify it to just use $out.
      # Rules can potentially make use of some special variables which
      # must vary per source file.
      # Compute the list of variables we'll need to provide.
      special_locals = ('source', 'root', 'dirname', 'ext', 'name')
      needed_variables = set(['source'])
      for argument in args:
        for var in special_locals:
          if ('${%s}' % var) in argument:
            needed_variables.add(var)
      def cygwin_munge(path):
        # Cygwin commands want forward slashes.
        if is_cygwin:
          return path.replace('\\', '/')
        return path
      # For each source file, write an edge that generates all the outputs.
      for source in rule.get('rule_sources', []):
        dirname, basename = os.path.split(source)
        root, ext = os.path.splitext(basename)
        # Gather the list of inputs and outputs, expanding $vars if possible.
        outputs = [self.ExpandRuleVariables(o, root, dirname,
                                            source, ext, basename)
                   for o in rule['outputs']]
        inputs = [self.ExpandRuleVariables(i, root, dirname,
                                           source, ext, basename)
                  for i in rule.get('inputs', [])]
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
        extra_bindings = []
        for var in needed_variables:
          if var == 'root':
            extra_bindings.append(('root', cygwin_munge(root)))
          elif var == 'dirname':
            extra_bindings.append(('dirname', cygwin_munge(dirname)))
          elif var == 'source':
            # '$source' is a parameter to the rule action, which means
            # it shouldn't be converted to a Ninja path. But we don't
            # want $!PRODUCT_DIR in there either.
            source_expanded = self.ExpandSpecial(source, self.base_to_build)
            extra_bindings.append(('source', cygwin_munge(source_expanded)))
          elif var == 'ext':
            extra_bindings.append(('ext', ext))
          elif var == 'name':
            extra_bindings.append(('name', cygwin_munge(basename)))
          else:
            # Unreachable: needed_variables only contains special_locals.
            assert var == None, repr(var)
        inputs = [self.GypPathToNinja(i, env) for i in inputs]
        outputs = [self.GypPathToNinja(o, env) for o in outputs]
        # unique_name feeds per-source rsp filenames on Windows.
        extra_bindings.append(('unique_name',
                               hashlib.md5(outputs[0]).hexdigest()))
        self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
                         implicit=inputs,
                         order_only=prebuild,
                         variables=extra_bindings)
        all_outputs.extend(outputs)
    return all_outputs
def WriteCopies(self, copies, prebuild):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.xcode_settings, map(self.GypPathToNinja, resources)):
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
  def WriteMacInfoPlist(self, bundle_depends):
    """Write build rules for bundle Info.plist files.

    Appends the written plist path to |bundle_depends| in place; no-op when
    the target has no Info.plist.
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
        self.xcode_settings, self.GypPathToNinja)
    if not info_plist:
      return
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = self.GypPathToUniqueOutput(
          os.path.basename(info_plist))
      defines = ' '.join([Define(d, self.flavor) for d in defines])
      info_plist = self.ninja.build(intermediate_plist, 'infoplist', info_plist,
                                    variables=[('defines',defines)])
    # The copy step runs with the Xcode environment exported so plist
    # variable substitution sees the same values Xcode would set.
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    self.ninja.build(out, 'mac_tool', info_plist,
                     variables=[('mactool_cmd', 'copy-info-plist'),
                                ('env', env)])
    bundle_depends.append(out)
  def WriteSources(self, config_name, config, sources, predepends,
                   precompiled_header, case_sensitive_filesystem):
    """Write build rules to compile all of |sources|.

    Emits the per-target variable assignments (defines, includes, cflags,
    pch flags) followed by one compile edge per recognized source file.
    |predepends| is the order-only prerequisite for every compile.
    Returns the list of object files produced.
    """
    if self.toolset == 'host':
      # Host-toolset compiles use the host toolchain variables.
      self.ninja.variable('ar', '$ar_host')
      self.ninja.variable('cc', '$cc_host')
      self.ninja.variable('cxx', '$cxx_host')
      self.ninja.variable('ld', '$ld_host')
    extra_defines = []
    if self.flavor == 'mac':
      cflags = self.xcode_settings.GetCflags(config_name)
      cflags_c = self.xcode_settings.GetCflagsC(config_name)
      cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
      # ObjC/ObjC++ flags extend the plain C/C++ flags.
      cflags_objc = ['$cflags_c'] + \
                    self.xcode_settings.GetCflagsObjC(config_name)
      cflags_objcc = ['$cflags_cc'] + \
                     self.xcode_settings.GetCflagsObjCC(config_name)
    elif self.flavor == 'win':
      cflags = self.msvs_settings.GetCflags(config_name)
      cflags_c = self.msvs_settings.GetCflagsC(config_name)
      cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
      extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      self.WriteVariableList('pdbname', [self.name + '.pdb'])
      self.WriteVariableList('pchprefix', [self.name])
    else:
      cflags = config.get('cflags', [])
      cflags_c = config.get('cflags_c', [])
      cflags_cc = config.get('cflags_cc', [])
    defines = config.get('defines', []) + extra_defines
    self.WriteVariableList('defines', [Define(d, self.flavor) for d in defines])
    if self.flavor == 'win':
      self.WriteVariableList('rcflags',
          [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
           for f in self.msvs_settings.GetRcflags(config_name,
                                                  self.GypPathToNinja)])
    include_dirs = config.get('include_dirs', [])
    if self.flavor == 'win':
      include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
                                                          config_name)
    self.WriteVariableList('includes',
        [QuoteShellArgument('-I' + self.GypPathToNinja(i), self.flavor)
         for i in include_dirs])
    pch_commands = precompiled_header.GetPchBuildCommands()
    if self.flavor == 'mac':
      self.WriteVariableList('cflags_pch_c',
                             [precompiled_header.GetInclude('c')])
      self.WriteVariableList('cflags_pch_cc',
                             [precompiled_header.GetInclude('cc')])
      self.WriteVariableList('cflags_pch_objc',
                             [precompiled_header.GetInclude('m')])
      self.WriteVariableList('cflags_pch_objcc',
                             [precompiled_header.GetInclude('mm')])
    self.WriteVariableList('cflags', map(self.ExpandSpecial, cflags))
    self.WriteVariableList('cflags_c', map(self.ExpandSpecial, cflags_c))
    self.WriteVariableList('cflags_cc', map(self.ExpandSpecial, cflags_cc))
    if self.flavor == 'mac':
      self.WriteVariableList('cflags_objc', map(self.ExpandSpecial,
                                                cflags_objc))
      self.WriteVariableList('cflags_objcc', map(self.ExpandSpecial,
                                                 cflags_objcc))
    self.ninja.newline()
    outputs = []
    for source in sources:
      filename, ext = os.path.splitext(source)
      ext = ext[1:]
      obj_ext = self.obj_ext
      # Map the source extension to the compile rule; unknown extensions
      # are silently skipped below.
      if ext in ('cc', 'cpp', 'cxx'):
        command = 'cxx'
      elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
        command = 'cc'
      elif ext == 's' and self.flavor != 'win':  # Doesn't generate .o.d files.
        command = 'cc_s'
      elif (self.flavor == 'win' and ext == 'asm' and
            self.msvs_settings.GetTargetPlatform(config_name) == 'Win32'):
        # Asm files only get auto assembled for x86 (not x64).
        command = 'asm'
        # Add the _asm suffix as msvs is capable of handling .cc and
        # .asm files of the same name without collision.
        obj_ext = '_asm.obj'
      elif self.flavor == 'mac' and ext == 'm':
        command = 'objc'
      elif self.flavor == 'mac' and ext == 'mm':
        command = 'objcxx'
      elif self.flavor == 'win' and ext == 'rc':
        command = 'rc'
        obj_ext = '.res'
      else:
        # Ignore unhandled extensions.
        continue
      input = self.GypPathToNinja(source)
      output = self.GypPathToUniqueOutput(filename + obj_ext)
      # Ninja's depfile handling gets confused when the case of a filename
      # changes on a case-insensitive file system. To work around that, always
      # convert .o filenames to lowercase on such file systems. See
      # https://github.com/martine/ninja/issues/402 for details.
      if not case_sensitive_filesystem:
        output = output.lower()
      implicit = precompiled_header.GetObjDependencies([input], [output])
      self.ninja.build(output, command, input,
                       implicit=[gch for _, _, gch in implicit],
                       order_only=predepends)
      outputs.append(output)
    self.WritePchTargets(pch_commands)
    self.ninja.newline()
    return outputs
def WritePchTargets(self, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
if self.flavor == 'win':
map.update({'c': 'cc_pch', 'cc': 'cxx_pch'})
cmd = map.get(lang)
self.ninja.build(gch, cmd, input, variables=[(var_name, lang_flag)])
  def WriteLink(self, spec, config_name, config, link_deps):
    """Write out a link step. Fills out target.binary.

    |link_deps| is mutated in place: linkable dependency outputs are
    appended so they appear on the link line.
    """
    command = {
      'executable': 'link',
      'loadable_module': 'solink_module',
      'shared_library': 'solink',
    }[spec['type']]
    implicit_deps = set()
    solibs = set()
    if 'dependencies' in spec:
      # Two kinds of dependencies:
      # - Linkable dependencies (like a .a or a .so): add them to the link line.
      # - Non-linkable dependencies (like a rule that generates a file
      #   and writes a stamp file): add them to implicit_deps
      extra_link_deps = set()
      for dep in spec['dependencies']:
        target = self.target_outputs.get(dep)
        if not target:
          continue
        linkable = target.Linkable()
        if linkable:
          if (self.flavor == 'win' and
              target.component_objs and
              self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
            extra_link_deps |= set(target.component_objs)
          elif self.flavor == 'win' and target.import_lib:
            extra_link_deps.add(target.import_lib)
          elif target.UsesToc(self.flavor):
            # Link against the .TOC (table of contents) so relinks are
            # skipped when only the shared library's internals change.
            solibs.add(target.binary)
            implicit_deps.add(target.binary + '.TOC')
          else:
            extra_link_deps.add(target.binary)
        final_output = target.FinalOutput()
        if not linkable or final_output != target.binary:
          implicit_deps.add(final_output)
      link_deps.extend(list(extra_link_deps))
    extra_bindings = []
    if self.is_mac_bundle:
      output = self.ComputeMacBundleBinaryOutput()
    else:
      output = self.ComputeOutput(spec)
      extra_bindings.append(('postbuilds',
                             self.GetPostbuildCommand(spec, output, output)))
    # Per-flavor ldflags computation.
    if self.flavor == 'mac':
      ldflags = self.xcode_settings.GetLdflags(config_name,
          self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
          self.GypPathToNinja)
    elif self.flavor == 'win':
      libflags = self.msvs_settings.GetLibFlags(config_name,
                                                self.GypPathToNinja)
      self.WriteVariableList(
          'libflags', gyp.common.uniquer(map(self.ExpandSpecial, libflags)))
      is_executable = spec['type'] == 'executable'
      manifest_name = self.GypPathToUniqueOutput(
          self.ComputeOutputFileName(spec))
      ldflags, manifest_files = self.msvs_settings.GetLdflags(config_name,
          self.GypPathToNinja, self.ExpandSpecial, manifest_name, is_executable)
      self.WriteVariableList('manifests', manifest_files)
    else:
      ldflags = config.get('ldflags', [])
    self.WriteVariableList('ldflags',
                           gyp.common.uniquer(map(self.ExpandSpecial,
                                                  ldflags)))
    libraries = gyp.common.uniquer(map(self.ExpandSpecial,
                                       spec.get('libraries', [])))
    if self.flavor == 'mac':
      libraries = self.xcode_settings.AdjustLibraries(libraries)
    elif self.flavor == 'win':
      libraries = self.msvs_settings.AdjustLibraries(libraries)
    self.WriteVariableList('libs', libraries)
    self.target.binary = output
    if command in ('solink', 'solink_module'):
      extra_bindings.append(('soname', os.path.split(output)[1]))
      extra_bindings.append(('lib',
                            gyp.common.EncodePOSIXShellArgument(output)))
      if self.flavor == 'win':
        extra_bindings.append(('dll', output))
        if '/NOENTRY' not in ldflags:
          # /NOENTRY means no import library is generated.
          self.target.import_lib = output + '.lib'
          extra_bindings.append(('implibflag',
                                 '/IMPLIB:%s' % self.target.import_lib))
          output = [output, self.target.import_lib]
      else:
        output = [output, output + '.TOC']
    if len(solibs):
      extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
    self.ninja.build(output, command, link_deps,
                     implicit=list(implicit_deps),
                     variables=extra_bindings)
  def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
    """Write the final build step for the target and return target.binary.

    'none' targets reuse the compile stamp as a fake binary;
    static libraries get an archive (alink/alink_thin) edge;
    everything else defers to WriteLink.
    """
    if spec['type'] == 'none':
      # TODO(evan): don't call this function for 'none' target types, as
      # it doesn't do anything, and we fake out a 'binary' with a stamp file.
      self.target.binary = compile_deps
    elif spec['type'] == 'static_library':
      self.target.binary = self.ComputeOutput(spec)
      variables = []
      postbuild = self.GetPostbuildCommand(
          spec, self.target.binary, self.target.binary)
      if postbuild:
        variables.append(('postbuilds', postbuild))
      if self.xcode_settings:
        variables.append(('libtool_flags',
                          self.xcode_settings.GetLibtoolflags(config_name)))
      # Thin archives are used where supported, except for standalone
      # static libraries which must be self-contained.
      if (self.flavor not in ('mac', 'win') and not
          self.is_standalone_static_library):
        self.ninja.build(self.target.binary, 'alink_thin', link_deps,
                         order_only=compile_deps, variables=variables)
      else:
        self.ninja.build(self.target.binary, 'alink', link_deps,
                         order_only=compile_deps, variables=variables)
    else:
      self.WriteLink(spec, config_name, config, link_deps)
    return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
postbuild = self.GetPostbuildCommand(spec, output, self.target.binary,
is_command_start=not package_framework)
variables = []
if postbuild:
variables.append(('postbuilds', postbuild))
if package_framework:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
  def GetPostbuildCommand(self, spec, output, output_binary,
                          is_command_start=False):
    """Returns a shell command that runs all the postbuilds, and removes
    |output| if any of them fails. If |is_command_start| is False, then the
    returned string will start with ' && '."""
    if not self.xcode_settings or spec['type'] == 'none' or not output:
      return ''
    output = QuoteShellArgument(output, self.flavor)
    # NOTE(review): |output| is shell-quoted *before* being joined into a
    # path below — presumably harmless for typical unquoted paths, but
    # worth confirming for outputs containing spaces.
    target_postbuilds = self.xcode_settings.GetTargetPostbuilds(
        self.config_name,
        os.path.normpath(os.path.join(self.base_to_build, output)),
        QuoteShellArgument(
            os.path.normpath(os.path.join(self.base_to_build, output_binary)),
            self.flavor),
        quiet=True)
    postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
    postbuilds = target_postbuilds + postbuilds
    if not postbuilds:
      return ''
    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
        ['cd', self.build_to_base]))
    env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-null if any postbuild fails. Run all postbuilds in a
    # subshell.  ($$ is ninja's escape for a literal '$'.)
    commands = env + ' (F=0; ' + \
        ' '.join([ninja_syntax.escape(command) + ' || F=$$?;'
                  for command in postbuilds])
    command_string = (commands + ' exit $$F); G=$$?; '
                      # Remove the final output if any postbuild failed.
                      '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
    if is_command_start:
      return '(' + command_string + ' && '
    else:
      # '$ ' is ninja's escape for a leading space.
      return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = self.ExpandSpecial(generator_default_variables['PRODUCT_DIR'])
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
assert self.is_mac_bundle
path = self.ExpandSpecial(generator_default_variables['PRODUCT_DIR'])
return os.path.join(path, self.xcode_settings.GetExecutablePath())
  def ComputeOutputFileName(self, spec, type=None):
    """Compute the filename of the final output for the current target.

    |type| defaults to spec['type'].  The name is built from a per-type
    prefix, the product/target name, and a per-type extension, all
    overridable via product_prefix / product_name / product_extension.
    """
    if not type:
      type = spec['type']
    default_variables = copy.copy(generator_default_variables)
    CalculateVariables(default_variables, {'flavor': self.flavor})
    # Compute filename prefix: the product prefix, or a default for
    # the product type.
    DEFAULT_PREFIX = {
      'loadable_module': default_variables['SHARED_LIB_PREFIX'],
      'shared_library': default_variables['SHARED_LIB_PREFIX'],
      'static_library': default_variables['STATIC_LIB_PREFIX'],
      'executable': default_variables['EXECUTABLE_PREFIX'],
      }
    prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
    # Compute filename extension: the product extension, or a default
    # for the product type.
    DEFAULT_EXTENSION = {
        'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
        'shared_library': default_variables['SHARED_LIB_SUFFIX'],
        'static_library': default_variables['STATIC_LIB_SUFFIX'],
        'executable': default_variables['EXECUTABLE_SUFFIX'],
      }
    extension = spec.get('product_extension')
    if extension:
      extension = '.' + extension
    else:
      extension = DEFAULT_EXTENSION.get(type, '')
    if 'product_name' in spec:
      # If we were given an explicit name, use that.
      target = spec['product_name']
    else:
      # Otherwise, derive a name from the target name.
      target = spec['target_name']
      if prefix == 'lib':
        # Snip out an extra 'lib' from libs if appropriate.
        target = StripPrefix(target, 'lib')
    if type in ('static_library', 'loadable_module', 'shared_library',
                'executable'):
      return '%s%s%s' % (prefix, target, extension)
    elif type == 'none':
      return '%s.stamp' % target
    else:
      raise Exception('Unhandled output type %s' % type)
  def ComputeOutput(self, spec, type=None):
    """Compute the path for the final output of the spec.

    Chooses between product_dir, the output root, a lib/ subdirectory, or
    the per-target obj/ tree depending on type, flavor, and toolset.
    """
    assert not self.is_mac_bundle or type
    if not type:
      type = spec['type']
    if self.flavor == 'win':
      # MSVS settings may override the output name entirely.
      override = self.msvs_settings.GetOutputName(self.config_name,
                                                  self.ExpandSpecial)
      if override:
        return override
    if self.flavor == 'mac' and type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      filename = self.xcode_settings.GetExecutablePath()
    else:
      filename = self.ComputeOutputFileName(spec, type)
    if 'product_dir' in spec:
      path = os.path.join(spec['product_dir'], filename)
      return self.ExpandSpecial(path)
    # Some products go into the output root, libraries go into shared library
    # dir, and everything else goes into the normal place.
    type_in_output_root = ['executable', 'loadable_module']
    if self.flavor == 'mac' and self.toolset == 'target':
      type_in_output_root += ['shared_library', 'static_library']
    elif self.flavor == 'win' and self.toolset == 'target':
      type_in_output_root += ['shared_library']
    if type in type_in_output_root or self.is_standalone_static_library:
      return filename
    elif type == 'shared_library':
      libdir = 'lib'
      if self.toolset != 'target':
        # Cross-compiled shared libraries get a toolset-specific lib dir.
        libdir = os.path.join('lib', '%s' % self.toolset)
      return os.path.join(libdir, filename)
    else:
      return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, var, values):
assert not isinstance(values, str)
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
  def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
    """Write out a new ninja "rule" statement for a given command.

    Returns the name of the new rule, and a copy of |args| with variables
    expanded."""
    if self.flavor == 'win':
      args = [self.msvs_settings.ConvertVSMacros(
                  arg, self.base_to_build, config=self.config_name)
              for arg in args]
      description = self.msvs_settings.ConvertVSMacros(
          description, config=self.config_name)
    elif self.flavor == 'mac':
      # |env| is an empty list on non-mac.
      args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
      description = gyp.xcode_emulation.ExpandEnvVars(description, env)
    # TODO: we shouldn't need to qualify names; we do it because
    # currently the ninja rule namespace is global, but it really
    # should be scoped to the subninja.
    rule_name = self.name
    if self.toolset == 'target':
      rule_name += '.' + self.toolset
    rule_name += '.' + name
    # Ninja rule names must be simple identifiers.
    rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
    # Remove variable references, but not if they refer to the magic rule
    # variables. This is not quite right, as it also protects these for
    # actions, not just for rules where they are valid. Good enough.
    protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
    protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
    description = re.sub(protect + r'\$', '_', description)
    # gyp dictates that commands are run from the base directory.
    # cd into the directory before running, and adjust paths in
    # the arguments to point to the proper locations.
    rspfile = None
    rspfile_content = None
    args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
    if self.flavor == 'win':
      # Long command lines go through a response file consumed by
      # gyp-win-tool's action wrapper.
      rspfile = rule_name + '.$unique_name.rsp'
      # The cygwin case handles this inside the bash sub-shell.
      run_in = '' if is_cygwin else ' ' + self.build_to_base
      if is_cygwin:
        rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
            args, self.build_to_base)
      else:
        rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
      command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
                 rspfile + run_in)
    else:
      env = self.ComputeExportEnvString(env)
      command = gyp.common.EncodePOSIXShellList(args)
      command = 'cd %s; ' % self.build_to_base + env + command
    # GYP rules/actions express being no-ops by not touching their outputs.
    # Avoid executing downstream dependencies in this case by specifying
    # restat=1 to ninja.
    self.ninja.rule(rule_name, command, description, restat=True,
                    rspfile=rspfile, rspfile_content=rspfile_content)
    self.ninja.newline()
    return rule_name, args
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills in platform-dependent defaults (library prefixes/suffixes, output
  directories) in |default_variables| and, for mac/win, copies generator
  configuration from the Xcode/MSVS generators into this module's globals.
  """
  global generator_additional_non_configuration_keys
  global generator_additional_path_sections
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Ninja generator.
    import gyp.generator.xcode as xcode_generator
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    default_variables['EXECUTABLE_SUFFIX'] = '.exe'
    default_variables['STATIC_LIB_PREFIX'] = ''
    default_variables['STATIC_LIB_SUFFIX'] = '.lib'
    default_variables['SHARED_LIB_PREFIX'] = ''
    default_variables['SHARED_LIB_SUFFIX'] = '.dll'
    generator_flags = params.get('generator_flags', {})

    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    # Set a variable so conditions can be based on msvs_version.
    msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
    default_variables['MSVS_VERSION'] = msvs_version.ShortName()

    # To determine processor word size on Windows, in addition to checking
    # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
    # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
    # contains the actual word size of the system when running thru WOW64).
    if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
        '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
      default_variables['MSVS_OS_BITS'] = 64
    else:
      default_variables['MSVS_OS_BITS'] = 32
  else:
    # Generic POSIX flavors (linux, android, ...).
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'lib'))
    default_variables.setdefault('LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'obj'))
def OpenOutput(path, mode='w'):
  """Open |path| for writing, creating directories if necessary."""
  directory = os.path.dirname(path)
  try:
    os.makedirs(directory)
  except OSError:
    # The directory typically exists already; a real problem (e.g. bad
    # permissions) will surface from open() below anyway.
    pass
  return open(path, mode)
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Write the ninja build files for one configuration.

  Emits <build_dir>/build.ninja containing toolchain variables and the
  shared compile/link rules, plus one subninja per target, and finally the
  short-name and 'all' phony targets.
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})

  # generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier, ninja doesn't put anything here.
  generator_dir = os.path.relpath(params['options'].generator_output or '.')

  # output_dir: relative path from generator_dir to the build directory.
  output_dir = generator_flags.get('output_dir', 'out')

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(os.path.join(generator_dir,
                                            output_dir,
                                            config_name))

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  master_ninja = ninja_syntax.Writer(
      OpenOutput(os.path.join(toplevel_build, 'build.ninja')),
      width=120)
  # On a case-insensitive filesystem the upper-cased probe resolves to the
  # build.ninja just created above, so the path "exists".
  case_sensitive_filesystem = not os.path.exists(
      os.path.join(toplevel_build, 'BUILD.NINJA'))

  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)

  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
  if flavor == 'win':
    cc = 'cl.exe'
    cxx = 'cl.exe'
    ld = 'link.exe'
    gyp.msvs_emulation.GenerateEnvironmentFiles(
        toplevel_build, generator_flags, OpenOutput)
    ld_host = '$ld'
  else:
    cc = 'gcc'
    cxx = 'g++'
    ld = '$cxx'
    ld_host = '$cxx_host'

  cc_host = None
  cxx_host = None
  cc_host_global_setting = None
  cxx_host_global_setting = None

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', [])
  build_to_root = InvertRelativePath(build_dir)
  # 'make_global_settings' entries override the flavor defaults chosen above.
  for key, value in make_global_settings:
    if key == 'CC':
      cc = os.path.join(build_to_root, value)
    if key == 'CXX':
      cxx = os.path.join(build_to_root, value)
    if key == 'LD':
      ld = os.path.join(build_to_root, value)
    if key == 'CC.host':
      cc_host = os.path.join(build_to_root, value)
      cc_host_global_setting = value
    if key == 'CXX.host':
      cxx_host = os.path.join(build_to_root, value)
      cxx_host_global_setting = value
    if key == 'LD.host':
      ld_host = os.path.join(build_to_root, value)

  flock = 'flock'
  if flavor == 'mac':
    flock = './gyp-mac-tool flock'
  # Environment variables have the highest priority (see rules above).
  cc = GetEnvironFallback(['CC_target', 'CC'], cc)
  master_ninja.variable('cc', cc)
  cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  master_ninja.variable('cxx', cxx)
  ld = GetEnvironFallback(['LD_target', 'LD'], ld)

  if not cc_host:
    cc_host = cc
  if not cxx_host:
    cxx_host = cxx

  if flavor == 'win':
    master_ninja.variable('ld', ld)
    master_ninja.variable('idl', 'midl.exe')
    master_ninja.variable('ar', 'lib.exe')
    master_ninja.variable('rc', 'rc.exe')
    master_ninja.variable('asm', 'ml.exe')
    master_ninja.variable('mt', 'mt.exe')
    master_ninja.variable('use_dep_database', '1')
  else:
    # Serialize links through flock: linking is memory-hungry.
    master_ninja.variable('ld', flock + ' linker.lock ' + ld)
    master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))

  master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
  cc_host = GetEnvironFallback(['CC_host'], cc_host)
  cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
  ld_host = GetEnvironFallback(['LD_host'], ld_host)

  # The environment variable could be used in 'make_global_settings', like
  # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
  if '$(CC)' in cc_host and cc_host_global_setting:
    cc_host = cc_host_global_setting.replace('$(CC)', cc)
  if '$(CXX)' in cxx_host and cxx_host_global_setting:
    cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
  master_ninja.variable('cc_host', cc_host)
  master_ninja.variable('cxx_host', cxx_host)
  if flavor == 'win':
    master_ninja.variable('ld_host', ld_host)
  else:
    master_ninja.variable('ld_host', flock + ' linker.lock ' + ld_host)
  master_ninja.newline()

  # Compile rules (C/C++, plus IDL/RC/ASM on Windows).
  if flavor != 'win':
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
               '$cflags_pch_c -c $in -o $out'),
      depfile='$out.d')
    master_ninja.rule(
      'cc_s',
      description='CC $out',
      command=('$cc $defines $includes $cflags $cflags_c '
               '$cflags_pch_c -c $in -o $out'))
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
               '$cflags_pch_cc -c $in -o $out'),
      depfile='$out.d')
  else:
    # Template for compile commands mostly shared between compiling files
    # and generating PCH. In the case of PCH, the "output" is specified by /Fp
    # rather than /Fo (for object files), but we still need to specify an /Fo
    # when compiling PCH.
    cc_template = ('ninja -t msvc -r . -o $out -e $arch '
                   '-- '
                   '$cc /nologo /showIncludes /FC '
                   '@$out.rsp '
                   '$cflags_pch_c /c $in %(outspec)s /Fd$pdbname ')
    cxx_template = ('ninja -t msvc -r . -o $out -e $arch '
                    '-- '
                    '$cxx /nologo /showIncludes /FC '
                    '@$out.rsp '
                    '$cflags_pch_cc /c $in %(outspec)s $pchobj /Fd$pdbname ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_template % {'outspec': '/Fo$out'},
      depfile='$out.d',
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c')
    master_ninja.rule(
      'cc_pch',
      description='CC PCH $out',
      command=cc_template % {'outspec': '/Fp$out /Fo$out.obj'},
      depfile='$out.d',
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c')
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_template % {'outspec': '/Fo$out'},
      depfile='$out.d',
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc')
    master_ninja.rule(
      'cxx_pch',
      description='CXX PCH $out',
      command=cxx_template % {'outspec': '/Fp$out /Fo$out.obj'},
      depfile='$out.d',
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc')
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $in',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes /c /Fo $out $in' %
               sys.executable))

  # Archive/link rules, one branch per platform family.
  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $out $in')

    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %{suffix} which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
        '%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
        '%(solink)s && %(extract_toc)s > ${lib}.tmp && '
        'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ readelf -d ${lib} | grep SONAME ; '
               'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})

    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=(mtime_preserving_solink_base % {
          'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
          '$libs'}))
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=(mtime_preserving_solink_base % {
          'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group $libs'}))
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out -Wl,-rpath=\$$ORIGIN/lib '
               '-Wl,--start-group $in $solibs -Wl,--end-group $libs'))
  elif flavor == 'win':
    master_ninja.rule(
      'alink',
      description='LIB $out',
      command=('%s gyp-win-tool link-wrapper $arch '
               '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
               sys.executable),
      rspfile='$out.rsp',
      rspfile_content='$in_newline $libflags')
    dlldesc = 'LINK(DLL) $dll'
    dllcmd = ('%s gyp-win-tool link-wrapper $arch '
              '$ld /nologo $implibflag /DLL /OUT:$dll '
              '/PDB:$dll.pdb @$dll.rsp' % sys.executable)
    dllcmd += (' && %s gyp-win-tool manifest-wrapper $arch '
               '$mt -nologo -manifest $manifests -out:$dll.manifest' %
               sys.executable)
    master_ninja.rule('solink', description=dlldesc, command=dllcmd,
                      rspfile='$dll.rsp',
                      rspfile_content='$libs $in_newline $ldflags',
                      restat=True)
    master_ninja.rule('solink_module', description=dlldesc, command=dllcmd,
                      rspfile='$dll.rsp',
                      rspfile_content='$libs $in_newline $ldflags',
                      restat=True)
    # Note that ldflags goes at the end so that it has the option of
    # overriding default settings earlier in the command line.
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('%s gyp-win-tool link-wrapper $arch '
               '$ld /nologo /OUT:$out /PDB:$out.pdb @$out.rsp && '
               '%s gyp-win-tool manifest-wrapper $arch '
               '$mt -nologo -manifest $manifests -out:$out.manifest' %
               (sys.executable, sys.executable)),
      rspfile='$out.rsp',
      rspfile_content='$in_newline $libs $ldflags')
  else:
    # Mac-only compile and link rules.
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d')
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d')
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')

    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
        # Always force dependent targets to relink if this library
        # reexports something. Handling this correctly would require
        # recursive TOC dumping but this is rare in practice, so punt.
        'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
        '%(solink)s && %(extract_toc)s > ${lib}.TOC; '
        'else '
        '%(solink)s && %(extract_toc)s > ${lib}.tmp && '
        'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
        'mv ${lib}.tmp ${lib}.TOC ; '
        'fi; '
        'fi'
        % { 'solink': '$ld -shared $ldflags -o $lib %(suffix)s',
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})

    # TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
    # -bundle -single_module here (for osmesa.so).
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=(mtime_preserving_solink_base % {
          'suffix': '$in $solibs $libs$postbuilds'}))
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=(mtime_preserving_solink_base % {
          'suffix': '$in $solibs $libs$postbuilds'}))
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'))
    master_ninja.rule(
      'infoplist',
      description='INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()

  # Targets reachable from the build files requested on the command line.
  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()

  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}
  for qualified_target in target_list:
    # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
    build_file, name, toolset = \
        gyp.common.ParseQualifiedTarget(qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets.")

    spec = target_dicts[qualified_target]
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)

    base_path = os.path.dirname(build_file)
    obj = 'obj'
    if toolset != 'target':
      obj += '.' + toolset
    output_file = os.path.join(obj, base_path, name + '.ninja')

    abs_build_dir = os.path.abspath(toplevel_build)
    writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
                         OpenOutput(os.path.join(toplevel_build, output_file)),
                         flavor, abs_build_dir=abs_build_dir)
    master_ninja.subninja(output_file)

    target = writer.WriteSpec(
        spec, config_name, generator_flags, case_sensitive_filesystem)
    if target:
      if name != target.FinalOutput() and spec['toolset'] == 'target':
        target_short_names.setdefault(name, []).append(target)
      target_outputs[qualified_target] = target
      if qualified_target in all_targets:
        all_outputs.add(target.FinalOutput())

  if target_short_names:
    # Write a short name to build this target. This benefits both the
    # "build chrome" case as well as the gyp tests, which expect to be
    # able to run actions and build libraries by their short name.
    master_ninja.newline()
    master_ninja.comment('Short names for targets.')
    for short_name in target_short_names:
      master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
                                               target_short_names[short_name]])

  if all_outputs:
    master_ninja.newline()
    master_ninja.build('all', 'phony', list(all_outputs))
    master_ninja.default(generator_flags.get('default_target', 'all'))
def PerformBuild(data, configurations, params):
  """Invoke 'ninja -C out/<config>' for each requested configuration.

  Called by gyp when the user requests a build after generation.  |data| is
  unused but is part of the generator interface.  Raises
  subprocess.CalledProcessError if any ninja invocation fails.
  """
  options = params['options']
  for config in configurations:
    builddir = os.path.join(options.toplevel_dir, 'out', config)
    arguments = ['ninja', '-C', builddir]
    # Parenthesized single-argument print behaves identically under
    # Python 2 and also parses under Python 3 (the bare 'print x'
    # statement form does not).
    print('Building [%s]: %s' % (config, arguments))
    subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
  """Multiprocessing worker: generate the output for one configuration.

  |arglist| is the (target_list, target_dicts, data, params, config_name)
  tuple built by GenerateOutput.
  """
  # Ignore the interrupt signal so that the parent process catches it and
  # kills all multiprocessing children.
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  GenerateOutputForConfig(*arglist)
def GenerateOutput(target_list, target_dicts, data, params):
  """Entry point called by gyp: generate ninja files for all configurations.

  If the 'config' generator flag is set, only that configuration is written;
  otherwise every configuration is written, using a process pool when
  params['parallel'] is set.
  """
  user_config = params.get('generator_flags', {}).get('config', None)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    if params['parallel']:
      # Create the pool outside the try so that 'pool' is always bound when
      # the except clause runs (the old code could hit a NameError if
      # Pool() itself was interrupted).
      pool = multiprocessing.Pool(len(config_names))
      try:
        arglists = []
        for config_name in config_names:
          arglists.append(
              (target_list, target_dicts, data, params, config_name))
        pool.map(CallGenerateOutputForConfig, arglists)
        pool.close()
        pool.join()
      except KeyboardInterrupt:
        pool.terminate()
        # Bare 'raise' preserves the original traceback ('raise e' discarded
        # it) and the except syntax is valid on both Python 2 and 3.
        raise
    else:
      for config_name in config_names:
        GenerateOutputForConfig(target_list, target_dicts, data, params,
                                config_name)
| mit |
qma/pants | tests/python/pants_test/backend/python/tasks/checkstyle/test_trailing_whitespace.py | 2 | 2007 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.tasks.checkstyle.trailing_whitespace import TrailingWhitespace
from pants_test.backend.python.tasks.checkstyle.plugin_test_base import CheckstylePluginTestBase
class TrailingWhitespaceTest(CheckstylePluginTestBase):
  """Tests for the TrailingWhitespace checkstyle plugin (T200/T201 nits).

  NOTE(review): the embedded plugin-source strings carry deliberate,
  column-significant whitespace (some injected via .format() so IDEs do not
  strip it) — do not reformat them.
  """
  plugin_type = TrailingWhitespace

  def test_exception_map(self):
    # Each test_input is (line, start_col, end_col); the boolean is whether
    # that span should be treated as an allowed trailing-whitespace
    # exception (i.e. whitespace living inside a string literal).
    for test_input, results in [
        ([9,0,0], False),
        ([3,0,1], False),
        ([3,17,17], False),
        ([3,18,18], True),
        ([3,18,10000], True),  # """ continued strings have no ends
        ([6,8,8], False),
        ([6,19,19], True),
        ([6,19,23], True),
        ([6,23,25], False),  # (" " continued have string termination
        ]:
      tw = self.get_plugin("""
test_string_001 = ""
test_string_002 = " "
test_string_003 = \"\"\"
foo{}
\"\"\"
test_string_006 = (" "
" ")
class Foo(object):
pass
# comment 010
test_string_011 = ''
# comment 012
# comment 013
""".format(' '))  # Add the trailing whitespace with format, so that IDEs don't remove it.
      # The plugin source itself must produce no nits; only the exception
      # map is being probed here.
      self.assertEqual(0, len(list(tw.nits())))
      self.assertEqual(results, bool(tw.has_exception(*test_input)))

  def test_continuation_with_exception(self):
    # A parenthesised string continuation whose first line ends in spaces
    # must still be flagged as trailing whitespace (T200).
    statement = """
test_string_001 = (" "{}
" ")
""".format(' ')  # Add the trailing whitespace with format, so that IDEs don't remove it.
    self.assertNit(statement, 'T200')

  def test_trailing_slash(self):
    # A trailing backslash outside a string literal is a T201 nit; the
    # backslashes inside the triple-quoted string are legitimate shell
    # continuations and must not be flagged.
    statement = """
foo = \\
123
bar = \"\"\"
bin/bash foo \\
bar \\
baz
\"\"\"
"""
    self.assertNit(statement, 'T201', expected_line_number=1)
| apache-2.0 |
GauravSahu/odoo | addons/stock_dropshipping/stock_dropshipping.py | 160 | 2228 | # coding: utf-8
from openerp import models, api, _
from openerp.exceptions import Warning
class sale_order_line(models.Model):
    _inherit = 'sale.order.line'

    @api.multi
    def _check_routing(self, product, warehouse):
        """Skip the stock availability check for dropshipped lines.

        A pull rule that moves goods straight from a supplier location to a
        customer location means the product never enters our stock, so there
        is no availability to verify.
        """
        res = super(sale_order_line, self)._check_routing(product, warehouse)
        if res:
            return res
        for line in self:
            for rule in line.route_id.pull_ids:
                picking_type = rule.picking_type_id
                src_usage = picking_type.default_location_src_id.usage
                dest_usage = picking_type.default_location_dest_id.usage
                if src_usage == 'supplier' and dest_usage == 'customer':
                    return True
        return res
class purchase_order(models.Model):
    _inherit = 'purchase.order'

    @api.one
    def _check_invoice_policy(self):
        # A dropship PO delivers directly to a customer location.  If the PO
        # invoices "Based on incoming shipments" while the linked sale order
        # invoices "On Delivery Order", the same dropship picking would back
        # two invoices at once — which is not implemented, so refuse it.
        if self.invoice_method == 'picking' and self.location_id.usage == 'customer':
            for proc in self.order_line.mapped('procurement_ids'):
                if proc.sale_line_id.order_id.order_policy == 'picking':
                    raise Warning(_('In the case of a dropship route, it is not possible to have an invoicing control set on "Based on incoming shipments" and a sale order with an invoice creation on "On Delivery Order"'))

    @api.multi
    def wkf_confirm_order(self):
        """ Raise a warning to forbid to have both purchase and sale invoices
        policies at delivery in dropshipping. As it is not implemented.
        This check can be disabled setting 'no_invoice_policy_check' in context
        """
        if not self.env.context.get('no_invoice_policy_check'):
            self._check_invoice_policy()
        super(purchase_order, self).wkf_confirm_order()
class procurement_order(models.Model):
_inherit = 'procurement.order'
@api.model
def update_origin_po(self, po, proc):
super(procurement_order, self).update_origin_po(po, proc)
if proc.sale_line_id and not (proc.origin in po.origin):
po.sudo().write({'origin': po.origin+', '+proc.origin})
| agpl-3.0 |
odoomrp/server-tools | super_calendar/__openerp__.py | 9 | 2125 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) All rights reserved:
# (c) 2012-2015 Agile Business Group sagl (<http://www.agilebg.com>)
# Lorenzo Battistini <lorenzo.battistini@agilebg.com>
# (c) 2012 Domsense srl (<http://www.domsense.com>)
# (c) 2015 Anubía, soluciones en la nube,SL (http://www.anubia.es)
# Alejandro Santana <alejandrosantana@anubia.es>
# (c) 2015 Savoir-faire Linux <http://www.savoirfairelinux.com>)
# Agathe Mollé <agathe.molle@savoirfairelinux.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses
#
##############################################################################
{
    'name': 'Super Calendar',
    'version': '0.2',
    'category': 'Generic Modules/Others',
    'summary': 'This module allows to create configurable calendars.',
    'author': ('Agile Business Group, '
               'Alejandro Santana, '
               'Agathe Mollé, '
               'Odoo Community Association (OCA)'),
    'website': 'http://www.agilebg.com',
    'license': 'AGPL-3',
    # Modules that must be installed before this one can load.
    'depends': [
        'base',
        'web_calendar',
    ],
    # Data files loaded at install/update time, in order.
    'data': [
        'views/super_calendar_view.xml',
        'data/cron_data.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'test': [],
    'installable': True,
    'application': True,
    'auto_install': False,
}
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.